]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-arm.c
[PATCH 6/57][Arm][GAS] Add support for MVE instructions: vst/vld{2,4}
[thirdparty/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
49 static struct
50 {
51 symbolS * proc_start;
52 symbolS * table_entry;
53 symbolS * personality_routine;
54 int personality_index;
55 /* The segment containing the function. */
56 segT saved_seg;
57 subsegT saved_subseg;
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes;
60 int opcode_count;
61 int opcode_alloc;
62 /* The number of bytes pushed to the stack. */
63 offsetT frame_size;
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
70 offsetT fp_offset;
71 int fp_reg;
72 /* Nonzero if an unwind_setfp directive has been seen. */
73 unsigned fp_used:1;
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored:1;
76 } unwind;
77
78 /* Whether --fdpic was given. */
79 static int arm_fdpic;
80
81 #endif /* OBJ_ELF */
82
83 /* Results from operand parsing worker functions. */
84
/* Result codes returned by the operand parsing worker functions.
   The NO_BACKTRACK variant signals a failure after which, per its name,
   the caller should not retry alternative parses.  */

typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
91
/* The floating-point ABI variants (see mfloat_abi_opt below).  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
98
99 /* Types of processor to assemble for. */
100 #ifndef CPU_DEFAULT
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
104
105 If you have a target that requires a default CPU option then you
106 should define CPU_DEFAULT here. */
107 #endif
108
109 #ifndef FPU_DEFAULT
110 # ifdef TE_LINUX
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
113 # ifdef OBJ_ELF
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
115 # else
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
118 # endif
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
121 # else
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
124 # endif
125 #endif /* ifndef FPU_DEFAULT */
126
/* Nonzero iff the NUL-terminated strings A and B are equal.  */
#define streq(a, b) (strcmp (a, b) == 0)
128
129 /* Current set of feature bits available (CPU+FPU). Different from
130 selected_cpu + selected_fpu in case of autodetection since the CPU
131 feature bits are then all set. */
132 static arm_feature_set cpu_variant;
133 /* Feature bits used in each execution state. Used to set build attribute
134 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
135 static arm_feature_set arm_arch_used;
136 static arm_feature_set thumb_arch_used;
137
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26 = FALSE;
140 static int atpcs = FALSE;
141 static int support_interwork = FALSE;
142 static int uses_apcs_float = FALSE;
143 static int pic_code = FALSE;
144 static int fix_v4bx = FALSE;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated = TRUE;
147
148 /* Understand CodeComposer Studio assembly syntax. */
149 bfd_boolean codecomposer_syntax = FALSE;
150
151 /* Variables that we set while parsing command-line options. Once all
152 options have been read we re-process these values to set the real
153 assembly flags. */
154
155 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
156 instead of -mcpu=arm1). */
157 static const arm_feature_set *legacy_cpu = NULL;
158 static const arm_feature_set *legacy_fpu = NULL;
159
160 /* CPU, extension and FPU feature bits selected by -mcpu. */
161 static const arm_feature_set *mcpu_cpu_opt = NULL;
162 static arm_feature_set *mcpu_ext_opt = NULL;
163 static const arm_feature_set *mcpu_fpu_opt = NULL;
164
165 /* CPU, extension and FPU feature bits selected by -march. */
166 static const arm_feature_set *march_cpu_opt = NULL;
167 static arm_feature_set *march_ext_opt = NULL;
168 static const arm_feature_set *march_fpu_opt = NULL;
169
170 /* Feature bits selected by -mfpu. */
171 static const arm_feature_set *mfpu_opt = NULL;
172
173 /* Constants for known architecture features. */
174 static const arm_feature_set fpu_default = FPU_DEFAULT;
175 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
176 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
177 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
178 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
179 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
180 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
181 #ifdef OBJ_ELF
182 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
183 #endif
184 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
185
186 #ifdef CPU_DEFAULT
187 static const arm_feature_set cpu_default = CPU_DEFAULT;
188 #endif
189
190 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
191 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
192 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
193 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
194 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
195 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
196 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
197 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
198 static const arm_feature_set arm_ext_v4t_5 =
199 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
200 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
201 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
202 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
203 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
204 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
205 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
206 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
207 /* Only for compatibility of hint instructions. */
208 static const arm_feature_set arm_ext_v6k_v6t2 =
209 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
210 static const arm_feature_set arm_ext_v6_notm =
211 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
212 static const arm_feature_set arm_ext_v6_dsp =
213 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
214 static const arm_feature_set arm_ext_barrier =
215 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
216 static const arm_feature_set arm_ext_msr =
217 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
218 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
219 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
220 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
221 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
222 #ifdef OBJ_ELF
223 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
224 #endif
225 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
226 static const arm_feature_set arm_ext_m =
227 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
228 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
229 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
230 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
231 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
232 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
233 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
234 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
235 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
236 static const arm_feature_set arm_ext_v8m_main =
237 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
238 static const arm_feature_set arm_ext_v8_1m_main =
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
240 /* Instructions in ARMv8-M only found in M profile architectures. */
241 static const arm_feature_set arm_ext_v8m_m_only =
242 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
243 static const arm_feature_set arm_ext_v6t2_v8m =
244 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
245 /* Instructions shared between ARMv8-A and ARMv8-M. */
246 static const arm_feature_set arm_ext_atomics =
247 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
248 #ifdef OBJ_ELF
249 /* DSP instructions Tag_DSP_extension refers to. */
250 static const arm_feature_set arm_ext_dsp =
251 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
252 #endif
253 static const arm_feature_set arm_ext_ras =
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
255 /* FP16 instructions. */
256 static const arm_feature_set arm_ext_fp16 =
257 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
258 static const arm_feature_set arm_ext_fp16_fml =
259 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
260 static const arm_feature_set arm_ext_v8_2 =
261 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
262 static const arm_feature_set arm_ext_v8_3 =
263 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
264 static const arm_feature_set arm_ext_sb =
265 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
266 static const arm_feature_set arm_ext_predres =
267 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
268
269 static const arm_feature_set arm_arch_any = ARM_ANY;
270 #ifdef OBJ_ELF
271 static const arm_feature_set fpu_any = FPU_ANY;
272 #endif
273 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
274 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
275 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
276
277 static const arm_feature_set arm_cext_iwmmxt2 =
278 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
279 static const arm_feature_set arm_cext_iwmmxt =
280 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
281 static const arm_feature_set arm_cext_xscale =
282 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
283 static const arm_feature_set arm_cext_maverick =
284 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
285 static const arm_feature_set fpu_fpa_ext_v1 =
286 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
287 static const arm_feature_set fpu_fpa_ext_v2 =
288 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
289 static const arm_feature_set fpu_vfp_ext_v1xd =
290 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
291 static const arm_feature_set fpu_vfp_ext_v1 =
292 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
293 static const arm_feature_set fpu_vfp_ext_v2 =
294 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
295 static const arm_feature_set fpu_vfp_ext_v3xd =
296 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
297 static const arm_feature_set fpu_vfp_ext_v3 =
298 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
299 static const arm_feature_set fpu_vfp_ext_d32 =
300 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
301 static const arm_feature_set fpu_neon_ext_v1 =
302 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
303 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
304 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
305 static const arm_feature_set mve_ext =
306 ARM_FEATURE_COPROC (FPU_MVE);
307 static const arm_feature_set mve_fp_ext =
308 ARM_FEATURE_COPROC (FPU_MVE_FP);
309 #ifdef OBJ_ELF
310 static const arm_feature_set fpu_vfp_fp16 =
311 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
312 static const arm_feature_set fpu_neon_ext_fma =
313 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
314 #endif
315 static const arm_feature_set fpu_vfp_ext_fma =
316 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
317 static const arm_feature_set fpu_vfp_ext_armv8 =
318 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
319 static const arm_feature_set fpu_vfp_ext_armv8xd =
320 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
321 static const arm_feature_set fpu_neon_ext_armv8 =
322 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
323 static const arm_feature_set fpu_crypto_ext_armv8 =
324 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
325 static const arm_feature_set crc_ext_armv8 =
326 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
327 static const arm_feature_set fpu_neon_ext_v8_1 =
328 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
329 static const arm_feature_set fpu_neon_ext_dotprod =
330 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
331
332 static int mfloat_abi_opt = -1;
333 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
334 directive. */
335 static arm_feature_set selected_arch = ARM_ARCH_NONE;
336 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
337 directive. */
338 static arm_feature_set selected_ext = ARM_ARCH_NONE;
339 /* Feature bits selected by the last -mcpu/-march or by the combination of the
340 last .cpu/.arch directive .arch_extension directives since that
341 directive. */
342 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
343 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
344 static arm_feature_set selected_fpu = FPU_NONE;
345 /* Feature bits selected by the last .object_arch directive. */
346 static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
347 /* Must be long enough to hold any of the names in arm_cpus. */
348 static char selected_cpu_name[20];
349
350 extern FLONUM_TYPE generic_floating_point_number;
351
352 /* Return if no cpu was selected on command-line. */
353 static bfd_boolean
354 no_cpu_selected (void)
355 {
356 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
357 }
358
359 #ifdef OBJ_ELF
360 # ifdef EABI_DEFAULT
361 static int meabi_flags = EABI_DEFAULT;
362 # else
363 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
364 # endif
365
366 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
367
368 bfd_boolean
369 arm_is_eabi (void)
370 {
371 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
372 }
373 #endif
374
375 #ifdef OBJ_ELF
376 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
377 symbolS * GOT_symbol;
378 #endif
379
380 /* 0: assemble for ARM,
381 1: assemble for Thumb,
382 2: assemble for Thumb even though target CPU does not support thumb
383 instructions. */
384 static int thumb_mode = 0;
385 /* A value distinct from the possible values for thumb_mode that we
386 can use to record whether thumb_mode has been copied into the
387 tc_frag_data field of a frag. */
388 #define MODE_RECORDED (1 << 4)
389
/* Specifies the intrinsic IT insn behavior mode.  The ARM and THUMB
   bits may be combined; ALWAYS is their union.  */
enum implicit_it_mode
{
  IMPLICIT_IT_MODE_NEVER  = 0x00,
  IMPLICIT_IT_MODE_ARM    = 0x01,
  IMPLICIT_IT_MODE_THUMB  = 0x02,
  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
};

/* Current implicit-IT mode; defaults to ARM only.  */
static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
399
400 /* If unified_syntax is true, we are processing the new unified
401 ARM/Thumb syntax. Important differences from the old ARM mode:
402
403 - Immediate operands do not require a # prefix.
404 - Conditional affixes always appear at the end of the
405 instruction. (For backward compatibility, those instructions
406 that formerly had them in the middle, continue to accept them
407 there.)
408 - The IT instruction may appear, and if it does is validated
409 against subsequent conditional affixes. It does not generate
410 machine code.
411
412 Important differences from the old Thumb mode:
413
414 - Immediate operands do not require a # prefix.
415 - Most of the V6T2 instructions are only available in unified mode.
416 - The .N and .W suffixes are recognized and honored (it is an error
417 if they cannot be honored).
418 - All instructions set the flags if and only if they have an 's' affix.
419 - Conditional affixes may be used. They are validated against
420 preceding IT instructions. Unlike ARM mode, you cannot use a
421 conditional affix except in the scope of an IT instruction. */
422
423 static bfd_boolean unified_syntax = FALSE;
424
425 /* An immediate operand can start with #, and ld*, st*, pld operands
426 can contain [ and ]. We need to tell APP not to elide whitespace
427 before a [, which can appear as the first operand for pld.
428 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
429 const char arm_symbol_chars[] = "#[]{}";
430
/* Categories of element type carried by Neon/MVE-style type suffixes.  */
enum neon_el_type
{
  NT_invtype,
  NT_untyped,
  NT_integer,
  NT_float,
  NT_poly,
  NT_signed,
  NT_unsigned
};
441
442 struct neon_type_el
443 {
444 enum neon_el_type type;
445 unsigned size;
446 };
447
448 #define NEON_MAX_TYPE_ELS 4
449
450 struct neon_type
451 {
452 struct neon_type_el el[NEON_MAX_TYPE_ELS];
453 unsigned elems;
454 };
455
/* Classification of an instruction with respect to IT blocks and MVE
   VPT/VPST predication blocks.  */
enum pred_instruction_type
{
  OUTSIDE_PRED_INSN,
  INSIDE_VPT_INSN,
  INSIDE_IT_INSN,
  INSIDE_IT_LAST_INSN,
  IF_INSIDE_IT_LAST_INSN,	/* Either outside or inside;
				   if inside, should be the last one.  */
  NEUTRAL_IT_INSN,		/* This could be either inside or outside,
				   i.e. BKPT and NOP.  */
  IT_INSN,			/* The IT insn has been parsed.  */
  VPT_INSN,			/* The VPT/VPST insn has been parsed.  */
  MVE_OUTSIDE_PRED_INSN,	/* Instruction to indicate a MVE instruction
				   without a predication code.  */
  MVE_UNPREDICABLE_INSN		/* MVE instruction that is non-predicable.  */
};
472
473 /* The maximum number of operands we need. */
474 #define ARM_IT_MAX_OPERANDS 6
475 #define ARM_IT_MAX_RELOCS 3
476
477 struct arm_it
478 {
479 const char * error;
480 unsigned long instruction;
481 int size;
482 int size_req;
483 int cond;
484 /* "uncond_value" is set to the value in place of the conditional field in
485 unconditional versions of the instruction, or -1 if nothing is
486 appropriate. */
487 int uncond_value;
488 struct neon_type vectype;
489 /* This does not indicate an actual NEON instruction, only that
490 the mnemonic accepts neon-style type suffixes. */
491 int is_neon;
492 /* Set to the opcode if the instruction needs relaxation.
493 Zero if the instruction is not relaxed. */
494 unsigned long relax;
495 struct
496 {
497 bfd_reloc_code_real_type type;
498 expressionS exp;
499 int pc_rel;
500 } relocs[ARM_IT_MAX_RELOCS];
501
502 enum pred_instruction_type pred_insn_type;
503
504 struct
505 {
506 unsigned reg;
507 signed int imm;
508 struct neon_type_el vectype;
509 unsigned present : 1; /* Operand present. */
510 unsigned isreg : 1; /* Operand was a register. */
511 unsigned immisreg : 1; /* .imm field is a second register. */
512 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
513 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
514 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
515 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
516 instructions. This allows us to disambiguate ARM <-> vector insns. */
517 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
518 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
519 unsigned isquad : 1; /* Operand is SIMD quad register. */
520 unsigned issingle : 1; /* Operand is VFP single-precision register. */
521 unsigned hasreloc : 1; /* Operand has relocation suffix. */
522 unsigned writeback : 1; /* Operand has trailing ! */
523 unsigned preind : 1; /* Preindexed address. */
524 unsigned postind : 1; /* Postindexed address. */
525 unsigned negative : 1; /* Index register was negated. */
526 unsigned shifted : 1; /* Shift applied to operation. */
527 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
528 } operands[ARM_IT_MAX_OPERANDS];
529 };
530
531 static struct arm_it inst;
532
533 #define NUM_FLOAT_VALS 8
534
535 const char * fp_const[] =
536 {
537 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
538 };
539
540 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
541
/* Generic success/failure return values used by helper routines.  */
#define FAIL	(-1)
#define SUCCESS (0)

/* Floating-point format suffix codes.  */
#define SUFF_S 1
#define SUFF_D 2
#define SUFF_E 3
#define SUFF_P 4

#define CP_T_X	 0x00008000
#define CP_T_Y	 0x00400000

/* NB: the same bit position serves both purposes.  */
#define CONDS_BIT	 0x00100000
#define LOAD_BIT	 0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001
557
558 struct asm_cond
559 {
560 const char * template_name;
561 unsigned long value;
562 };
563
564 #define COND_ALWAYS 0xE
565
566 struct asm_psr
567 {
568 const char * template_name;
569 unsigned long field;
570 };
571
572 struct asm_barrier_opt
573 {
574 const char * template_name;
575 unsigned long value;
576 const arm_feature_set arch;
577 };
578
579 /* The bit that distinguishes CPSR and SPSR. */
580 #define SPSR_BIT (1 << 22)
581
582 /* The individual PSR flag bits. */
583 #define PSR_c (1 << 16)
584 #define PSR_x (1 << 17)
585 #define PSR_s (1 << 18)
586 #define PSR_f (1 << 19)
587
588 struct reloc_entry
589 {
590 const char * name;
591 bfd_reloc_code_real_type reloc;
592 };
593
594 enum vfp_reg_pos
595 {
596 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
597 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
598 };
599
600 enum vfp_ldstm_type
601 {
602 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
603 };
604
605 /* Bits for DEFINED field in neon_typed_alias. */
606 #define NTA_HASTYPE 1
607 #define NTA_HASINDEX 2
608
609 struct neon_typed_alias
610 {
611 unsigned char defined;
612 unsigned char index;
613 struct neon_type_el eltype;
614 };
615
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  Each entry should have an error message
   in reg_expected_msgs below.  */
enum arm_reg_type
{
  REG_TYPE_RN,
  REG_TYPE_CP,
  REG_TYPE_CN,
  REG_TYPE_FN,
  REG_TYPE_VFS,
  REG_TYPE_VFD,
  REG_TYPE_NQ,
  REG_TYPE_VFSD,
  REG_TYPE_NDQ,
  REG_TYPE_NSD,
  REG_TYPE_NSDQ,
  REG_TYPE_VFC,
  REG_TYPE_MVF,
  REG_TYPE_MVD,
  REG_TYPE_MVFX,
  REG_TYPE_MVDX,
  REG_TYPE_MVAX,
  REG_TYPE_MQ,
  REG_TYPE_DSPSC,
  REG_TYPE_MMXWR,
  REG_TYPE_MMXWC,
  REG_TYPE_MMXWCG,
  REG_TYPE_XSCALE,
  REG_TYPE_RNB,
};
646
/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn).  Otherwise NEON should be NULL.  */
struct reg_entry
{
  const char *  name;		/* Spelling as written in source.  */
  unsigned int  number;		/* Register number.  */
  unsigned char type;		/* One of enum arm_reg_type.  */
  /* NOTE(review): appears to distinguish predefined registers from
     user-created aliases — confirm against the table-building code.  */
  unsigned char builtin;
  struct neon_typed_alias * neon;
};
659
660 /* Diagnostics used when we don't get a register of the expected type. */
661 const char * const reg_expected_msgs[] =
662 {
663 [REG_TYPE_RN] = N_("ARM register expected"),
664 [REG_TYPE_CP] = N_("bad or missing co-processor number"),
665 [REG_TYPE_CN] = N_("co-processor register expected"),
666 [REG_TYPE_FN] = N_("FPA register expected"),
667 [REG_TYPE_VFS] = N_("VFP single precision register expected"),
668 [REG_TYPE_VFD] = N_("VFP/Neon double precision register expected"),
669 [REG_TYPE_NQ] = N_("Neon quad precision register expected"),
670 [REG_TYPE_VFSD] = N_("VFP single or double precision register expected"),
671 [REG_TYPE_NDQ] = N_("Neon double or quad precision register expected"),
672 [REG_TYPE_NSD] = N_("Neon single or double precision register expected"),
673 [REG_TYPE_NSDQ] = N_("VFP single, double or Neon quad precision register"
674 " expected"),
675 [REG_TYPE_VFC] = N_("VFP system register expected"),
676 [REG_TYPE_MVF] = N_("Maverick MVF register expected"),
677 [REG_TYPE_MVD] = N_("Maverick MVD register expected"),
678 [REG_TYPE_MVFX] = N_("Maverick MVFX register expected"),
679 [REG_TYPE_MVDX] = N_("Maverick MVDX register expected"),
680 [REG_TYPE_MVAX] = N_("Maverick MVAX register expected"),
681 [REG_TYPE_DSPSC] = N_("Maverick DSPSC register expected"),
682 [REG_TYPE_MMXWR] = N_("iWMMXt data register expected"),
683 [REG_TYPE_MMXWC] = N_("iWMMXt control register expected"),
684 [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
685 [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
686 [REG_TYPE_MQ] = N_("MVE vector register expected"),
687 [REG_TYPE_RNB] = N_("")
688 };
689
/* Some well known registers that we refer to directly elsewhere.  */
#define REG_R12	12
#define REG_SP	13
#define REG_LR	14
#define REG_PC	15

/* ARM instructions take 4 bytes in the object file, Thumb instructions
   take 2.  */
#define INSN_SIZE	4
699
700 struct asm_opcode
701 {
702 /* Basic string to match. */
703 const char * template_name;
704
705 /* Parameters to instruction. */
706 unsigned int operands[8];
707
708 /* Conditional tag - see opcode_lookup. */
709 unsigned int tag : 4;
710
711 /* Basic instruction code. */
712 unsigned int avalue;
713
714 /* Thumb-format instruction code. */
715 unsigned int tvalue;
716
717 /* Which architecture variant provides this instruction. */
718 const arm_feature_set * avariant;
719 const arm_feature_set * tvariant;
720
721 /* Function to call to encode instruction in ARM format. */
722 void (* aencode) (void);
723
724 /* Function to call to encode instruction in Thumb format. */
725 void (* tencode) (void);
726
727 /* Indicates whether this instruction may be vector predicated. */
728 unsigned int mayBeVecPred : 1;
729 };
730
/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE	0x02000000
#define OFFSET_REG	0x02000000
#define HWOFFSET_IMM	0x00400000
#define SHIFT_BY_REG	0x00000010
#define PRE_INDEX	0x01000000
#define INDEX_UP	0x00800000
#define WRITE_BACK	0x00200000
#define LDM_TYPE_2_OR_3	0x00400000
#define CPSI_MMOD	0x00020000

#define LITERAL_MASK	0xf000f000
#define OPCODE_MASK	0xfe1fffff
#define V4_STR_BIT	0x00000020
#define VLDR_VMOV_SAME	0x0040f000

#define T2_SUBS_PC_LR	0xf3de8f00

#define DATA_OP_SHIFT	21
#define SBIT_SHIFT	20

#define T2_OPCODE_MASK	0xfe1fffff
#define T2_DATA_OP_SHIFT 21
#define T2_SBIT_SHIFT	 20

#define A_COND_MASK         0xf0000000
#define A_PUSH_POP_OP_MASK  0x0fff0000

/* Opcodes for pushing/popping registers to/from the stack.  */
#define A1_OPCODE_PUSH    0x092d0000
#define A2_OPCODE_PUSH    0x052d0004
#define A2_OPCODE_POP     0x049d0004
763
/* Codes to distinguish the arithmetic instructions.  */
#define OPCODE_AND	0
#define OPCODE_EOR	1
#define OPCODE_SUB	2
#define OPCODE_RSB	3
#define OPCODE_ADD	4
#define OPCODE_ADC	5
#define OPCODE_SBC	6
#define OPCODE_RSC	7
#define OPCODE_TST	8
#define OPCODE_TEQ	9
#define OPCODE_CMP	10
#define OPCODE_CMN	11
#define OPCODE_ORR	12
#define OPCODE_MOV	13
#define OPCODE_BIC	14
#define OPCODE_MVN	15

/* Thumb-2 data-processing op values; note these are not a simple
   renumbering of the ARM codes above.  */
#define T2_OPCODE_AND	0
#define T2_OPCODE_BIC	1
#define T2_OPCODE_ORR	2
#define T2_OPCODE_ORN	3
#define T2_OPCODE_EOR	4
#define T2_OPCODE_ADD	8
#define T2_OPCODE_ADC	10
#define T2_OPCODE_SBC	11
#define T2_OPCODE_SUB	13
#define T2_OPCODE_RSB	14
792
/* 16-bit Thumb instruction opcodes.  */
#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3	0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R	0x4100
#define T_OPCODE_LSL_R	0x4080
#define T_OPCODE_LSR_R	0x40c0
#define T_OPCODE_ROR_R	0x41c0
#define T_OPCODE_ASR_I	0x1000
#define T_OPCODE_LSL_I	0x0000
#define T_OPCODE_LSR_I	0x0800

#define T_OPCODE_MOV_I8	0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH	0xb400
#define T_OPCODE_POP	0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE	2	/* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000
850
/* Diagnostic messages shared by the operand parsers and instruction
   encoders.  */
#define BAD_SYNTAX	_("syntax error")
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_ODD		_("Odd register not allowed here")
#define BAD_EVEN	_("Even register not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_BRANCH_OFF	_("branch out of range or not a multiple of 2")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_NOT_VPT	_("instruction missing MVE vector predication code")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_OUT_VPT	\
	_("vector predicated instruction should be in VPT/VPST block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_VPT_COND	_("incorrect condition in VPT/VPST block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY	_("relocation valid in thumb1 code only")
#define MVE_NOT_IT	_("Warning: instruction is UNPREDICTABLE in an IT " \
			  "block")
#define MVE_NOT_VPT	_("Warning: instruction is UNPREDICTABLE in a VPT " \
			  "block")
#define MVE_BAD_PC	_("Warning: instruction is UNPREDICTABLE with PC" \
			  " operand")
#define MVE_BAD_SP	_("Warning: instruction is UNPREDICTABLE with SP" \
			  " operand")
#define BAD_SIMD_TYPE	_("bad type in SIMD instruction")
#define BAD_MVE_AUTO	\
  _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \
    " use a valid -march or -mcpu option.")
#define BAD_MVE_SRCDEST	_("Warning: 32-bit element size and same destination "\
			  "and source operands makes instruction UNPREDICTABLE")
#define BAD_EL_TYPE	_("bad element type for instruction")
897
/* Hash tables used by the parsers below to look up the various classes
   of mnemonic and operand spellings.  */
static struct hash_control * arm_ops_hsh;	/* Instruction mnemonics.  */
static struct hash_control * arm_cond_hsh;	/* Condition codes.  */
static struct hash_control * arm_vcond_hsh;	/* Vector (VPT) condition codes.  */
static struct hash_control * arm_shift_hsh;	/* Shift operator names.  */
static struct hash_control * arm_psr_hsh;	/* PSR field names.  */
static struct hash_control * arm_v7m_psr_hsh;	/* ARMv7-M PSR names.  */
static struct hash_control * arm_reg_hsh;	/* Register names.  */
static struct hash_control * arm_reloc_hsh;	/* Relocation specifiers.  */
static struct hash_control * arm_barrier_opt_hsh;	/* Barrier options.  */
907
/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
	     <insn>  */

/* The most recently defined label, so an instruction on the same line
   can be associated with it.  */
symbolS * last_label_seen;
/* Nonzero when the label being processed names a Thumb function;
   maintained by directive handlers elsewhere in this file.  */
static int label_is_thumb_function_name = FALSE;
919
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  /* The pending literal values, and how many slots are in use.  */
  expressionS literals [MAX_LITERAL_POOL_SIZE];
  unsigned int next_free_entry;
  /* Identifier of this pool.  */
  unsigned int id;
  /* Symbol placed where the pool will be emitted.  */
  symbolS * symbol;
  /* Section and sub-section this pool belongs to.  */
  segT section;
  subsegT sub_section;
#ifdef OBJ_ELF
  /* Debug line information, one entry per literal.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  /* Next pool in LIST_OF_POOLS (below).  */
  struct literal_pool * next;
  /* Alignment required when the pool is output.  */
  unsigned int alignment;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;
941
/* State machine for the .asmfunc/.endasmfunc directive pair.  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,	/* Not inside an asmfunc body.  */
  WAITING_ASMFUNC_NAME,	/* Directive seen; expecting the function name.  */
  WAITING_ENDASMFUNC	/* Name seen; expecting the closing directive.  */
} asmfunc_states;

static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
950
/* The current IT/VPT predication-block state.  For ELF it is kept in
   the per-segment data so each section tracks its own block state;
   otherwise a single global is used.  */
#ifdef OBJ_ELF
# define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
#else
static struct current_pred now_pred;
#endif
956
957 static inline int
958 now_pred_compatible (int cond)
959 {
960 return (cond & ~1) == (now_pred.cc & ~1);
961 }
962
963 static inline int
964 conditional_insn (void)
965 {
966 return inst.cond != COND_ALWAYS;
967 }
968
/* Forward declarations for the IT/VPT predication state machine,
   defined later in this file.  */
static int in_pred_block (void);

static int handle_pred_state (void);

static void force_automatic_it_block_close (void);

static void it_fsm_post_encode (void);
976
/* Feed TYPE to the predication state machine for the current
   instruction.  NB: both macros execute `return' from the ENCLOSING
   function when handle_pred_state reports failure, so they may only be
   used where that early exit is correct.  */
#define set_pred_insn_type(type)			\
  do						\
    {						\
      inst.pred_insn_type = type;		\
      if (handle_pred_state () == FAIL)		\
	return;					\
    }						\
  while (0)

/* As set_pred_insn_type, for use in non-void functions: returns
   FAILRET from the enclosing function on failure.  */
#define set_pred_insn_type_nonvoid(type, failret) \
  do						\
    {						\
      inst.pred_insn_type = type;		\
      if (handle_pred_state () == FAIL)		\
	return failret;				\
    }						\
  while(0)

/* Mark the current instruction as the last one in its predication
   block, distinguishing unconditional from conditional instructions.  */
#define set_pred_insn_type_last()			\
  do						\
    {						\
      if (inst.cond == COND_ALWAYS)			\
	set_pred_insn_type (IF_INSIDE_IT_LAST_INSN);	\
      else					\
	set_pred_insn_type (INSIDE_IT_LAST_INSN);	\
    }						\
  while (0)
1004
/* Pure syntax.  */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or    0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* Note: advances over at most ONE space character, and only a space --
   not tabs or other whitespace.  */
#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
1039
1040 static inline int
1041 skip_past_char (char ** str, char c)
1042 {
1043 /* PR gas/14987: Allow for whitespace before the expected character. */
1044 skip_whitespace (*str);
1045
1046 if (**str == c)
1047 {
1048 (*str)++;
1049 return SUCCESS;
1050 }
1051 else
1052 return FAIL;
1053 }
1054
1055 #define skip_past_comma(str) skip_past_char (str, ',')
1056
1057 /* Arithmetic expressions (possibly involving symbols). */
1058
1059 /* Return TRUE if anything in the expression is a bignum. */
1060
1061 static bfd_boolean
1062 walk_no_bignums (symbolS * sp)
1063 {
1064 if (symbol_get_value_expression (sp)->X_op == O_big)
1065 return TRUE;
1066
1067 if (symbol_get_value_expression (sp)->X_add_symbol)
1068 {
1069 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
1070 || (symbol_get_value_expression (sp)->X_op_symbol
1071 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
1072 }
1073
1074 return FALSE;
1075 }
1076
/* TRUE while expression () is running on behalf of my_get_expression,
   so that md_operand can mark unparseable operands as errors.  */
static bfd_boolean in_my_get_expression = FALSE;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0	/* No '#'/'$' prefix is consumed.  */
#define GE_IMM_PREFIX 1	/* A '#'/'$' prefix is required.  */
#define GE_OPT_PREFIX 2	/* A '#'/'$' prefix is optional.  */
/* This is a bit of a hack.  Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
1086
/* Parse an arithmetic expression (possibly involving symbols) at *STR
   into EP, applying the immediate-prefix policy PREFIX_MODE (one of the
   GE_* values above).  On success returns SUCCESS and advances *STR
   past the expression; on failure returns a non-SUCCESS value and, if
   not already set, records a message in inst.error.
   NOTE(review): the failure paths inconsistently return FAIL or the
   literal 1; callers appear to test for non-zero rather than a specific
   value -- confirm before relying on the exact result.  */

static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  /* Enforce (or consume) the immediate prefix.  */
  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Run the generic expression parser over our operand text; while
     in_my_get_expression is set, md_operand flags anything the parser
     cannot handle as O_illegal.  input_line_pointer is saved and
     restored around the call.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = TRUE;
  expression (ep);
  in_my_get_expression = FALSE;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1156
1157 /* Turn a string in input_line_pointer into a floating point constant
1158 of type TYPE, and store the appropriate bytes in *LITP. The number
1159 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1160 returned, or NULL on OK.
1161
   Note that fp constants aren't represented in the normal way on the ARM.
1163 In big endian mode, things are as expected. However, in little endian
1164 mode fp constants are big-endian word-wise, and little-endian byte-wise
1165 within the words. For example, (double) 1.1 in big endian mode is
1166 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1167 the byte sequence 99 99 f1 3f 9a 99 99 99.
1168
1169 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1170
1171 const char *
1172 md_atof (int type, char * litP, int * sizeP)
1173 {
1174 int prec;
1175 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1176 char *t;
1177 int i;
1178
1179 switch (type)
1180 {
1181 case 'f':
1182 case 'F':
1183 case 's':
1184 case 'S':
1185 prec = 2;
1186 break;
1187
1188 case 'd':
1189 case 'D':
1190 case 'r':
1191 case 'R':
1192 prec = 4;
1193 break;
1194
1195 case 'x':
1196 case 'X':
1197 prec = 5;
1198 break;
1199
1200 case 'p':
1201 case 'P':
1202 prec = 5;
1203 break;
1204
1205 default:
1206 *sizeP = 0;
1207 return _("Unrecognized or unsupported floating point constant");
1208 }
1209
1210 t = atof_ieee (input_line_pointer, type, words);
1211 if (t)
1212 input_line_pointer = t;
1213 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1214
1215 if (target_big_endian)
1216 {
1217 for (i = 0; i < prec; i++)
1218 {
1219 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1220 litP += sizeof (LITTLENUM_TYPE);
1221 }
1222 }
1223 else
1224 {
1225 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1226 for (i = prec - 1; i >= 0; i--)
1227 {
1228 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1229 litP += sizeof (LITTLENUM_TYPE);
1230 }
1231 else
1232 /* For a 4 byte float the order of elements in `words' is 1 0.
1233 For an 8 byte float the order is 1 0 3 2. */
1234 for (i = 0; i < prec; i += 2)
1235 {
1236 md_number_to_chars (litP, (valueT) words[i + 1],
1237 sizeof (LITTLENUM_TYPE));
1238 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1239 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1240 litP += 2 * sizeof (LITTLENUM_TYPE);
1241 }
1242 }
1243
1244 return NULL;
1245 }
1246
1247 /* We handle all bad expressions here, so that we can report the faulty
1248 instruction in the error message. */
1249
1250 void
1251 md_operand (expressionS * exp)
1252 {
1253 if (in_my_get_expression)
1254 exp->X_op = O_illegal;
1255 }
1256
1257 /* Immediate values. */
1258
1259 #ifdef OBJ_ELF
1260 /* Generic immediate-value read function for use in directives.
1261 Accepts anything that 'expression' can fold to a constant.
1262 *val receives the number. */
1263
1264 static int
1265 immediate_for_directive (int *val)
1266 {
1267 expressionS exp;
1268 exp.X_op = O_illegal;
1269
1270 if (is_immediate_prefix (*input_line_pointer))
1271 {
1272 input_line_pointer++;
1273 expression (&exp);
1274 }
1275
1276 if (exp.X_op != O_constant)
1277 {
1278 as_bad (_("expected #constant"));
1279 ignore_rest_of_line ();
1280 return FAIL;
1281 }
1282 *val = exp.X_add_number;
1283 return SUCCESS;
1284 }
1285 #endif
1286
1287 /* Register parsing. */
1288
1289 /* Generic register parser. CCP points to what should be the
1290 beginning of a register name. If it is indeed a valid register
1291 name, advance CCP over it and return the reg_entry structure;
1292 otherwise return NULL. Does not issue diagnostics. */
1293
1294 static struct reg_entry *
1295 arm_reg_parse_multi (char **ccp)
1296 {
1297 char *start = *ccp;
1298 char *p;
1299 struct reg_entry *reg;
1300
1301 skip_whitespace (start);
1302
1303 #ifdef REGISTER_PREFIX
1304 if (*start != REGISTER_PREFIX)
1305 return NULL;
1306 start++;
1307 #endif
1308 #ifdef OPTIONAL_REGISTER_PREFIX
1309 if (*start == OPTIONAL_REGISTER_PREFIX)
1310 start++;
1311 #endif
1312
1313 p = start;
1314 if (!ISALPHA (*p) || !is_name_beginner (*p))
1315 return NULL;
1316
1317 do
1318 p++;
1319 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1320
1321 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1322
1323 if (!reg)
1324 return NULL;
1325
1326 *ccp = p;
1327 return reg;
1328 }
1329
/* Handle the alternative spellings accepted for a few register classes
   when a canonical-name lookup has failed or yielded a different class.
   START points at the beginning of the candidate text, *CCP at the
   current parse position, and REG at the entry found by
   arm_reg_parse_multi (may be NULL).  Returns the register number, or
   FAIL.  */

static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	/* NOTE(review): if digits were consumed but the value exceeds
	   15, *CCP has still been advanced past them; callers recover
	   by restoring their saved position on FAIL.  */
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1368
1369 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1370 return value is the register number or FAIL. */
1371
1372 static int
1373 arm_reg_parse (char **ccp, enum arm_reg_type type)
1374 {
1375 char *start = *ccp;
1376 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1377 int ret;
1378
1379 /* Do not allow a scalar (reg+index) to parse as a register. */
1380 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1381 return FAIL;
1382
1383 if (reg && reg->type == type)
1384 return reg->number;
1385
1386 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1387 return ret;
1388
1389 *ccp = start;
1390 return FAIL;
1391 }
1392
1393 /* Parse a Neon type specifier. *STR should point at the leading '.'
1394 character. Does no verification at this stage that the type fits the opcode
1395 properly. E.g.,
1396
1397 .i32.i32.s16
1398 .s32.f32
1399 .u16
1400
1401 Can all be legally parsed by this function.
1402
1403 Fills in neon_type struct pointer with parsed information, and updates STR
1404 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1405 type, FAIL if not. */
1406
1407 static int
1408 parse_neon_type (struct neon_type *type, char **str)
1409 {
1410 char *ptr = *str;
1411
1412 if (type)
1413 type->elems = 0;
1414
1415 while (type->elems < NEON_MAX_TYPE_ELS)
1416 {
1417 enum neon_el_type thistype = NT_untyped;
1418 unsigned thissize = -1u;
1419
1420 if (*ptr != '.')
1421 break;
1422
1423 ptr++;
1424
1425 /* Just a size without an explicit type. */
1426 if (ISDIGIT (*ptr))
1427 goto parsesize;
1428
1429 switch (TOLOWER (*ptr))
1430 {
1431 case 'i': thistype = NT_integer; break;
1432 case 'f': thistype = NT_float; break;
1433 case 'p': thistype = NT_poly; break;
1434 case 's': thistype = NT_signed; break;
1435 case 'u': thistype = NT_unsigned; break;
1436 case 'd':
1437 thistype = NT_float;
1438 thissize = 64;
1439 ptr++;
1440 goto done;
1441 default:
1442 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1443 return FAIL;
1444 }
1445
1446 ptr++;
1447
1448 /* .f is an abbreviation for .f32. */
1449 if (thistype == NT_float && !ISDIGIT (*ptr))
1450 thissize = 32;
1451 else
1452 {
1453 parsesize:
1454 thissize = strtoul (ptr, &ptr, 10);
1455
1456 if (thissize != 8 && thissize != 16 && thissize != 32
1457 && thissize != 64)
1458 {
1459 as_bad (_("bad size %d in type specifier"), thissize);
1460 return FAIL;
1461 }
1462 }
1463
1464 done:
1465 if (type)
1466 {
1467 type->el[type->elems].type = thistype;
1468 type->el[type->elems].size = thissize;
1469 type->elems++;
1470 }
1471 }
1472
1473 /* Empty/missing type is not a successful parse. */
1474 if (type->elems == 0)
1475 return FAIL;
1476
1477 *str = ptr;
1478
1479 return SUCCESS;
1480 }
1481
1482 /* Errors may be set multiple times during parsing or bit encoding
1483 (particularly in the Neon bits), but usually the earliest error which is set
1484 will be the most meaningful. Avoid overwriting it with later (cascading)
1485 errors by calling this function. */
1486
1487 static void
1488 first_error (const char *err)
1489 {
1490 if (!inst.error)
1491 inst.error = err;
1492 }
1493
1494 /* Parse a single type, e.g. ".s32", leading period included. */
1495 static int
1496 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1497 {
1498 char *str = *ccp;
1499 struct neon_type optype;
1500
1501 if (*str == '.')
1502 {
1503 if (parse_neon_type (&optype, &str) == SUCCESS)
1504 {
1505 if (optype.elems == 1)
1506 *vectype = optype.el[0];
1507 else
1508 {
1509 first_error (_("only one type should be specified for operand"));
1510 return FAIL;
1511 }
1512 }
1513 else
1514 {
1515 first_error (_("vector type expected"));
1516 return FAIL;
1517 }
1518 }
1519 else
1520 return FAIL;
1521
1522 *ccp = str;
1523
1524 return SUCCESS;
1525 }
1526
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

/* "[]" was given: the operation applies to all lanes.  */
#define NEON_ALL_LANES 15
/* NOTE(review): not used within this part of the file; presumably
   marks interleaved element lists -- confirm at the use sites.  */
#define NEON_INTERLEAVE_LANES 14
1532
1533 /* Record a use of the given feature. */
1534 static void
1535 record_feature_use (const arm_feature_set *feature)
1536 {
1537 if (thumb_mode)
1538 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
1539 else
1540 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
1541 }
1542
1543 /* If the given feature available in the selected CPU, mark it as used.
1544 Returns TRUE iff feature is available. */
1545 static bfd_boolean
1546 mark_feature_used (const arm_feature_set *feature)
1547 {
1548
1549 /* Do not support the use of MVE only instructions when in auto-detection or
1550 -march=all. */
1551 if (((feature == &mve_ext) || (feature == &mve_fp_ext))
1552 && ARM_CPU_IS_ANY (cpu_variant))
1553 {
1554 first_error (BAD_MVE_AUTO);
1555 return FALSE;
1556 }
1557 /* Ensure the option is valid on the current architecture. */
1558 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
1559 return FALSE;
1560
1561 /* Add the appropriate architecture feature for the barrier option used.
1562 */
1563 record_feature_use (feature);
1564
1565 return TRUE;
1566 }
1567
1568 /* Parse either a register or a scalar, with an optional type. Return the
1569 register number, and optionally fill in the actual type of the register
1570 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1571 type/index information in *TYPEINFO. */
1572
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start from "no type, no index".  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register.  Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  /* MVE Q registers: spelt like Neon Q registers, but only valid when
     the MVE extension is present.  */
  if (type == REG_TYPE_MQ)
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	return FAIL;

      /* NOTE(review): REG is known non-NULL here (checked above), so
	 the !reg test is redundant.  */
      if (!reg || reg->type != REG_TYPE_NQ)
	return FAIL;

      if (reg->number > 14 && !mark_feature_used (&fpu_vfp_ext_d32))
	{
	  first_error (_("expected MVE register [q0..q7]"));
	  return FAIL;
	}
      type = REG_TYPE_NQ;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (type == REG_TYPE_NQ))
    return FAIL;


  if (type != reg->type)
    return FAIL;

  /* Pick up any type/index attached to this register by an alias.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix may be given, but must not conflict with
     a type already fixed by the alias.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional "[index]" (or "[]" for all lanes) scalar suffix.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2)))
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* "[]" means every lane.  */
      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1700
1701 /* Like arm_reg_parse, but also allow the following extra features:
1702 - If RTYPE is non-zero, return the (possibly restricted) type of the
1703 register (e.g. Neon double or quad reg when either has been requested).
1704 - If this is a Neon vector type with additional type information, fill
1705 in the struct pointed to by VECTYPE (if non-NULL).
1706 This function will fault on encountering a scalar. */
1707
1708 static int
1709 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1710 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1711 {
1712 struct neon_typed_alias atype;
1713 char *str = *ccp;
1714 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1715
1716 if (reg == FAIL)
1717 return FAIL;
1718
1719 /* Do not allow regname(... to parse as a register. */
1720 if (*str == '(')
1721 return FAIL;
1722
1723 /* Do not allow a scalar (reg+index) to parse as a register. */
1724 if ((atype.defined & NTA_HASINDEX) != 0)
1725 {
1726 first_error (_("register operand expected, but got scalar"));
1727 return FAIL;
1728 }
1729
1730 if (vectype)
1731 *vectype = atype.eltype;
1732
1733 *ccp = str;
1734
1735 return reg;
1736 }
1737
/* Decompose the value returned by parse_scalar below: the register
   number is in the high bits, the lane index in the low four bits.  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1740
1741 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1742 have enough information to be able to do a good job bounds-checking. So, we
1743 just do easy checks here, and do further checks later. */
1744
1745 static int
1746 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1747 {
1748 int reg;
1749 char *str = *ccp;
1750 struct neon_typed_alias atype;
1751 enum arm_reg_type reg_type = REG_TYPE_VFD;
1752
1753 if (elsize == 4)
1754 reg_type = REG_TYPE_VFS;
1755
1756 reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);
1757
1758 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1759 return FAIL;
1760
1761 if (atype.index == NEON_ALL_LANES)
1762 {
1763 first_error (_("scalar must have an index"));
1764 return FAIL;
1765 }
1766 else if (atype.index >= 64 / elsize)
1767 {
1768 first_error (_("scalar index out of range"));
1769 return FAIL;
1770 }
1771
1772 if (type)
1773 *type = atype.eltype;
1774
1775 *ccp = str;
1776
1777 return reg * 16 + atype.index;
1778 }
1779
1780 /* Types of registers in a list. */
1781
enum reg_list_els
{
  REGLIST_RN,		/* Core registers.  */
  REGLIST_CLRM,		/* Core registers for CLRM: r0-r12, lr, APSR.  */
  REGLIST_VFP_S,	/* Single-precision VFP registers.  */
  REGLIST_VFP_S_VPR,	/* Single-precision VFP registers plus VPR.  */
  REGLIST_VFP_D,	/* Double-precision VFP registers.  */
  REGLIST_VFP_D_VPR,	/* Double-precision VFP registers plus VPR.  */
  REGLIST_NEON_D	/* Neon D (or paired Q) registers.  */
};
1792
1793 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1794
1795 static long
1796 parse_reg_list (char ** strp, enum reg_list_els etype)
1797 {
1798 char *str = *strp;
1799 long range = 0;
1800 int another_range;
1801
1802 gas_assert (etype == REGLIST_RN || etype == REGLIST_CLRM);
1803
1804 /* We come back here if we get ranges concatenated by '+' or '|'. */
1805 do
1806 {
1807 skip_whitespace (str);
1808
1809 another_range = 0;
1810
1811 if (*str == '{')
1812 {
1813 int in_range = 0;
1814 int cur_reg = -1;
1815
1816 str++;
1817 do
1818 {
1819 int reg;
1820 const char apsr_str[] = "apsr";
1821 int apsr_str_len = strlen (apsr_str);
1822
1823 reg = arm_reg_parse (&str, REGLIST_RN);
1824 if (etype == REGLIST_CLRM)
1825 {
1826 if (reg == REG_SP || reg == REG_PC)
1827 reg = FAIL;
1828 else if (reg == FAIL
1829 && !strncasecmp (str, apsr_str, apsr_str_len)
1830 && !ISALPHA (*(str + apsr_str_len)))
1831 {
1832 reg = 15;
1833 str += apsr_str_len;
1834 }
1835
1836 if (reg == FAIL)
1837 {
1838 first_error (_("r0-r12, lr or APSR expected"));
1839 return FAIL;
1840 }
1841 }
1842 else /* etype == REGLIST_RN. */
1843 {
1844 if (reg == FAIL)
1845 {
1846 first_error (_(reg_expected_msgs[REGLIST_RN]));
1847 return FAIL;
1848 }
1849 }
1850
1851 if (in_range)
1852 {
1853 int i;
1854
1855 if (reg <= cur_reg)
1856 {
1857 first_error (_("bad range in register list"));
1858 return FAIL;
1859 }
1860
1861 for (i = cur_reg + 1; i < reg; i++)
1862 {
1863 if (range & (1 << i))
1864 as_tsktsk
1865 (_("Warning: duplicated register (r%d) in register list"),
1866 i);
1867 else
1868 range |= 1 << i;
1869 }
1870 in_range = 0;
1871 }
1872
1873 if (range & (1 << reg))
1874 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1875 reg);
1876 else if (reg <= cur_reg)
1877 as_tsktsk (_("Warning: register range not in ascending order"));
1878
1879 range |= 1 << reg;
1880 cur_reg = reg;
1881 }
1882 while (skip_past_comma (&str) != FAIL
1883 || (in_range = 1, *str++ == '-'));
1884 str--;
1885
1886 if (skip_past_char (&str, '}') == FAIL)
1887 {
1888 first_error (_("missing `}'"));
1889 return FAIL;
1890 }
1891 }
1892 else if (etype == REGLIST_RN)
1893 {
1894 expressionS exp;
1895
1896 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1897 return FAIL;
1898
1899 if (exp.X_op == O_constant)
1900 {
1901 if (exp.X_add_number
1902 != (exp.X_add_number & 0x0000ffff))
1903 {
1904 inst.error = _("invalid register mask");
1905 return FAIL;
1906 }
1907
1908 if ((range & exp.X_add_number) != 0)
1909 {
1910 int regno = range & exp.X_add_number;
1911
1912 regno &= -regno;
1913 regno = (1 << regno) - 1;
1914 as_tsktsk
1915 (_("Warning: duplicated register (r%d) in register list"),
1916 regno);
1917 }
1918
1919 range |= exp.X_add_number;
1920 }
1921 else
1922 {
1923 if (inst.relocs[0].type != 0)
1924 {
1925 inst.error = _("expression too complex");
1926 return FAIL;
1927 }
1928
1929 memcpy (&inst.relocs[0].exp, &exp, sizeof (expressionS));
1930 inst.relocs[0].type = BFD_RELOC_ARM_MULTI;
1931 inst.relocs[0].pc_rel = 0;
1932 }
1933 }
1934
1935 if (*str == '|' || *str == '+')
1936 {
1937 str++;
1938 another_range = 1;
1939 }
1940 }
1941 while (another_range);
1942
1943 *strp = str;
1944 return range;
1945 }
1946
1947 /* Parse a VFP register list. If the string is invalid return FAIL.
1948 Otherwise return the number of registers, and set PBASE to the first
1949 register. Parses registers of type ETYPE.
1950 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1951 - Q registers can be used to specify pairs of D registers
1952 - { } can be omitted from around a singleton register list
1953 FIXME: This is not implemented, as it would require backtracking in
1954 some cases, e.g.:
1955 vtbl.8 d3,d4,d5
1956 This could be done (the meaning isn't really ambiguous), but doesn't
1957 fit in well with the current parsing framework.
1958 - 32 D registers may be used (also true for VFPv3).
1959 FIXME: Types are ignored in these register lists, which is probably a
1960 bug. */
1961
1962 static int
1963 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype,
1964 bfd_boolean *partial_match)
1965 {
1966 char *str = *ccp;
1967 int base_reg;
1968 int new_base;
1969 enum arm_reg_type regtype = (enum arm_reg_type) 0;
1970 int max_regs = 0;
1971 int count = 0;
1972 int warned = 0;
1973 unsigned long mask = 0;
1974 int i;
1975 bfd_boolean vpr_seen = FALSE;
1976 bfd_boolean expect_vpr =
1977 (etype == REGLIST_VFP_S_VPR) || (etype == REGLIST_VFP_D_VPR);
1978
1979 if (skip_past_char (&str, '{') == FAIL)
1980 {
1981 inst.error = _("expecting {");
1982 return FAIL;
1983 }
1984
1985 switch (etype)
1986 {
1987 case REGLIST_VFP_S:
1988 case REGLIST_VFP_S_VPR:
1989 regtype = REG_TYPE_VFS;
1990 max_regs = 32;
1991 break;
1992
1993 case REGLIST_VFP_D:
1994 case REGLIST_VFP_D_VPR:
1995 regtype = REG_TYPE_VFD;
1996 break;
1997
1998 case REGLIST_NEON_D:
1999 regtype = REG_TYPE_NDQ;
2000 break;
2001
2002 default:
2003 gas_assert (0);
2004 }
2005
2006 if (etype != REGLIST_VFP_S && etype != REGLIST_VFP_S_VPR)
2007 {
2008 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
2009 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
2010 {
2011 max_regs = 32;
2012 if (thumb_mode)
2013 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
2014 fpu_vfp_ext_d32);
2015 else
2016 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
2017 fpu_vfp_ext_d32);
2018 }
2019 else
2020 max_regs = 16;
2021 }
2022
2023 base_reg = max_regs;
2024 *partial_match = FALSE;
2025
2026 do
2027 {
2028 int setmask = 1, addregs = 1;
2029 const char vpr_str[] = "vpr";
2030 int vpr_str_len = strlen (vpr_str);
2031
2032 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
2033
2034 if (expect_vpr)
2035 {
2036 if (new_base == FAIL
2037 && !strncasecmp (str, vpr_str, vpr_str_len)
2038 && !ISALPHA (*(str + vpr_str_len))
2039 && !vpr_seen)
2040 {
2041 vpr_seen = TRUE;
2042 str += vpr_str_len;
2043 if (count == 0)
2044 base_reg = 0; /* Canonicalize VPR only on d0 with 0 regs. */
2045 }
2046 else if (vpr_seen)
2047 {
2048 first_error (_("VPR expected last"));
2049 return FAIL;
2050 }
2051 else if (new_base == FAIL)
2052 {
2053 if (regtype == REG_TYPE_VFS)
2054 first_error (_("VFP single precision register or VPR "
2055 "expected"));
2056 else /* regtype == REG_TYPE_VFD. */
2057 first_error (_("VFP/Neon double precision register or VPR "
2058 "expected"));
2059 return FAIL;
2060 }
2061 }
2062 else if (new_base == FAIL)
2063 {
2064 first_error (_(reg_expected_msgs[regtype]));
2065 return FAIL;
2066 }
2067
2068 *partial_match = TRUE;
2069 if (vpr_seen)
2070 continue;
2071
2072 if (new_base >= max_regs)
2073 {
2074 first_error (_("register out of range in list"));
2075 return FAIL;
2076 }
2077
2078 /* Note: a value of 2 * n is returned for the register Q<n>. */
2079 if (regtype == REG_TYPE_NQ)
2080 {
2081 setmask = 3;
2082 addregs = 2;
2083 }
2084
2085 if (new_base < base_reg)
2086 base_reg = new_base;
2087
2088 if (mask & (setmask << new_base))
2089 {
2090 first_error (_("invalid register list"));
2091 return FAIL;
2092 }
2093
2094 if ((mask >> new_base) != 0 && ! warned && !vpr_seen)
2095 {
2096 as_tsktsk (_("register list not in ascending order"));
2097 warned = 1;
2098 }
2099
2100 mask |= setmask << new_base;
2101 count += addregs;
2102
2103 if (*str == '-') /* We have the start of a range expression */
2104 {
2105 int high_range;
2106
2107 str++;
2108
2109 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
2110 == FAIL)
2111 {
2112 inst.error = gettext (reg_expected_msgs[regtype]);
2113 return FAIL;
2114 }
2115
2116 if (high_range >= max_regs)
2117 {
2118 first_error (_("register out of range in list"));
2119 return FAIL;
2120 }
2121
2122 if (regtype == REG_TYPE_NQ)
2123 high_range = high_range + 1;
2124
2125 if (high_range <= new_base)
2126 {
2127 inst.error = _("register range not in ascending order");
2128 return FAIL;
2129 }
2130
2131 for (new_base += addregs; new_base <= high_range; new_base += addregs)
2132 {
2133 if (mask & (setmask << new_base))
2134 {
2135 inst.error = _("invalid register list");
2136 return FAIL;
2137 }
2138
2139 mask |= setmask << new_base;
2140 count += addregs;
2141 }
2142 }
2143 }
2144 while (skip_past_comma (&str) != FAIL);
2145
2146 str++;
2147
2148 /* Sanity check -- should have raised a parse error above. */
2149 if ((!vpr_seen && count == 0) || count > max_regs)
2150 abort ();
2151
2152 *pbase = base_reg;
2153
2154 if (expect_vpr && !vpr_seen)
2155 {
2156 first_error (_("VPR expected last"));
2157 return FAIL;
2158 }
2159
2160 /* Final test -- the registers must be consecutive. */
2161 mask >>= base_reg;
2162 for (i = 0; i < count; i++)
2163 {
2164 if ((mask & (1u << i)) == 0)
2165 {
2166 inst.error = _("non-contiguous register range");
2167 return FAIL;
2168 }
2169 }
2170
2171 *ccp = str;
2172
2173 return count;
2174 }
2175
2176 /* True if two alias types are the same. */
2177
2178 static bfd_boolean
2179 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
2180 {
2181 if (!a && !b)
2182 return TRUE;
2183
2184 if (!a || !b)
2185 return FALSE;
2186
2187 if (a->defined != b->defined)
2188 return FALSE;
2189
2190 if ((a->defined & NTA_HASTYPE) != 0
2191 && (a->eltype.type != b->eltype.type
2192 || a->eltype.size != b->eltype.size))
2193 return FALSE;
2194
2195 if ((a->defined & NTA_HASINDEX) != 0
2196 && (a->index != b->index))
2197 return FALSE;
2198
2199 return TRUE;
2200 }
2201
2202 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2203 The base register is put in *PBASE.
2204 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2205 the return value.
2206 The register stride (minus one) is put in bit 4 of the return value.
2207 Bits [6:5] encode the list length (minus one).
2208 The type of the list elements is put in *ELTYPE, if non-NULL. */
2209
2210 #define NEON_LANE(X) ((X) & 0xf)
2211 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2212 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2213
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   int mve,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;	/* First register in the list (D<n>, or 2*n for Q<n>).  */
  int reg_incr = -1;	/* Register stride; -1 until fixed by second register.  */
  int count = 0;	/* Number of D registers accumulated so far.  */
  int lane = -1;	/* Lane index, or one of the NEON_*_LANES constants.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  /* MVE only allows unit stride, so it gets its own diagnostic.  */
  const char *const incr_error = mve ? _("register stride must be 1") :
    _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  /* The brace is optional for a single-register list.  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      /* MVE vector registers are always parsed as Q registers.  */
      if (mve)
	rtype = REG_TYPE_MQ;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: record it and its type for later
	     consistency checks.  Q registers span two D registers, so
	     their stride is fixed at 1 (Q-register units) here.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: its distance from the first fixes the
	     stride for the whole list.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Subsequent registers must continue the established
	     arithmetic progression.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      /* Every entry must carry the same element type (and lane index
	 status) as the first.  */
      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes. */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register in the range; a Q range contributes
	     two D registers per step.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax. */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All indexed entries must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  /* Mixing indexed and non-indexed entries is not allowed.  */
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures. */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  MVE lists may exceed four registers; plain Neon
     lists may not.  */
  if (lane == -1 || base_reg == -1 || count < 1 || (!mve && count > 4)
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as documented above the function.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2374
2375 /* Parse an explicit relocation suffix on an expression. This is
2376 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2377 arm_reloc_hsh contains no entries, so this function can only
2378 succeed if there is no () after the word. Returns -1 on error,
2379 BFD_RELOC_UNUSED if there wasn't any suffix. */
2380
2381 static int
2382 parse_reloc (char **str)
2383 {
2384 struct reloc_entry *r;
2385 char *p, *q;
2386
2387 if (**str != '(')
2388 return BFD_RELOC_UNUSED;
2389
2390 p = *str + 1;
2391 q = p;
2392
2393 while (*q && *q != ')' && *q != ',')
2394 q++;
2395 if (*q != ')')
2396 return -1;
2397
2398 if ((r = (struct reloc_entry *)
2399 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2400 return -1;
2401
2402 *str = q + 1;
2403 return r->reloc;
2404 }
2405
2406 /* Directives: register aliases. */
2407
2408 static struct reg_entry *
2409 insert_reg_alias (char *str, unsigned number, int type)
2410 {
2411 struct reg_entry *new_reg;
2412 const char *name;
2413
2414 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2415 {
2416 if (new_reg->builtin)
2417 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2418
2419 /* Only warn about a redefinition if it's not defined as the
2420 same register. */
2421 else if (new_reg->number != number || new_reg->type != type)
2422 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2423
2424 return NULL;
2425 }
2426
2427 name = xstrdup (str);
2428 new_reg = XNEW (struct reg_entry);
2429
2430 new_reg->name = name;
2431 new_reg->number = number;
2432 new_reg->type = type;
2433 new_reg->builtin = FALSE;
2434 new_reg->neon = NULL;
2435
2436 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2437 abort ();
2438
2439 return new_reg;
2440 }
2441
2442 static void
2443 insert_neon_reg_alias (char *str, int number, int type,
2444 struct neon_typed_alias *atype)
2445 {
2446 struct reg_entry *reg = insert_reg_alias (str, number, type);
2447
2448 if (!reg)
2449 {
2450 first_error (_("attempt to redefine typed alias"));
2451 return;
2452 }
2453
2454 if (atype)
2455 {
2456 reg->neon = XNEW (struct neon_typed_alias);
2457 *reg->neon = *atype;
2458 }
2459 }
2460
2461 /* Look for the .req directive. This is of the form:
2462
2463 new_register_name .req existing_register_name
2464
2465 If we find one, or if it looks sufficiently like one that we want to
2466 handle any error here, return TRUE. Otherwise return FALSE. */
2467
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces. */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  /* ".req" with nothing after it is not ours to diagnose.  */
  if (*oldname == '\0')
    return FALSE;

  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Still TRUE: this was a .req, albeit a broken one.  */
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end. If not, then
     the desired alias name is in the global original_case_string. */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name. */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the case-folded variants when they differ from the
	 name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias. We will fail and issue
	     a second, duplicate error message. This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req. */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2540
2541 /* Create a Neon typed/indexed register alias using directives, e.g.:
2542 X .dn d5.s32[1]
2543 Y .qn 6.s16
2544 Z .dn d7
2545 T .dn Z[0]
2546 These typed registers can be used instead of the types specified after the
2547 Neon mnemonic, so long as all operands given have types. Types can also be
2548 specified directly, e.g.:
2549 vadd d0.s32, d1.s32, d2.s32 */
2550
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  /* Start with no type and no index defined.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* .dn creates a D-register alias, .qn a Q-register alias; anything
     else is not ours to handle.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  /* The base may be a register name (possibly itself a typed alias)...  */
  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* ...or a bare register number, e.g. "Y .qn 6.s16".  */
      /* Try parsing as an integer. */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* Q registers are numbered in D-register units internally.  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any type/index already attached to the base alias.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type. */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index. */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end. If not, then
     the desired alias name is in the global original_case_string. */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  /* Create the alias as written, plus all-uppercase and all-lowercase
     variants (mirroring create_register_alias).  */
  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase. */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase. */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2689
2690 /* Should never be called, as .req goes between the alias and the
2691 register name, not at the beginning of the line. */
2692
/* Reaching here means .req appeared at the start of a line instead of
   between the alias and the register name.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
2698
/* Like s_req: .dn is handled by create_neon_reg_alias when it follows
   an alias name, so a line-initial .dn is a syntax error.  */
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2704
/* Like s_req: .qn is handled by create_neon_reg_alias when it follows
   an alias name, so a line-initial .qn is a syntax error.  */
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2710
2711 /* The .unreq directive deletes an alias which was previously defined
2712 by .req. For example:
2713
2714 my_alias .req r11
2715 .unreq my_alias */
2716
2717 static void
2718 s_unreq (int a ATTRIBUTE_UNUSED)
2719 {
2720 char * name;
2721 char saved_char;
2722
2723 name = input_line_pointer;
2724
2725 while (*input_line_pointer != 0
2726 && *input_line_pointer != ' '
2727 && *input_line_pointer != '\n')
2728 ++input_line_pointer;
2729
2730 saved_char = *input_line_pointer;
2731 *input_line_pointer = 0;
2732
2733 if (!*name)
2734 as_bad (_("invalid syntax for .unreq directive"));
2735 else
2736 {
2737 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2738 name);
2739
2740 if (!reg)
2741 as_bad (_("unknown register alias '%s'"), name);
2742 else if (reg->builtin)
2743 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2744 name);
2745 else
2746 {
2747 char * p;
2748 char * nbuf;
2749
2750 hash_delete (arm_reg_hsh, name, FALSE);
2751 free ((char *) reg->name);
2752 if (reg->neon)
2753 free (reg->neon);
2754 free (reg);
2755
2756 /* Also locate the all upper case and all lower case versions.
2757 Do not complain if we cannot find one or the other as it
2758 was probably deleted above. */
2759
2760 nbuf = strdup (name);
2761 for (p = nbuf; *p; p++)
2762 *p = TOUPPER (*p);
2763 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2764 if (reg)
2765 {
2766 hash_delete (arm_reg_hsh, nbuf, FALSE);
2767 free ((char *) reg->name);
2768 if (reg->neon)
2769 free (reg->neon);
2770 free (reg);
2771 }
2772
2773 for (p = nbuf; *p; p++)
2774 *p = TOLOWER (*p);
2775 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2776 if (reg)
2777 {
2778 hash_delete (arm_reg_hsh, nbuf, FALSE);
2779 free ((char *) reg->name);
2780 if (reg->neon)
2781 free (reg->neon);
2782 free (reg);
2783 }
2784
2785 free (nbuf);
2786 }
2787 }
2788
2789 *input_line_pointer = saved_char;
2790 demand_empty_rest_of_line ();
2791 }
2792
2793 /* Directives: Instruction set selection. */
2794
2795 #ifdef OBJ_ELF
2796 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2797 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2798 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2799 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2800
2801 /* Create a new mapping symbol for the transition to STATE. */
2802
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the AAELF mapping-symbol name for this state: $d for data,
     $a for ARM code, $t for Thumb code.  All three are untyped.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark code symbols with the appropriate ARM/Thumb attributes.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference. Also check that
     we do not place two mapping symbols at the same offset within a
     frag. We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive. In this case,
     we replace the old symbol with the new one at the same address. */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Replace the old first-of-frag symbol with the new one.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Two symbols at the same offset: the newer one wins.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2876
2877 /* We must sometimes convert a region marked as code to data during
2878 code alignment, if an odd number of bytes have to be padded. The
2879 code mapping symbol is pushed to an aligned address. */
2880
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* It was also the first symbol of the frag; clear that too.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding bytes as data, then resume STATE at the aligned
     address just past them.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2903
2904 static void mapping_state_2 (enum mstate state, int max_chars);
2905
2906 /* Set the mapping state to STATE. Only call this when about to
2907 emit some STATE bytes to the file. */
2908
2909 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2910 void
2911 mapping_state (enum mstate state)
2912 {
2913 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2914
2915 if (mapstate == state)
2916 /* The mapping symbol has already been emitted.
2917 There is nothing else to do. */
2918 return;
2919
2920 if (state == MAP_ARM || state == MAP_THUMB)
2921 /* PR gas/12931
2922 All ARM instructions require 4-byte alignment.
2923 (Almost) all Thumb instructions require 2-byte alignment.
2924
2925 When emitting instructions into any section, mark the section
2926 appropriately.
2927
2928 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2929 but themselves require 2-byte alignment; this applies to some
2930 PC- relative forms. However, these cases will involve implicit
2931 literal pool generation or an explicit .align >=2, both of
2932 which will cause the section to me marked with sufficient
2933 alignment. Thus, we don't handle those cases here. */
2934 record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
2935
2936 if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
2937 /* This case will be evaluated later. */
2938 return;
2939
2940 mapping_state_2 (state, 0);
2941 }
2942
2943 /* Same as mapping_state, but MAX_CHARS bytes have already been
2944 allocated. Put the mapping symbol that far back. */
2945
2946 static void
2947 mapping_state_2 (enum mstate state, int max_chars)
2948 {
2949 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2950
2951 if (!SEG_NORMAL (now_seg))
2952 return;
2953
2954 if (mapstate == state)
2955 /* The mapping symbol has already been emitted.
2956 There is nothing else to do. */
2957 return;
2958
2959 if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2960 || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2961 {
2962 struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2963 const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2964
2965 if (add_symbol)
2966 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2967 }
2968
2969 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2970 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2971 }
2972 #undef TRANSITION
2973 #else
2974 #define mapping_state(x) ((void)0)
2975 #define mapping_state_2(x, y) ((void)0)
2976 #endif
2977
2978 /* Find the real, Thumb encoded start of a Thumb function. */
2979
2980 #ifdef OBJ_COFF
2981 static symbolS *
2982 find_real_start (symbolS * symbolP)
2983 {
2984 char * real_start;
2985 const char * name = S_GET_NAME (symbolP);
2986 symbolS * new_target;
2987
2988 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2989 #define STUB_NAME ".real_start_of"
2990
2991 if (name == NULL)
2992 abort ();
2993
2994 /* The compiler may generate BL instructions to local labels because
2995 it needs to perform a branch to a far away location. These labels
2996 do not have a corresponding ".real_start_of" label. We check
2997 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2998 the ".real_start_of" convention for nonlocal branches. */
2999 if (S_IS_LOCAL (symbolP) || name[0] == '.')
3000 return symbolP;
3001
3002 real_start = concat (STUB_NAME, name, NULL);
3003 new_target = symbol_find (real_start);
3004 free (real_start);
3005
3006 if (new_target == NULL)
3007 {
3008 as_warn (_("Failed to find real start of function: %s\n"), name);
3009 new_target = symbolP;
3010 }
3011
3012 return new_target;
3013 }
3014 #endif
3015
3016 static void
3017 opcode_select (int width)
3018 {
3019 switch (width)
3020 {
3021 case 16:
3022 if (! thumb_mode)
3023 {
3024 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
3025 as_bad (_("selected processor does not support THUMB opcodes"));
3026
3027 thumb_mode = 1;
3028 /* No need to force the alignment, since we will have been
3029 coming from ARM mode, which is word-aligned. */
3030 record_alignment (now_seg, 1);
3031 }
3032 break;
3033
3034 case 32:
3035 if (thumb_mode)
3036 {
3037 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
3038 as_bad (_("selected processor does not support ARM opcodes"));
3039
3040 thumb_mode = 0;
3041
3042 if (!need_pass_2)
3043 frag_align (2, 0, 0);
3044
3045 record_alignment (now_seg, 1);
3046 }
3047 break;
3048
3049 default:
3050 as_bad (_("invalid instruction size selected (%d)"), width);
3051 }
3052 }
3053
/* Implement the .arm directive: switch to 32-bit ARM encoding.  */
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
3060
/* Implement the .thumb directive: switch to 16-bit Thumb encoding.  */
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
3067
3068 static void
3069 s_code (int unused ATTRIBUTE_UNUSED)
3070 {
3071 int temp;
3072
3073 temp = get_absolute_expression ();
3074 switch (temp)
3075 {
3076 case 16:
3077 case 32:
3078 opcode_select (temp);
3079 break;
3080
3081 default:
3082 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
3083 }
3084 }
3085
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.	*/
  if (! thumb_mode)
    {
      /* NOTE(review): thumb_mode is set to 2 here but 1 in
	 opcode_select - presumably 2 marks "forced" Thumb; confirm
	 against the other users of thumb_mode.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
3102
/* Implement .thumb_func: switch to Thumb mode and flag the next label
   as the start of a Thumb function.  */
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.	 */
  label_is_thumb_function_name = TRUE;
}
3112
3113 /* Perform a .set directive, but also mark the alias as
3114 being a thumb function. */
3115
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily NUL-terminate the name so it prints cleanly.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  /* Find or create the symbol being set.  */
  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Parse and assign the value, exactly as s_set would.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3201
3202 /* Directives: Mode selection. */
3203
3204 /* .syntax [unified|divided] - choose the new unified syntax
3205 (same for Arm and Thumb encoding, modulo slight differences in what
3206 can be represented) or the old divergent syntax for each mode. */
3207 static void
3208 s_syntax (int unused ATTRIBUTE_UNUSED)
3209 {
3210 char *name, delim;
3211
3212 delim = get_symbol_name (& name);
3213
3214 if (!strcasecmp (name, "unified"))
3215 unified_syntax = TRUE;
3216 else if (!strcasecmp (name, "divided"))
3217 unified_syntax = FALSE;
3218 else
3219 {
3220 as_bad (_("unrecognized syntax mode \"%s\""), name);
3221 return;
3222 }
3223 (void) restore_line_pointer (delim);
3224 demand_empty_rest_of_line ();
3225 }
3226
3227 /* Directives: sectioning and alignment. */
3228
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.	*/
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  /* Let the target-specific hook observe the section change.  */
  md_elf_section_change_hook ();
#endif
}
3241
/* Implement .even: align to a 2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3253
3254 /* Directives: CodeComposer Studio. */
3255
3256 /* .ref (for CodeComposer Studio syntax only). */
3257 static void
3258 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3259 {
3260 if (codecomposer_syntax)
3261 ignore_rest_of_line ();
3262 else
3263 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3264 }
3265
3266 /* If name is not NULL, then it is used for marking the beginning of a
3267 function, whereas if it is NULL then it means the function end. */
3268 static void
3269 asmfunc_debug (const char * name)
3270 {
3271 static const char * last_name = NULL;
3272
3273 if (name != NULL)
3274 {
3275 gas_assert (last_name == NULL);
3276 last_name = name;
3277
3278 if (debug_type == DEBUG_STABS)
3279 stabs_generate_asm_func (name, name);
3280 }
3281 else
3282 {
3283 gas_assert (last_name != NULL);
3284
3285 if (debug_type == DEBUG_STABS)
3286 stabs_generate_asm_endfunc (last_name, last_name);
3287
3288 last_name = NULL;
3289 }
3290 }
3291
3292 static void
3293 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3294 {
3295 if (codecomposer_syntax)
3296 {
3297 switch (asmfunc_state)
3298 {
3299 case OUTSIDE_ASMFUNC:
3300 asmfunc_state = WAITING_ASMFUNC_NAME;
3301 break;
3302
3303 case WAITING_ASMFUNC_NAME:
3304 as_bad (_(".asmfunc repeated."));
3305 break;
3306
3307 case WAITING_ENDASMFUNC:
3308 as_bad (_(".asmfunc without function."));
3309 break;
3310 }
3311 demand_empty_rest_of_line ();
3312 }
3313 else
3314 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3315 }
3316
3317 static void
3318 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3319 {
3320 if (codecomposer_syntax)
3321 {
3322 switch (asmfunc_state)
3323 {
3324 case OUTSIDE_ASMFUNC:
3325 as_bad (_(".endasmfunc without a .asmfunc."));
3326 break;
3327
3328 case WAITING_ASMFUNC_NAME:
3329 as_bad (_(".endasmfunc without function."));
3330 break;
3331
3332 case WAITING_ENDASMFUNC:
3333 asmfunc_state = OUTSIDE_ASMFUNC;
3334 asmfunc_debug (NULL);
3335 break;
3336 }
3337 demand_empty_rest_of_line ();
3338 }
3339 else
3340 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3341 }
3342
3343 static void
3344 s_ccs_def (int name)
3345 {
3346 if (codecomposer_syntax)
3347 s_globl (name);
3348 else
3349 as_bad (_(".def pseudo-op only available with -mccs flag."));
3350 }
3351
3352 /* Directives: Literal pools. */
3353
3354 static literal_pool *
3355 find_literal_pool (void)
3356 {
3357 literal_pool * pool;
3358
3359 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3360 {
3361 if (pool->section == now_seg
3362 && pool->sub_section == now_subseg)
3363 break;
3364 }
3365
3366 return pool;
3367 }
3368
3369 static literal_pool *
3370 find_or_make_literal_pool (void)
3371 {
3372 /* Next literal pool ID number. */
3373 static unsigned int latest_pool_num = 1;
3374 literal_pool * pool;
3375
3376 pool = find_literal_pool ();
3377
3378 if (pool == NULL)
3379 {
3380 /* Create a new pool. */
3381 pool = XNEW (literal_pool);
3382 if (! pool)
3383 return NULL;
3384
3385 pool->next_free_entry = 0;
3386 pool->section = now_seg;
3387 pool->sub_section = now_subseg;
3388 pool->next = list_of_pools;
3389 pool->symbol = NULL;
3390 pool->alignment = 2;
3391
3392 /* Add it to the list. */
3393 list_of_pools = pool;
3394 }
3395
3396 /* New pools, and emptied pools, will have a NULL symbol. */
3397 if (pool->symbol == NULL)
3398 {
3399 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3400 (valueT) 0, &zero_address_frag);
3401 pool->id = latest_pool_num ++;
3402 }
3403
3404 /* Done. */
3405 return pool;
3406 }
3407
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.

   NBYTES is 4 for a word literal or 8 for a double-word literal; an
   8-byte literal is stored as two consecutive 4-byte entries.  On
   success, inst.relocs[0].exp is rewritten to reference the pool
   symbol plus the entry's byte offset.  Returns SUCCESS or FAIL
   (setting inst.error on failure).  */

static int
add_to_lit_pool (unsigned int nbytes)
{
/* X_md field of a pool entry: low byte is the entry size, high byte
   flags a 4-byte padding slot inserted for 8-byte alignment.  */
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  if (nbytes == 8)
    {
      /* Split the 64-bit value into two 32-bit halves (imm1 = low,
	 imm2 = high), swapping for big-endian targets.  */
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.relocs[0].exp.X_unsigned ? 0
	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match an existing constant entry of the same size.  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.relocs[0].exp.X_unsigned))
	    break;

	  /* Match an existing symbolic entry (same symbol, addend and
	     op-symbol).  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.relocs[0].exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.relocs[0].exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte literal matches a pair of consecutive 4-byte
	 entries holding its two halves, provided the pair starts on
	 an 8-byte-aligned offset.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.relocs[0].exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.relocs[0].exp.X_unsigned))
	break;

      /* A 4-byte literal may reuse a padding slot left behind by an
	 earlier 8-byte alignment.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.relocs[0].exp.X_op == O_constant
		|| inst.relocs[0].exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Misaligned: insert a 4-byte padding slot first.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.relocs[0].exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 32-bit halves as consecutive constant
	     entries and request 8-byte pool alignment.  */
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Reuse the padding slot for this 4-byte literal.  */
      pool->literals[entry] = inst.relocs[0].exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's relocation to point at the pool entry:
     pool symbol + byte offset of the entry.  */
  inst.relocs[0].exp.X_op = O_symbol;
  inst.relocs[0].exp.X_add_number = pool_size;
  inst.relocs[0].exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3572
3573 bfd_boolean
3574 tc_start_label_without_colon (void)
3575 {
3576 bfd_boolean ret = TRUE;
3577
3578 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3579 {
3580 const char *label = input_line_pointer;
3581
3582 while (!is_end_of_line[(int) label[-1]])
3583 --label;
3584
3585 if (*label == '.')
3586 {
3587 as_bad (_("Invalid label '%s'"), label);
3588 ret = FALSE;
3589 }
3590
3591 asmfunc_debug (label);
3592
3593 asmfunc_state = WAITING_ENDASMFUNC;
3594 }
3595
3596 return ret;
3597 }
3598
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Initializes SYMBOLP with a private copy of NAME, the given segment,
   value and fragment, and appends it to the global symbol chain.  */

static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.	 */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.	 */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy the name into the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* It is an error to add symbols after the table is frozen.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  /* Let the object format and target react to the new symbol.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
3649
/* Handle the .ltorg directive: dump the current section's literal
   pool at the present location and mark the pool as empty.  Does
   nothing if there is no pool or it has no entries.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Literal pool data is data, not code: switch the mapping symbol
     state accordingly.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The \002 in the name makes this label impossible to write in
     source, so it cannot clash with user symbols.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Give the pool's fake symbol its real location, now that it is
     known.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3701
3702 #ifdef OBJ_ELF
3703 /* Forward declarations for functions below, in the MD interface
3704 section. */
3705 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3706 static valueT create_unwind_entry (int);
3707 static void start_unwind_section (const segT, int);
3708 static void add_unwind_opcode (valueT, int);
3709 static void flush_pending_unwind (void);
3710
3711 /* Directives: Data. */
3712
/* Handle .word/.short style data directives (ELF): emit NBYTES-sized
   data items for a comma-separated list of expressions, honouring
   relocation suffixes such as (GOT) on symbolic operands.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* Data directives switch the mapping symbol state to data.  */
  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  /* Look for a relocation suffix like (GOT) after the
	     symbol.  */
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		    bfd_reloc_type_lookup (stdoutput,
					   (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Splice the relocation suffix out of the input
		     buffer so the whole expression can be re-parsed,
		     then restore the buffer afterwards.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the reloc at the most significant end
		     (offset) within the NBYTES field.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3807
3808 /* Emit an expression containing a 32-bit thumb instruction.
3809 Implementation based on put_thumb32_insn. */
3810
3811 static void
3812 emit_thumb32_expr (expressionS * exp)
3813 {
3814 expressionS exp_high = *exp;
3815
3816 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3817 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3818 exp->X_add_number &= 0xffff;
3819 emit_expr (exp, (unsigned int) THUMB_SIZE);
3820 }
3821
/* Guess the size in bytes of a Thumb instruction from its opcode
   value: 2 for a 16-bit encoding, 4 for a 32-bit encoding, 0 when
   the size cannot be determined.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int value = (unsigned int) opcode;

  if (value < 0xe800u)
    return 2;
  if (value >= 0xe8000000u)
    return 4;
  return 0;
}
3834
3835 static bfd_boolean
3836 emit_insn (expressionS *exp, int nbytes)
3837 {
3838 int size = 0;
3839
3840 if (exp->X_op == O_constant)
3841 {
3842 size = nbytes;
3843
3844 if (size == 0)
3845 size = thumb_insn_size (exp->X_add_number);
3846
3847 if (size != 0)
3848 {
3849 if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3850 {
3851 as_bad (_(".inst.n operand too big. "\
3852 "Use .inst.w instead"));
3853 size = 0;
3854 }
3855 else
3856 {
3857 if (now_pred.state == AUTOMATIC_PRED_BLOCK)
3858 set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN, 0);
3859 else
3860 set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
3861
3862 if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3863 emit_thumb32_expr (exp);
3864 else
3865 emit_expr (exp, (unsigned int) size);
3866
3867 it_fsm_post_encode ();
3868 }
3869 }
3870 else
3871 as_bad (_("cannot determine Thumb instruction size. " \
3872 "Use .inst.n/.inst.w instead"));
3873 }
3874 else
3875 as_bad (_("constant expression required"));
3876
3877 return (size != 0);
3878 }
3879
3880 /* Like s_arm_elf_cons but do not use md_cons_align and
3881 set the mapping state to MAP_ARM/MAP_THUMB. */
3882
3883 static void
3884 s_arm_elf_inst (int nbytes)
3885 {
3886 if (is_it_end_of_statement ())
3887 {
3888 demand_empty_rest_of_line ();
3889 return;
3890 }
3891
3892 /* Calling mapping_state () here will not change ARM/THUMB,
3893 but will ensure not to be in DATA state. */
3894
3895 if (thumb_mode)
3896 mapping_state (MAP_THUMB);
3897 else
3898 {
3899 if (nbytes != 0)
3900 {
3901 as_bad (_("width suffixes are invalid in ARM mode"));
3902 ignore_rest_of_line ();
3903 return;
3904 }
3905
3906 nbytes = 4;
3907
3908 mapping_state (MAP_ARM);
3909 }
3910
3911 do
3912 {
3913 expressionS exp;
3914
3915 expression (& exp);
3916
3917 if (! emit_insn (& exp, nbytes))
3918 {
3919 ignore_rest_of_line ();
3920 return;
3921 }
3922 }
3923 while (*input_line_pointer++ == ',');
3924
3925 /* Put terminator back into stream. */
3926 input_line_pointer --;
3927 demand_empty_rest_of_line ();
3928 }
3929
3930 /* Parse a .rel31 directive. */
3931
3932 static void
3933 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3934 {
3935 expressionS exp;
3936 char *p;
3937 valueT highbit;
3938
3939 highbit = 0;
3940 if (*input_line_pointer == '1')
3941 highbit = 0x80000000;
3942 else if (*input_line_pointer != '0')
3943 as_bad (_("expected 0 or 1"));
3944
3945 input_line_pointer++;
3946 if (*input_line_pointer != ',')
3947 as_bad (_("missing comma"));
3948 input_line_pointer++;
3949
3950 #ifdef md_flush_pending_output
3951 md_flush_pending_output ();
3952 #endif
3953
3954 #ifdef md_cons_align
3955 md_cons_align (4);
3956 #endif
3957
3958 mapping_state (MAP_DATA);
3959
3960 expression (&exp);
3961
3962 p = frag_more (4);
3963 md_number_to_chars (p, highbit, 4);
3964 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3965 BFD_RELOC_ARM_PREL31);
3966
3967 demand_empty_rest_of_line ();
3968 }
3969
3970 /* Directives: AEABI stack-unwind tables. */
3971
/* Parse an unwind_fnstart directive.  Simply records the current location.  */

static void
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (unwind.proc_start)
    {
      /* Nested/repeated .fnstart is not allowed.  */
      as_bad (_("duplicate .fnstart directive"));
      return;
    }

  /* Mark the start of the function.  */
  unwind.proc_start = expr_build_dot ();

  /* Reset the rest of the unwind info.  */
  unwind.opcode_count = 0;
  unwind.table_entry = NULL;
  unwind.personality_routine = NULL;
  unwind.personality_index = -1;
  unwind.frame_size = 0;
  unwind.fp_offset = 0;
  unwind.fp_reg = REG_SP;	/* Frame base starts out as SP.  */
  unwind.fp_used = 0;
  unwind.sp_restored = 0;
}
3998
3999
/* Parse a handlerdata directive.  Creates the exception handling table entry
   for the function.  */

static void
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.table_entry)
    as_bad (_("duplicate .handlerdata directive"));

  /* Force creation of the table entry now so the handler data that
     follows this directive lands after it.  */
  create_unwind_entry (1);
}
4015
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.	 */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      /* A zero-size BFD_RELOC_NONE fixup records the reference
	 without changing any bytes.  */
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.	 */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
4085
4086
4087 /* Parse an unwind_cantunwind directive. */
4088
4089 static void
4090 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
4091 {
4092 demand_empty_rest_of_line ();
4093 if (!unwind.proc_start)
4094 as_bad (MISSING_FNSTART);
4095
4096 if (unwind.personality_routine || unwind.personality_index != -1)
4097 as_bad (_("personality routine specified for cantunwind frame"));
4098
4099 unwind.personality_index = -2;
4100 }
4101
4102
4103 /* Parse a personalityindex directive. */
4104
4105 static void
4106 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
4107 {
4108 expressionS exp;
4109
4110 if (!unwind.proc_start)
4111 as_bad (MISSING_FNSTART);
4112
4113 if (unwind.personality_routine || unwind.personality_index != -1)
4114 as_bad (_("duplicate .personalityindex directive"));
4115
4116 expression (&exp);
4117
4118 if (exp.X_op != O_constant
4119 || exp.X_add_number < 0 || exp.X_add_number > 15)
4120 {
4121 as_bad (_("bad personality routine number"));
4122 ignore_rest_of_line ();
4123 return;
4124 }
4125
4126 unwind.personality_index = exp.X_add_number;
4127
4128 demand_empty_rest_of_line ();
4129 }
4130
4131
4132 /* Parse a personality directive. */
4133
4134 static void
4135 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
4136 {
4137 char *name, *p, c;
4138
4139 if (!unwind.proc_start)
4140 as_bad (MISSING_FNSTART);
4141
4142 if (unwind.personality_routine || unwind.personality_index != -1)
4143 as_bad (_("duplicate .personality directive"));
4144
4145 c = get_symbol_name (& name);
4146 p = input_line_pointer;
4147 if (c == '"')
4148 ++ input_line_pointer;
4149 unwind.personality_routine = symbol_find_or_make (name);
4150 *p = c;
4151 demand_empty_rest_of_line ();
4152 }
4153
4154
/* Parse a directive saving core registers: emit EHABI pop opcodes for
   the parsed register list and account for the stacked bytes.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer, REGLIST_RN);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  Each saved core register
     occupies one word on the stack.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4230
4231
4232 /* Parse a directive saving FPA registers. */
4233
4234 static void
4235 s_arm_unwind_save_fpa (int reg)
4236 {
4237 expressionS exp;
4238 int num_regs;
4239 valueT op;
4240
4241 /* Get Number of registers to transfer. */
4242 if (skip_past_comma (&input_line_pointer) != FAIL)
4243 expression (&exp);
4244 else
4245 exp.X_op = O_illegal;
4246
4247 if (exp.X_op != O_constant)
4248 {
4249 as_bad (_("expected , <constant>"));
4250 ignore_rest_of_line ();
4251 return;
4252 }
4253
4254 num_regs = exp.X_add_number;
4255
4256 if (num_regs < 1 || num_regs > 4)
4257 {
4258 as_bad (_("number of registers must be in the range [1:4]"));
4259 ignore_rest_of_line ();
4260 return;
4261 }
4262
4263 demand_empty_rest_of_line ();
4264
4265 if (reg == 4)
4266 {
4267 /* Short form. */
4268 op = 0xb4 | (num_regs - 1);
4269 add_unwind_opcode (op, 1);
4270 }
4271 else
4272 {
4273 /* Long form. */
4274 op = 0xc800 | (reg << 4) | (num_regs - 1);
4275 add_unwind_opcode (op, 2);
4276 }
4277 unwind.frame_size += num_regs * 12;
4278 }
4279
4280
4281 /* Parse a directive saving VFP registers for ARMv6 and above. */
4282
4283 static void
4284 s_arm_unwind_save_vfp_armv6 (void)
4285 {
4286 int count;
4287 unsigned int start;
4288 valueT op;
4289 int num_vfpv3_regs = 0;
4290 int num_regs_below_16;
4291 bfd_boolean partial_match;
4292
4293 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D,
4294 &partial_match);
4295 if (count == FAIL)
4296 {
4297 as_bad (_("expected register list"));
4298 ignore_rest_of_line ();
4299 return;
4300 }
4301
4302 demand_empty_rest_of_line ();
4303
4304 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4305 than FSTMX/FLDMX-style ones). */
4306
4307 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4308 if (start >= 16)
4309 num_vfpv3_regs = count;
4310 else if (start + count > 16)
4311 num_vfpv3_regs = start + count - 16;
4312
4313 if (num_vfpv3_regs > 0)
4314 {
4315 int start_offset = start > 16 ? start - 16 : 0;
4316 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
4317 add_unwind_opcode (op, 2);
4318 }
4319
4320 /* Generate opcode for registers numbered in the range 0 .. 15. */
4321 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
4322 gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
4323 if (num_regs_below_16 > 0)
4324 {
4325 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
4326 add_unwind_opcode (op, 2);
4327 }
4328
4329 unwind.frame_size += count * 8;
4330 }
4331
4332
4333 /* Parse a directive saving VFP registers for pre-ARMv6. */
4334
4335 static void
4336 s_arm_unwind_save_vfp (void)
4337 {
4338 int count;
4339 unsigned int reg;
4340 valueT op;
4341 bfd_boolean partial_match;
4342
4343 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D,
4344 &partial_match);
4345 if (count == FAIL)
4346 {
4347 as_bad (_("expected register list"));
4348 ignore_rest_of_line ();
4349 return;
4350 }
4351
4352 demand_empty_rest_of_line ();
4353
4354 if (reg == 8)
4355 {
4356 /* Short form. */
4357 op = 0xb8 | (count - 1);
4358 add_unwind_opcode (op, 1);
4359 }
4360 else
4361 {
4362 /* Long form. */
4363 op = 0xb300 | (reg << 4) | (count - 1);
4364 add_unwind_opcode (op, 2);
4365 }
4366 unwind.frame_size += count * 8 + 4;
4367 }
4368
4369
/* Parse a directive saving iWMMXt data registers.  Parses a register
   list (with ranges) for wr0-wr15, tries to merge the save with the
   opcodes emitted by a previous directive, and emits the resulting
   save opcodes in descending register order.  */

static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      /* A lower-numbered register after a higher one is accepted but
	 flagged.  */
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  /* Handle a register range, e.g. wr4-wr9.  */
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.	*/
  flush_pending_unwind ();

  /* Each saved wr register occupies 8 bytes.  */
  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.	 We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.	 */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	{
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      /* Previous opcode was a short-form save of wr10..wr(10+i);
		 merge if this list is exactly wr9.  */
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      /* Previous opcode was a two-byte long-form save; decode
		 its start register and count from the preceding byte.  */
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      op = 0xffff << (reg - 1);
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  /* This list ends exactly where the previous block
		     begins: fold both into one contiguous mask.  */
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.  Generate opcodes to save the
	     preceding block.	*/
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form.	 */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
error:
  ignore_rest_of_line ();
}
4503
4504 static void
4505 s_arm_unwind_save_mmxwcg (void)
4506 {
4507 int reg;
4508 int hi_reg;
4509 unsigned mask = 0;
4510 valueT op;
4511
4512 if (*input_line_pointer == '{')
4513 input_line_pointer++;
4514
4515 skip_whitespace (input_line_pointer);
4516
4517 do
4518 {
4519 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4520
4521 if (reg == FAIL)
4522 {
4523 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4524 goto error;
4525 }
4526
4527 reg -= 8;
4528 if (mask >> reg)
4529 as_tsktsk (_("register list not in ascending order"));
4530 mask |= 1 << reg;
4531
4532 if (*input_line_pointer == '-')
4533 {
4534 input_line_pointer++;
4535 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4536 if (hi_reg == FAIL)
4537 {
4538 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4539 goto error;
4540 }
4541 else if (reg >= hi_reg)
4542 {
4543 as_bad (_("bad register range"));
4544 goto error;
4545 }
4546 for (; reg < hi_reg; reg++)
4547 mask |= 1 << reg;
4548 }
4549 }
4550 while (skip_past_comma (&input_line_pointer) != FAIL);
4551
4552 skip_past_char (&input_line_pointer, '}');
4553
4554 demand_empty_rest_of_line ();
4555
4556 /* Generate any deferred opcodes because we're going to be looking at
4557 the list. */
4558 flush_pending_unwind ();
4559
4560 for (reg = 0; reg < 16; reg++)
4561 {
4562 if (mask & (1 << reg))
4563 unwind.frame_size += 4;
4564 }
4565 op = 0xc700 | mask;
4566 add_unwind_opcode (op, 2);
4567 return;
4568 error:
4569 ignore_rest_of_line ();
4570 }
4571
4572
4573 /* Parse an unwind_save directive.
4574 If the argument is non-zero, this is a .vsave directive. */
4575
4576 static void
4577 s_arm_unwind_save (int arch_v6)
4578 {
4579 char *peek;
4580 struct reg_entry *reg;
4581 bfd_boolean had_brace = FALSE;
4582
4583 if (!unwind.proc_start)
4584 as_bad (MISSING_FNSTART);
4585
4586 /* Figure out what sort of save we have. */
4587 peek = input_line_pointer;
4588
4589 if (*peek == '{')
4590 {
4591 had_brace = TRUE;
4592 peek++;
4593 }
4594
4595 reg = arm_reg_parse_multi (&peek);
4596
4597 if (!reg)
4598 {
4599 as_bad (_("register expected"));
4600 ignore_rest_of_line ();
4601 return;
4602 }
4603
4604 switch (reg->type)
4605 {
4606 case REG_TYPE_FN:
4607 if (had_brace)
4608 {
4609 as_bad (_("FPA .unwind_save does not take a register list"));
4610 ignore_rest_of_line ();
4611 return;
4612 }
4613 input_line_pointer = peek;
4614 s_arm_unwind_save_fpa (reg->number);
4615 return;
4616
4617 case REG_TYPE_RN:
4618 s_arm_unwind_save_core ();
4619 return;
4620
4621 case REG_TYPE_VFD:
4622 if (arch_v6)
4623 s_arm_unwind_save_vfp_armv6 ();
4624 else
4625 s_arm_unwind_save_vfp ();
4626 return;
4627
4628 case REG_TYPE_MMXWR:
4629 s_arm_unwind_save_mmxwr ();
4630 return;
4631
4632 case REG_TYPE_MMXWCG:
4633 s_arm_unwind_save_mmxwcg ();
4634 return;
4635
4636 default:
4637 as_bad (_(".unwind_save does not support this kind of register"));
4638 ignore_rest_of_line ();
4639 }
4640 }
4641
4642
4643 /* Parse an unwind_movsp directive. */
4644
static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  /* .movsp is only valid inside a .fnstart/.fnend pair.  */
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  /* A .movsp only makes sense while the frame is still addressed via SP;
     once FP_REG has moved off SP, a second .movsp is an error.  */
  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value: 0x90 | reg is the EHABI
     "set vsp = r[reg]" unwind opcode.  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later: REG now carries the frame base
     (offset by OFFSET from the current frame size).  */
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
4692
4693 /* Parse an unwind_pad directive. */
4694
4695 static void
4696 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4697 {
4698 int offset;
4699
4700 if (!unwind.proc_start)
4701 as_bad (MISSING_FNSTART);
4702
4703 if (immediate_for_directive (&offset) == FAIL)
4704 return;
4705
4706 if (offset & 3)
4707 {
4708 as_bad (_("stack increment must be multiple of 4"));
4709 ignore_rest_of_line ();
4710 return;
4711 }
4712
4713 /* Don't generate any opcodes, just record the details for later. */
4714 unwind.frame_size += offset;
4715 unwind.pending_offset += offset;
4716
4717 demand_empty_rest_of_line ();
4718 }
4719
4720 /* Parse an unwind_setfp directive. */
4721
4722 static void
4723 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4724 {
4725 int sp_reg;
4726 int fp_reg;
4727 int offset;
4728
4729 if (!unwind.proc_start)
4730 as_bad (MISSING_FNSTART);
4731
4732 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4733 if (skip_past_comma (&input_line_pointer) == FAIL)
4734 sp_reg = FAIL;
4735 else
4736 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4737
4738 if (fp_reg == FAIL || sp_reg == FAIL)
4739 {
4740 as_bad (_("expected <reg>, <reg>"));
4741 ignore_rest_of_line ();
4742 return;
4743 }
4744
4745 /* Optional constant. */
4746 if (skip_past_comma (&input_line_pointer) != FAIL)
4747 {
4748 if (immediate_for_directive (&offset) == FAIL)
4749 return;
4750 }
4751 else
4752 offset = 0;
4753
4754 demand_empty_rest_of_line ();
4755
4756 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4757 {
4758 as_bad (_("register must be either sp or set by a previous"
4759 "unwind_movsp directive"));
4760 return;
4761 }
4762
4763 /* Don't generate any opcodes, just record the information for later. */
4764 unwind.fp_reg = fp_reg;
4765 unwind.fp_used = 1;
4766 if (sp_reg == REG_SP)
4767 unwind.fp_offset = unwind.frame_size - offset;
4768 else
4769 unwind.fp_offset -= offset;
4770 }
4771
4772 /* Parse an unwind_raw directive. */
4773
4774 static void
4775 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4776 {
4777 expressionS exp;
4778 /* This is an arbitrary limit. */
4779 unsigned char op[16];
4780 int count;
4781
4782 if (!unwind.proc_start)
4783 as_bad (MISSING_FNSTART);
4784
4785 expression (&exp);
4786 if (exp.X_op == O_constant
4787 && skip_past_comma (&input_line_pointer) != FAIL)
4788 {
4789 unwind.frame_size += exp.X_add_number;
4790 expression (&exp);
4791 }
4792 else
4793 exp.X_op = O_illegal;
4794
4795 if (exp.X_op != O_constant)
4796 {
4797 as_bad (_("expected <offset>, <opcode>"));
4798 ignore_rest_of_line ();
4799 return;
4800 }
4801
4802 count = 0;
4803
4804 /* Parse the opcode. */
4805 for (;;)
4806 {
4807 if (count >= 16)
4808 {
4809 as_bad (_("unwind opcode too long"));
4810 ignore_rest_of_line ();
4811 }
4812 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4813 {
4814 as_bad (_("invalid unwind opcode"));
4815 ignore_rest_of_line ();
4816 return;
4817 }
4818 op[count++] = exp.X_add_number;
4819
4820 /* Parse the next byte. */
4821 if (skip_past_comma (&input_line_pointer) == FAIL)
4822 break;
4823
4824 expression (&exp);
4825 }
4826
4827 /* Add the opcode bytes in reverse order. */
4828 while (count--)
4829 add_unwind_opcode (op[count], 1);
4830
4831 demand_empty_rest_of_line ();
4832 }
4833
4834
4835 /* Parse a .eabi_attribute directive. */
4836
4837 static void
4838 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4839 {
4840 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4841
4842 if (tag >= 0 && tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4843 attributes_set_explicitly[tag] = 1;
4844 }
4845
4846 /* Emit a tls fix for the symbol. */
4847
static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Attach the fix at the current end of the output frag without
     emitting any data bytes; the relocation (Thumb or ARM variant,
     depending on the current mode) is resolved by the linker.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4869 #endif /* OBJ_ELF */
4870
4871 static void s_arm_arch (int);
4872 static void s_arm_object_arch (int);
4873 static void s_arm_cpu (int);
4874 static void s_arm_fpu (int);
4875 static void s_arm_arch_extension (int);
4876
4877 #ifdef TE_PE
4878
4879 static void
4880 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4881 {
4882 expressionS exp;
4883
4884 do
4885 {
4886 expression (&exp);
4887 if (exp.X_op == O_symbol)
4888 exp.X_op = O_secrel;
4889
4890 emit_expr (&exp, 4);
4891 }
4892 while (*input_line_pointer++ == ',');
4893
4894 input_line_pointer--;
4895 demand_empty_rest_of_line ();
4896 }
4897 #endif /* TE_PE */
4898
4899 /* This table describes all the machine specific pseudo-ops the assembler
4900 has to support. The fields are:
4901 pseudo-op name without dot
4902 function to call to execute this pseudo-op
4903 Integer arg to pass to the function. */
4904
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.	 */
  { "req",		s_req,		0 },
  /* Following two are likewise never called.  */
  { "dn",		s_dn,		0 },
  { "qn",		s_qn,		0 },
  { "unreq",		s_unreq,	0 },
  { "bss",		s_bss,		0 },
  { "align",		s_align_ptwo,	2 },
  /* Instruction-set / state selection.  */
  { "arm",		s_arm,		0 },
  { "thumb",		s_thumb,	0 },
  { "code",		s_code,		0 },
  { "force_thumb",	s_force_thumb,	0 },
  { "thumb_func",	s_thumb_func,	0 },
  { "thumb_set",	s_thumb_set,	0 },
  { "even",		s_even,		0 },
  /* Literal pool management.  */
  { "ltorg",		s_ltorg,	0 },
  { "pool",		s_ltorg,	0 },
  { "syntax",		s_syntax,	0 },
  /* Target architecture / FPU selection.  */
  { "cpu",		s_arm_cpu,	0 },
  { "arch",		s_arm_arch,	0 },
  { "object_arch",	s_arm_object_arch,	0 },
  { "fpu",		s_arm_fpu,	0 },
  { "arch_extension",	s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word",		s_arm_elf_cons, 4 },
  { "long",		s_arm_elf_cons, 4 },
  { "inst.n",		s_arm_elf_inst, 2 },
  { "inst.w",		s_arm_elf_inst, 4 },
  { "inst",		s_arm_elf_inst, 0 },
  { "rel31",		s_arm_rel31,	0 },
  /* EHABI unwind-table directives.  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  /* .save and .vsave share a handler; the argument distinguishes them.  */
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute, 0 },
  { "tlsdescseq",	s_arm_tls_descseq,      0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4978 \f
4979 /* Parser functions used exclusively in instruction operands. */
4980
4981 /* Generic immediate-value read function for use in insn parsing.
4982 STR points to the beginning of the immediate (the leading #);
4983 VAL receives the value; if the value is outside [MIN, MAX]
4984 issue an error. PREFIX_OPT is true if the immediate prefix is
4985 optional. */
4986
4987 static int
4988 parse_immediate (char **str, int *val, int min, int max,
4989 bfd_boolean prefix_opt)
4990 {
4991 expressionS exp;
4992
4993 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4994 if (exp.X_op != O_constant)
4995 {
4996 inst.error = _("constant expression required");
4997 return FAIL;
4998 }
4999
5000 if (exp.X_add_number < min || exp.X_add_number > max)
5001 {
5002 inst.error = _("immediate value out of range");
5003 return FAIL;
5004 }
5005
5006 *val = exp.X_add_number;
5007 return SUCCESS;
5008 }
5009
5010 /* Less-generic immediate-value read function with the possibility of loading a
5011 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
5012 instructions. Puts the result directly in inst.operands[i]. */
5013
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  /* Parse into the caller's expression if one was supplied, else a
     local scratch one.  */
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      /* Low 32 bits always go in .imm.  */
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm, littlenum by littlenum...  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      /* ...and the high 32 bits into .reg, flagged by .regisimm.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  /* Only consume the input on success.  */
  *str = ptr;

  return SUCCESS;
}
5082
5083 /* Returns the pseudo-register number of an FPA immediate constant,
5084 or FAIL if there isn't a valid constant here. */
5085
static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Not followed by end-of-line: not really a match, back up.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each table entry; a full
	 match at index I maps to pseudo-register I + 8.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  /* Restore INPUT_LINE_POINTER before returning.  */
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* No match: restore INPUT_LINE_POINTER and report failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
5175
5176 /* Returns 1 if a number has "quarter-precision" float format
5177 0baBbbbbbc defgh000 00000000 00000000. */
5178
static int
is_quarter_float (unsigned imm)
{
  /* In the 0baBbbbbbc defgh000... layout, bits 25-30 must read as the
     "Bbbbbb" pattern implied by bit 29 (the B bit is the complement of
     the following five b bits), and the low 19 bits must be zero.  */
  unsigned expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;

  return (imm & 0x7e000000) == expected;
}
5185
5186
5187 /* Detect the presence of a floating point or integer zero constant,
5188 i.e. #0.0 or #0. */
5189
static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    {
      /* In unified syntax, all prefixes are optional.  */
      if (!unified_syntax)
	return FALSE;
    }
  else
    ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* Range [0, 0]: only the value zero is accepted.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
	return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
			     &generic_floating_point_number);

  /* NOTE(review): low > leader appears to indicate that no significant
     littlenums were produced, i.e. the parsed value is zero; this relies
     on atof_generic's internal representation -- confirm against
     gas/atof-generic.c.  Only a positive zero is accepted here.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
	  > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
5224
5225 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5226 0baBbbbbbc defgh000 00000000 00000000.
5227 The zero and minus-zero cases need special handling, since they can't be
5228 encoded in the "quarter-precision" float format, but can nonetheless be
5229 loaded as integer constants. */
5230
static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan up to the next whitespace/newline for a '.' or exponent
	 character; their absence means this is an integer, not a float.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept quarter-precision encodable values, plus +0.0/-0.0 which
	 fall outside that format but load fine as integer constants.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5288
5289 /* Shift operands. */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a shift mnemonic (as written in assembler source) to its kind.  */
struct asm_shift_name
{
  const char	  *name;
  enum shift_kind  kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
};
5310
5311 /* Parse a <shift> specifier on an ARM data processing instruction.
5312 This has three forms:
5313
5314 (LSL|LSR|ASL|ASR|ROR) Rs
5315 (LSL|LSR|ASL|ASR|ROR) #imm
5316 RRX
5317
5318 Note that ASL is assimilated to LSL in the instruction encoding, and
5319 RRX to ROR #0 (which cannot be written as such). */
5320
5321 static int
5322 parse_shift (char **str, int i, enum parse_shift_mode mode)
5323 {
5324 const struct asm_shift_name *shift_name;
5325 enum shift_kind shift;
5326 char *s = *str;
5327 char *p = s;
5328 int reg;
5329
5330 for (p = *str; ISALPHA (*p); p++)
5331 ;
5332
5333 if (p == *str)
5334 {
5335 inst.error = _("shift expression expected");
5336 return FAIL;
5337 }
5338
5339 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5340 p - *str);
5341
5342 if (shift_name == NULL)
5343 {
5344 inst.error = _("shift expression expected");
5345 return FAIL;
5346 }
5347
5348 shift = shift_name->kind;
5349
5350 switch (mode)
5351 {
5352 case NO_SHIFT_RESTRICT:
5353 case SHIFT_IMMEDIATE: break;
5354
5355 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5356 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5357 {
5358 inst.error = _("'LSL' or 'ASR' required");
5359 return FAIL;
5360 }
5361 break;
5362
5363 case SHIFT_LSL_IMMEDIATE:
5364 if (shift != SHIFT_LSL)
5365 {
5366 inst.error = _("'LSL' required");
5367 return FAIL;
5368 }
5369 break;
5370
5371 case SHIFT_ASR_IMMEDIATE:
5372 if (shift != SHIFT_ASR)
5373 {
5374 inst.error = _("'ASR' required");
5375 return FAIL;
5376 }
5377 break;
5378
5379 default: abort ();
5380 }
5381
5382 if (shift != SHIFT_RRX)
5383 {
5384 /* Whitespace can appear here if the next thing is a bare digit. */
5385 skip_whitespace (p);
5386
5387 if (mode == NO_SHIFT_RESTRICT
5388 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5389 {
5390 inst.operands[i].imm = reg;
5391 inst.operands[i].immisreg = 1;
5392 }
5393 else if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
5394 return FAIL;
5395 }
5396 inst.operands[i].shift_kind = shift;
5397 inst.operands[i].shifted = 1;
5398 *str = p;
5399 return SUCCESS;
5400 }
5401
5402 /* Parse a <shifter_operand> for an ARM data processing instruction:
5403
5404 #<immediate>
5405 #<immediate>, <rotate>
5406 <Rm>
5407 <Rm>, <shift>
5408
5409 where <shift> is defined by parse_shift above, and <rotate> is a
5410 multiple of 2 between 0 and 30. Validation of immediate operands
5411 is deferred to md_apply_fix. */
5412
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  /* Register form: <Rm> or <Rm>, <shift>.  */
  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Immediate form: parse the value; range checking of a bare immediate
     is deferred to md_apply_fix via the relocation set below.  */
  if (my_get_expression (&inst.relocs[0].exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.relocs[0].exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* Rotation must be an even amount in [0, 30].  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      /* With an explicit rotation the base constant must be a byte.  */
      if (inst.relocs[0].exp.X_add_number < 0
	  || inst.relocs[0].exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  */
      inst.operands[i].imm = inst.relocs[0].exp.X_add_number | value << 7;
      return SUCCESS;
    }

  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 0;
  return SUCCESS;
}
5472
5473 /* Group relocation information. Each entry in the table contains the
5474 textual name of the relocation as may appear in assembler source
5475 and must end with a colon.
5476 Along with this textual name are the relocation codes to be used if
5477 the corresponding instruction is an ALU instruction (ADD or SUB only),
5478 an LDR, an LDRS, or an LDC. */
5479
struct group_reloc_table_entry
{
  const char *name;	/* Relocation name as written in source, sans colon.  */
  int alu_code;		/* Reloc for ALU instructions (ADD/SUB).  */
  int ldr_code;		/* Reloc for LDR.  */
  int ldrs_code;	/* Reloc for LDRS.  */
  int ldc_code;		/* Reloc for LDC.  */
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC,
  GROUP_MVE
} group_reloc_type;
5498
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    /* A zero code presumably means the relocation is not available for
       that instruction class -- verify against the users of this table.  */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5573
5574 /* Given the address of a pointer pointing to the textual name of a group
5575 relocation as may appear in assembler source, attempt to find its details
5576 in group_reloc_table. The pointer will be updated to the character after
5577 the trailing colon. On failure, FAIL will be returned; SUCCESS
5578 otherwise. On success, *entry will be updated to point at the relevant
5579 group_reloc_table entry. */
5580
5581 static int
5582 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5583 {
5584 unsigned int i;
5585 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5586 {
5587 int length = strlen (group_reloc_table[i].name);
5588
5589 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5590 && (*str)[length] == ':')
5591 {
5592 *out = &group_reloc_table[i];
5593 *str += (length + 1);
5594 return SUCCESS;
5595 }
5596 }
5597
5598 return FAIL;
5599 }
5600
5601 /* Parse a <shifter_operand> for an ARM data processing instruction
5602 (as for parse_shifter_operand) where group relocations are allowed:
5603
5604 #<immediate>
5605 #<immediate>, <rotate>
5606 #:<group_reloc>:<expression>
5607 <Rm>
5608 <Rm>, <shift>
5609
5610 where <group_reloc> is one of the strings defined in group_reloc_table.
5611 The hashes are optional.
5612
5613 Everything else is as for parse_shifter_operand. */
5614
static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
{
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.  */

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
    {
      struct group_reloc_table_entry *entry;

      /* Skip over "#:" or ":".  */
      if ((*str)[0] == '#')
	(*str) += 2;
      else
	(*str)++;

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
	{
	  inst.error = _("unknown group relocation");
	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
	}

      /* We now have the group relocation table entry corresponding to
	 the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.relocs[0].exp, str, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).  */
      inst.relocs[0].type = (bfd_reloc_code_real_type) entry->alu_code;
      gas_assert (inst.relocs[0].type != 0);

      return PARSE_OPERAND_SUCCESS;
    }
  else
    return parse_shifter_operand (str, i) == SUCCESS
	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
}
5656
5657 /* Parse a Neon alignment expression. Information is written to
5658 inst.operands[i]. We assume the initial ':' has been skipped.
5659
5660 align .imm = align << 8, .immisalign=1, .preind=0 */
5661 static parse_operand_result
5662 parse_neon_alignment (char **str, int i)
5663 {
5664 char *p = *str;
5665 expressionS exp;
5666
5667 my_get_expression (&exp, &p, GE_NO_PREFIX);
5668
5669 if (exp.X_op != O_constant)
5670 {
5671 inst.error = _("alignment must be constant");
5672 return PARSE_OPERAND_FAIL;
5673 }
5674
5675 inst.operands[i].imm = exp.X_add_number << 8;
5676 inst.operands[i].immisalign = 1;
5677 /* Alignments are not pre-indexes. */
5678 inst.operands[i].preind = 0;
5679
5680 *str = p;
5681 return PARSE_OPERAND_SUCCESS;
5682 }
5683
5684 /* Parse all forms of an ARM address expression. Information is written
5685 to inst.operands[i] and/or inst.relocs[0].
5686
5687 Preindexed addressing (.preind=1):
5688
5689 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5690 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5691 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5692 .shift_kind=shift .relocs[0].exp=shift_imm
5693
5694 These three may have a trailing ! which causes .writeback to be set also.
5695
5696 Postindexed addressing (.postind=1, .writeback=1):
5697
5698 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5699 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5700 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5701 .shift_kind=shift .relocs[0].exp=shift_imm
5702
5703 Unindexed addressing (.preind=0, .postind=0):
5704
5705 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5706
5707 Other:
5708
5709 [Rn]{!} shorthand for [Rn,#0]{!}
5710 =immediate .isreg=0 .relocs[0].exp=immediate
5711 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5712
5713 It is the caller's responsibility to check for addressing modes not
5714 supported by the instruction, and to set inst.relocs[0].type. */
5715
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      /* No '[' : either "=immediate" or a bare expression (label).  */
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.relocs[0].pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.relocs[0].exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* The base register.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      if (group_type == GROUP_MVE)
	inst.error = BAD_ADDR_MODE;
      else
	inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* [Rn, ...  — pre-indexed addressing.  */
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm ...  — register offset, optionally shifted.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here.  This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  if (inst.operands[i].negative)
	    {
	      /* Back up over the '-' so the expression parser sees the
		 sign itself; .negative is re-derived below for #0.  */
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  The variant chosen depends on
		 which instruction class requested the group reloc.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero code in the table means this group relocation is
		 not defined for this instruction class.  */
	      if (inst.relocs[0].type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;

	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* [Rn:align] — Neon alignment without an index.
	 FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here.  This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* [Rn], #offset or [Rn], +/-Rm — post-indexed addressing.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already.  If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;

	      if (inst.operands[i].negative)
		{
		  /* Back up over the '-' so the expression parser sees
		     the sign itself.  */
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5975
5976 static int
5977 parse_address (char **str, int i)
5978 {
5979 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5980 ? SUCCESS : FAIL;
5981 }
5982
5983 static parse_operand_result
5984 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5985 {
5986 return parse_address_main (str, i, 1, type);
5987 }
5988
5989 /* Parse an operand for a MOVW or MOVT instruction. */
5990 static int
5991 parse_half (char **str)
5992 {
5993 char * p;
5994
5995 p = *str;
5996 skip_past_char (&p, '#');
5997 if (strncasecmp (p, ":lower16:", 9) == 0)
5998 inst.relocs[0].type = BFD_RELOC_ARM_MOVW;
5999 else if (strncasecmp (p, ":upper16:", 9) == 0)
6000 inst.relocs[0].type = BFD_RELOC_ARM_MOVT;
6001
6002 if (inst.relocs[0].type != BFD_RELOC_UNUSED)
6003 {
6004 p += 9;
6005 skip_whitespace (p);
6006 }
6007
6008 if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
6009 return FAIL;
6010
6011 if (inst.relocs[0].type == BFD_RELOC_UNUSED)
6012 {
6013 if (inst.relocs[0].exp.X_op != O_constant)
6014 {
6015 inst.error = _("constant expression expected");
6016 return FAIL;
6017 }
6018 if (inst.relocs[0].exp.X_add_number < 0
6019 || inst.relocs[0].exp.X_add_number > 0xffff)
6020 {
6021 inst.error = _("immediate value out of range");
6022 return FAIL;
6023 }
6024 }
6025 *str = p;
6026 return SUCCESS;
6027 }
6028
6029 /* Miscellaneous. */
6030
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when the operand is the destination of an MSR (write),
   FALSE when it is the source of an MRS (read).  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: look the whole register name up in the v7m PSR table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For names ending in "psr", stop the token at the 'r' so that any
	 trailing suffix is not part of the hash lookup.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the 4-character register name matched above.  */
  p += 4;
 check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  In each
	     accumulator below, 0x20 (for nzcvq) / 0x2 (for g) acts as a
	     "bit repeated" marker that is rejected at the end.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v, q present maps to the PSR_f mask.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      /* The 'g' bit requires the DSP extension.  */
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject repeated bits and partial nzcvq sets (only the full
	     "nzcvq" set, or none of them, is encodable).  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* CPSR/SPSR field suffix (e.g. _cxsf) — look it up.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;	/* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6227
6228 static int
6229 parse_sys_vldr_vstr (char **str)
6230 {
6231 unsigned i;
6232 int val = FAIL;
6233 struct {
6234 const char *name;
6235 int regl;
6236 int regh;
6237 } sysregs[] = {
6238 {"FPSCR", 0x1, 0x0},
6239 {"FPSCR_nzcvqc", 0x2, 0x0},
6240 {"VPR", 0x4, 0x1},
6241 {"P0", 0x5, 0x1},
6242 {"FPCXTNS", 0x6, 0x1},
6243 {"FPCXTS", 0x7, 0x1}
6244 };
6245 char *op_end = strchr (*str, ',');
6246 size_t op_strlen = op_end - *str;
6247
6248 for (i = 0; i < sizeof (sysregs) / sizeof (sysregs[0]); i++)
6249 {
6250 if (!strncmp (*str, sysregs[i].name, op_strlen))
6251 {
6252 val = sysregs[i].regl | (sysregs[i].regh << 3);
6253 *str = op_end;
6254 break;
6255 }
6256 }
6257
6258 return val;
6259 }
6260
6261 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6262 value suitable for splatting into the AIF field of the instruction. */
6263
6264 static int
6265 parse_cps_flags (char **str)
6266 {
6267 int val = 0;
6268 int saw_a_flag = 0;
6269 char *s = *str;
6270
6271 for (;;)
6272 switch (*s++)
6273 {
6274 case '\0': case ',':
6275 goto done;
6276
6277 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6278 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6279 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6280
6281 default:
6282 inst.error = _("unrecognized CPS flag");
6283 return FAIL;
6284 }
6285
6286 done:
6287 if (saw_a_flag == 0)
6288 {
6289 inst.error = _("missing CPS flags");
6290 return FAIL;
6291 }
6292
6293 *str = s - 1;
6294 return val;
6295 }
6296
6297 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6298 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6299
6300 static int
6301 parse_endian_specifier (char **str)
6302 {
6303 int little_endian;
6304 char *s = *str;
6305
6306 if (strncasecmp (s, "BE", 2))
6307 little_endian = 0;
6308 else if (strncasecmp (s, "LE", 2))
6309 little_endian = 1;
6310 else
6311 {
6312 inst.error = _("valid endian specifiers are be or le");
6313 return FAIL;
6314 }
6315
6316 if (ISALNUM (s[2]) || s[2] == '_')
6317 {
6318 inst.error = _("valid endian specifiers are be or le");
6319 return FAIL;
6320 }
6321
6322 *str = s + 2;
6323 return little_endian;
6324 }
6325
6326 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6327 value suitable for poking into the rotate field of an sxt or sxta
6328 instruction, or FAIL on error. */
6329
6330 static int
6331 parse_ror (char **str)
6332 {
6333 int rot;
6334 char *s = *str;
6335
6336 if (strncasecmp (s, "ROR", 3) == 0)
6337 s += 3;
6338 else
6339 {
6340 inst.error = _("missing rotation field after comma");
6341 return FAIL;
6342 }
6343
6344 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6345 return FAIL;
6346
6347 switch (rot)
6348 {
6349 case 0: *str = s; return 0x0;
6350 case 8: *str = s; return 0x1;
6351 case 16: *str = s; return 0x2;
6352 case 24: *str = s; return 0x3;
6353
6354 default:
6355 inst.error = _("rotation can only be 0, 8, 16, or 24");
6356 return FAIL;
6357 }
6358 }
6359
6360 /* Parse a conditional code (from conds[] below). The value returned is in the
6361 range 0 .. 14, or FAIL. */
6362 static int
6363 parse_cond (char **str)
6364 {
6365 char *q;
6366 const struct asm_cond *c;
6367 int n;
6368 /* Condition codes are always 2 characters, so matching up to
6369 3 characters is sufficient. */
6370 char cond[3];
6371
6372 q = *str;
6373 n = 0;
6374 while (ISALPHA (*q) && n < 3)
6375 {
6376 cond[n] = TOLOWER (*q);
6377 q++;
6378 n++;
6379 }
6380
6381 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6382 if (!c)
6383 {
6384 inst.error = _("condition required");
6385 return FAIL;
6386 }
6387
6388 *str = q;
6389 return c->value;
6390 }
6391
6392 /* Parse an option for a barrier instruction. Returns the encoding for the
6393 option, or FAIL. */
6394 static int
6395 parse_barrier (char **str)
6396 {
6397 char *p, *q;
6398 const struct asm_barrier_opt *o;
6399
6400 p = q = *str;
6401 while (ISALPHA (*q))
6402 q++;
6403
6404 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6405 q - p);
6406 if (!o)
6407 return FAIL;
6408
6409 if (!mark_feature_used (&o->arch))
6410 return FAIL;
6411
6412 *str = q;
6413 return o->value;
6414 }
6415
6416 /* Parse the operands of a table branch instruction. Similar to a memory
6417 operand. */
6418 static int
6419 parse_tb (char **str)
6420 {
6421 char * p = *str;
6422 int reg;
6423
6424 if (skip_past_char (&p, '[') == FAIL)
6425 {
6426 inst.error = _("'[' expected");
6427 return FAIL;
6428 }
6429
6430 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6431 {
6432 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6433 return FAIL;
6434 }
6435 inst.operands[0].reg = reg;
6436
6437 if (skip_past_comma (&p) == FAIL)
6438 {
6439 inst.error = _("',' expected");
6440 return FAIL;
6441 }
6442
6443 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6444 {
6445 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6446 return FAIL;
6447 }
6448 inst.operands[0].imm = reg;
6449
6450 if (skip_past_comma (&p) == SUCCESS)
6451 {
6452 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6453 return FAIL;
6454 if (inst.relocs[0].exp.X_add_number != 1)
6455 {
6456 inst.error = _("invalid shift");
6457 return FAIL;
6458 }
6459 inst.operands[0].shifted = 1;
6460 }
6461
6462 if (skip_past_char (&p, ']') == FAIL)
6463 {
6464 inst.error = _("']' expected");
6465 return FAIL;
6466 }
6467 *str = p;
6468 return SUCCESS;
6469 }
6470
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* The first operand disambiguates most cases: scalar, vector/VFP
     register, or ARM core register.  */
  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D register first operand: a second core register must
		 follow (case 5).  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two core registers follow the S-register pair.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: core register destination.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second S register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6693
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM matcher code occupies the
   low 16 bits of the combined value, the Thumb one the high 16.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
6698
6699 /* Matcher codes for parse_operands. */
6700 enum operand_parse_code
6701 {
6702 OP_stop, /* end of line */
6703
6704 OP_RR, /* ARM register */
6705 OP_RRnpc, /* ARM register, not r15 */
6706 OP_RRnpcsp, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6707 OP_RRnpcb, /* ARM register, not r15, in square brackets */
6708 OP_RRnpctw, /* ARM register, not r15 in Thumb-state or with writeback,
6709 optional trailing ! */
6710 OP_RRw, /* ARM register, not r15, optional trailing ! */
6711 OP_RCP, /* Coprocessor number */
6712 OP_RCN, /* Coprocessor register */
6713 OP_RF, /* FPA register */
6714 OP_RVS, /* VFP single precision register */
6715 OP_RVD, /* VFP double precision register (0..15) */
6716 OP_RND, /* Neon double precision register (0..31) */
6717 OP_RNDMQ, /* Neon double precision (0..31) or MVE vector register. */
6718 OP_RNDMQR, /* Neon double precision (0..31), MVE vector or ARM register.
6719 */
6720 OP_RNQ, /* Neon quad precision register */
6721 OP_RNQMQ, /* Neon quad or MVE vector register. */
6722 OP_RVSD, /* VFP single or double precision register */
6723 OP_RNSD, /* Neon single or double precision register */
6724 OP_RNDQ, /* Neon double or quad precision register */
6725 OP_RNDQMQ, /* Neon double, quad or MVE vector register. */
6726 OP_RNSDQ, /* Neon single, double or quad precision register */
6727 OP_RNSC, /* Neon scalar D[X] */
6728 OP_RVC, /* VFP control register */
6729 OP_RMF, /* Maverick F register */
6730 OP_RMD, /* Maverick D register */
6731 OP_RMFX, /* Maverick FX register */
6732 OP_RMDX, /* Maverick DX register */
6733 OP_RMAX, /* Maverick AX register */
6734 OP_RMDS, /* Maverick DSPSC register */
6735 OP_RIWR, /* iWMMXt wR register */
6736 OP_RIWC, /* iWMMXt wC register */
6737 OP_RIWG, /* iWMMXt wCG register */
6738 OP_RXA, /* XScale accumulator register */
6739
6740 OP_RNSDQMQ, /* Neon single, double or quad register or MVE vector register
6741 */
6742 OP_RNSDQMQR, /* Neon single, double or quad register, MVE vector register or
6743 GPR (no SP/SP) */
6744 OP_RMQ, /* MVE vector register. */
6745
6746 /* New operands for Armv8.1-M Mainline. */
6747 OP_LR, /* ARM LR register */
6748 OP_RRe, /* ARM register, only even numbered. */
6749 OP_RRo, /* ARM register, only odd numbered, not r13 or r15. */
6750 OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */
6751
6752 OP_REGLST, /* ARM register list */
6753 OP_CLRMLST, /* CLRM register list */
6754 OP_VRSLST, /* VFP single-precision register list */
6755 OP_VRDLST, /* VFP double-precision register list */
6756 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
6757 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
6758 OP_NSTRLST, /* Neon element/structure list */
6759 OP_VRSDVLST, /* VFP single or double-precision register list and VPR */
6760 OP_MSTRLST2, /* MVE vector list with two elements. */
6761 OP_MSTRLST4, /* MVE vector list with four elements. */
6762
6763 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
6764 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
6765 OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero. */
6766 OP_RR_RNSC, /* ARM reg or Neon scalar. */
6767 OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar. */
6768 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
6769 OP_RNSDQ_RNSC_MQ, /* Vector S, D or Q reg, Neon scalar or MVE vector register.
6770 */
6771 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
6772 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
6773 OP_VMOV, /* Neon VMOV operands. */
6774 OP_RNDQ_Ibig, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6775 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
6776 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6777 OP_VLDR, /* VLDR operand. */
6778
6779 OP_I0, /* immediate zero */
6780 OP_I7, /* immediate value 0 .. 7 */
6781 OP_I15, /* 0 .. 15 */
6782 OP_I16, /* 1 .. 16 */
6783 OP_I16z, /* 0 .. 16 */
6784 OP_I31, /* 0 .. 31 */
6785 OP_I31w, /* 0 .. 31, optional trailing ! */
6786 OP_I32, /* 1 .. 32 */
6787 OP_I32z, /* 0 .. 32 */
6788 OP_I63, /* 0 .. 63 */
6789 OP_I63s, /* -64 .. 63 */
6790 OP_I64, /* 1 .. 64 */
6791 OP_I64z, /* 0 .. 64 */
6792 OP_I255, /* 0 .. 255 */
6793
6794 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
6795 OP_I7b, /* 0 .. 7 */
6796 OP_I15b, /* 0 .. 15 */
6797 OP_I31b, /* 0 .. 31 */
6798
6799 OP_SH, /* shifter operand */
6800 OP_SHG, /* shifter operand with possible group relocation */
6801 OP_ADDR, /* Memory address expression (any mode) */
6802 OP_ADDRMVE, /* Memory address expression for MVE's VSTR/VLDR. */
6803 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
6804 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
6805 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
6806 OP_EXP, /* arbitrary expression */
6807 OP_EXPi, /* same, with optional immediate prefix */
6808 OP_EXPr, /* same, with optional relocation suffix */
6809 OP_EXPs, /* same, with optional non-first operand relocation suffix */
6810 OP_HALF, /* 0 .. 65535 or low/high reloc. */
6811 OP_IROT1, /* VCADD rotate immediate: 90, 270. */
6812 OP_IROT2, /* VCMLA rotate immediate: 0, 90, 180, 270. */
6813
6814 OP_CPSF, /* CPS flags */
6815 OP_ENDI, /* Endianness specifier */
6816 OP_wPSR, /* CPSR/SPSR/APSR mask for msr (writing). */
6817 OP_rPSR, /* CPSR/SPSR/APSR mask for msr (reading). */
6818 OP_COND, /* conditional code */
6819 OP_TB, /* Table branch. */
6820
6821 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
6822
6823 OP_RRnpc_I0, /* ARM register or literal 0 */
6824 OP_RR_EXr, /* ARM register or expression with opt. reloc stuff. */
6825 OP_RR_EXi, /* ARM register or expression with imm prefix */
6826 OP_RF_IF, /* FPA register or immediate */
6827 OP_RIWR_RIWC, /* iWMMXt R or C reg */
6828 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
6829
6830 /* Optional operands. */
6831 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
6832 OP_oI31b, /* 0 .. 31 */
6833 OP_oI32b, /* 1 .. 32 */
6834 OP_oI32z, /* 0 .. 32 */
6835 OP_oIffffb, /* 0 .. 65535 */
6836 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
6837
6838 OP_oRR, /* ARM register */
6839 OP_oLR, /* ARM LR register */
6840 OP_oRRnpc, /* ARM register, not the PC */
6841 OP_oRRnpcsp, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6842 OP_oRRw, /* ARM register, not r15, optional trailing ! */
6843 OP_oRND, /* Optional Neon double precision register */
6844 OP_oRNQ, /* Optional Neon quad precision register */
6845 OP_oRNDQMQ, /* Optional Neon double, quad or MVE vector register. */
6846 OP_oRNDQ, /* Optional Neon double or quad precision register */
6847 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
6848 OP_oRNSDQMQ, /* Optional single, double or quad register or MVE vector
6849 register. */
6850 OP_oSHll, /* LSL immediate */
6851 OP_oSHar, /* ASR immediate */
6852 OP_oSHllar, /* LSL or ASR immediate */
6853 OP_oROR, /* ROR 0/8/16/24 */
6854 OP_oBARRIER_I15, /* Option argument for a barrier instruction. */
6855
6856 /* Some pre-defined mixed (ARM/THUMB) operands. */
6857 OP_RR_npcsp = MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
6858 OP_RRnpc_npcsp = MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
6859 OP_oRRnpc_npcsp = MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
6860
6861 OP_FIRST_OPTIONAL = OP_oI7b
6862 };
6863
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.  STR is the operand text, PATTERN is the
   OP_stop-terminated list of operand codes to match against it, and
   THUMB selects the Thumb half of mixed ARM/Thumb pattern entries.  */
static int
parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
{
  unsigned const int *upat = pattern;
  /* Where to resume parsing if a trailing optional operand fails; NULL
     when no optional operand is pending.  */
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val = 0, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;
  unsigned int op_parse_code;
  bfd_boolean partial_match;

  /* The po_* helpers below are macros, not functions, because they
     operate on the locals STR, I and VAL and jump to the failure /
     bad_args labels at the bottom of the parsing loop.  */

  /* Consume the literal character CHR, or report bad arguments.  */
#define po_char_or_fail(chr)			\
  do						\
    {						\
      if (skip_past_char (&str, chr) == FAIL)	\
	goto bad_args;				\
    }						\
  while (0)

  /* Parse a register of type REGTYPE into operand I, or fail with a
     "<regtype> expected" diagnostic.  */
#define po_reg_or_fail(regtype)					\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	{							\
	  first_error (_(reg_expected_msgs[regtype]));		\
	  goto failure;						\
	}							\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\
    }								\
  while (0)

  /* Like po_reg_or_fail, but on mismatch jump to LABEL (used to chain
     several alternative operand forms together).  */
#define po_reg_or_goto(regtype, label)				\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	goto label;						\
								\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\
    }								\
  while (0)

  /* Parse an immediate in [MIN, MAX] (POPT: '#' prefix optional).  */
#define po_imm_or_fail(min, max, popt)				\
  do								\
    {								\
      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
	goto failure;						\
      inst.operands[i].imm = val;				\
    }								\
  while (0)

  /* Parse a Neon scalar with element size ELSZ, else jump to LABEL.  */
#define po_scalar_or_goto(elsz, label)					\
  do									\
    {									\
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
      if (val == FAIL)							\
	goto label;							\
      inst.operands[i].reg = val;					\
      inst.operands[i].isscalar = 1;					\
    }									\
  while (0)

  /* Fail if EXPR (a parse call returning nonzero on error) fails.  */
#define po_misc_or_fail(expr)			\
  do						\
    {						\
      if (expr)					\
	goto failure;				\
    }						\
  while (0)

  /* As above, but PARSE_OPERAND_FAIL_NO_BACKTRACK also cancels any
     pending optional-operand backtrack (the error is authoritative).  */
#define po_misc_or_fail_no_backtrack(expr)		\
  do							\
    {							\
      result = expr;					\
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
	backtrack_pos = 0;				\
      if (result != PARSE_OPERAND_SUCCESS)		\
	goto failure;					\
    }							\
  while (0)

  /* Parse a barrier option name, falling back to an immediate when the
     text does not even look like a name.  */
#define po_barrier_or_imm(str)				   \
  do							   \
    {							   \
      val = parse_barrier (&str);			   \
      if (val == FAIL && ! ISALPHA (*str))		   \
	goto immediate;					   \
      if (val == FAIL					   \
	  /* ISB can only take SY as an option.  */	   \
	  || ((inst.instruction & 0xf0) == 0x60		   \
	       && val != 0xf))				   \
	{						   \
	   inst.error = _("invalid barrier type");	   \
	   backtrack_pos = 0;				   \
	   goto failure;				   \
	}						   \
    }							   \
  while (0)

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      op_parse_code = upat[i];
      /* Mixed patterns pack the Thumb code in the upper 16 bits and the
	 ARM code in the lower 16 bits; select the relevant half.  */
      if (op_parse_code >= 1<<16)
	op_parse_code = thumb ? (op_parse_code >> 16)
				: (op_parse_code & ((1<<16)-1));

      if (op_parse_code >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  gas_assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      /* Operands are comma-separated; the first comma is only required
	 once a preceding operand has actually been parsed.  */
      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (op_parse_code)
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_oRRnpcsp:
	case OP_RRnpc:
	case OP_RRnpcsp:
	case OP_oRR:
	case OP_RRe:
	case OP_RRo:
	case OP_LR:
	case OP_oLR:
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_oRND:
	case OP_RNDMQR:
	  po_reg_or_goto (REG_TYPE_RN, try_rndmq);
	  break;
	try_rndmq:
	case OP_RNDMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rnd);
	  break;
	try_rnd:
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  break;
	  /* Also accept generic coprocessor regs for unknown registers.  */
	coproc_reg:
	  po_reg_or_fail (REG_TYPE_CN);
	  break;
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_oRNQ:
	case OP_RNQMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_nq);
	  break;
	try_nq:
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
	case OP_RNSD:  po_reg_or_fail (REG_TYPE_NSD);     break;
	case OP_oRNDQMQ:
	case OP_RNDQMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rndq);
	  break;
	try_rndq:
	case OP_oRNDQ:
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
	case OP_oRNSDQ:
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;
	case OP_RNSDQMQR:
	  po_reg_or_goto (REG_TYPE_RN, try_mq);
	  break;
	try_mq:
	case OP_oRNSDQMQ:
	case OP_RNSDQMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_nsdq2);
	  break;
	try_nsdq2:
	  po_reg_or_fail (REG_TYPE_NSDQ);
	  inst.error = 0;
	  break;
	case OP_RMQ:
	  po_reg_or_fail (REG_TYPE_MQ);
	  break;
	/* Neon scalar.  Using an element size of 8 means that some invalid
	   scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);    break;

	case OP_RNDQ_I0:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	    break;
	    try_imm0:
	    po_imm_or_fail (0, 0, TRUE);
	  }
	  break;

	case OP_RVSD_I0:
	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
	  break;

	case OP_RSVD_FI0:
	  {
	    po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
	    break;
	    try_ifimm0:
	    if (parse_ifimm_zero (&str))
	      inst.operands[i].imm = 0;
	    else
	    {
	      inst.error
		= _("only floating point zero is allowed as immediate value");
	      goto failure;
	    }
	  }
	  break;

	case OP_RR_RNSC:
	  {
	    po_scalar_or_goto (8, try_rr);
	    break;
	    try_rr:
	    po_reg_or_fail (REG_TYPE_RN);
	  }
	  break;

	case OP_RNSDQ_RNSC_MQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rnsdq_rnsc);
	  break;
	try_rnsdq_rnsc:
	case OP_RNSDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_nsdq);
	    break;
	    try_nsdq:
	    po_reg_or_fail (REG_TYPE_NSDQ);
	  }
	  break;

	case OP_RNSD_RNSC:
	  {
	    po_scalar_or_goto (8, try_s_scalar);
	    break;
	    try_s_scalar:
	    po_scalar_or_goto (4, try_nsd);
	    break;
	    try_nsd:
	    po_reg_or_fail (REG_TYPE_NSD);
	  }
	  break;

	case OP_RNDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_ndq);
	    break;
	    try_ndq:
	    po_reg_or_fail (REG_TYPE_NDQ);
	  }
	  break;

	case OP_RND_RNSC:
	  {
	    po_scalar_or_goto (8, try_vfd);
	    break;
	    try_vfd:
	    po_reg_or_fail (REG_TYPE_VFD);
	  }
	  break;

	case OP_VMOV:
	  /* WARNING: parse_neon_mov can move the operand counter, i.  If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
	  break;

	case OP_RNDQ_Ibig:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
	    break;
	    try_immbig:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
		== FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I63b:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	    break;
	    try_shimm:
	    po_imm_or_fail (0, 63, TRUE);
	  }
	  break;

	case OP_RRnpcb:
	  po_char_or_fail ('[');
	  po_reg_or_fail  (REG_TYPE_RN);
	  po_char_or_fail (']');
	  break;

	case OP_RRnpctw:
	case OP_RRw:
	case OP_oRRw:
	  po_reg_or_fail (REG_TYPE_RN);
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;
	  break;

	  /* Immediates */
	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,	  16, FALSE);	break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,	  63, FALSE);	break;
	case OP_I64:	 po_imm_or_fail (  1,	  64, FALSE);	break;
	case OP_I64z:	 po_imm_or_fail (  0,	  64, FALSE);	break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_oI7b:
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_oI31b:
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:	 po_imm_or_fail (  1,	  32, TRUE);	break;
	case OP_oI32z:	 po_imm_or_fail (  0,	  32, TRUE);	break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	  /* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');
	  break;

	case OP_I31w:
	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  {
	    char *s = str;
	    while (*s && *s != ',')
	      s++;
	    if (s[-1] == '!')
	      {
		s[-1] = '\0';
		inst.operands[i].writeback = 1;
	      }
	    po_imm_or_fail (0, 31, TRUE);
	    /* Step past the zapped '!' so the next operand parse does not
	       see the NUL we wrote over it.  */
	    if (str == s - 1)
	      str = s;
	  }
	  break;

	  /* Expressions */
	case OP_EXPi:	EXPi:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_OPT_PREFIX));
	  break;

	case OP_EXP:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_NO_PREFIX));
	  break;

	case OP_EXPr:	EXPr:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_NO_PREFIX));
	  if (inst.relocs[0].exp.X_op == O_symbol)
	    {
	      val = parse_reloc (&str);
	      if (val == -1)
		{
		  inst.error = _("unrecognized relocation suffix");
		  goto failure;
		}
	      else if (val != BFD_RELOC_UNUSED)
		{
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;
		}
	    }
	  break;

	case OP_EXPs:
	  po_misc_or_fail (my_get_expression (&inst.relocs[i].exp, &str,
					      GE_NO_PREFIX));
	  if (inst.relocs[i].exp.X_op == O_symbol)
	    {
	      inst.operands[i].hasreloc = 1;
	    }
	  else if (inst.relocs[i].exp.X_op == O_constant)
	    {
	      inst.operands[i].imm = inst.relocs[i].exp.X_add_number;
	      inst.operands[i].hasreloc = 0;
	    }
	  break;

	  /* Operand for MOVW or MOVT.  */
	case OP_HALF:
	  po_misc_or_fail (parse_half (&str));
	  break;

	  /* Register or expression.  */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate.  */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
	IF:
	  if (!is_immediate_prefix (*str))
	    goto bad_args;
	  str++;
	  val = parse_fpa_immediate (&str);
	  if (val == FAIL)
	    goto failure;
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  break;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;

	  /* Two kinds of register.  */
	case OP_RIWR_RIWC:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt data or control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
	  }
	  break;

	case OP_RIWC_RIWG:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;
	  }
	  break;

	  /* Misc */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER_I15:
	  po_barrier_or_imm (str); break;
	  immediate:
	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
	    goto failure;
	  break;

	case OP_wPSR:
	case OP_rPSR:
	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
	    {
	      inst.error = _("Banked registers are not available with this "
			     "architecture.");
	      goto failure;
	    }
	  break;
	  try_psr:
	  val = parse_psr (&str, op_parse_code == OP_wPSR);
	  break;

	case OP_VLDR:
	  po_reg_or_goto (REG_TYPE_VFSD, try_sysreg);
	  break;
	  try_sysreg:
	  val = parse_sys_vldr_vstr (&str);
	  break;

	case OP_APSR_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  break;
	  try_apsr:
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
	     instruction).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
	    {
	      /* FOUND accumulates one bit per flag letter; any repeated or
		 unknown letter forces it to 16 (an impossible mask).  */
	      unsigned found = 0;
	      str += 5;
	      while (found < 15)
		switch (*str++)
		  {
		  case 'c': found = (found & 1) ? 16 : found | 1; break;
		  case 'n': found = (found & 2) ? 16 : found | 2; break;
		  case 'z': found = (found & 4) ? 16 : found | 4; break;
		  case 'v': found = (found & 8) ? 16 : found | 8; break;
		  default: found = 16;
		  }
	      if (found != 15)
		goto failure;
	      inst.operands[i].isvec = 1;
	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
	      inst.operands[i].reg = REG_PC;
	    }
	  else
	    goto failure;
	  break;

	case OP_TB:
	  po_misc_or_fail (parse_tb (&str));
	  break;

	  /* Register lists.  */
	case OP_REGLST:
	  val = parse_reg_list (&str, REGLIST_RN);
	  if (*str == '^')
	    {
	      inst.operands[i].writeback = 1;
	      str++;
	    }
	  break;

	case OP_CLRMLST:
	  val = parse_reg_list (&str, REGLIST_CLRM);
	  break;

	case OP_VRSLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S,
				    &partial_match);
	  break;

	case OP_VRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D,
				    &partial_match);
	  break;

	case OP_VRSDLST:
	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D, &partial_match);
	  if (val == FAIL)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S, &partial_match);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_VRSDVLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_VFP_D_VPR, &partial_match);
	  if (val == FAIL && !partial_match)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S_VPR, &partial_match);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_NRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D, &partial_match);
	  break;

	case OP_MSTRLST4:
	case OP_MSTRLST2:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   1, &inst.operands[i].vectype);
	  /* VST2/VLD2 need exactly 2 registers, VST4/VLD4 exactly 4.  */
	  if (val != (((op_parse_code == OP_MSTRLST2) ? 3 : 7) << 5 | 0xe))
	    goto failure;
	  break;
	case OP_NSTRLST:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   0, &inst.operands[i].vectype);
	  break;

	  /* Addressing modes */
	case OP_ADDRMVE:
	  po_misc_or_fail (parse_address_group_reloc (&str, i, GROUP_MVE));
	  break;

	case OP_ADDR:
	  po_misc_or_fail (parse_address (&str, i));
	  break;

	case OP_ADDRGLDR:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));
	  break;

	case OP_ADDRGLDRS:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));
	  break;

	case OP_ADDRGLDC:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));
	  break;

	case OP_SH:
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  break;

	case OP_SHG:
	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));
	  break;

	case OP_oSHll:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  break;

	case OP_oSHar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  break;

	case OP_oSHllar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
	  break;

	default:
	  as_fatal (_("unhandled operand code %d"), op_parse_code);
	}

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.  */
      switch (op_parse_code)
	{
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_RRnpcb:
	case OP_RRw:
	case OP_oRRw:
	case OP_RRnpc_I0:
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	case OP_oRRnpcsp:
	case OP_RRnpcsp:
	  if (inst.operands[i].isreg)
	    {
	      if (inst.operands[i].reg == REG_PC)
		inst.error = BAD_PC;
	      else if (inst.operands[i].reg == REG_SP
		       /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
			  relaxed since ARMv8-A.  */
		       && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		{
		  gas_assert (thumb);
		  inst.error = BAD_SP;
		}
	    }
	  break;

	case OP_RRnpctw:
	  if (inst.operands[i].isreg
	      && inst.operands[i].reg == REG_PC
	      && (inst.operands[i].writeback || thumb))
	    inst.error = BAD_PC;
	  break;

	case OP_VLDR:
	  if (inst.operands[i].isreg)
	    break;
	/* fall through.  */
	case OP_CPSF:
	case OP_ENDI:
	case OP_oROR:
	case OP_wPSR:
	case OP_rPSR:
	case OP_COND:
	case OP_oBARRIER_I15:
	case OP_REGLST:
	case OP_CLRMLST:
	case OP_VRSLST:
	case OP_VRDLST:
	case OP_VRSDLST:
	case OP_VRSDVLST:
	case OP_NRDLST:
	case OP_NSTRLST:
	case OP_MSTRLST2:
	case OP_MSTRLST4:
	  if (val == FAIL)
	    goto failure;
	  inst.operands[i].imm = val;
	  break;

	case OP_LR:
	case OP_oLR:
	  if (inst.operands[i].reg != REG_LR)
	    inst.error = _("operand must be LR register");
	  break;

	case OP_RRe:
	  if (inst.operands[i].isreg
	      && (inst.operands[i].reg & 0x00000001) != 0)
	    inst.error = BAD_ODD;
	  break;

	case OP_RRo:
	  if (inst.operands[i].isreg)
	    {
	      if ((inst.operands[i].reg & 0x00000001) != 1)
		inst.error = BAD_EVEN;
	      else if (inst.operands[i].reg == REG_SP)
		as_tsktsk (MVE_BAD_SP);
	      else if (inst.operands[i].reg == REG_PC)
		inst.error = BAD_PC;
	    }
	  break;

	default:
	  break;
	}

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;

    failure:
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = BAD_SYNTAX;
	  return FAIL;
	}

      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = BAD_SYNTAX;
	  return FAIL;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
      backtrack_pos = 0;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}
7683
/* Tear down the parse_operands helper macros; they reference locals of
   that function and must not leak into later code.  The previous list
   undefined "po_scalar_or_fail", a name that was never defined -- the
   macro actually defined above is po_scalar_or_goto.  */
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_goto
#undef po_barrier_or_imm
7690
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and return from the
   enclosing (void) encoding function.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7702
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.

   Sets inst.error and returns from the enclosing (void) function on
   rejection.  */
#define reject_bad_reg(reg)					\
  do								\
   if (reg == REG_PC)						\
     {								\
       inst.error = BAD_PC;					\
       return;							\
     }								\
   else if (reg == REG_SP					\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))	\
     {								\
       inst.error = BAD_SP;					\
       return;							\
     }								\
  while (0)
7723
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  Only warns when -mwarn-deprecated is in effect;
   does not reject the operand.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7731
7732 /* Functions for operand encoding. ARM, then Thumb. */
7733
/* Rotate the 32-bit value V left by N bits.  The "& 31" masks keep both
   shift counts in range (no undefined behaviour) for any N mod 32,
   including N == 0.  Arguments are fully parenthesized so that operands
   with lower-precedence operators (e.g. rotate_left (a | b, n)) expand
   correctly; the old definition left V and N bare.  */
#define rotate_left(v, n) ((v) << ((n) & 31) | (v) >> ((32 - (n)) & 31))
7735
7736 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7737
7738 The only binary encoding difference is the Coprocessor number. Coprocessor
7739 9 is used for half-precision calculations or conversions. The format of the
7740 instruction is the same as the equivalent Coprocessor 10 instruction that
7741 exists for Single-Precision operation. */
7742
7743 static void
7744 do_scalar_fp16_v82_encode (void)
7745 {
7746 if (inst.cond < COND_ALWAYS)
7747 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7748 " the behaviour is UNPREDICTABLE"));
7749 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
7750 _(BAD_FP16));
7751
7752 inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
7753 mark_feature_used (&arm_ext_fp16);
7754 }
7755
7756 /* If VAL can be encoded in the immediate field of an ARM instruction,
7757 return the encoded form. Otherwise, return FAIL. */
7758
7759 static unsigned int
7760 encode_arm_immediate (unsigned int val)
7761 {
7762 unsigned int a, i;
7763
7764 if (val <= 0xff)
7765 return val;
7766
7767 for (i = 2; i < 32; i += 2)
7768 if ((a = rotate_left (val, i)) <= 0xff)
7769 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7770
7771 return FAIL;
7772 }
7773
7774 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7775 return the encoded form. Otherwise, return FAIL. */
7776 static unsigned int
7777 encode_thumb32_immediate (unsigned int val)
7778 {
7779 unsigned int a, i;
7780
7781 if (val <= 0xff)
7782 return val;
7783
7784 for (i = 1; i <= 24; i++)
7785 {
7786 a = val >> i;
7787 if ((val & ~(0xff << i)) == 0)
7788 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7789 }
7790
7791 a = val & 0xff;
7792 if (val == ((a << 16) | a))
7793 return 0x100 | a;
7794 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7795 return 0x300 | a;
7796
7797 a = val & 0xff00;
7798 if (val == ((a << 16) | a))
7799 return 0x200 | (a >> 8);
7800
7801 return FAIL;
7802 }
7803 /* Encode a VFP SP or DP register number into inst.instruction. */
7804
7805 static void
7806 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
7807 {
7808 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
7809 && reg > 15)
7810 {
7811 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7812 {
7813 if (thumb_mode)
7814 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7815 fpu_vfp_ext_d32);
7816 else
7817 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7818 fpu_vfp_ext_d32);
7819 }
7820 else
7821 {
7822 first_error (_("D register out of range for selected VFP version"));
7823 return;
7824 }
7825 }
7826
7827 switch (pos)
7828 {
7829 case VFP_REG_Sd:
7830 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7831 break;
7832
7833 case VFP_REG_Sn:
7834 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7835 break;
7836
7837 case VFP_REG_Sm:
7838 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7839 break;
7840
7841 case VFP_REG_Dd:
7842 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7843 break;
7844
7845 case VFP_REG_Dn:
7846 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7847 break;
7848
7849 case VFP_REG_Dm:
7850 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7851 break;
7852
7853 default:
7854 abort ();
7855 }
7856 }
7857
7858 /* Encode a <shift> in an ARM-format instruction. The immediate,
7859 if any, is handled by md_apply_fix. */
7860 static void
7861 encode_arm_shift (int i)
7862 {
7863 /* register-shifted register. */
7864 if (inst.operands[i].immisreg)
7865 {
7866 int op_index;
7867 for (op_index = 0; op_index <= i; ++op_index)
7868 {
7869 /* Check the operand only when it's presented. In pre-UAL syntax,
7870 if the destination register is the same as the first operand, two
7871 register form of the instruction can be used. */
7872 if (inst.operands[op_index].present && inst.operands[op_index].isreg
7873 && inst.operands[op_index].reg == REG_PC)
7874 as_warn (UNPRED_REG ("r15"));
7875 }
7876
7877 if (inst.operands[i].imm == REG_PC)
7878 as_warn (UNPRED_REG ("r15"));
7879 }
7880
7881 if (inst.operands[i].shift_kind == SHIFT_RRX)
7882 inst.instruction |= SHIFT_ROR << 5;
7883 else
7884 {
7885 inst.instruction |= inst.operands[i].shift_kind << 5;
7886 if (inst.operands[i].immisreg)
7887 {
7888 inst.instruction |= SHIFT_BY_REG;
7889 inst.instruction |= inst.operands[i].imm << 8;
7890 }
7891 else
7892 inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
7893 }
7894 }
7895
7896 static void
7897 encode_arm_shifter_operand (int i)
7898 {
7899 if (inst.operands[i].isreg)
7900 {
7901 inst.instruction |= inst.operands[i].reg;
7902 encode_arm_shift (i);
7903 }
7904 else
7905 {
7906 inst.instruction |= INST_IMMEDIATE;
7907 if (inst.relocs[0].type != BFD_RELOC_ARM_IMMEDIATE)
7908 inst.instruction |= inst.operands[i].imm;
7909 }
7910 }
7911
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encode the base register and indexing mode of operand I into
   inst.instruction.  IS_T marks the user-mode (LDRT/STRT-style)
   variants, which only permit post-indexed addressing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register goes in the Rn field, bits [19:16].  */
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* Post-indexed T-form instructions set W to select user-mode
	 access; plain post-indexed forms always write back.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* When the base will be written back, warn if it is the same register
     as Rd/Rt (compares the Rn field [19:16] with the Rd field [15:12]);
     the transfer value is then unpredictable.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7954
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  /* Emit the base register and P/W bits; may set inst.error.  */
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm {, shift}].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    /* RRX is encoded as ROR with a zero shift amount.  */
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.relocs[0] */
    {
      if (is_pc && !inst.relocs[0].pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
8014
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifted-register offset form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;  /* Rm in bits [3:0].  */
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.relocs[0] */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.relocs[0].pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
8058
8059 /* Write immediate bits [7:0] to the following locations:
8060
8061 |28/24|23 19|18 16|15 4|3 0|
8062 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
8063
8064 This function is used by VMOV/VMVN/VORR/VBIC. */
8065
8066 static void
8067 neon_write_immbits (unsigned immbits)
8068 {
8069 inst.instruction |= immbits & 0xf;
8070 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
8071 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
8072 }
8073
/* Invert the low-order SIZE bits of the 64-bit value XHI:XLO, leaving
   all higher bits zero.  Either pointer may be NULL, in which case that
   half is treated as zero and not written back.  SIZE must be 8, 16,
   32 or 64.  */
static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  if (size == 8)
    lo = (~lo) & 0xff;
  else if (size == 16)
    lo = (~lo) & 0xffff;
  else if (size == 32)
    lo = (~lo) & 0xffffffff;
  else if (size == 64)
    {
      /* Both 32-bit halves are inverted.  */
      lo = (~lo) & 0xffffffff;
      hi = (~hi) & 0xffffffff;
    }
  else
    abort ();

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
8110
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D — i.e. every byte is either 0x00 or 0xff.  */
static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;

      if (field != 0 && field != 0xff)
	return 0;
    }

  return 1;
}
8122
/* For an immediate of the form accepted by neon_bits_same_in_bytes,
   return 0bABCD: bit N of the result is bit 0 of byte N of IMM.  */
static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1u) << byte;

  return result;
}
8131
/* Compress quarter-float representation to 0b...000 abcdefgh:
   the sign bit (31) moves to bit 7, bits 25..19 move to bits 6..0.  */
static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned sign = (imm >> 24) & 0x80;
  unsigned rest = (imm >> 19) & 0x7f;

  return sign | rest;
}
8139
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  Returns FAIL if the constant is not
   encodable at any element size.  */
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float immediate: cmode 0xf.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit pattern of all-0/all-1 bytes: one bit per byte, cmode 0xe
	 with op forced to 1.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit constant is only encodable if both halves
	 are equal; fall through to the 32-bit cases with IMMLO.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* 32-bit element: a single byte in any byte position (cmode
	 0/2/4/6), or a byte with trailing ones (cmode 0xc/0xd).  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Try 16-bit elements, but only if both halfwords match.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* 16-bit element: a single byte in either position (cmode 0x8/0xa).  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Try 8-bit elements, but only if both bytes match.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  /* 8-bit element: any byte value (cmode 0xe with op 0).  */
  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
8249
8250 #if defined BFD_HOST_64_BIT
8251 /* Returns TRUE if double precision value V may be cast
8252 to single precision without loss of accuracy. */
8253
8254 static bfd_boolean
8255 is_double_a_single (bfd_int64_t v)
8256 {
8257 int exp = (int)((v >> 52) & 0x7FF);
8258 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
8259
8260 return (exp == 0 || exp == 0x7FF
8261 || (exp >= 1023 - 126 && exp <= 1023 + 127))
8262 && (mantissa & 0x1FFFFFFFl) == 0;
8263 }
8264
/* Returns a double precision value casted to single precision
   (ignoring the least significant bits in exponent and mantissa).
   The input V is the raw 64-bit representation; the result is the
   raw 32-bit representation.  */

static int
double_to_single (bfd_int64_t v)
{
  int sign = (int) ((v >> 63) & 1l);
  int exp = (int) ((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  if (exp == 0x7FF)
    /* Infinity / NaN: keep an all-ones exponent.  */
    exp = 0xFF;
  else
    {
      /* Rebias from the 11-bit exponent (bias 1023) to 8 bits (bias 127).  */
      exp = exp - 1023 + 127;
      if (exp >= 0xFF)
	{
	  /* Infinity.  */
	  exp = 0x7F;
	  mantissa = 0;
	}
      else if (exp < 0)
	{
	  /* No denormalized numbers.  */
	  exp = 0;
	  mantissa = 0;
	}
    }
  /* Keep the top 23 of the 52 mantissa bits.  */
  mantissa >>= 29;
  return (sign << 31) | (exp << 23) | mantissa;
}
8296 #endif /* BFD_HOST_64_BIT */
8297
/* Kind of "=constant" literal being loaded; selects which move
   encodings move_or_literal_pool may substitute for the load.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb load of a literal.  */
  CONST_ARM,	/* ARM load of a literal.  */
  CONST_VEC	/* Vector (VFP/Neon) load of a literal.  */
};
8304
8305 static void do_vfp_nsyn_opcode (const char *);
8306
/* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p   = (t == CONST_ARM);

  /* "=expr" is only meaningful on a load; pick the load bit for the
     encoding in use so we can check for it.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op != O_constant
      && inst.relocs[0].exp.X_op != O_symbol
      && inst.relocs[0].exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op == O_constant
      || inst.relocs[0].exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.relocs[0].exp.X_op == O_big)
	{
	  /* Bignum: reassemble the value from the littlenum array.  */
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.relocs[0].exp.X_add_number == -1)
	    {
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	      | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.relocs[0].exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* LDR should not use lead in a flag-setting instruction being
		 chosen so we do not check whether movs can be used.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		   || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  /* Try the value as-is, then bitwise-inverted (MVN).  */
		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      /* ARM mode: try MOV with a rotated 8-bit immediate, then
		 MVN of the inverted value.  */
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      /* Vector load: try a VMOV/VMVN modified-immediate encoding.  */
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      unsigned immhi = inst.operands[1].regisimm
			       ? inst.operands[1].reg
			       : inst.relocs[0].exp.X_unsigned
				 ? 0
				 : ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  /* Not directly encodable; try the inverted value with
		     the complementary operation.  */
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
				     | (1 << 23)
				     | (cmode << 8)
				     | (op << 5)
				     | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move substitution possible: fall back to a literal-pool load.
     Reserve the pool slot and rewrite operand 1 as [pc, #offset].  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = (thumb_p
			 ? BFD_RELOC_ARM_THUMB_OFFSET
			 : (mode_3
			    ? BFD_RELOC_ARM_HWLITERAL
			    : BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8551
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  Returns SUCCESS or FAIL (setting
   inst.error on failure).  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      /* "=constant" pseudo-load: may be turned into a vmov, otherwise
	 falls through to be encoded as a literal-pool reference.  */
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  inst.instruction |= inst.operands[i].reg << 16;  /* Rn.  */

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form: the immediate is an 8-bit option field and the
	 U bit is always set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Pick the offset relocation, honouring any override or group reloc
     already selected by the parser.  */
  if (reloc_override)
    inst.relocs[0].type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.relocs[0].type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.relocs[0].type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.relocs[0].type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.relocs[0].type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.relocs[0].type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8628
8629 /* Functions for instruction encoding, sorted by sub-architecture.
8630 First some generics; their names are taken from the conventional
8631 bit positions for register arguments in ARM format instructions. */
8632
/* Encoder for instructions taking no operands: the base opcode is
   already complete, so there is nothing to add.  */
static void
do_noargs (void)
{
}
8637
8638 static void
8639 do_rd (void)
8640 {
8641 inst.instruction |= inst.operands[0].reg << 12;
8642 }
8643
8644 static void
8645 do_rn (void)
8646 {
8647 inst.instruction |= inst.operands[0].reg << 16;
8648 }
8649
8650 static void
8651 do_rd_rm (void)
8652 {
8653 inst.instruction |= inst.operands[0].reg << 12;
8654 inst.instruction |= inst.operands[1].reg;
8655 }
8656
8657 static void
8658 do_rm_rn (void)
8659 {
8660 inst.instruction |= inst.operands[0].reg;
8661 inst.instruction |= inst.operands[1].reg << 16;
8662 }
8663
8664 static void
8665 do_rd_rn (void)
8666 {
8667 inst.instruction |= inst.operands[0].reg << 12;
8668 inst.instruction |= inst.operands[1].reg << 16;
8669 }
8670
8671 static void
8672 do_rn_rd (void)
8673 {
8674 inst.instruction |= inst.operands[0].reg << 16;
8675 inst.instruction |= inst.operands[1].reg << 12;
8676 }
8677
8678 static void
8679 do_tt (void)
8680 {
8681 inst.instruction |= inst.operands[0].reg << 8;
8682 inst.instruction |= inst.operands[1].reg << 16;
8683 }
8684
8685 static bfd_boolean
8686 check_obsolete (const arm_feature_set *feature, const char *msg)
8687 {
8688 if (ARM_CPU_IS_ANY (cpu_variant))
8689 {
8690 as_tsktsk ("%s", msg);
8691 return TRUE;
8692 }
8693 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8694 {
8695 as_bad ("%s", msg);
8696 return TRUE;
8697 }
8698
8699 return FALSE;
8700 }
8701
/* Encode Rd (bits [15:12]), Rm (bits [3:0]) and Rn (bits [19:16]),
   with extra diagnostics for the SWP/SWPB encoding.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8725
8726 static void
8727 do_rd_rn_rm (void)
8728 {
8729 inst.instruction |= inst.operands[0].reg << 12;
8730 inst.instruction |= inst.operands[1].reg << 16;
8731 inst.instruction |= inst.operands[2].reg;
8732 }
8733
/* Encode Rm (bits [3:0]), Rd (bits [15:12]) and Rn (bits [19:16]).
   Rn is a base address and may not be PC; no offset expression is
   permitted on it.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* Only a bare [Rn] (zero offset) address is acceptable here.  */
  constraint (((inst.relocs[0].exp.X_op != O_constant
		&& inst.relocs[0].exp.X_op != O_illegal)
	       || inst.relocs[0].exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8746
8747 static void
8748 do_imm0 (void)
8749 {
8750 inst.instruction |= inst.operands[0].imm;
8751 }
8752
8753 static void
8754 do_rd_cpaddr (void)
8755 {
8756 inst.instruction |= inst.operands[0].reg << 12;
8757 encode_arm_cp_address (1, TRUE, TRUE, 0);
8758 }
8759
8760 /* ARM instructions, in alphabetical order by function name (except
8761 that wrapper functions appear immediately after the function they
8762 wrap). */
8763
/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */

static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* Compensate for the PC reading 8 bytes ahead in ARM state.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* For interworking, taking the address of a defined Thumb function
     sets the low bit so a later BX reaches it in Thumb state.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
8785
/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
   add rd, pc, #low(label-.-8)"
   add rd, rd, #high(label-.-8)"  */

static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* The pseudo-op expands to two ARM instructions.  */
  inst.size = INSN_SIZE * 2;
  /* Compensate for the PC reading 8 bytes ahead in ARM state.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* For interworking, taking the address of a defined Thumb function
     sets the low bit so a later BX reaches it in Thumb state.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
8810
/* Encode an ARM data-processing (arithmetic) instruction:
   <op> Rd, [Rn,] <shifter operand>.  When Rn is omitted it
   defaults to Rd.  */
static void
do_arit (void)
{
  /* Thumb-only group relocations cannot be used on the ARM encoding.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
8823
8824 static void
8825 do_barrier (void)
8826 {
8827 if (inst.operands[0].present)
8828 inst.instruction |= inst.operands[0].imm;
8829 else
8830 inst.instruction |= 0xf;
8831 }
8832
/* Encode BFC Rd, #lsb, #width.  */
static void
do_bfc (void)
{
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;   /* lsb.  */
  inst.instruction |= (msb - 1) << 16;             /* msb.  */
}
8844
/* Encode BFI Rd, Rn, #lsb, #width.  */
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;   /* lsb.  */
  inst.instruction |= (msb - 1) << 16;             /* msb.  */
}
8864
/* Encode SBFX/UBFX Rd, Rn, #lsb, #width; the encoding stores the LSB
   and width-1.  */
static void
do_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;           /* lsb.  */
  inst.instruction |= (inst.operands[3].imm - 1) << 16;    /* width - 1.  */
}
8875
/* ARM V5 breakpoint instruction (argument parse)
     BKPT <16 bit unsigned immediate>
   Instruction is not conditional.
	The bit pattern given in insns[] has the COND_ALWAYS condition,
	and it is an error if the caller tried to override that.
   The 16-bit immediate is split across two fields of the encoding.  */

static void
do_bkpt (void)
{
  /* Top 12 of 16 bits to bits 19:8.  */
  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;

  /* Bottom 4 of 16 bits to bits 3:0.  */
  inst.instruction |= inst.operands[0].imm & 0xf;
}
8891
/* Set up the PC-relative relocation for a branch target in operand 0.
   DEFAULT_RELOC is used unless the operand carries a (plt) or
   (tlscall) suffix, which selects the corresponding reloc instead.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.relocs[0].type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.relocs[0].type = (bfd_reloc_code_real_type) default_reloc;
  inst.relocs[0].pc_rel = 1;
}
8908
/* Encode B{cond}; EABI v4+ objects use the JUMP reloc so the linker
   can fix up mode changes.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8919
/* Encode BL{cond}; EABI v4+ objects distinguish an unconditional call
   (CALL reloc, which the linker may turn into BLX) from a conditional
   one (JUMP reloc).  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8935
/* ARM V5 branch-link-exchange instruction (argument parse)
     BLX <target_addr>		ie BLX(1)
     BLX{<condition>} <Rm>	ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */

static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;  /* BLX(1) opcode.  */
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8967
/* Encode BX{cond} Rm, emitting an R_ARM_V4BX marker reloc when the
   object may target pre-v5 cores.  */
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* Note: for non-ELF, and for pre-v4 EABI, the reloc is always
     suppressed by this assignment.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.relocs[0].type = BFD_RELOC_ARM_V4BX;
}
8992
8993
8994 /* ARM v5TEJ. Jump to Jazelle code. */
8995
8996 static void
8997 do_bxj (void)
8998 {
8999 if (inst.operands[0].reg == REG_PC)
9000 as_tsktsk (_("use of r15 in bxj is not really useful"));
9001
9002 inst.instruction |= inst.operands[0].reg;
9003 }
9004
/* Co-processor data operation:
     CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
     CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}	 */
static void
do_cdp (void)
{
  inst.instruction |= inst.operands[0].reg << 8;    /* coproc.  */
  inst.instruction |= inst.operands[1].imm << 20;   /* opcode_1.  */
  inst.instruction |= inst.operands[2].reg << 12;   /* CRd.  */
  inst.instruction |= inst.operands[3].reg << 16;   /* CRn.  */
  inst.instruction |= inst.operands[4].reg;         /* CRm.  */
  inst.instruction |= inst.operands[5].imm << 5;    /* opcode_2.  */
}
9018
9019 static void
9020 do_cmp (void)
9021 {
9022 inst.instruction |= inst.operands[0].reg << 16;
9023 encode_arm_shifter_operand (1);
9024 }
9025
9026 /* Transfer between coprocessor and ARM registers.
9027 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
9028 MRC2
9029 MCR{cond}
9030 MCR2
9031
9032 No special properties. */
9033
/* One entry in the table of deprecated/obsoleted coprocessor
   registers: the coordinates of the register in MRC/MCR operand form
   plus the feature sets at which access becomes deprecated or
   obsoleted, and the messages to emit.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;		/* Coprocessor number.  */
  int opc1;		/* opcode_1 field.  */
  unsigned crn;		/* CRn field.  */
  unsigned crm;		/* CRm field.  */
  int opc2;		/* opcode_2 field.  */
  arm_feature_set deprecated;	/* Architectures where access is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where access is obsoleted.  */
  const char *dep_msg;	/* Diagnostic for deprecated access.  */
  const char *obs_msg;	/* Diagnostic for obsoleted access.  */
};
9046
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  All of the current
   entries are registers whose access is deprecated from ARMv8.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
9074
/* Encode MRC/MRC2/MCR/MCR2 (single register <-> coprocessor register
   transfer).  Rejects illegal Rd values per mode/direction and warns
   when the named coprocessor register is deprecated for the selected
   architecture.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2: SP is disallowed as destination before v8.  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR in ARM state must not read the PC.  */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn (not error) when the cp/opc1/CRn/CRm/opc2 tuple names a
     register listed as deprecated for the current CPU.  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
9124
9125 /* Transfer between coprocessor register and pair of ARM registers.
9126 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
9127 MCRR2
9128 MRRC{cond}
9129 MRRC2
9130
9131 Two XScale instructions are special cases of these:
9132
9133 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
9134 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
9135
9136 Result unpredictable if Rd or Rn is R15. */
9137
9138 static void
9139 do_co_reg2c (void)
9140 {
9141 unsigned Rd, Rn;
9142
9143 Rd = inst.operands[2].reg;
9144 Rn = inst.operands[3].reg;
9145
9146 if (thumb_mode)
9147 {
9148 reject_bad_reg (Rd);
9149 reject_bad_reg (Rn);
9150 }
9151 else
9152 {
9153 constraint (Rd == REG_PC, BAD_PC);
9154 constraint (Rn == REG_PC, BAD_PC);
9155 }
9156
9157 /* Only check the MRRC{2} variants. */
9158 if ((inst.instruction & 0x0FF00000) == 0x0C500000)
9159 {
9160 /* If Rd == Rn, error that the operation is
9161 unpredictable (example MRRC p3,#1,r1,r1,c4). */
9162 constraint (Rd == Rn, BAD_OVERLAP);
9163 }
9164
9165 inst.instruction |= inst.operands[0].reg << 8;
9166 inst.instruction |= inst.operands[1].imm << 4;
9167 inst.instruction |= Rd << 12;
9168 inst.instruction |= Rn << 16;
9169 inst.instruction |= inst.operands[4].reg;
9170 }
9171
9172 static void
9173 do_cpsi (void)
9174 {
9175 inst.instruction |= inst.operands[0].imm << 6;
9176 if (inst.operands[1].present)
9177 {
9178 inst.instruction |= CPSI_MMOD;
9179 inst.instruction |= inst.operands[1].imm;
9180 }
9181 }
9182
/* DBG hint: the #option immediate is OR'd into the low bits.  */
static void
do_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
9188
9189 static void
9190 do_div (void)
9191 {
9192 unsigned Rd, Rn, Rm;
9193
9194 Rd = inst.operands[0].reg;
9195 Rn = (inst.operands[1].present
9196 ? inst.operands[1].reg : Rd);
9197 Rm = inst.operands[2].reg;
9198
9199 constraint ((Rd == REG_PC), BAD_PC);
9200 constraint ((Rn == REG_PC), BAD_PC);
9201 constraint ((Rm == REG_PC), BAD_PC);
9202
9203 inst.instruction |= Rd << 16;
9204 inst.instruction |= Rn << 0;
9205 inst.instruction |= Rm << 8;
9206 }
9207
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emit no bytes: the mnemonic has no ARM encoding.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_pred_insn_type (IT_INSN);
      /* Low four opcode bits are the IT mask; bit 4 is always set
	 here (presumably a sentinel for the predication tracker —
	 see now_pred users).  */
      now_pred.mask = (inst.instruction & 0xf) | 0x10;
      now_pred.cc = inst.operands[0].imm;
    }
}
9224
9225 /* If there is only one register in the register list,
9226 then return its register number. Otherwise return -1. */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* ffs () returns 0 for an empty mask, making I negative; the
     subsequent (1 << i) would then be a negative shift, which is
     undefined behaviour.  An empty list has no single register.  */
  if (i < 0)
    return -1;

  return (i > 15 || range != (1 << i)) ? -1 : i;
}
9233
/* Common encoder for LDM/STM and PUSH/POP: sets base register,
   register list and writeback, warns about UNPREDICTABLE writeback
   combinations, and, for single-register PUSH/POP, switches to the
   A2 (single word transfer) encoding.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A '^'-marked register list selects the LDM(2)/LDM(3)/STM(2)
     forms.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE.  */
	return;

      /* Keep only the condition field; substitute the A2 opcode and
	 the single register.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
9289
/* Plain LDM/STM mnemonics: use the common encoder without the
   PUSH/POP single-register optimization.  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
9295
9296 /* ARMv5TE load-consecutive (argument parse)
9297 Mode is like LDRH.
9298
9299 LDRccD R, mode
9300 STRccD R, mode. */
9301
/* LDRD/STRD: transfer an even/odd consecutive register pair.  The
   second register operand is optional and defaults to Rt + 1.  */
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* r14 would make the pair r14/r15; the pair may not include PC.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
9337
/* LDREX Rt, [Rn]: exclusive load.  Only a plain register-indirect
   address (zero offset, no writeback) is legal in ARM state.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* NOTE(review): PC is already rejected by the address-mode check
     above; confirm whether this second check is reachable.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The zero offset needs no fixup.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
9369
/* LDREXD Rt, {Rt2,} [Rn]: exclusive load of an even/odd register
   pair; Rt2 is optional and defaults to Rt + 1.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;	/* Base register.  */
}
9385
9386 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9387 which is not a multiple of four is UNPREDICTABLE. */
static void
check_ldr_r15_aligned (void)
{
  /* Only literal/offset forms are checked; a register offset cannot
     be validated at assembly time.  */
  constraint (!(inst.operands[1].immisreg)
	      && (inst.operands[0].reg == REG_PC
		  && inst.operands[1].reg == REG_PC
		  && (inst.relocs[0].exp.X_add_number & 0x3)),
	      _("ldr to register 15 must be 4-byte aligned"));
}
9397
9398 static void
9399 do_ldst (void)
9400 {
9401 inst.instruction |= inst.operands[0].reg << 12;
9402 if (!inst.operands[1].isreg)
9403 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
9404 return;
9405 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
9406 check_ldr_r15_aligned ();
9407 }
9408
/* LDRT/STRT (translated/user-mode variants), mode 2 addressing.  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* Any pre-index form other than a bare zero offset is an error.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
9427
9428 /* Halfword and signed-byte load/store operations. */
9429
/* Halfword / signed-byte load/store (ARMv4 mode 3 addressing).  */
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  /* A non-register operand may be satisfied from the literal pool.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
9440
/* LDRHT/LDRSBT/etc. user-mode variants, mode 3 addressing.  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* Any pre-index form other than a bare zero offset is an error.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9459
9460 /* Co-processor register load/store.
9461 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9462 static void
9463 do_lstc (void)
9464 {
9465 inst.instruction |= inst.operands[0].reg << 8;
9466 inst.instruction |= inst.operands[1].reg << 12;
9467 encode_arm_cp_address (2, TRUE, TRUE, 0);
9468 }
9469
9470 static void
9471 do_mlas (void)
9472 {
9473 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9474 if (inst.operands[0].reg == inst.operands[1].reg
9475 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
9476 && !(inst.instruction & 0x00400000))
9477 as_tsktsk (_("Rd and Rm should be different in mla"));
9478
9479 inst.instruction |= inst.operands[0].reg << 16;
9480 inst.instruction |= inst.operands[1].reg;
9481 inst.instruction |= inst.operands[2].reg << 8;
9482 inst.instruction |= inst.operands[3].reg << 12;
9483 }
9484
/* MOV Rd, <shifter_operand>.  The Thumb-1 ALU-immediate reloc
   operators cannot be used in an ARM-state MOV.  */
static void
do_mov (void)
{
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9494
9495 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 22 distinguishes MOVT from MOVW; each rejects the reloc
     operator belonging to the other half.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.relocs[0].type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.relocs[0].type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  /* With no reloc pending, encode the constant immediately.  */
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      imm = inst.relocs[0].exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
9516
/* Try to handle MRS as a VFP-syntax transfer.  Returns SUCCESS when
   the operands select the fmstat (APSR-flag form) or fmrx encodings,
   FAIL when this is a plain core-register MRS.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* fmstat takes no operands; clear them before re-dispatch.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9535
9536 static int
9537 do_vfp_nsyn_msr (void)
9538 {
9539 if (inst.operands[0].isvec)
9540 do_vfp_nsyn_opcode ("fmxr");
9541 else
9542 return FAIL;
9543
9544 return SUCCESS;
9545 }
9546
/* VMRS Rt, <spec_reg>: VFP system register to core register.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* MVFR2 is only valid at ARMv8-A.  */
  if (inst.operands[1].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9575
/* VMSR <spec_reg>, Rt: core register to VFP system register.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* MVFR2 is only valid for ARMv8-A.  */
  if (inst.operands[0].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9599
/* MRS Rd, <psr>: read CPSR/SPSR (or a banked register) into Rd.  */
static void
do_mrs (void)
{
  unsigned br;

  /* VFP-syntax forms (fmstat/fmrx) are handled elsewhere.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* Banked-register form: validate the encoded register value.
	 NOTE(review): the 0x200/0xf0000 masks come from the parser's
	 banked-register encoding — confirm against its definition.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9628
9629 /* Two possible forms:
9630 "{C|S}PSR_<field>, Rm",
9631 "{C|S}PSR_f, #expression". */
9632
static void
do_msr (void)
{
  /* VFP-syntax form (fmxr) is handled elsewhere.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* Field mask / PSR selector from the first operand.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: leave the value to the ARM-immediate fixup.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
      inst.relocs[0].pc_rel = 0;
    }
}
9649
9650 static void
9651 do_mul (void)
9652 {
9653 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
9654
9655 if (!inst.operands[2].present)
9656 inst.operands[2].reg = inst.operands[0].reg;
9657 inst.instruction |= inst.operands[0].reg << 16;
9658 inst.instruction |= inst.operands[1].reg;
9659 inst.instruction |= inst.operands[2].reg << 8;
9660
9661 if (inst.operands[0].reg == inst.operands[1].reg
9662 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9663 as_tsktsk (_("Rd and Rm should be different in mul"));
9664 }
9665
9666 /* Long Multiply Parser
9667 UMULL RdLo, RdHi, Rm, Rs
9668 SMULL RdLo, RdHi, Rm, Rs
9669 UMLAL RdLo, RdHi, Rm, Rs
9670 SMLAL RdLo, RdHi, Rm, Rs. */
9671
9672 static void
9673 do_mull (void)
9674 {
9675 inst.instruction |= inst.operands[0].reg << 12;
9676 inst.instruction |= inst.operands[1].reg << 16;
9677 inst.instruction |= inst.operands[2].reg;
9678 inst.instruction |= inst.operands[3].reg << 8;
9679
9680 /* rdhi and rdlo must be different. */
9681 if (inst.operands[0].reg == inst.operands[1].reg)
9682 as_tsktsk (_("rdhi and rdlo must be different"));
9683
9684 /* rdhi, rdlo and rm must all be different before armv6. */
9685 if ((inst.operands[0].reg == inst.operands[2].reg
9686 || inst.operands[1].reg == inst.operands[2].reg)
9687 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9688 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9689 }
9690
/* NOP {#hint}: on v6K-and-later (or when a hint operand is given),
   encode as an architectural hint; otherwise leave the legacy
   encoding untouched.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* Keep only the condition.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
9704
9705 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9706 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9707 Condition defaults to COND_ALWAYS.
9708 Error if Rd, Rn or Rm are R15. */
9709
9710 static void
9711 do_pkhbt (void)
9712 {
9713 inst.instruction |= inst.operands[0].reg << 12;
9714 inst.instruction |= inst.operands[1].reg << 16;
9715 inst.instruction |= inst.operands[2].reg;
9716 if (inst.operands[3].present)
9717 encode_arm_shift (3);
9718 }
9719
9720 /* ARM V6 PKHTB (Argument Parse). */
9721
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note Rn/Rm swap positions in this
	 rewrite.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9742
9743 /* ARMv5TE: Preload-Cache
9744 MP Extensions: Preload for write
9745
9746 PLD(W) <addr_mode>
9747
9748 Syntactically, like LDR with B=1, W=0, L=1. */
9749
/* PLD/PLDW: only a plain pre-indexed address without writeback is
   legal.  */
static void
do_pld (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9763
9764 /* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* Unlike PLD, the P bit is cleared after mode-2 encoding —
     presumably to select PLI's encoding; confirm against the ARM ARM.  */
  inst.instruction &= ~PRE_INDEX;
}
9779
/* PUSH/POP {reglist}: rewritten as an LDM/STM with base SP and
   writeback, then encoded through the common LDM/STM path.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list into operand 1 and synthesize SP! as
     operand 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9792
9793 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9794 word at the specified address and the following word
9795 respectively.
9796 Unconditionally executed.
9797 Error if Rn is R15. */
9798
9799 static void
9800 do_rfe (void)
9801 {
9802 inst.instruction |= inst.operands[0].reg << 16;
9803 if (inst.operands[0].writeback)
9804 inst.instruction |= WRITE_BACK;
9805 }
9806
9807 /* ARM V6 ssat (argument parse). */
9808
9809 static void
9810 do_ssat (void)
9811 {
9812 inst.instruction |= inst.operands[0].reg << 12;
9813 inst.instruction |= (inst.operands[1].imm - 1) << 16;
9814 inst.instruction |= inst.operands[2].reg;
9815
9816 if (inst.operands[3].present)
9817 encode_arm_shift (3);
9818 }
9819
9820 /* ARM V6 usat (argument parse). */
9821
9822 static void
9823 do_usat (void)
9824 {
9825 inst.instruction |= inst.operands[0].reg << 12;
9826 inst.instruction |= inst.operands[1].imm << 16;
9827 inst.instruction |= inst.operands[2].reg;
9828
9829 if (inst.operands[3].present)
9830 encode_arm_shift (3);
9831 }
9832
9833 /* ARM V6 ssat16 (argument parse). */
9834
9835 static void
9836 do_ssat16 (void)
9837 {
9838 inst.instruction |= inst.operands[0].reg << 12;
9839 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9840 inst.instruction |= inst.operands[2].reg;
9841 }
9842
9843 static void
9844 do_usat16 (void)
9845 {
9846 inst.instruction |= inst.operands[0].reg << 12;
9847 inst.instruction |= inst.operands[1].imm << 16;
9848 inst.instruction |= inst.operands[2].reg;
9849 }
9850
9851 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9852 preserving the other bits.
9853
9854 setend <endian_specifier>, where <endian_specifier> is either
9855 BE or LE. */
9856
9857 static void
9858 do_setend (void)
9859 {
9860 if (warn_on_deprecated
9861 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
9862 as_tsktsk (_("setend use is deprecated for ARMv8"));
9863
9864 if (inst.operands[0].imm)
9865 inst.instruction |= 0x200;
9866 }
9867
/* Shift mnemonics (lsl/lsr/asr/ror...): Rd, {Rm,} Rs-or-#imm.  The
   source defaults to Rd when only two operands were given.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate amount: resolved by the shift-immediate fixup.  */
    inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
9888
/* SMC #imm: the immediate is attached via a dedicated reloc so the
   fixup can validate and insert it.  */
static void
do_smc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SMC;
  inst.relocs[0].pc_rel = 0;
}
9895
/* HVC #imm: immediate handled by the HVC fixup.  */
static void
do_hvc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_HVC;
  inst.relocs[0].pc_rel = 0;
}
9902
/* SWI/SVC #imm: immediate handled by the SWI fixup.  */
static void
do_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
  inst.relocs[0].pc_rel = 0;
}
9909
/* SETPAN #imm (ARM state): the single PAN bit goes in bit 9.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
9918
/* SETPAN #imm (Thumb state): the PAN bit goes in bit 3.  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9927
9928 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9929 SMLAxy{cond} Rd,Rm,Rs,Rn
9930 SMLAWy{cond} Rd,Rm,Rs,Rn
9931 Error if any register is R15. */
9932
9933 static void
9934 do_smla (void)
9935 {
9936 inst.instruction |= inst.operands[0].reg << 16;
9937 inst.instruction |= inst.operands[1].reg;
9938 inst.instruction |= inst.operands[2].reg << 8;
9939 inst.instruction |= inst.operands[3].reg << 12;
9940 }
9941
9942 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9943 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9944 Error if any register is R15.
9945 Warning if Rdlo == Rdhi. */
9946
9947 static void
9948 do_smlal (void)
9949 {
9950 inst.instruction |= inst.operands[0].reg << 12;
9951 inst.instruction |= inst.operands[1].reg << 16;
9952 inst.instruction |= inst.operands[2].reg;
9953 inst.instruction |= inst.operands[3].reg << 8;
9954
9955 if (inst.operands[0].reg == inst.operands[1].reg)
9956 as_tsktsk (_("rdhi and rdlo must be different"));
9957 }
9958
9959 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9960 SMULxy{cond} Rd,Rm,Rs
9961 Error if any register is R15. */
9962
9963 static void
9964 do_smul (void)
9965 {
9966 inst.instruction |= inst.operands[0].reg << 16;
9967 inst.instruction |= inst.operands[1].reg;
9968 inst.instruction |= inst.operands[2].reg << 8;
9969 }
9970
9971 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9972 the same for both ARM and Thumb-2. */
9973
static void
do_srs (void)
{
  int reg;

  /* The base register is optional and, when given, must be SP.  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;	/* Target mode.  */
  /* The '!' may be attached to either operand syntactically.  */
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
9992
9993 /* ARM V6 strex (argument parse). */
9994
/* STREX Rd, Rt, [Rn]: exclusive store.  Only a plain zero-offset
   register-indirect address is legal; Rd must not overlap Rt or Rn.  */
static void
do_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The zero offset needs no fixup.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
10018
/* Thumb STREXB/STREXH: same overlap rules as STREX, then the common
   Rm/Rd/Rn field layout.  */
static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10033
/* STREXD Rd, Rt, {Rt2,} [Rn]: exclusive store of an even/odd pair;
   Rt2 is optional and defaults to Rt + 1.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register must not overlap either stored register or
     the base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
10055
10056 /* ARM V8 STRL. */
/* ARMv8 STLEX (ARM state): status register must not overlap the
   stored register or the base.  */
static void
do_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
10065
/* ARMv8 STLEX (Thumb state): same overlap rule, Thumb field layout.  */
static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10074
10075 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
10076 extends it to 32-bits, and adds the result to a value in another
10077 register. You can specify a rotation by 0, 8, 16, or 24 bits
10078 before extracting the 16-bit value.
10079 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
10080 Condition defaults to COND_ALWAYS.
10081 Error if any register uses R15. */
10082
10083 static void
10084 do_sxtah (void)
10085 {
10086 inst.instruction |= inst.operands[0].reg << 12;
10087 inst.instruction |= inst.operands[1].reg << 16;
10088 inst.instruction |= inst.operands[2].reg;
10089 inst.instruction |= inst.operands[3].imm << 10;
10090 }
10091
10092 /* ARM V6 SXTH.
10093
10094 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
10095 Condition defaults to COND_ALWAYS.
10096 Error if any register uses R15. */
10097
10098 static void
10099 do_sxth (void)
10100 {
10101 inst.instruction |= inst.operands[0].reg << 12;
10102 inst.instruction |= inst.operands[1].reg;
10103 inst.instruction |= inst.operands[2].imm << 10;
10104 }
10105 \f
10106 /* VFP instructions. In a logical order: SP variant first, monad
10107 before dyad, arithmetic then move then load/store. */
10108
/* Single-precision monadic operation: Sd, Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
10115
/* Single-precision dyadic operation: Sd, Sn, Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
10123
/* Single-precision compare against zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
10129
/* Conversion producing a double from a single: <op> Dd, Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Conversion producing a single from a double: <op> Sd, Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
10143
/* VMOV core register <- single-precision register: Rd in bits [15:12],
   source encoded in the Sn field.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

/* Two core registers <- a pair of consecutive SP registers.
   operands[2].imm is the register-pair count collected by the parser.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;	/* Rt.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* Rt2.  */
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Single-precision register <- core register.  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

/* Pair of consecutive SP registers <- two core registers.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;	/* Rt.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rt2.  */
}
10177
/* Single-precision load/store: Sd plus a coprocessor-style address
   (operand 1); writeback is not permitted, reloc chosen by default.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

/* Double-precision load/store, as above but with a D register.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10191
10192
/* Common worker for the single-precision load/store-multiple forms.
   operand 0 is the base register (with optional writeback), operand 1
   holds the first S register and the register count in .imm.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Only the plain IA addressing mode may omit writeback.  */
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;	/* Register count.  */
}
10205
10206 static void
10207 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
10208 {
10209 int count;
10210
10211 if (inst.operands[0].writeback)
10212 inst.instruction |= WRITE_BACK;
10213 else
10214 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
10215 _("this addressing mode requires base-register writeback"));
10216
10217 inst.instruction |= inst.operands[0].reg << 16;
10218 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
10219
10220 count = inst.operands[1].imm << 1;
10221 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
10222 count += 1;
10223
10224 inst.instruction |= count;
10225 }
10226
/* Thin wrappers binding each mnemonic to its ldstm addressing mode:
   SP = single precision, DP = double precision, XP = the FLDMX/FSTMX
   "unknown precision" variants.  */

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
10262
/* Double-precision register-layout encoders.  Each function name spells
   out which operand goes into which encoding field (Dd, Dn, Dm), in
   operand order.  */

static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
10305
/* VFPv3 instructions.  */

/* VMOV immediate, single precision: the 8-bit encoded constant is split
   into a high nibble at bits [19:16] and a low nibble at bits [3:0].  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

/* VMOV immediate, double precision: same immediate split as above.  */
static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
10322
/* Shared worker for the VFPv3 fixed-point conversion instructions.
   SRCSIZE is 16 or 32; operands[1].imm is the number of fraction bits,
   which the encoding stores as SRCSIZE - imm split across bit 5 (LSB)
   and bits [3:0] (upper bits).  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
10346
/* Fixed-point conversion wrappers: encode the destination register
   (S or D) and delegate the immediate encoding to vfp_conv with the
   appropriate source size.  */

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
10374 \f
/* FPA instructions.  Also in a logical order.  */

/* FPA compare: first operand in bits [19:16], second in bits [3:0].  */
static void
do_fpa_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
10383
/* FPA load/store multiple (LFM/SFM).  operand 0 is the first FP
   register, operands[1].imm the register count (1-4, encoded via the
   CP_T_X/CP_T_Y bits), operand 2 the address.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Register count 4 encodes as 0b00 (both bits clear).  */
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;	       break;
    case 2: inst.instruction |= CP_T_Y;	       break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4:				       break;
    default: abort ();	/* Parser guarantees 1-4.  */
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each register occupies 12 bytes in the FPA transfer format.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.relocs[0].exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.relocs[0].exp.X_add_number = -inst.relocs[0].exp.X_add_number;

      /* Descending-stack writeback is emulated with post-indexing.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
10422 \f
/* iWMMXt instructions: strictly in alphabetical order.  */

/* TANDC/TORC/TEXTRC: the only legal destination is r15.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}

/* TEXTRC: Rd in bits [15:12], lane immediate in the low bits.  */
static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}

/* TEXTRM: Rd, wRn, #lane.  */
static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

/* TINSR: wRd, Rn, #lane.  */
static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}

/* TMIA: accumulator in bits [7:5], Rm low, Rs in bits [15:12].  */
static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* WALIGNI: wRd, wRn, wRm, #imm — 3-bit alignment immediate at bit 20.  */
static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}

/* WMERGE: wRd, wRn, wRm, #imm — immediate at bit 21.  */
static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}
10479
static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN: the source register is
     encoded in both the wRn and wRm fields.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
10488
10489 static void
10490 do_iwmmxt_wldstbh (void)
10491 {
10492 int reloc;
10493 inst.instruction |= inst.operands[0].reg << 12;
10494 if (thumb_mode)
10495 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
10496 else
10497 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
10498 encode_arm_cp_address (1, TRUE, FALSE, reloc);
10499 }
10500
/* WLDRW/WSTRW: word load/store of either a wR register or a control
   register.  */
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register form is unconditional only.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
10514
/* WLDRD/WSTRD: doubleword load/store.  iWMMXt2 adds a register-offset
   addressing form which is encoded by hand here; otherwise fall back to
   the generic coprocessor address encoding.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Rewrite the generic coprocessor pattern into the iWMMXt2
	 register-offset encoding (unconditional, 0xF condition).  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;	/* Base Rn.  */
      inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;		/* Offset reg.  */
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10537
/* WSHUFH: wRd, wRn, #imm8 — the 8-bit shuffle pattern is split into a
   high nibble at bits [23:20] and a low nibble at bits [3:0].  */
static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}
10546
static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg: the same register is
     placed in all three register fields.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}
10555
10556 static void
10557 do_iwmmxt_wrwrwr_or_imm5 (void)
10558 {
10559 if (inst.operands[2].isreg)
10560 do_rd_rn_rm ();
10561 else {
10562 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
10563 _("immediate operand requires iWMMXt2"));
10564 do_rd_rn ();
10565 if (inst.operands[2].imm == 0)
10566 {
10567 switch ((inst.instruction >> 20) & 0xf)
10568 {
10569 case 4:
10570 case 5:
10571 case 6:
10572 case 7:
10573 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10574 inst.operands[2].imm = 16;
10575 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
10576 break;
10577 case 8:
10578 case 9:
10579 case 10:
10580 case 11:
10581 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10582 inst.operands[2].imm = 32;
10583 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
10584 break;
10585 case 12:
10586 case 13:
10587 case 14:
10588 case 15:
10589 {
10590 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10591 unsigned long wrn;
10592 wrn = (inst.instruction >> 16) & 0xf;
10593 inst.instruction &= 0xff0fff0f;
10594 inst.instruction |= wrn;
10595 /* Bail out here; the instruction is now assembled. */
10596 return;
10597 }
10598 }
10599 }
10600 /* Map 32 -> 0, etc. */
10601 inst.operands[2].imm &= 0x1f;
10602 inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
10603 }
10604 }
10605 \f
/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
   operations first, then control, shift, and load/store.  */

/* Insns like "foo X,Y,Z".  */

static void
do_mav_triple (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Insns like "foo W,X,Y,Z".
    where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */

static void
do_mav_quad (void)
{
  inst.instruction |= inst.operands[0].reg << 5;	/* W (accumulator).  */
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}

/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
static void
do_mav_dspsc (void)
{
  /* Only the source register is encoded; DSPSC is implicit.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
10637
/* Maverick shift immediate instructions.
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */

static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.  */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
10657 \f
/* XScale instructions.  Also sorted arithmetic before move.  */

/* Xscale multiply-accumulate (argument parse)
     MIAcc   acc0,Rm,Rs
     MIAPHcc acc0,Rm,Rs
     MIAxycc acc0,Rm,Rs.  */

static void
do_xsc_mia (void)
{
  /* acc0 is implicit; only Rm and Rs are encoded.  */
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Xscale move-accumulator-register (argument parse)

     MARcc   acc0,RdLo,RdHi.  */

static void
do_xsc_mar (void)
{
  inst.instruction |= inst.operands[1].reg << 12;	/* RdLo.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* RdHi.  */
}

/* Xscale move-register-accumulator (argument parse)

     MRAcc   RdLo,RdHi,acc0.  */

static void
do_xsc_mra (void)
{
  /* RdLo and RdHi must be distinct registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[0].reg << 12;	/* RdLo.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* RdHi.  */
}
10694 \f
/* Encoding functions relevant only to Thumb.  */

/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.
   Only constant shift amounts are accepted; LSL/LSR/ASR/ROR of 0 and
   the boundary value 32 are normalised to the architectural encoding
   (RRX is encoded as ROR with a zero amount).  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      /* 32 is only valid for LSR/ASR, where it is encoded as 0.  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      /* Shift amount is split: imm3 at bits [14:12], imm2 at [7:6].  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10731
10732
/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.  If it is a store insn,
   reject PC in Rn.  */

static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #0-3}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.relocs[0].exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.relocs[0].exp.X_add_number < 0
		      || inst.relocs[0].exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
	}
      inst.relocs[0].type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate pre-indexed form: [Rn {, #imm}]{!}.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      /* PC-relative addressing is only valid for loads.  */
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Immediate post-indexed form: [Rn], #imm (writeback implied).  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10811
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.
   Each X() entry is (mnemonic suffix, 16-bit opcode, 32-bit opcode);
   the X macro is redefined below to extract each column in turn.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Column 2: the 16-bit opcodes, indexed by T_MNEM_* minus the offset.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Column 3: the 32-bit opcodes; bit 20 is the S (flag-setting) bit.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10925
/* Thumb instruction encoders, in alphabetical order.  */

/* ADDW or SUBW: Rd, Rn, #imm12 — always the 32-bit wide encoding.  */

static void
do_t_add_sub_w (void)
{
  int Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
     is the SP-{plus,minus}-immediate form of the instruction.  */
  if (Rn == REG_SP)
    constraint (Rd == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rd);

  inst.instruction |= (Rn << 16) | (Rd << 8);
  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
}
10948
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equaling any of THUMB_OPCODE_add, adds, sub, or subs.
   Chooses between the many 16- and 32-bit encodings (including the
   relaxable ones) depending on the registers, the immediate, the
   requested size, and whether unified syntax is in effect.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* Inside an IT block, only the non-flag-setting narrow forms are
	 available; outside, only the flag-setting ones.  */
      if (flags)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || (inst.relocs[0].type
			  > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC))
		    {
		      /* Forced 2-byte: fix up now; otherwise record the
			 opcode for later relaxation to 32 bits.  */
		      if (inst.size_req == 2)
			inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
		      else
			inst.relax = opcode;
		    }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* The only PC-destination form is the exception-return
		     idiom SUBS PC, LR, #imm8.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.relocs[0].exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.relocs[0].exp.X_add_number < 0
			      || inst.relocs[0].exp.X_add_number > 0xff,
			     _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.relocs[0].exp.X_add_number;
		  inst.relocs[0].type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register (possibly shifted) operand.  */
	  unsigned int value = inst.relocs[0].exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: only the classic 16-bit Thumb-1
	 forms are available.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
11172
11173 static void
11174 do_t_adr (void)
11175 {
11176 unsigned Rd;
11177
11178 Rd = inst.operands[0].reg;
11179 reject_bad_reg (Rd);
11180
11181 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
11182 {
11183 /* Defer to section relaxation. */
11184 inst.relax = inst.instruction;
11185 inst.instruction = THUMB_OP16 (inst.instruction);
11186 inst.instruction |= Rd << 4;
11187 }
11188 else if (unified_syntax && inst.size_req != 2)
11189 {
11190 /* Generate a 32-bit opcode. */
11191 inst.instruction = THUMB_OP32 (inst.instruction);
11192 inst.instruction |= Rd << 8;
11193 inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_PC12;
11194 inst.relocs[0].pc_rel = 1;
11195 }
11196 else
11197 {
11198 /* Generate a 16-bit opcode. */
11199 inst.instruction = THUMB_OP16 (inst.instruction);
11200 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
11201 inst.relocs[0].exp.X_add_number -= 4; /* PC relative adjust. */
11202 inst.relocs[0].pc_rel = 1;
11203 inst.instruction |= Rd << 4;
11204 }
11205
11206 if (inst.relocs[0].exp.X_op == O_symbol
11207 && inst.relocs[0].exp.X_add_symbol != NULL
11208 && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
11209 && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
11210 inst.relocs[0].exp.X_add_number += 1;
11211 }
11212
11213 /* Arithmetic instructions for which there is just one 16-bit
11214 instruction encoding, and it allows only two low registers.
11215 For maximal compatibility with ARM syntax, we allow three register
11216 operands even when Thumb-32 instructions are not available, as long
11217 as the first two are identical. For instance, both "sbc r0,r1" and
11218 "sbc r0,r0,r1" are allowed. */
11219 static void
11220 do_t_arit3 (void)
11221 {
11222 int Rd, Rs, Rn;
11223
11224 Rd = inst.operands[0].reg;
11225 Rs = (inst.operands[1].present
11226 ? inst.operands[1].reg /* Rd, Rs, foo */
11227 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
11228 Rn = inst.operands[2].reg;
11229
11230 reject_bad_reg (Rd);
11231 reject_bad_reg (Rs);
11232 if (inst.operands[2].isreg)
11233 reject_bad_reg (Rn);
11234
11235 if (unified_syntax)
11236 {
11237 if (!inst.operands[2].isreg)
11238 {
11239 /* For an immediate, we always generate a 32-bit opcode;
11240 section relaxation will shrink it later if possible. */
11241 inst.instruction = THUMB_OP32 (inst.instruction);
11242 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11243 inst.instruction |= Rd << 8;
11244 inst.instruction |= Rs << 16;
11245 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
11246 }
11247 else
11248 {
11249 bfd_boolean narrow;
11250
11251 /* See if we can do this with a 16-bit instruction. */
11252 if (THUMB_SETS_FLAGS (inst.instruction))
11253 narrow = !in_pred_block ();
11254 else
11255 narrow = in_pred_block ();
11256
11257 if (Rd > 7 || Rn > 7 || Rs > 7)
11258 narrow = FALSE;
11259 if (inst.operands[2].shifted)
11260 narrow = FALSE;
11261 if (inst.size_req == 4)
11262 narrow = FALSE;
11263
11264 if (narrow
11265 && Rd == Rs)
11266 {
11267 inst.instruction = THUMB_OP16 (inst.instruction);
11268 inst.instruction |= Rd;
11269 inst.instruction |= Rn << 3;
11270 return;
11271 }
11272
11273 /* If we get here, it can't be done in 16 bits. */
11274 constraint (inst.operands[2].shifted
11275 && inst.operands[2].immisreg,
11276 _("shift must be constant"));
11277 inst.instruction = THUMB_OP32 (inst.instruction);
11278 inst.instruction |= Rd << 8;
11279 inst.instruction |= Rs << 16;
11280 encode_thumb32_shifted_operand (2);
11281 }
11282 }
11283 else
11284 {
11285 /* On its face this is a lie - the instruction does set the
11286 flags. However, the only supported mnemonic in this mode
11287 says it doesn't. */
11288 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11289
11290 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
11291 _("unshifted register required"));
11292 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
11293 constraint (Rd != Rs,
11294 _("dest and source1 must be the same register"));
11295
11296 inst.instruction = THUMB_OP16 (inst.instruction);
11297 inst.instruction |= Rd;
11298 inst.instruction |= Rn << 3;
11299 }
11300 }
11301
11302 /* Similarly, but for instructions where the arithmetic operation is
11303 commutative, so we can allow either of them to be different from
11304 the destination operand in a 16-bit instruction. For instance, all
11305 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11306 accepted. */
11307 static void
11308 do_t_arit3c (void)
11309 {
11310 int Rd, Rs, Rn;
11311
11312 Rd = inst.operands[0].reg;
11313 Rs = (inst.operands[1].present
11314 ? inst.operands[1].reg /* Rd, Rs, foo */
11315 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
11316 Rn = inst.operands[2].reg;
11317
11318 reject_bad_reg (Rd);
11319 reject_bad_reg (Rs);
11320 if (inst.operands[2].isreg)
11321 reject_bad_reg (Rn);
11322
11323 if (unified_syntax)
11324 {
11325 if (!inst.operands[2].isreg)
11326 {
11327 /* For an immediate, we always generate a 32-bit opcode;
11328 section relaxation will shrink it later if possible. */
11329 inst.instruction = THUMB_OP32 (inst.instruction);
11330 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11331 inst.instruction |= Rd << 8;
11332 inst.instruction |= Rs << 16;
11333 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
11334 }
11335 else
11336 {
11337 bfd_boolean narrow;
11338
11339 /* See if we can do this with a 16-bit instruction. */
11340 if (THUMB_SETS_FLAGS (inst.instruction))
11341 narrow = !in_pred_block ();
11342 else
11343 narrow = in_pred_block ();
11344
11345 if (Rd > 7 || Rn > 7 || Rs > 7)
11346 narrow = FALSE;
11347 if (inst.operands[2].shifted)
11348 narrow = FALSE;
11349 if (inst.size_req == 4)
11350 narrow = FALSE;
11351
11352 if (narrow)
11353 {
11354 if (Rd == Rs)
11355 {
11356 inst.instruction = THUMB_OP16 (inst.instruction);
11357 inst.instruction |= Rd;
11358 inst.instruction |= Rn << 3;
11359 return;
11360 }
11361 if (Rd == Rn)
11362 {
11363 inst.instruction = THUMB_OP16 (inst.instruction);
11364 inst.instruction |= Rd;
11365 inst.instruction |= Rs << 3;
11366 return;
11367 }
11368 }
11369
11370 /* If we get here, it can't be done in 16 bits. */
11371 constraint (inst.operands[2].shifted
11372 && inst.operands[2].immisreg,
11373 _("shift must be constant"));
11374 inst.instruction = THUMB_OP32 (inst.instruction);
11375 inst.instruction |= Rd << 8;
11376 inst.instruction |= Rs << 16;
11377 encode_thumb32_shifted_operand (2);
11378 }
11379 }
11380 else
11381 {
11382 /* On its face this is a lie - the instruction does set the
11383 flags. However, the only supported mnemonic in this mode
11384 says it doesn't. */
11385 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11386
11387 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
11388 _("unshifted register required"));
11389 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
11390
11391 inst.instruction = THUMB_OP16 (inst.instruction);
11392 inst.instruction |= Rd;
11393
11394 if (Rd == Rs)
11395 inst.instruction |= Rn << 3;
11396 else if (Rd == Rn)
11397 inst.instruction |= Rs << 3;
11398 else
11399 constraint (1, _("dest must overlap one source register"));
11400 }
11401 }
11402
11403 static void
11404 do_t_bfc (void)
11405 {
11406 unsigned Rd;
11407 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
11408 constraint (msb > 32, _("bit-field extends past end of register"));
11409 /* The instruction encoding stores the LSB and MSB,
11410 not the LSB and width. */
11411 Rd = inst.operands[0].reg;
11412 reject_bad_reg (Rd);
11413 inst.instruction |= Rd << 8;
11414 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
11415 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
11416 inst.instruction |= msb - 1;
11417 }
11418
11419 static void
11420 do_t_bfi (void)
11421 {
11422 int Rd, Rn;
11423 unsigned int msb;
11424
11425 Rd = inst.operands[0].reg;
11426 reject_bad_reg (Rd);
11427
11428 /* #0 in second position is alternative syntax for bfc, which is
11429 the same instruction but with REG_PC in the Rm field. */
11430 if (!inst.operands[1].isreg)
11431 Rn = REG_PC;
11432 else
11433 {
11434 Rn = inst.operands[1].reg;
11435 reject_bad_reg (Rn);
11436 }
11437
11438 msb = inst.operands[2].imm + inst.operands[3].imm;
11439 constraint (msb > 32, _("bit-field extends past end of register"));
11440 /* The instruction encoding stores the LSB and MSB,
11441 not the LSB and width. */
11442 inst.instruction |= Rd << 8;
11443 inst.instruction |= Rn << 16;
11444 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11445 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11446 inst.instruction |= msb - 1;
11447 }
11448
11449 static void
11450 do_t_bfx (void)
11451 {
11452 unsigned Rd, Rn;
11453
11454 Rd = inst.operands[0].reg;
11455 Rn = inst.operands[1].reg;
11456
11457 reject_bad_reg (Rd);
11458 reject_bad_reg (Rn);
11459
11460 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
11461 _("bit-field extends past end of register"));
11462 inst.instruction |= Rd << 8;
11463 inst.instruction |= Rn << 16;
11464 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11465 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11466 inst.instruction |= inst.operands[3].imm - 1;
11467 }
11468
11469 /* ARM V5 Thumb BLX (argument parse)
11470 BLX <target_addr> which is BLX(1)
11471 BLX <Rm> which is BLX(2)
11472 Unfortunately, there are two different opcodes for this mnemonic.
11473 So, the insns[].value is not used, and the code here zaps values
11474 into inst.instruction.
11475
11476 ??? How to take advantage of the additional two bits of displacement
11477 available in Thumb32 mode? Need new relocation? */
11478
11479 static void
11480 do_t_blx (void)
11481 {
11482 set_pred_insn_type_last ();
11483
11484 if (inst.operands[0].isreg)
11485 {
11486 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
11487 /* We have a register, so this is BLX(2). */
11488 inst.instruction |= inst.operands[0].reg << 3;
11489 }
11490 else
11491 {
11492 /* No register. This must be BLX(1). */
11493 inst.instruction = 0xf000e800;
11494 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
11495 }
11496 }
11497
11498 static void
11499 do_t_branch (void)
11500 {
11501 int opcode;
11502 int cond;
11503 bfd_reloc_code_real_type reloc;
11504
11505 cond = inst.cond;
11506 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN);
11507
11508 if (in_pred_block ())
11509 {
11510 /* Conditional branches inside IT blocks are encoded as unconditional
11511 branches. */
11512 cond = COND_ALWAYS;
11513 }
11514 else
11515 cond = inst.cond;
11516
11517 if (cond != COND_ALWAYS)
11518 opcode = T_MNEM_bcond;
11519 else
11520 opcode = inst.instruction;
11521
11522 if (unified_syntax
11523 && (inst.size_req == 4
11524 || (inst.size_req != 2
11525 && (inst.operands[0].hasreloc
11526 || inst.relocs[0].exp.X_op == O_constant))))
11527 {
11528 inst.instruction = THUMB_OP32(opcode);
11529 if (cond == COND_ALWAYS)
11530 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
11531 else
11532 {
11533 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
11534 _("selected architecture does not support "
11535 "wide conditional branch instruction"));
11536
11537 gas_assert (cond != 0xF);
11538 inst.instruction |= cond << 22;
11539 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
11540 }
11541 }
11542 else
11543 {
11544 inst.instruction = THUMB_OP16(opcode);
11545 if (cond == COND_ALWAYS)
11546 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
11547 else
11548 {
11549 inst.instruction |= cond << 8;
11550 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
11551 }
11552 /* Allow section relaxation. */
11553 if (unified_syntax && inst.size_req != 2)
11554 inst.relax = opcode;
11555 }
11556 inst.relocs[0].type = reloc;
11557 inst.relocs[0].pc_rel = 1;
11558 }
11559
11560 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11561 between the two is the maximum immediate allowed - which is passed in
11562 RANGE. */
11563 static void
11564 do_t_bkpt_hlt1 (int range)
11565 {
11566 constraint (inst.cond != COND_ALWAYS,
11567 _("instruction is always unconditional"));
11568 if (inst.operands[0].present)
11569 {
11570 constraint (inst.operands[0].imm > range,
11571 _("immediate value out of range"));
11572 inst.instruction |= inst.operands[0].imm;
11573 }
11574
11575 set_pred_insn_type (NEUTRAL_IT_INSN);
11576 }
11577
/* Thumb HLT: 6-bit immediate, maximum 63.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
11583
/* Thumb BKPT: 8-bit immediate, maximum 255.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11589
11590 static void
11591 do_t_branch23 (void)
11592 {
11593 set_pred_insn_type_last ();
11594 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
11595
11596 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11597 this file. We used to simply ignore the PLT reloc type here --
11598 the branch encoding is now needed to deal with TLSCALL relocs.
11599 So if we see a PLT reloc now, put it back to how it used to be to
11600 keep the preexisting behaviour. */
11601 if (inst.relocs[0].type == BFD_RELOC_ARM_PLT32)
11602 inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH23;
11603
11604 #if defined(OBJ_COFF)
11605 /* If the destination of the branch is a defined symbol which does not have
11606 the THUMB_FUNC attribute, then we must be calling a function which has
11607 the (interfacearm) attribute. We look for the Thumb entry point to that
11608 function and change the branch to refer to that function instead. */
11609 if ( inst.relocs[0].exp.X_op == O_symbol
11610 && inst.relocs[0].exp.X_add_symbol != NULL
11611 && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
11612 && ! THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
11613 inst.relocs[0].exp.X_add_symbol
11614 = find_real_start (inst.relocs[0].exp.X_add_symbol);
11615 #endif
11616 }
11617
11618 static void
11619 do_t_bx (void)
11620 {
11621 set_pred_insn_type_last ();
11622 inst.instruction |= inst.operands[0].reg << 3;
11623 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11624 should cause the alignment to be checked once it is known. This is
11625 because BX PC only works if the instruction is word aligned. */
11626 }
11627
11628 static void
11629 do_t_bxj (void)
11630 {
11631 int Rm;
11632
11633 set_pred_insn_type_last ();
11634 Rm = inst.operands[0].reg;
11635 reject_bad_reg (Rm);
11636 inst.instruction |= Rm << 16;
11637 }
11638
11639 static void
11640 do_t_clz (void)
11641 {
11642 unsigned Rd;
11643 unsigned Rm;
11644
11645 Rd = inst.operands[0].reg;
11646 Rm = inst.operands[1].reg;
11647
11648 reject_bad_reg (Rd);
11649 reject_bad_reg (Rm);
11650
11651 inst.instruction |= Rd << 8;
11652 inst.instruction |= Rm << 16;
11653 inst.instruction |= Rm;
11654 }
11655
11656 static void
11657 do_t_csdb (void)
11658 {
11659 set_pred_insn_type (OUTSIDE_PRED_INSN);
11660 }
11661
11662 static void
11663 do_t_cps (void)
11664 {
11665 set_pred_insn_type (OUTSIDE_PRED_INSN);
11666 inst.instruction |= inst.operands[0].imm;
11667 }
11668
11669 static void
11670 do_t_cpsi (void)
11671 {
11672 set_pred_insn_type (OUTSIDE_PRED_INSN);
11673 if (unified_syntax
11674 && (inst.operands[1].present || inst.size_req == 4)
11675 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
11676 {
11677 unsigned int imod = (inst.instruction & 0x0030) >> 4;
11678 inst.instruction = 0xf3af8000;
11679 inst.instruction |= imod << 9;
11680 inst.instruction |= inst.operands[0].imm << 5;
11681 if (inst.operands[1].present)
11682 inst.instruction |= 0x100 | inst.operands[1].imm;
11683 }
11684 else
11685 {
11686 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
11687 && (inst.operands[0].imm & 4),
11688 _("selected processor does not support 'A' form "
11689 "of this instruction"));
11690 constraint (inst.operands[1].present || inst.size_req == 4,
11691 _("Thumb does not support the 2-argument "
11692 "form of this instruction"));
11693 inst.instruction |= inst.operands[0].imm;
11694 }
11695 }
11696
11697 /* THUMB CPY instruction (argument parse). */
11698
11699 static void
11700 do_t_cpy (void)
11701 {
11702 if (inst.size_req == 4)
11703 {
11704 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11705 inst.instruction |= inst.operands[0].reg << 8;
11706 inst.instruction |= inst.operands[1].reg;
11707 }
11708 else
11709 {
11710 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11711 inst.instruction |= (inst.operands[0].reg & 0x7);
11712 inst.instruction |= inst.operands[1].reg << 3;
11713 }
11714 }
11715
11716 static void
11717 do_t_cbz (void)
11718 {
11719 set_pred_insn_type (OUTSIDE_PRED_INSN);
11720 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11721 inst.instruction |= inst.operands[0].reg;
11722 inst.relocs[0].pc_rel = 1;
11723 inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11724 }
11725
11726 static void
11727 do_t_dbg (void)
11728 {
11729 inst.instruction |= inst.operands[0].imm;
11730 }
11731
11732 static void
11733 do_t_div (void)
11734 {
11735 unsigned Rd, Rn, Rm;
11736
11737 Rd = inst.operands[0].reg;
11738 Rn = (inst.operands[1].present
11739 ? inst.operands[1].reg : Rd);
11740 Rm = inst.operands[2].reg;
11741
11742 reject_bad_reg (Rd);
11743 reject_bad_reg (Rn);
11744 reject_bad_reg (Rm);
11745
11746 inst.instruction |= Rd << 8;
11747 inst.instruction |= Rn << 16;
11748 inst.instruction |= Rm;
11749 }
11750
11751 static void
11752 do_t_hint (void)
11753 {
11754 if (unified_syntax && inst.size_req == 4)
11755 inst.instruction = THUMB_OP32 (inst.instruction);
11756 else
11757 inst.instruction = THUMB_OP16 (inst.instruction);
11758 }
11759
11760 static void
11761 do_t_it (void)
11762 {
11763 unsigned int cond = inst.operands[0].imm;
11764
11765 set_pred_insn_type (IT_INSN);
11766 now_pred.mask = (inst.instruction & 0xf) | 0x10;
11767 now_pred.cc = cond;
11768 now_pred.warn_deprecated = FALSE;
11769 now_pred.type = SCALAR_PRED;
11770
11771 /* If the condition is a negative condition, invert the mask. */
11772 if ((cond & 0x1) == 0x0)
11773 {
11774 unsigned int mask = inst.instruction & 0x000f;
11775
11776 if ((mask & 0x7) == 0)
11777 {
11778 /* No conversion needed. */
11779 now_pred.block_length = 1;
11780 }
11781 else if ((mask & 0x3) == 0)
11782 {
11783 mask ^= 0x8;
11784 now_pred.block_length = 2;
11785 }
11786 else if ((mask & 0x1) == 0)
11787 {
11788 mask ^= 0xC;
11789 now_pred.block_length = 3;
11790 }
11791 else
11792 {
11793 mask ^= 0xE;
11794 now_pred.block_length = 4;
11795 }
11796
11797 inst.instruction &= 0xfff0;
11798 inst.instruction |= mask;
11799 }
11800
11801 inst.instruction |= cond << 4;
11802 }
11803
11804 static void
11805 do_mve_vpt (void)
11806 {
11807 /* We are dealing with a vector predicated block. */
11808 set_pred_insn_type (VPT_INSN);
11809 now_pred.cc = 0;
11810 now_pred.mask = ((inst.instruction & 0x00400000) >> 19)
11811 | ((inst.instruction & 0xe000) >> 13);
11812 now_pred.warn_deprecated = FALSE;
11813 now_pred.type = VECTOR_PRED;
11814 }
11815
11816 /* Helper function used for both push/pop and ldm/stm. */
11817 static void
11818 encode_thumb2_multi (bfd_boolean do_io, int base, unsigned mask,
11819 bfd_boolean writeback)
11820 {
11821 bfd_boolean load, store;
11822
11823 gas_assert (base != -1 || !do_io);
11824 load = do_io && ((inst.instruction & (1 << 20)) != 0);
11825 store = do_io && !load;
11826
11827 if (mask & (1 << 13))
11828 inst.error = _("SP not allowed in register list");
11829
11830 if (do_io && (mask & (1 << base)) != 0
11831 && writeback)
11832 inst.error = _("having the base register in the register list when "
11833 "using write back is UNPREDICTABLE");
11834
11835 if (load)
11836 {
11837 if (mask & (1 << 15))
11838 {
11839 if (mask & (1 << 14))
11840 inst.error = _("LR and PC should not both be in register list");
11841 else
11842 set_pred_insn_type_last ();
11843 }
11844 }
11845 else if (store)
11846 {
11847 if (mask & (1 << 15))
11848 inst.error = _("PC not allowed in register list");
11849 }
11850
11851 if (do_io && ((mask & (mask - 1)) == 0))
11852 {
11853 /* Single register transfers implemented as str/ldr. */
11854 if (writeback)
11855 {
11856 if (inst.instruction & (1 << 23))
11857 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
11858 else
11859 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
11860 }
11861 else
11862 {
11863 if (inst.instruction & (1 << 23))
11864 inst.instruction = 0x00800000; /* ia -> [base] */
11865 else
11866 inst.instruction = 0x00000c04; /* db -> [base, #-4] */
11867 }
11868
11869 inst.instruction |= 0xf8400000;
11870 if (load)
11871 inst.instruction |= 0x00100000;
11872
11873 mask = ffs (mask) - 1;
11874 mask <<= 12;
11875 }
11876 else if (writeback)
11877 inst.instruction |= WRITE_BACK;
11878
11879 inst.instruction |= mask;
11880 if (do_io)
11881 inst.instruction |= base << 16;
11882 }
11883
11884 static void
11885 do_t_ldmstm (void)
11886 {
11887 /* This really doesn't seem worth it. */
11888 constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
11889 _("expression too complex"));
11890 constraint (inst.operands[1].writeback,
11891 _("Thumb load/store multiple does not support {reglist}^"));
11892
11893 if (unified_syntax)
11894 {
11895 bfd_boolean narrow;
11896 unsigned mask;
11897
11898 narrow = FALSE;
11899 /* See if we can use a 16-bit instruction. */
11900 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
11901 && inst.size_req != 4
11902 && !(inst.operands[1].imm & ~0xff))
11903 {
11904 mask = 1 << inst.operands[0].reg;
11905
11906 if (inst.operands[0].reg <= 7)
11907 {
11908 if (inst.instruction == T_MNEM_stmia
11909 ? inst.operands[0].writeback
11910 : (inst.operands[0].writeback
11911 == !(inst.operands[1].imm & mask)))
11912 {
11913 if (inst.instruction == T_MNEM_stmia
11914 && (inst.operands[1].imm & mask)
11915 && (inst.operands[1].imm & (mask - 1)))
11916 as_warn (_("value stored for r%d is UNKNOWN"),
11917 inst.operands[0].reg);
11918
11919 inst.instruction = THUMB_OP16 (inst.instruction);
11920 inst.instruction |= inst.operands[0].reg << 8;
11921 inst.instruction |= inst.operands[1].imm;
11922 narrow = TRUE;
11923 }
11924 else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
11925 {
11926 /* This means 1 register in reg list one of 3 situations:
11927 1. Instruction is stmia, but without writeback.
11928 2. lmdia without writeback, but with Rn not in
11929 reglist.
11930 3. ldmia with writeback, but with Rn in reglist.
11931 Case 3 is UNPREDICTABLE behaviour, so we handle
11932 case 1 and 2 which can be converted into a 16-bit
11933 str or ldr. The SP cases are handled below. */
11934 unsigned long opcode;
11935 /* First, record an error for Case 3. */
11936 if (inst.operands[1].imm & mask
11937 && inst.operands[0].writeback)
11938 inst.error =
11939 _("having the base register in the register list when "
11940 "using write back is UNPREDICTABLE");
11941
11942 opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
11943 : T_MNEM_ldr);
11944 inst.instruction = THUMB_OP16 (opcode);
11945 inst.instruction |= inst.operands[0].reg << 3;
11946 inst.instruction |= (ffs (inst.operands[1].imm)-1);
11947 narrow = TRUE;
11948 }
11949 }
11950 else if (inst.operands[0] .reg == REG_SP)
11951 {
11952 if (inst.operands[0].writeback)
11953 {
11954 inst.instruction =
11955 THUMB_OP16 (inst.instruction == T_MNEM_stmia
11956 ? T_MNEM_push : T_MNEM_pop);
11957 inst.instruction |= inst.operands[1].imm;
11958 narrow = TRUE;
11959 }
11960 else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
11961 {
11962 inst.instruction =
11963 THUMB_OP16 (inst.instruction == T_MNEM_stmia
11964 ? T_MNEM_str_sp : T_MNEM_ldr_sp);
11965 inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
11966 narrow = TRUE;
11967 }
11968 }
11969 }
11970
11971 if (!narrow)
11972 {
11973 if (inst.instruction < 0xffff)
11974 inst.instruction = THUMB_OP32 (inst.instruction);
11975
11976 encode_thumb2_multi (TRUE /* do_io */, inst.operands[0].reg,
11977 inst.operands[1].imm,
11978 inst.operands[0].writeback);
11979 }
11980 }
11981 else
11982 {
11983 constraint (inst.operands[0].reg > 7
11984 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
11985 constraint (inst.instruction != T_MNEM_ldmia
11986 && inst.instruction != T_MNEM_stmia,
11987 _("Thumb-2 instruction only valid in unified syntax"));
11988 if (inst.instruction == T_MNEM_stmia)
11989 {
11990 if (!inst.operands[0].writeback)
11991 as_warn (_("this instruction will write back the base register"));
11992 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
11993 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
11994 as_warn (_("value stored for r%d is UNKNOWN"),
11995 inst.operands[0].reg);
11996 }
11997 else
11998 {
11999 if (!inst.operands[0].writeback
12000 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
12001 as_warn (_("this instruction will write back the base register"));
12002 else if (inst.operands[0].writeback
12003 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
12004 as_warn (_("this instruction will not write back the base register"));
12005 }
12006
12007 inst.instruction = THUMB_OP16 (inst.instruction);
12008 inst.instruction |= inst.operands[0].reg << 8;
12009 inst.instruction |= inst.operands[1].imm;
12010 }
12011 }
12012
12013 static void
12014 do_t_ldrex (void)
12015 {
12016 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
12017 || inst.operands[1].postind || inst.operands[1].writeback
12018 || inst.operands[1].immisreg || inst.operands[1].shifted
12019 || inst.operands[1].negative,
12020 BAD_ADDR_MODE);
12021
12022 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
12023
12024 inst.instruction |= inst.operands[0].reg << 12;
12025 inst.instruction |= inst.operands[1].reg << 16;
12026 inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
12027 }
12028
12029 static void
12030 do_t_ldrexd (void)
12031 {
12032 if (!inst.operands[1].present)
12033 {
12034 constraint (inst.operands[0].reg == REG_LR,
12035 _("r14 not allowed as first register "
12036 "when second register is omitted"));
12037 inst.operands[1].reg = inst.operands[0].reg + 1;
12038 }
12039 constraint (inst.operands[0].reg == inst.operands[1].reg,
12040 BAD_OVERLAP);
12041
12042 inst.instruction |= inst.operands[0].reg << 12;
12043 inst.instruction |= inst.operands[1].reg << 8;
12044 inst.instruction |= inst.operands[2].reg << 16;
12045 }
12046
12047 static void
12048 do_t_ldst (void)
12049 {
12050 unsigned long opcode;
12051 int Rn;
12052
12053 if (inst.operands[0].isreg
12054 && !inst.operands[0].preind
12055 && inst.operands[0].reg == REG_PC)
12056 set_pred_insn_type_last ();
12057
12058 opcode = inst.instruction;
12059 if (unified_syntax)
12060 {
12061 if (!inst.operands[1].isreg)
12062 {
12063 if (opcode <= 0xffff)
12064 inst.instruction = THUMB_OP32 (opcode);
12065 if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
12066 return;
12067 }
12068 if (inst.operands[1].isreg
12069 && !inst.operands[1].writeback
12070 && !inst.operands[1].shifted && !inst.operands[1].postind
12071 && !inst.operands[1].negative && inst.operands[0].reg <= 7
12072 && opcode <= 0xffff
12073 && inst.size_req != 4)
12074 {
12075 /* Insn may have a 16-bit form. */
12076 Rn = inst.operands[1].reg;
12077 if (inst.operands[1].immisreg)
12078 {
12079 inst.instruction = THUMB_OP16 (opcode);
12080 /* [Rn, Rik] */
12081 if (Rn <= 7 && inst.operands[1].imm <= 7)
12082 goto op16;
12083 else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
12084 reject_bad_reg (inst.operands[1].imm);
12085 }
12086 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
12087 && opcode != T_MNEM_ldrsb)
12088 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
12089 || (Rn == REG_SP && opcode == T_MNEM_str))
12090 {
12091 /* [Rn, #const] */
12092 if (Rn > 7)
12093 {
12094 if (Rn == REG_PC)
12095 {
12096 if (inst.relocs[0].pc_rel)
12097 opcode = T_MNEM_ldr_pc2;
12098 else
12099 opcode = T_MNEM_ldr_pc;
12100 }
12101 else
12102 {
12103 if (opcode == T_MNEM_ldr)
12104 opcode = T_MNEM_ldr_sp;
12105 else
12106 opcode = T_MNEM_str_sp;
12107 }
12108 inst.instruction = inst.operands[0].reg << 8;
12109 }
12110 else
12111 {
12112 inst.instruction = inst.operands[0].reg;
12113 inst.instruction |= inst.operands[1].reg << 3;
12114 }
12115 inst.instruction |= THUMB_OP16 (opcode);
12116 if (inst.size_req == 2)
12117 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
12118 else
12119 inst.relax = opcode;
12120 return;
12121 }
12122 }
12123 /* Definitely a 32-bit variant. */
12124
12125 /* Warning for Erratum 752419. */
12126 if (opcode == T_MNEM_ldr
12127 && inst.operands[0].reg == REG_SP
12128 && inst.operands[1].writeback == 1
12129 && !inst.operands[1].immisreg)
12130 {
12131 if (no_cpu_selected ()
12132 || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
12133 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
12134 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
12135 as_warn (_("This instruction may be unpredictable "
12136 "if executed on M-profile cores "
12137 "with interrupts enabled."));
12138 }
12139
12140 /* Do some validations regarding addressing modes. */
12141 if (inst.operands[1].immisreg)
12142 reject_bad_reg (inst.operands[1].imm);
12143
12144 constraint (inst.operands[1].writeback == 1
12145 && inst.operands[0].reg == inst.operands[1].reg,
12146 BAD_OVERLAP);
12147
12148 inst.instruction = THUMB_OP32 (opcode);
12149 inst.instruction |= inst.operands[0].reg << 12;
12150 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
12151 check_ldr_r15_aligned ();
12152 return;
12153 }
12154
12155 constraint (inst.operands[0].reg > 7, BAD_HIREG);
12156
12157 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
12158 {
12159 /* Only [Rn,Rm] is acceptable. */
12160 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
12161 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
12162 || inst.operands[1].postind || inst.operands[1].shifted
12163 || inst.operands[1].negative,
12164 _("Thumb does not support this addressing mode"));
12165 inst.instruction = THUMB_OP16 (inst.instruction);
12166 goto op16;
12167 }
12168
12169 inst.instruction = THUMB_OP16 (inst.instruction);
12170 if (!inst.operands[1].isreg)
12171 if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
12172 return;
12173
12174 constraint (!inst.operands[1].preind
12175 || inst.operands[1].shifted
12176 || inst.operands[1].writeback,
12177 _("Thumb does not support this addressing mode"));
12178 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
12179 {
12180 constraint (inst.instruction & 0x0600,
12181 _("byte or halfword not valid for base register"));
12182 constraint (inst.operands[1].reg == REG_PC
12183 && !(inst.instruction & THUMB_LOAD_BIT),
12184 _("r15 based store not allowed"));
12185 constraint (inst.operands[1].immisreg,
12186 _("invalid base register for register offset"));
12187
12188 if (inst.operands[1].reg == REG_PC)
12189 inst.instruction = T_OPCODE_LDR_PC;
12190 else if (inst.instruction & THUMB_LOAD_BIT)
12191 inst.instruction = T_OPCODE_LDR_SP;
12192 else
12193 inst.instruction = T_OPCODE_STR_SP;
12194
12195 inst.instruction |= inst.operands[0].reg << 8;
12196 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
12197 return;
12198 }
12199
12200 constraint (inst.operands[1].reg > 7, BAD_HIREG);
12201 if (!inst.operands[1].immisreg)
12202 {
12203 /* Immediate offset. */
12204 inst.instruction |= inst.operands[0].reg;
12205 inst.instruction |= inst.operands[1].reg << 3;
12206 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
12207 return;
12208 }
12209
12210 /* Register offset. */
12211 constraint (inst.operands[1].imm > 7, BAD_HIREG);
12212 constraint (inst.operands[1].negative,
12213 _("Thumb does not support this addressing mode"));
12214
12215 op16:
12216 switch (inst.instruction)
12217 {
12218 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
12219 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
12220 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
12221 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
12222 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
12223 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
12224 case 0x5600 /* ldrsb */:
12225 case 0x5e00 /* ldrsh */: break;
12226 default: abort ();
12227 }
12228
12229 inst.instruction |= inst.operands[0].reg;
12230 inst.instruction |= inst.operands[1].reg << 3;
12231 inst.instruction |= inst.operands[1].imm << 6;
12232 }
12233
12234 static void
12235 do_t_ldstd (void)
12236 {
12237 if (!inst.operands[1].present)
12238 {
12239 inst.operands[1].reg = inst.operands[0].reg + 1;
12240 constraint (inst.operands[0].reg == REG_LR,
12241 _("r14 not allowed here"));
12242 constraint (inst.operands[0].reg == REG_R12,
12243 _("r12 not allowed here"));
12244 }
12245
12246 if (inst.operands[2].writeback
12247 && (inst.operands[0].reg == inst.operands[2].reg
12248 || inst.operands[1].reg == inst.operands[2].reg))
12249 as_warn (_("base register written back, and overlaps "
12250 "one of transfer registers"));
12251
12252 inst.instruction |= inst.operands[0].reg << 12;
12253 inst.instruction |= inst.operands[1].reg << 8;
12254 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
12255 }
12256
12257 static void
12258 do_t_ldstt (void)
12259 {
12260 inst.instruction |= inst.operands[0].reg << 12;
12261 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
12262 }
12263
12264 static void
12265 do_t_mla (void)
12266 {
12267 unsigned Rd, Rn, Rm, Ra;
12268
12269 Rd = inst.operands[0].reg;
12270 Rn = inst.operands[1].reg;
12271 Rm = inst.operands[2].reg;
12272 Ra = inst.operands[3].reg;
12273
12274 reject_bad_reg (Rd);
12275 reject_bad_reg (Rn);
12276 reject_bad_reg (Rm);
12277 reject_bad_reg (Ra);
12278
12279 inst.instruction |= Rd << 8;
12280 inst.instruction |= Rn << 16;
12281 inst.instruction |= Rm;
12282 inst.instruction |= Ra << 12;
12283 }
12284
12285 static void
12286 do_t_mlal (void)
12287 {
12288 unsigned RdLo, RdHi, Rn, Rm;
12289
12290 RdLo = inst.operands[0].reg;
12291 RdHi = inst.operands[1].reg;
12292 Rn = inst.operands[2].reg;
12293 Rm = inst.operands[3].reg;
12294
12295 reject_bad_reg (RdLo);
12296 reject_bad_reg (RdHi);
12297 reject_bad_reg (Rn);
12298 reject_bad_reg (Rm);
12299
12300 inst.instruction |= RdLo << 12;
12301 inst.instruction |= RdHi << 8;
12302 inst.instruction |= Rn << 16;
12303 inst.instruction |= Rm;
12304 }
12305
/* Encode Thumb MOV, MOVS and CMP.  Chooses between the 16-bit and
   32-bit encodings (including relaxation when no size is requested),
   turns register-shifted sources into separate shift instructions,
   and handles the special case MOVS PC, LR, which is really
   SUBS PC, LR, #0.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;	/* Destination / first compare operand.  */
  Rm = inst.operands[1].reg;	/* Source register, when operand 1 is a reg.  */

  /* A write to PC must be the last instruction of an IT block.  */
  if (Rn == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      /* Bit position of Rn in the wide encoding: 8 for MOV-type
	 opcodes, 16 for CMP.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      /* Inside an IT block the flag-setting MOVS has no narrow form;
	 outside one, MOVS is narrow only with two low registers.  */
      if (in_pred_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				   "deprecated when r%u is the destination "
				   "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_pred_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.relocs[0].type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  /* No explicit size: let relaxation pick narrow or
		     wide later.  */
		  if (inst.size_req == 2)
		    inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      /* The Thumb-1 ALU_ABS group relocations only fit the
		 16-bit encoding.  */
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_pred_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  /* The narrow shift form writes its own source register.  */
	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_pred_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		/* No narrow immediate ROR: fall back to wide.  */
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		/* NOTE(review): this constraint string lacks the _()
		   gettext wrapper used elsewhere in this file.  */
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Pre-UAL syntax: only the 16-bit encodings exist.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  /* High registers: use the CPY-style hi-reg encodings.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
12605
12606 static void
12607 do_t_mov16 (void)
12608 {
12609 unsigned Rd;
12610 bfd_vma imm;
12611 bfd_boolean top;
12612
12613 top = (inst.instruction & 0x00800000) != 0;
12614 if (inst.relocs[0].type == BFD_RELOC_ARM_MOVW)
12615 {
12616 constraint (top, _(":lower16: not allowed in this instruction"));
12617 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVW;
12618 }
12619 else if (inst.relocs[0].type == BFD_RELOC_ARM_MOVT)
12620 {
12621 constraint (!top, _(":upper16: not allowed in this instruction"));
12622 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVT;
12623 }
12624
12625 Rd = inst.operands[0].reg;
12626 reject_bad_reg (Rd);
12627
12628 inst.instruction |= Rd << 8;
12629 if (inst.relocs[0].type == BFD_RELOC_UNUSED)
12630 {
12631 imm = inst.relocs[0].exp.X_add_number;
12632 inst.instruction |= (imm & 0xf000) << 4;
12633 inst.instruction |= (imm & 0x0800) << 15;
12634 inst.instruction |= (imm & 0x0700) << 4;
12635 inst.instruction |= (imm & 0x00ff);
12636 }
12637 }
12638
/* Encode MVN/MVNS/TST/CMN and related two-operand instructions.
   Chooses 16-bit vs 32-bit encodings; immediate operands always take
   the 32-bit form.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN allow SP as the first operand (only PC is rejected); the
     others reject both SP and PC.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* Bit position of Rn in the wide encoding: 8 for MVN-type,
	 16 for TST/CMN-type opcodes.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-UAL: only the unshifted low-register 16-bit form exists.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12718
/* Encode Thumb-2 MRS.  Operand 1 is either a banked register (isreg
   set, encoding bits packed in the reg field) or a PSR-mask
   immediate.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* VFP/Neon system registers share this mnemonic space; let the VFP
     encoder claim the instruction first.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register form: MRS Rd, <banked_reg>.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
	      "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12766
/* Encode Thumb-2 MSR.  Operand 0 names the PSR (banked register or
   field-mask immediate); operand 1 must be a core register.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* VFP/Neon system registers share this mnemonic space; let the VFP
     encoder claim the instruction first.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      /* With the DSP extension _s/_f fields are allowed; without it
	 only the _f field is.  */
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12813
12814 static void
12815 do_t_mul (void)
12816 {
12817 bfd_boolean narrow;
12818 unsigned Rd, Rn, Rm;
12819
12820 if (!inst.operands[2].present)
12821 inst.operands[2].reg = inst.operands[0].reg;
12822
12823 Rd = inst.operands[0].reg;
12824 Rn = inst.operands[1].reg;
12825 Rm = inst.operands[2].reg;
12826
12827 if (unified_syntax)
12828 {
12829 if (inst.size_req == 4
12830 || (Rd != Rn
12831 && Rd != Rm)
12832 || Rn > 7
12833 || Rm > 7)
12834 narrow = FALSE;
12835 else if (inst.instruction == T_MNEM_muls)
12836 narrow = !in_pred_block ();
12837 else
12838 narrow = in_pred_block ();
12839 }
12840 else
12841 {
12842 constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
12843 constraint (Rn > 7 || Rm > 7,
12844 BAD_HIREG);
12845 narrow = TRUE;
12846 }
12847
12848 if (narrow)
12849 {
12850 /* 16-bit MULS/Conditional MUL. */
12851 inst.instruction = THUMB_OP16 (inst.instruction);
12852 inst.instruction |= Rd;
12853
12854 if (Rd == Rn)
12855 inst.instruction |= Rm << 3;
12856 else if (Rd == Rm)
12857 inst.instruction |= Rn << 3;
12858 else
12859 constraint (1, _("dest must overlap one source register"));
12860 }
12861 else
12862 {
12863 constraint (inst.instruction != T_MNEM_mul,
12864 _("Thumb-2 MUL must not set flags"));
12865 /* 32-bit MUL. */
12866 inst.instruction = THUMB_OP32 (inst.instruction);
12867 inst.instruction |= Rd << 8;
12868 inst.instruction |= Rn << 16;
12869 inst.instruction |= Rm << 0;
12870
12871 reject_bad_reg (Rd);
12872 reject_bad_reg (Rn);
12873 reject_bad_reg (Rm);
12874 }
12875 }
12876
12877 static void
12878 do_t_mull (void)
12879 {
12880 unsigned RdLo, RdHi, Rn, Rm;
12881
12882 RdLo = inst.operands[0].reg;
12883 RdHi = inst.operands[1].reg;
12884 Rn = inst.operands[2].reg;
12885 Rm = inst.operands[3].reg;
12886
12887 reject_bad_reg (RdLo);
12888 reject_bad_reg (RdHi);
12889 reject_bad_reg (Rn);
12890 reject_bad_reg (Rm);
12891
12892 inst.instruction |= RdLo << 12;
12893 inst.instruction |= RdHi << 8;
12894 inst.instruction |= Rn << 16;
12895 inst.instruction |= Rm;
12896
12897 if (RdLo == RdHi)
12898 as_tsktsk (_("rdhi and rdlo must be different"));
12899 }
12900
12901 static void
12902 do_t_nop (void)
12903 {
12904 set_pred_insn_type (NEUTRAL_IT_INSN);
12905
12906 if (unified_syntax)
12907 {
12908 if (inst.size_req == 4 || inst.operands[0].imm > 15)
12909 {
12910 inst.instruction = THUMB_OP32 (inst.instruction);
12911 inst.instruction |= inst.operands[0].imm;
12912 }
12913 else
12914 {
12915 /* PR9722: Check for Thumb2 availability before
12916 generating a thumb2 nop instruction. */
12917 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
12918 {
12919 inst.instruction = THUMB_OP16 (inst.instruction);
12920 inst.instruction |= inst.operands[0].imm << 4;
12921 }
12922 else
12923 inst.instruction = 0x46c0;
12924 }
12925 }
12926 else
12927 {
12928 constraint (inst.operands[0].present,
12929 _("Thumb does not support NOP with hints"));
12930 inst.instruction = 0x46c0;
12931 }
12932 }
12933
12934 static void
12935 do_t_neg (void)
12936 {
12937 if (unified_syntax)
12938 {
12939 bfd_boolean narrow;
12940
12941 if (THUMB_SETS_FLAGS (inst.instruction))
12942 narrow = !in_pred_block ();
12943 else
12944 narrow = in_pred_block ();
12945 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12946 narrow = FALSE;
12947 if (inst.size_req == 4)
12948 narrow = FALSE;
12949
12950 if (!narrow)
12951 {
12952 inst.instruction = THUMB_OP32 (inst.instruction);
12953 inst.instruction |= inst.operands[0].reg << 8;
12954 inst.instruction |= inst.operands[1].reg << 16;
12955 }
12956 else
12957 {
12958 inst.instruction = THUMB_OP16 (inst.instruction);
12959 inst.instruction |= inst.operands[0].reg;
12960 inst.instruction |= inst.operands[1].reg << 3;
12961 }
12962 }
12963 else
12964 {
12965 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12966 BAD_HIREG);
12967 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12968
12969 inst.instruction = THUMB_OP16 (inst.instruction);
12970 inst.instruction |= inst.operands[0].reg;
12971 inst.instruction |= inst.operands[1].reg << 3;
12972 }
12973 }
12974
12975 static void
12976 do_t_orn (void)
12977 {
12978 unsigned Rd, Rn;
12979
12980 Rd = inst.operands[0].reg;
12981 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12982
12983 reject_bad_reg (Rd);
12984 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12985 reject_bad_reg (Rn);
12986
12987 inst.instruction |= Rd << 8;
12988 inst.instruction |= Rn << 16;
12989
12990 if (!inst.operands[2].isreg)
12991 {
12992 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12993 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
12994 }
12995 else
12996 {
12997 unsigned Rm;
12998
12999 Rm = inst.operands[2].reg;
13000 reject_bad_reg (Rm);
13001
13002 constraint (inst.operands[2].shifted
13003 && inst.operands[2].immisreg,
13004 _("shift must be constant"));
13005 encode_thumb32_shifted_operand (2);
13006 }
13007 }
13008
13009 static void
13010 do_t_pkhbt (void)
13011 {
13012 unsigned Rd, Rn, Rm;
13013
13014 Rd = inst.operands[0].reg;
13015 Rn = inst.operands[1].reg;
13016 Rm = inst.operands[2].reg;
13017
13018 reject_bad_reg (Rd);
13019 reject_bad_reg (Rn);
13020 reject_bad_reg (Rm);
13021
13022 inst.instruction |= Rd << 8;
13023 inst.instruction |= Rn << 16;
13024 inst.instruction |= Rm;
13025 if (inst.operands[3].present)
13026 {
13027 unsigned int val = inst.relocs[0].exp.X_add_number;
13028 constraint (inst.relocs[0].exp.X_op != O_constant,
13029 _("expression too complex"));
13030 inst.instruction |= (val & 0x1c) << 10;
13031 inst.instruction |= (val & 0x03) << 6;
13032 }
13033 }
13034
13035 static void
13036 do_t_pkhtb (void)
13037 {
13038 if (!inst.operands[3].present)
13039 {
13040 unsigned Rtmp;
13041
13042 inst.instruction &= ~0x00000020;
13043
13044 /* PR 10168. Swap the Rm and Rn registers. */
13045 Rtmp = inst.operands[1].reg;
13046 inst.operands[1].reg = inst.operands[2].reg;
13047 inst.operands[2].reg = Rtmp;
13048 }
13049 do_t_pkhbt ();
13050 }
13051
/* Encode PLD/PLDW/PLI.  A register offset must not be SP or PC.  */
static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
13060
13061 static void
13062 do_t_push_pop (void)
13063 {
13064 unsigned mask;
13065
13066 constraint (inst.operands[0].writeback,
13067 _("push/pop do not support {reglist}^"));
13068 constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
13069 _("expression too complex"));
13070
13071 mask = inst.operands[0].imm;
13072 if (inst.size_req != 4 && (mask & ~0xff) == 0)
13073 inst.instruction = THUMB_OP16 (inst.instruction) | mask;
13074 else if (inst.size_req != 4
13075 && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
13076 ? REG_LR : REG_PC)))
13077 {
13078 inst.instruction = THUMB_OP16 (inst.instruction);
13079 inst.instruction |= THUMB_PP_PC_LR;
13080 inst.instruction |= mask & 0xff;
13081 }
13082 else if (unified_syntax)
13083 {
13084 inst.instruction = THUMB_OP32 (inst.instruction);
13085 encode_thumb2_multi (TRUE /* do_io */, 13, mask, TRUE);
13086 }
13087 else
13088 {
13089 inst.error = _("invalid register list to push/pop instruction");
13090 return;
13091 }
13092 }
13093
13094 static void
13095 do_t_clrm (void)
13096 {
13097 if (unified_syntax)
13098 encode_thumb2_multi (FALSE /* do_io */, -1, inst.operands[0].imm, FALSE);
13099 else
13100 {
13101 inst.error = _("invalid register list to push/pop instruction");
13102 return;
13103 }
13104 }
13105
/* Encode the floating-point register list for VSCCLRM.  */
static void
do_t_vscclrm (void)
{
  if (inst.operands[0].issingle)
    {
      /* S-register list: low bit of the first register goes in bit 22,
	 the remaining bits in 12-15; imm holds the register count.  */
      inst.instruction |= (inst.operands[0].reg & 0x1) << 22;
      inst.instruction |= (inst.operands[0].reg & 0x1e) << 11;
      inst.instruction |= inst.operands[0].imm;
    }
  else
    {
      /* D-register list: bit 8 marks double precision; the count is
	 doubled because it is expressed in 32-bit units.  */
      inst.instruction |= (inst.operands[0].reg & 0x10) << 18;
      inst.instruction |= (inst.operands[0].reg & 0xf) << 12;
      inst.instruction |= 1 << 8;
      inst.instruction |= inst.operands[0].imm << 1;
    }
}
13123
13124 static void
13125 do_t_rbit (void)
13126 {
13127 unsigned Rd, Rm;
13128
13129 Rd = inst.operands[0].reg;
13130 Rm = inst.operands[1].reg;
13131
13132 reject_bad_reg (Rd);
13133 reject_bad_reg (Rm);
13134
13135 inst.instruction |= Rd << 8;
13136 inst.instruction |= Rm << 16;
13137 inst.instruction |= Rm;
13138 }
13139
13140 static void
13141 do_t_rev (void)
13142 {
13143 unsigned Rd, Rm;
13144
13145 Rd = inst.operands[0].reg;
13146 Rm = inst.operands[1].reg;
13147
13148 reject_bad_reg (Rd);
13149 reject_bad_reg (Rm);
13150
13151 if (Rd <= 7 && Rm <= 7
13152 && inst.size_req != 4)
13153 {
13154 inst.instruction = THUMB_OP16 (inst.instruction);
13155 inst.instruction |= Rd;
13156 inst.instruction |= Rm << 3;
13157 }
13158 else if (unified_syntax)
13159 {
13160 inst.instruction = THUMB_OP32 (inst.instruction);
13161 inst.instruction |= Rd << 8;
13162 inst.instruction |= Rm << 16;
13163 inst.instruction |= Rm;
13164 }
13165 else
13166 inst.error = BAD_HIREG;
13167 }
13168
13169 static void
13170 do_t_rrx (void)
13171 {
13172 unsigned Rd, Rm;
13173
13174 Rd = inst.operands[0].reg;
13175 Rm = inst.operands[1].reg;
13176
13177 reject_bad_reg (Rd);
13178 reject_bad_reg (Rm);
13179
13180 inst.instruction |= Rd << 8;
13181 inst.instruction |= Rm;
13182 }
13183
/* Encode RSB/RSBS.  RSB{S} Rd, Rs, #0 can collapse to the 16-bit
   NEG{S} encoding; everything else is a 32-bit instruction.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit of the T32 opcode; NEGS
	 sets flags, so the narrow form is usable only when that
	 matches the IT-block context.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Only a literal zero immediate qualifies.  */
      if (inst.relocs[0].exp.X_op != O_constant
	  || inst.relocs[0].exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.relocs[0].type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
13238
13239 static void
13240 do_t_setend (void)
13241 {
13242 if (warn_on_deprecated
13243 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
13244 as_tsktsk (_("setend use is deprecated for ARMv8"));
13245
13246 set_pred_insn_type (OUTSIDE_PRED_INSN);
13247 if (inst.operands[0].imm)
13248 inst.instruction |= 0x8;
13249 }
13250
/* Encode the Thumb shift instructions (ASR/LSL/LSR/ROR, with and
   without flag setting), by register or by immediate.  Chooses the
   16-bit encoding where registers and IT-block context allow,
   otherwise the 32-bit encoding (a MOV with shifted operand for the
   immediate case).  */
static void
do_t_shift (void)
{
  /* Two-operand form: the destination doubles as the first source.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* The 16-bit forms set flags, so they are only usable where
	 that matches the mnemonic and IT-block context.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no narrow immediate ROR.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Immediate shift: encode as a 32-bit MOV/MOVS with a
		 shifted register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.relocs[0].type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: 16-bit encodings only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
13398
13399 static void
13400 do_t_simd (void)
13401 {
13402 unsigned Rd, Rn, Rm;
13403
13404 Rd = inst.operands[0].reg;
13405 Rn = inst.operands[1].reg;
13406 Rm = inst.operands[2].reg;
13407
13408 reject_bad_reg (Rd);
13409 reject_bad_reg (Rn);
13410 reject_bad_reg (Rm);
13411
13412 inst.instruction |= Rd << 8;
13413 inst.instruction |= Rn << 16;
13414 inst.instruction |= Rm;
13415 }
13416
/* Encode a Thumb-2 three-register SIMD instruction whose assembler
   operand order maps the *second* operand to the Rm field and the
   *third* to the Rn field — i.e. the reverse of do_t_simd.  Used for
   mnemonics whose architectural source order is swapped.  */
static void
do_t_simd2 (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;	/* NOTE: operand 1 goes into Rm ...  */
  Rn = inst.operands[2].reg;	/* ... and operand 2 into Rn — intentional.  */

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
13434
/* Encode a Thumb-2 SMC (Secure Monitor Call) instruction.  The 16-bit
   immediate is scattered across the encoding: imm<15:12> into bits
   <3:0>, imm<11:4> kept at bits <11:4>, imm<3:0> into bits <19:16>.  */
static void
do_t_smc (void)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  /* The immediate is encoded inline, so no fixup is wanted.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_pred_insn_type_last ();
}
13450
13451 static void
13452 do_t_hvc (void)
13453 {
13454 unsigned int value = inst.relocs[0].exp.X_add_number;
13455
13456 inst.relocs[0].type = BFD_RELOC_UNUSED;
13457 inst.instruction |= (value & 0x0fff);
13458 inst.instruction |= (value & 0xf000) << 4;
13459 }
13460
/* Shared encoder for Thumb-2 SSAT and USAT.  BIAS is subtracted from
   the saturation-position operand before encoding (1 for SSAT, 0 for
   USAT — see do_t_ssat / do_t_usat).  Operand 3, if present, is an
   optional LSL/ASR shift applied to the source register.  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.relocs[0].exp.X_add_number;

      /* Shift amount is encoded inline, so no fixup is emitted.  */
      inst.relocs[0].type = BFD_RELOC_UNUSED;

      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  /* ASR is distinguished from LSL by the sh bit.  */
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Split the shift amount: bits <4:2> to <14:12>, <1:0> to <7:6>.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
13498
/* Encode Thumb-2 SSAT.  The saturation position is encoded biased by
   one (i.e. as imm - 1).  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
13504
13505 static void
13506 do_t_ssat16 (void)
13507 {
13508 unsigned Rd, Rn;
13509
13510 Rd = inst.operands[0].reg;
13511 Rn = inst.operands[2].reg;
13512
13513 reject_bad_reg (Rd);
13514 reject_bad_reg (Rn);
13515
13516 inst.instruction |= Rd << 8;
13517 inst.instruction |= inst.operands[1].imm - 1;
13518 inst.instruction |= Rn << 16;
13519 }
13520
/* Encode Thumb-2 STREX: status register Rd into bits <11:8>, Rt into
   bits <15:12>, base Rn into bits <19:16>.  Only a plain positive
   immediate-offset addressing mode (no writeback, post-index,
   register offset or shift) is accepted.  */
static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset immediate is filled in by an unsigned 8-bit T32 fixup.  */
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
13537
/* Encode Thumb-2 STREXD.  When the second source register is omitted
   it defaults to the register after the first (Rt + 1).  The status
   register must not overlap any source or base register.  */
static void
do_t_strexd (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  /* Rd at <3:0>, Rt at <15:12>, Rt2 at <11:8>, Rn at <19:16>.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
13554
13555 static void
13556 do_t_sxtah (void)
13557 {
13558 unsigned Rd, Rn, Rm;
13559
13560 Rd = inst.operands[0].reg;
13561 Rn = inst.operands[1].reg;
13562 Rm = inst.operands[2].reg;
13563
13564 reject_bad_reg (Rd);
13565 reject_bad_reg (Rn);
13566 reject_bad_reg (Rm);
13567
13568 inst.instruction |= Rd << 8;
13569 inst.instruction |= Rn << 16;
13570 inst.instruction |= Rm;
13571 inst.instruction |= inst.operands[3].imm << 4;
13572 }
13573
/* Encode SXTH and related extend instructions.  Prefer the 16-bit
   Thumb encoding when both registers are low, no rotation was given
   (or it is zero) and no .w suffix forces a 32-bit encoding;
   otherwise fall back to the Thumb-2 form, which is only available
   with unified syntax.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      /* 16-bit encoding: Rd into bits <2:0>, Rm into bits <5:3>.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      /* 32-bit encoding: Rd into <11:8>, Rm into <3:0>, rotation
	 operand shifted into its field at bit 4.  */
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      /* Pre-unified syntax only has the 16-bit form, which cannot
	 express a rotation or high registers.  */
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
13609
/* Encode SWI/SVC.  The immediate field is not encoded here; it is
   filled in later by the BFD_RELOC_ARM_SWI fixup.  */
static void
do_t_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
}
13615
/* Encode TBB/TBH (table branch).  Bit 4 of the opcode distinguishes
   the halfword variant (TBH) from the byte variant (TBB); only the
   halfword variant accepts a shifted index.  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  /* Opcode bit 4 set => halfword table (TBH).  */
  half = (inst.instruction & 0x10) != 0;
  /* A table branch must be the last instruction in an IT block.  */
  set_pred_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* SP as the table base is only permitted from ARMv8 on.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  /* Base Rn into bits <19:16>, index Rm into bits <3:0>.  */
  inst.instruction |= (Rn << 16) | Rm;
}
13638
/* Encode UDF (permanently undefined).  An omitted immediate defaults
   to 0.  Values up to 255 fit the 16-bit encoding; larger values, or
   an explicit wide size request, select the 32-bit encoding with
   imm<15:12> at bits <19:16> and imm<11:0> at bits <11:0>.  */
static void
do_t_udf (void)
{
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      /* A .n suffix cannot hold an immediate above 255.  */
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  /* UDF may appear either inside or outside an IT block.  */
  set_pred_insn_type (NEUTRAL_IT_INSN);
}
13661
13662
/* Encode Thumb-2 USAT.  The saturation position is encoded unbiased
   (bias 0), unlike SSAT.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13668
13669 static void
13670 do_t_usat16 (void)
13671 {
13672 unsigned Rd, Rn;
13673
13674 Rd = inst.operands[0].reg;
13675 Rn = inst.operands[2].reg;
13676
13677 reject_bad_reg (Rd);
13678 reject_bad_reg (Rn);
13679
13680 inst.instruction |= Rd << 8;
13681 inst.instruction |= inst.operands[1].imm;
13682 inst.instruction |= Rn << 16;
13683 }
13684
13685 /* Checking the range of the branch offset (VAL) with NBITS bits
13686 and IS_SIGNED signedness. Also checks the LSB to be 0. */
13687 static int
13688 v8_1_branch_value_check (int val, int nbits, int is_signed)
13689 {
13690 gas_assert (nbits > 0 && nbits <= 32);
13691 if (is_signed)
13692 {
13693 int cmp = (1 << (nbits - 1));
13694 if ((val < -cmp) || (val >= cmp) || (val & 0x01))
13695 return FAIL;
13696 }
13697 else
13698 {
13699 if ((val <= 0) || (val >= (1 << nbits)) || (val & 0x1))
13700 return FAIL;
13701 }
13702 return SUCCESS;
13703 }
13704
/* For branches in Armv8.1-M Mainline: encodes BF, BFL, BFCSEL, BFX
   and BFLX.  Operand 0 is the branch-point label; a resolved value is
   encoded inline, otherwise a BRANCH5 relocation is deferred.  */
static void
do_t_branch_future (void)
{
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);
  if (inst.operands[0].hasreloc == 0)
    {
      if (v8_1_branch_value_check (inst.operands[0].imm, 5, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      /* Branch point: imm<4:1> into bits <26:23> (bit 0 is always 0).  */
      inst.instruction |= ((inst.operands[0].imm & 0x1f) >> 1) << 23;
    }
  else
    {
      inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH5;
      inst.relocs[0].pc_rel = 1;
    }

  switch (insn)
    {
    case T_MNEM_bf:
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 17, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  /* Scatter the 17-bit target offset into immA/immB/immC.  */
	  int immA = (val & 0x0001f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF17;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfl:
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 19, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  /* As for BF but with a 19-bit target offset (wider immA).  */
	  int immA = (val & 0x0007f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF19;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfcsel:
      /* Operand 1: the branch target, 13-bit offset split as for BF.  */
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  int immA = (val & 0x00001000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF13;
	  inst.relocs[1].pc_rel = 1;
	}

      /* Operand 2: the "else" destination.  When resolved, it must be
	 either 2 or 4 bytes past the branch point; 4 sets the T bit.  */
      if (inst.operands[2].hasreloc == 0)
	{
	  constraint ((inst.operands[0].hasreloc != 0), BAD_ARGS);
	  int val2 = inst.operands[2].imm;
	  int val0 = inst.operands[0].imm & 0x1f;
	  int diff = val2 - val0;
	  if (diff == 4)
	    inst.instruction |= 1 << 17; /* T bit.  */
	  else if (diff != 2)
	    as_bad (_("out of range label-relative fixup value"));
	}
      else
	{
	  /* Operands 0 and 2 must be resolved together.  */
	  constraint ((inst.operands[0].hasreloc == 0), BAD_ARGS);
	  inst.relocs[2].type = BFD_RELOC_THUMB_PCREL_BFCSEL;
	  inst.relocs[2].pc_rel = 1;
	}

      /* Operand 3: the condition, encoded at bits <21:18>.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= (inst.operands[3].imm & 0xf) << 18;
      break;

    case T_MNEM_bfx:
    case T_MNEM_bflx:
      /* Register forms: target register into bits <19:16>.  */
      inst.instruction |= inst.operands[1].reg << 16;
      break;

    default: abort ();
    }
}
13813
/* Helper function for do_t_loloop to handle relocations.  A constant
   offset is range-checked and encoded inline (negated first for LE,
   which always branches backwards); otherwise a LOOP12 relocation is
   emitted for later resolution.  */
static void
v8_1_loop_reloc (int is_le)
{
  if (inst.relocs[0].exp.X_op == O_constant)
    {
      int value = inst.relocs[0].exp.X_add_number;
      /* LE encodes a backwards branch as a positive offset.  */
      value = (is_le) ? -value : value;

      if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      int imml, immh;

      /* offset<11:2> goes to bits <10:1>, offset<1> to bit 11.  */
      immh = (value & 0x00000ffc) >> 2;
      imml = (value & 0x00000002) >> 1;

      inst.instruction |= (imml << 11) | (immh << 1);
    }
  else
    {
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_LOOP12;
      inst.relocs[0].pc_rel = 1;
    }
}
13839
/* To handle the Scalar Low Overhead Loop instructions
   in Armv8.1-M Mainline: LE, WLS and DLS.  */
static void
do_t_loloop (void)
{
  unsigned long insn = inst.instruction;

  /* Loop instructions are not permitted inside an IT block.  */
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction = THUMB_OP32 (inst.instruction);

  switch (insn)
    {
    case T_MNEM_le:
      /* le <label>: the register-less form sets bit 21.  */
      if (!inst.operands[0].present)
	inst.instruction |= 1 << 21;

      v8_1_loop_reloc (TRUE);
      break;

    case T_MNEM_wls:
      v8_1_loop_reloc (FALSE);
      /* Fall through.  */
    case T_MNEM_dls:
      /* Iteration-count register into bits <19:16>.  */
      constraint (inst.operands[1].isreg != 1, BAD_ARGS);
      inst.instruction |= (inst.operands[1].reg << 16);
      break;

    default: abort ();
    }
}
13871
/* MVE instruction encoder helpers.  */

/* Base opcode values for overloaded MVE mnemonics that are not driven
   by the generic Neon encoding tables below.  */
#define M_MNEM_vabav	0xee800f01
#define M_MNEM_vmladav	  0xeef00e00
#define M_MNEM_vmladava	  0xeef00e20
#define M_MNEM_vmladavx	  0xeef01e00
#define M_MNEM_vmladavax  0xeef01e20
#define M_MNEM_vmlsdav	  0xeef00e01
#define M_MNEM_vmlsdava	  0xeef00e21
#define M_MNEM_vmlsdavx	  0xeef01e01
#define M_MNEM_vmlsdavax  0xeef01e21
#define M_MNEM_vmullt	0xee011e00
#define M_MNEM_vmullb	0xee010e00
/* VST2/VST4 and VLD2/VLD4: one opcode per trailing "pattern" digit of
   the mnemonic (vst20/vst21, vst40..vst43, etc.).  */
#define M_MNEM_vst20	0xfc801e00
#define M_MNEM_vst21	0xfc801e20
#define M_MNEM_vst40	0xfc801e01
#define M_MNEM_vst41	0xfc801e21
#define M_MNEM_vst42	0xfc801e41
#define M_MNEM_vst43	0xfc801e61
#define M_MNEM_vld20	0xfc901e00
#define M_MNEM_vld21	0xfc901e20
#define M_MNEM_vld40	0xfc901e01
#define M_MNEM_vld41	0xfc901e21
#define M_MNEM_vld42	0xfc901e41
#define M_MNEM_vld43	0xfc901e61
13896
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of NEON_ENC_TAB: up to three alternative base encodings for
   a single overloaded mnemonic.  Which field applies is chosen by the
   NEON_ENC_* accessor macros below.  */
struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};

/* Map overloaded Neon opcodes to their respective encodings.  Each X()
   entry expands (via the X macro defined at the point of use) into
   either an enumerator or a table row — keep the two in sync by only
   ever editing this list.  */
#define NEON_ENC_TAB							\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vabdl,	0x0800700, N_INV,     N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vaddl,	0x0800000, N_INV,     N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vsubl,	0x0800200, N_INV,     N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
13991
/* Enumerators N_MNEM_<opc>, one per NEON_ENC_TAB row, used to index
   neon_enc_tab below.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encoding table itself, generated from the same X-macro list so
   the indices always line up with enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};

/* Do not use these macros; instead, use NEON_ENCODE defined below.
   Each accessor selects one field of the table row, masking off the
   top nibble of X which may carry a condition/prefix.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Rewrite inst.instruction with the chosen encoding variant and mark
   the instruction as Neon (so suffix checking is satisfied).  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)

/* Diagnose a type suffix (e.g. ".s32") given on a non-Neon mnemonic;
   NEON_ENCODE above is what sets is_neon.  */
#define check_neon_suffixes						\
  do								\
    {								\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }								\
  while (0)
14041
/* Define shapes for instruction operands.  The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.

   (All three are generated from this one X-macro list; edit only
   here.)  */

#define NEON_SHAPE_DEF			\
  X(3, (R, Q, Q), QUAD),		\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(3, (Q, Q, R), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(4, (D, D, S, I), DOUBLE),		\
  X(4, (Q, Q, S, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
/* Half float shape supported so far.  */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED),		\
  X (3, (D, H, H), MIXED),		\
  X (3, (D, H, S), MIXED)

/* Paste the element letters together into NS_* enumerator names.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
14135
/* Broad classification of a shape (the third column of
   NEON_SHAPE_DEF), used e.g. by neon_quad.  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X

/* One enumerator per operand-letter used in NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  Indexed by enum neon_shape_el: H=16,
   F=32, D=64, Q=128, I=0 (no register), S=32, R=32, L=0.  */
static unsigned neon_shape_el_size[] =
{
  16,
  32,
  64,
  128,
  0,
  32,
  32,
  0
};

/* Per-shape operand count and element kinds, driving
   neon_select_shape.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Indexed by enum neon_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
14200
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit.  If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  Note that the low modifier bits (N_DBL etc.) reuse
   the same values as the low type bits — they are only interpreted as
   modifiers when N_EQK is present.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

/* Union of all the N_EQK modifier bits.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Convenience unions of related type bits.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)
/* MVE-specific subsets (no 64-bit element types).  */
#define N_I_MVE	   (N_I8 | N_I16 | N_I32)
#define N_F_MVE	   (N_F16 | N_F32)
#define N_SU_MVE   (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
14264
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives.  Return NS_NULL if the current
   instruction doesn't fit.  For non-polymorphic shapes, checking is usually
   done as a function of operand parsing, so this function doesn't need to be
   called.  Shapes should be listed in order of decreasing length.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in the order given until one matches all
     parsed operands exactly.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      /* Half precision: a single-precision register with a
		 16-bit type specifier from any of the three places it
		 may appear.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      /* Single precision: as SE_H but with a 32-bit (or absent)
		 type specifier.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists are not further checked here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
14407
14408 /* True if SHAPE is predominantly a quadword operation (most of the time, this
14409 means the Q bit should be set). */
14410
14411 static int
14412 neon_quad (enum neon_shape shape)
14413 {
14414 return neon_shape_class[shape] == SC_QUAD;
14415 }
14416
14417 static void
14418 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
14419 unsigned *g_size)
14420 {
14421 /* Allow modification to be made to types which are constrained to be
14422 based on the key element, based on bits set alongside N_EQK. */
14423 if ((typebits & N_EQK) != 0)
14424 {
14425 if ((typebits & N_HLF) != 0)
14426 *g_size /= 2;
14427 else if ((typebits & N_DBL) != 0)
14428 *g_size *= 2;
14429 if ((typebits & N_SGN) != 0)
14430 *g_type = NT_signed;
14431 else if ((typebits & N_UNS) != 0)
14432 *g_type = NT_unsigned;
14433 else if ((typebits & N_INT) != 0)
14434 *g_type = NT_integer;
14435 else if ((typebits & N_FLT) != 0)
14436 *g_type = NT_float;
14437 else if ((typebits & N_SIZ) != 0)
14438 *g_type = NT_untyped;
14439 }
14440 }
14441
14442 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
14443 operand type, i.e. the single type specified in a Neon instruction when it
14444 is the only one given. */
14445
14446 static struct neon_type_el
14447 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
14448 {
14449 struct neon_type_el dest = *key;
14450
14451 gas_assert ((thisarg & N_EQK) != 0);
14452
14453 neon_modify_type_size (thisarg, &dest.type, &dest.size);
14454
14455 return dest;
14456 }
14457
14458 /* Convert Neon type and size into compact bitmask representation. */
14459
14460 static enum neon_type_mask
14461 type_chk_of_el_type (enum neon_el_type type, unsigned size)
14462 {
14463 switch (type)
14464 {
14465 case NT_untyped:
14466 switch (size)
14467 {
14468 case 8: return N_8;
14469 case 16: return N_16;
14470 case 32: return N_32;
14471 case 64: return N_64;
14472 default: ;
14473 }
14474 break;
14475
14476 case NT_integer:
14477 switch (size)
14478 {
14479 case 8: return N_I8;
14480 case 16: return N_I16;
14481 case 32: return N_I32;
14482 case 64: return N_I64;
14483 default: ;
14484 }
14485 break;
14486
14487 case NT_float:
14488 switch (size)
14489 {
14490 case 16: return N_F16;
14491 case 32: return N_F32;
14492 case 64: return N_F64;
14493 default: ;
14494 }
14495 break;
14496
14497 case NT_poly:
14498 switch (size)
14499 {
14500 case 8: return N_P8;
14501 case 16: return N_P16;
14502 case 64: return N_P64;
14503 default: ;
14504 }
14505 break;
14506
14507 case NT_signed:
14508 switch (size)
14509 {
14510 case 8: return N_S8;
14511 case 16: return N_S16;
14512 case 32: return N_S32;
14513 case 64: return N_S64;
14514 default: ;
14515 }
14516 break;
14517
14518 case NT_unsigned:
14519 switch (size)
14520 {
14521 case 8: return N_U8;
14522 case 16: return N_U16;
14523 case 32: return N_U32;
14524 case 64: return N_U64;
14525 default: ;
14526 }
14527 break;
14528
14529 default: ;
14530 }
14531
14532 return N_UTYP;
14533 }
14534
/* Convert compact Neon bitmask type representation to a type and size.  Only
   handles the case where a single bit is set in the mask.  Returns FAIL for
   N_EQK (a constraint marker, not a concrete type) or an unrecognized mask,
   SUCCESS otherwise with *TYPE and *SIZE filled in.  */

static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
		     enum neon_type_mask mask)
{
  if ((mask & N_EQK) != 0)
    return FAIL;

  /* Recover the element size from whichever size group the bit falls in.  */
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
    *size = 64;
  else
    return FAIL;

  /* Recover the type classification from the corresponding type group.  */
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
    *type = NT_poly;
  else if ((mask & (N_F_ALL)) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}
14573
14574 /* Modify a bitmask of allowed types. This is only needed for type
14575 relaxation. */
14576
14577 static unsigned
14578 modify_types_allowed (unsigned allowed, unsigned mods)
14579 {
14580 unsigned size;
14581 enum neon_el_type type;
14582 unsigned destmask;
14583 int i;
14584
14585 destmask = 0;
14586
14587 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
14588 {
14589 if (el_type_of_type_chk (&type, &size,
14590 (enum neon_type_mask) (allowed & i)) == SUCCESS)
14591 {
14592 neon_modify_type_size (mods, &type, &size);
14593 destmask |= type_chk_of_el_type (type, size);
14594 }
14595 }
14596
14597 return destmask;
14598 }
14599
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.

   ELS is the number of type elements expected; NS the selected shape; the
   varargs are one N_* bitmask per element, at most one carrying N_KEY.
   Returns the key element's resolved type, or {NT_invtype, -1} on error
   (with inst.error/first_error set).  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Sentinel returned on any failure path.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE means the caller wants no type checking at all.  */
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type on the mnemonic and a type on an operand are mutually
     exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      /* One type given for several elements: it describes the key element;
	 derive the others from it.  */
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes: pass 0 records the key element's type/size/allowed mask;
     pass 1 checks every element against its constraints (which for N_EQK
     elements depend on the key recorded in pass 0).  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (BAD_SIMD_TYPE);
		      return badtype;
		    }
		}
	      else
		{
		  /* N_EQK element: must equal the key type after applying any
		     modifier bits (N_HLF/N_DBL/N_SGN/...).  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
14809
14810 /* Neon-style VFP instruction forwarding. */
14811
14812 /* Thumb VFP instructions have 0xE in the condition field. */
14813
14814 static void
14815 do_vfp_cond_or_thumb (void)
14816 {
14817 inst.is_neon = 1;
14818
14819 if (thumb_mode)
14820 inst.instruction |= 0xe0000000;
14821 else
14822 inst.instruction |= inst.cond << 28;
14823 }
14824
14825 /* Look up and encode a simple mnemonic, for use as a helper function for the
14826 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14827 etc. It is assumed that operand parsing has already been done, and that the
14828 operands are in the form expected by the given opcode (this isn't necessarily
14829 the same as the form in which they were parsed, hence some massaging must
14830 take place before this function is called).
14831 Checks current arch version against that in the looked-up opcode. */
14832
14833 static void
14834 do_vfp_nsyn_opcode (const char *opname)
14835 {
14836 const struct asm_opcode *opcode;
14837
14838 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
14839
14840 if (!opcode)
14841 abort ();
14842
14843 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
14844 thumb_mode ? *opcode->tvariant : *opcode->avariant),
14845 _(BAD_FPU));
14846
14847 inst.is_neon = 1;
14848
14849 if (thumb_mode)
14850 {
14851 inst.instruction = opcode->tvalue;
14852 opcode->tencode ();
14853 }
14854 else
14855 {
14856 inst.instruction = (inst.cond << 28) | opcode->avalue;
14857 opcode->aencode ();
14858 }
14859 }
14860
14861 static void
14862 do_vfp_nsyn_add_sub (enum neon_shape rs)
14863 {
14864 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14865
14866 if (rs == NS_FFF || rs == NS_HHH)
14867 {
14868 if (is_add)
14869 do_vfp_nsyn_opcode ("fadds");
14870 else
14871 do_vfp_nsyn_opcode ("fsubs");
14872
14873 /* ARMv8.2 fp16 instruction. */
14874 if (rs == NS_HHH)
14875 do_scalar_fp16_v82_encode ();
14876 }
14877 else
14878 {
14879 if (is_add)
14880 do_vfp_nsyn_opcode ("faddd");
14881 else
14882 do_vfp_nsyn_opcode ("fsubd");
14883 }
14884 }
14885
14886 /* Check operand types to see if this is a VFP instruction, and if so call
14887 PFN (). */
14888
14889 static int
14890 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14891 {
14892 enum neon_shape rs;
14893 struct neon_type_el et;
14894
14895 switch (args)
14896 {
14897 case 2:
14898 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14899 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14900 break;
14901
14902 case 3:
14903 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14904 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14905 N_F_ALL | N_KEY | N_VFP);
14906 break;
14907
14908 default:
14909 abort ();
14910 }
14911
14912 if (et.type != NT_invtype)
14913 {
14914 pfn (rs);
14915 return SUCCESS;
14916 }
14917
14918 inst.error = NULL;
14919 return FAIL;
14920 }
14921
14922 static void
14923 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14924 {
14925 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14926
14927 if (rs == NS_FFF || rs == NS_HHH)
14928 {
14929 if (is_mla)
14930 do_vfp_nsyn_opcode ("fmacs");
14931 else
14932 do_vfp_nsyn_opcode ("fnmacs");
14933
14934 /* ARMv8.2 fp16 instruction. */
14935 if (rs == NS_HHH)
14936 do_scalar_fp16_v82_encode ();
14937 }
14938 else
14939 {
14940 if (is_mla)
14941 do_vfp_nsyn_opcode ("fmacd");
14942 else
14943 do_vfp_nsyn_opcode ("fnmacd");
14944 }
14945 }
14946
14947 static void
14948 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14949 {
14950 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14951
14952 if (rs == NS_FFF || rs == NS_HHH)
14953 {
14954 if (is_fma)
14955 do_vfp_nsyn_opcode ("ffmas");
14956 else
14957 do_vfp_nsyn_opcode ("ffnmas");
14958
14959 /* ARMv8.2 fp16 instruction. */
14960 if (rs == NS_HHH)
14961 do_scalar_fp16_v82_encode ();
14962 }
14963 else
14964 {
14965 if (is_fma)
14966 do_vfp_nsyn_opcode ("ffmad");
14967 else
14968 do_vfp_nsyn_opcode ("ffnmad");
14969 }
14970 }
14971
14972 static void
14973 do_vfp_nsyn_mul (enum neon_shape rs)
14974 {
14975 if (rs == NS_FFF || rs == NS_HHH)
14976 {
14977 do_vfp_nsyn_opcode ("fmuls");
14978
14979 /* ARMv8.2 fp16 instruction. */
14980 if (rs == NS_HHH)
14981 do_scalar_fp16_v82_encode ();
14982 }
14983 else
14984 do_vfp_nsyn_opcode ("fmuld");
14985 }
14986
14987 static void
14988 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14989 {
14990 int is_neg = (inst.instruction & 0x80) != 0;
14991 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14992
14993 if (rs == NS_FF || rs == NS_HH)
14994 {
14995 if (is_neg)
14996 do_vfp_nsyn_opcode ("fnegs");
14997 else
14998 do_vfp_nsyn_opcode ("fabss");
14999
15000 /* ARMv8.2 fp16 instruction. */
15001 if (rs == NS_HH)
15002 do_scalar_fp16_v82_encode ();
15003 }
15004 else
15005 {
15006 if (is_neg)
15007 do_vfp_nsyn_opcode ("fnegd");
15008 else
15009 do_vfp_nsyn_opcode ("fabsd");
15010 }
15011 }
15012
15013 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
15014 insns belong to Neon, and are handled elsewhere. */
15015
15016 static void
15017 do_vfp_nsyn_ldm_stm (int is_dbmode)
15018 {
15019 int is_ldm = (inst.instruction & (1 << 20)) != 0;
15020 if (is_ldm)
15021 {
15022 if (is_dbmode)
15023 do_vfp_nsyn_opcode ("fldmdbs");
15024 else
15025 do_vfp_nsyn_opcode ("fldmias");
15026 }
15027 else
15028 {
15029 if (is_dbmode)
15030 do_vfp_nsyn_opcode ("fstmdbs");
15031 else
15032 do_vfp_nsyn_opcode ("fstmias");
15033 }
15034 }
15035
15036 static void
15037 do_vfp_nsyn_sqrt (void)
15038 {
15039 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15040 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15041
15042 if (rs == NS_FF || rs == NS_HH)
15043 {
15044 do_vfp_nsyn_opcode ("fsqrts");
15045
15046 /* ARMv8.2 fp16 instruction. */
15047 if (rs == NS_HH)
15048 do_scalar_fp16_v82_encode ();
15049 }
15050 else
15051 do_vfp_nsyn_opcode ("fsqrtd");
15052 }
15053
15054 static void
15055 do_vfp_nsyn_div (void)
15056 {
15057 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15058 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15059 N_F_ALL | N_KEY | N_VFP);
15060
15061 if (rs == NS_FFF || rs == NS_HHH)
15062 {
15063 do_vfp_nsyn_opcode ("fdivs");
15064
15065 /* ARMv8.2 fp16 instruction. */
15066 if (rs == NS_HHH)
15067 do_scalar_fp16_v82_encode ();
15068 }
15069 else
15070 do_vfp_nsyn_opcode ("fdivd");
15071 }
15072
15073 static void
15074 do_vfp_nsyn_nmul (void)
15075 {
15076 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15077 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15078 N_F_ALL | N_KEY | N_VFP);
15079
15080 if (rs == NS_FFF || rs == NS_HHH)
15081 {
15082 NEON_ENCODE (SINGLE, inst);
15083 do_vfp_sp_dyadic ();
15084
15085 /* ARMv8.2 fp16 instruction. */
15086 if (rs == NS_HHH)
15087 do_scalar_fp16_v82_encode ();
15088 }
15089 else
15090 {
15091 NEON_ENCODE (DOUBLE, inst);
15092 do_vfp_dp_rd_rn_rm ();
15093 }
15094 do_vfp_cond_or_thumb ();
15095
15096 }
15097
/* Encode a Neon-syntax VCMP/VCMPE as a VFP compare.  Two forms: register
   vs. register, and register vs. the immediate zero (the "z" variants).  */

static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      /* Register-register compare.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare against #0: rewrite the mnemonic code to the "z" variant
	 before encoding.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
15152
15153 static void
15154 nsyn_insert_sp (void)
15155 {
15156 inst.operands[1] = inst.operands[0];
15157 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
15158 inst.operands[0].reg = REG_SP;
15159 inst.operands[0].isreg = 1;
15160 inst.operands[0].writeback = 1;
15161 inst.operands[0].present = 1;
15162 }
15163
15164 static void
15165 do_vfp_nsyn_push (void)
15166 {
15167 nsyn_insert_sp ();
15168
15169 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
15170 _("register list must contain at least 1 and at most 16 "
15171 "registers"));
15172
15173 if (inst.operands[1].issingle)
15174 do_vfp_nsyn_opcode ("fstmdbs");
15175 else
15176 do_vfp_nsyn_opcode ("fstmdbd");
15177 }
15178
15179 static void
15180 do_vfp_nsyn_pop (void)
15181 {
15182 nsyn_insert_sp ();
15183
15184 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
15185 _("register list must contain at least 1 and at most 16 "
15186 "registers"));
15187
15188 if (inst.operands[1].issingle)
15189 do_vfp_nsyn_opcode ("fldmias");
15190 else
15191 do_vfp_nsyn_opcode ("fldmiad");
15192 }
15193
15194 /* Fix up Neon data-processing instructions, ORing in the correct bits for
15195 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
15196
15197 static void
15198 neon_dp_fixup (struct arm_it* insn)
15199 {
15200 unsigned int i = insn->instruction;
15201 insn->is_neon = 1;
15202
15203 if (thumb_mode)
15204 {
15205 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
15206 if (i & (1 << 24))
15207 i |= 1 << 28;
15208
15209 i &= ~(1 << 24);
15210
15211 i |= 0xef000000;
15212 }
15213 else
15214 i |= 0xf2000000;
15215
15216 insn->instruction = i;
15217 }
15218
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  /* X is a power of two here; ffs returns the 1-based position of its set
     bit, so subtracting 4 maps 8->0, 16->1, 32->2, 64->3.  */
  return ffs (x) - 4;
}
15227
/* Split a 5-bit vector register number into the 4-bit field and the
   high "extension" bit used by the encodings below.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
15230
/* Encode an MVE vector-with-GPR-scalar (Q, Q, R) instruction.  SIZE is the
   element size in bits; FP is non-zero for the floating-point forms.  On
   entry inst.instruction still holds the Neon three-register template
   opcode, which is compared against the VADD/VSUB template values to pick
   the corresponding MVE scalar-form base opcode.  */

static void
mve_encode_qqr (int size, int fp)
{
  /* SP/PC as the scalar source are diagnosed (as_tsktsk warns but carries
     on assembling).  */
  if (inst.operands[2].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[2].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  if (fp)
    {
      /* vadd.  */
      if (((unsigned)inst.instruction) == 0xd00)
	inst.instruction = 0xee300f40;
      /* vsub.  */
      else if (((unsigned)inst.instruction) == 0x200d00)
	inst.instruction = 0xee301f40;

      /* Setting size which is 1 for F16 and 0 for F32.  */
      inst.instruction |= (size == 16) << 28;
    }
  else
    {
      /* vadd.  */
      if (((unsigned)inst.instruction) == 0x800)
	inst.instruction = 0xee010f40;
      /* vsub.  */
      else if (((unsigned)inst.instruction) == 0x1000800)
	inst.instruction = 0xee011f40;
      /* Setting bits for size.  */
      inst.instruction |= neon_logbits (size) << 20;
    }
  /* Qd split across bits 12-15 and 22; Qn across bits 16-19 and 7; the GPR
     scalar Rm in bits 0-3.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= inst.operands[2].reg;
  inst.is_neon = 1;
}
15269
/* Encode an MVE (R, Q, Q) instruction: GPR destination with two Q-register
   sources.  BIT28 is written straight to bit 28 (its meaning varies per
   instruction — typically the unsigned/variant selector); SIZE is the
   element size in bits.  */

static void
mve_encode_rqq (unsigned bit28, unsigned size)
{
  inst.instruction |= bit28 << 28;
  inst.instruction |= neon_logbits (size) << 20;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  /* Operand 0 is a general-purpose register, so no HI1 split applies.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.is_neon = 1;
}
15282
/* Encode an MVE three-Q-register (Q, Q, Q) instruction.  UBIT is written to
   bit 28 (instruction-specific selector); SIZE is the element size in
   bits.  */

static void
mve_encode_qqq (int ubit, int size)
{

  inst.instruction |= (ubit != 0) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_logbits (size) << 20;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[2].reg);

  inst.is_neon = 1;
}
15298
15299
/* Encode insns with bit pattern:

  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |

  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
  different meaning for some instruction.

  ISQUAD sets the Q bit (bit 6); UBIT sets the U bit (bit 24, relocated to
  28 for Thumb by neon_dp_fixup).  */

static void
neon_three_same (int isquad, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
15324
/* Encode instructions of the form:

  |28/24|23|22|21 20|19 18|17 16|15 12|11       7|6|5|4|3  0|
  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm  |

  Don't write size if SIZE == -1.

  QBIT sets the Q bit (bit 6); UBIT sets the U bit (bit 24, relocated to 28
  for Thumb by neon_dp_fixup).  Note the size field is at bit 18 here, not
  bit 20 as in the three-register form.  */

static void
neon_two_same (int qbit, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  neon_dp_fixup (&inst);
}
15347
15348 /* Neon instruction encoders, in approximate order of appearance. */
15349
15350 static void
15351 do_neon_dyadic_i_su (void)
15352 {
15353 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15354 struct neon_type_el et = neon_check_type (3, rs,
15355 N_EQK, N_EQK, N_SU_32 | N_KEY);
15356 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15357 }
15358
15359 static void
15360 do_neon_dyadic_i64_su (void)
15361 {
15362 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15363 struct neon_type_el et = neon_check_type (3, rs,
15364 N_EQK, N_EQK, N_SU_ALL | N_KEY);
15365 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15366 }
15367
/* Encode a Neon immediate-shift instruction.  ET is the checked element
   type and IMMBITS the already-biased immediate field value.  ISQUAD sets
   the Q bit.  If WRITE_UBIT is set, the U bit (bit 24) is written from
   UVAL; some opcodes carry it in their template instead.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  unsigned size = et.size >> 3;	/* Element size in bytes: 1, 2, 4 or 8.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* The byte size is split: bit 3 (set only for 64-bit elements) goes to
     the L bit at bit 7, the low three bits to the field at bit 19.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
15386
/* Encode VSHL: either the shift-by-immediate form or the three-register
   form, depending on whether operand 2 is an immediate.  */

static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      /* A left shift amount must be strictly less than the element size.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
15422
/* Encode VQSHL: saturating shift, immediate or three-register form,
   mirroring do_neon_shl_imm (including the operand swap).  */

static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      /* Unlike plain VSHL, the U bit here encodes unsignedness.  */
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
15452
15453 static void
15454 do_neon_rshl (void)
15455 {
15456 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15457 struct neon_type_el et = neon_check_type (3, rs,
15458 N_EQK, N_EQK, N_SU_ALL | N_KEY);
15459 unsigned int tmp;
15460
15461 tmp = inst.operands[2].reg;
15462 inst.operands[2].reg = inst.operands[1].reg;
15463 inst.operands[1].reg = tmp;
15464 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15465 }
15466
/* Find the cmode field value encoding IMMEDIATE for a logic-immediate
   instruction (VBIC/VORR) of element size SIZE, storing the 8-bit payload
   in *IMMBITS.  Returns the cmode value, or FAIL (via first_error) if the
   immediate cannot be encoded.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* Try each byte position of a 32-bit immediate in turn; the cmode
	 value selects which byte the payload is shifted into.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* A 32-bit value that isn't byte-positioned may still encode as a
	 repeating 16-bit pattern; otherwise give up.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit element: payload may sit in either byte.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
15522
15523 static void
15524 do_neon_logic (void)
15525 {
15526 if (inst.operands[2].present && inst.operands[2].isreg)
15527 {
15528 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15529 neon_check_type (3, rs, N_IGNORE_TYPE);
15530 /* U bit and size field were set as part of the bitmask. */
15531 NEON_ENCODE (INTEGER, inst);
15532 neon_three_same (neon_quad (rs), 0, -1);
15533 }
15534 else
15535 {
15536 const int three_ops_form = (inst.operands[2].present
15537 && !inst.operands[2].isreg);
15538 const int immoperand = (three_ops_form ? 2 : 1);
15539 enum neon_shape rs = (three_ops_form
15540 ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
15541 : neon_select_shape (NS_DI, NS_QI, NS_NULL));
15542 struct neon_type_el et = neon_check_type (2, rs,
15543 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
15544 enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
15545 unsigned immbits;
15546 int cmode;
15547
15548 if (et.type == NT_invtype)
15549 return;
15550
15551 if (three_ops_form)
15552 constraint (inst.operands[0].reg != inst.operands[1].reg,
15553 _("first and second operands shall be the same register"));
15554
15555 NEON_ENCODE (IMMED, inst);
15556
15557 immbits = inst.operands[immoperand].imm;
15558 if (et.size == 64)
15559 {
15560 /* .i64 is a pseudo-op, so the immediate must be a repeating
15561 pattern. */
15562 if (immbits != (inst.operands[immoperand].regisimm ?
15563 inst.operands[immoperand].reg : 0))
15564 {
15565 /* Set immbits to an invalid constant. */
15566 immbits = 0xdeadbeef;
15567 }
15568 }
15569
15570 switch (opcode)
15571 {
15572 case N_MNEM_vbic:
15573 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
15574 break;
15575
15576 case N_MNEM_vorr:
15577 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
15578 break;
15579
15580 case N_MNEM_vand:
15581 /* Pseudo-instruction for VBIC. */
15582 neon_invert_size (&immbits, 0, et.size);
15583 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
15584 break;
15585
15586 case N_MNEM_vorn:
15587 /* Pseudo-instruction for VORR. */
15588 neon_invert_size (&immbits, 0, et.size);
15589 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
15590 break;
15591
15592 default:
15593 abort ();
15594 }
15595
15596 if (cmode == FAIL)
15597 return;
15598
15599 inst.instruction |= neon_quad (rs) << 6;
15600 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15601 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15602 inst.instruction |= cmode << 8;
15603 neon_write_immbits (immbits);
15604
15605 neon_dp_fixup (&inst);
15606 }
15607 }
15608
static void
do_neon_bitfield (void)
{
  /* Three-register bit-select style operation.  The element type is
     irrelevant (N_IGNORE_TYPE): the opcode table fully determines the
     encoding, so no U bit or size is written here.  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_IGNORE_TYPE);
  neon_three_same (neon_quad (rs), 0, -1);
}
15616
static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
		  unsigned destbits)
{
  /* Shared encoder for three-operand Neon/MVE instructions.  TYPES is the
     set of element types accepted for the key operand; DESTBITS is OR'ed
     into the destination's type requirement.  The U bit is set when the
     checked element type equals UBIT_MEANING.  The NS_QQR shape selects
     the MVE vector-by-scalar (GPR) encoding.  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
					    types | N_KEY);
  if (et.type == NT_float)
    {
      NEON_ENCODE (FLOAT, inst);
      if (rs == NS_QQR)
	mve_encode_qqr (et.size, 1);
      else
	/* Only the FP16 size is encoded explicitly; FP32 comes from the
	   bitmask (-1 means "size already set").  */
	neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
    }
  else
    {
      NEON_ENCODE (INTEGER, inst);
      if (rs == NS_QQR)
	mve_encode_qqr (et.size, 0);
      else
	neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}
15641
15642
static void
do_neon_dyadic_if_su_d (void)
{
  /* Dyadic operation on signed/unsigned integer or float elements.  */
  /* This version only allows D registers, but that constraint is enforced
     during operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
15650
static void
do_neon_dyadic_if_i_d (void)
{
  /* Dyadic operation on integer or float elements, D registers only.  */
  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
15658
/* Flag bits for vfp_or_neon_is_neon, selecting which checks to apply.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	 /* Check/fix the condition code field.  */
  NEON_CHECK_ARCH = 2,	 /* Require the base Neon extension.  */
  NEON_CHECK_ARCH8 = 4	 /* Require the ARMv8 Neon extension.  */
};
15665
15666 /* Call this function if an instruction which may have belonged to the VFP or
15667 Neon instruction sets, but turned out to be a Neon instruction (due to the
15668 operand types involved, etc.). We have to check and/or fix-up a couple of
15669 things:
15670
15671 - Make sure the user hasn't attempted to make a Neon instruction
15672 conditional.
15673 - Alter the value in the condition code field if necessary.
15674 - Make sure that the arch supports Neon instructions.
15675
15676 Which of these operations take place depends on bits from enum
15677 vfp_or_neon_is_neon_bits.
15678
15679 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
15680 current instruction's condition is COND_ALWAYS, the condition field is
15681 changed to inst.uncond_value. This is necessary because instructions shared
15682 between VFP and Neon may be conditional for the VFP variants only, and the
15683 unconditional Neon version must have, e.g., 0xF in the condition field. */
15684
static int
vfp_or_neon_is_neon (unsigned check)
{
  /* CHECK is a mask of NEON_CHECK_* bits; see the comment above for the
     full contract.  Returns SUCCESS or FAIL (after reporting an error),
     and may modify inst.instruction's condition field.  */

  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      /* Substitute the recorded unconditional encoding (e.g. 0xF) for the
	 "always" condition.  */
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }


  if (((check & NEON_CHECK_ARCH) && !mark_feature_used (&fpu_neon_ext_v1))
      || ((check & NEON_CHECK_ARCH8)
	  && !mark_feature_used (&fpu_neon_ext_armv8)))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
15711
static int
check_simd_pred_availability (int fp, unsigned check)
{
  /* Validate predication and architecture constraints for an instruction
     shared between MVE and Neon.  FP non-zero means the MVE floating-point
     extension is needed; CHECK is a mask of NEON_CHECK_* bits used for the
     Neon fallback path.  Sets inst.pred_insn_type as a side effect.
     Returns 0 on success, or a non-zero value (1..3, identifying which
     path failed) on error.  */
  if (inst.cond > COND_ALWAYS)
    {
      /* A VPT predication code was appended: only MVE allows this.  */
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  inst.error = BAD_FPU;
	  return 1;
	}
      inst.pred_insn_type = INSIDE_VPT_INSN;
    }
  else if (inst.cond < COND_ALWAYS)
    {
      /* A scalar condition code: MVE outside a VPT block, otherwise fall
	 back to the conditional Neon/VFP checks.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
      else if (vfp_or_neon_is_neon (check) == FAIL)
	return 2;
    }
  else
    {
      /* Unpredicated: require either the appropriate MVE extension or a
	 successful Neon check.  */
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, fp ? mve_fp_ext : mve_ext)
	  && vfp_or_neon_is_neon (check) == FAIL)
	return 3;

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
    }
  return 0;
}
15742
static void
do_mve_vst_vld (void)
{
  /* Encode the MVE interleaving loads/stores VST2/VST4/VLD2/VLD4.
     Operand 0 is the vector register list, operand 1 the base address.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  /* Only [Rn] and [Rn]! addressing are valid: no immediate offset, no
     symbol, no register index.  */
  constraint (!inst.operands[1].preind || inst.relocs[0].exp.X_add_symbol != 0
	      || inst.relocs[0].exp.X_add_number != 0
	      || inst.operands[1].immisreg != 0,
	      BAD_ADDR_MODE);
  constraint (inst.vectype.el[0].size > 32, BAD_EL_TYPE);
  if (inst.operands[1].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);
  else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
    as_tsktsk (MVE_BAD_SP);


  /* These instructions are one of the "exceptions" mentioned in
     handle_pred_state.  They are MVE instructions that are not VPT compatible
     and do not accept a VPT code, thus appending such a code is a syntax
     error.  */
  if (inst.cond > COND_ALWAYS)
    first_error (BAD_SYNTAX);
  /* If we append a scalar condition code we can set this to
     MVE_OUTSIDE_PRED_INSN as it will also lead to a syntax error.  */
  else if (inst.cond < COND_ALWAYS)
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
  else
    inst.pred_insn_type = MVE_UNPREDICABLE_INSN;

  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* Element size field (log2 of the element width).  */
  inst.instruction |= neon_logbits (inst.vectype.el[0].size) << 7;
  inst.is_neon = 1;
}
15780
static void
do_neon_dyadic_if_su (void)
{
  /* Dyadic operation on signed/unsigned/float elements, shared between
     Neon and MVE (the NS_QQR shape is the MVE scalar form).  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
					    N_SUF_32 | N_KEY);

  if (check_simd_pred_availability (et.type == NT_float,
				    NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
15794
static void
do_neon_addsub_if_i (void)
{
  /* VADD/VSUB, dispatched to the VFP, Neon or MVE encoding as
     appropriate.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
      && try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK,
					    N_EQK, N_IF_32 | N_I64 | N_KEY);

  /* The MVE scalar form has no 64-bit elements.  */
  constraint (rs == NS_QQR && et.size == 64, BAD_FPU);
  /* If we are parsing Q registers and the element types match MVE, which NEON
     also supports, then we must check whether this is an instruction that can
     be used by both MVE/NEON.  This distinction can be made based on whether
     they are predicated or not.  */
  if ((rs == NS_QQQ || rs == NS_QQR) && et.size != 64)
    {
      if (check_simd_pred_availability (et.type == NT_float,
					NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
    }
  else
    {
      /* If they are either in a D register or are using an unsupported.  */
      if (rs != NS_QQR
	  && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
    }

  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
15829
15830 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
15831 result to be:
15832 V<op> A,B (A is operand 0, B is operand 2)
15833 to mean:
15834 V<op> A,B,A
15835 not:
15836 V<op> A,B,B
15837 so handle that case specially. */
15838
15839 static void
15840 neon_exchange_operands (void)
15841 {
15842 if (inst.operands[1].present)
15843 {
15844 void *scratch = xmalloc (sizeof (inst.operands[0]));
15845
15846 /* Swap operands[1] and operands[2]. */
15847 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
15848 inst.operands[1] = inst.operands[2];
15849 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
15850 free (scratch);
15851 }
15852 else
15853 {
15854 inst.operands[1] = inst.operands[2];
15855 inst.operands[2] = inst.operands[0];
15856 }
15857 }
15858
static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  /* Encode a Neon comparison.  REGTYPES/IMMTYPES are the element types
     permitted for the three-register and compare-against-#0 forms
     respectively.  When INVERT is set, the source operands are exchanged
     first so an inverted comparison can reuse the base encoding.  */
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Immediate (#0) form: two-register encoding.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
						N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
15886
static void
do_neon_cmp (void)
{
  /* Comparison without operand exchange.  */
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
}
15892
static void
do_neon_cmp_inv (void)
{
  /* Comparison with the source operands exchanged (see
     neon_exchange_operands).  */
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
}
15898
static void
do_neon_ceq (void)
{
  /* Equality comparison: integer or float element types, no exchange.  */
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
15904
15905 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
15906 scalars, which are encoded in 5 bits, M : Rm.
15907 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
15908 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
15909 index in M.
15910
15911 Dot Product instructions are similar to multiply instructions except elsize
15912 should always be 32.
15913
15914 This function translates SCALAR, which is GAS's internal encoding of indexed
15915 scalar register, to raw encoding. There is also register and index range
15916 check based on ELSIZE. */
15917
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  /* Translate SCALAR (GAS's internal indexed-scalar encoding) to the raw
     M:Rm encoding for multiply-class instructions, checking the register
     and index ranges implied by ELSIZE.  See the comment above for the
     bit layout.  Reports an error and returns 0 on failure.  */
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
15943
15944 /* Encode multiply / multiply-accumulate scalar instructions. */
15945
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  /* Encode a multiply / multiply-accumulate whose operand 2 is an indexed
     scalar (translated by neon_scalar_for_mul).  UBIT selects the U bit
     (bit 24).  */
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
15968
static void
do_neon_mac_maybe_scalar (void)
{
  /* Multiply-accumulate: tries the VFP form first, then chooses between
     the by-scalar and three-register Neon encodings depending on whether
     operand 2 is an indexed scalar.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
15993
static void
do_neon_fmac (void)
{
  /* Fused multiply-accumulate; tries the VFP fma/fms form before the
     three-register Neon encoding.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
16005
static void
do_neon_tst (void)
{
  /* VTST: element sizes 8/16/32; signedness is irrelevant so the U bit
     argument is 0.  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
					    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size);
}
16014
16015 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
16016 same types as the MAC equivalents. The polynomial type for this instruction
16017 is encoded the same as the integer type. */
16018
static void
do_neon_mul (void)
{
  /* VMUL: VFP form, scalar form (via the MAC path), or the three-register
     form which also accepts the P8 polynomial type (see the comment
     above).  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
}
16033
static void
do_neon_qdmulh (void)
{
  /* Saturating doubling multiply (high half): signed 16/32-bit only, in
     by-scalar or three-register form.  */
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
16055
static void
do_mve_vmull (void)
{
  /* VMULL, shared between MVE and Neon.  When MVE is unavailable and the
     mnemonic is an unpredicated "vmullt", dispatch to the plain Neon VMUL
     encoder where the shape/type allows; otherwise encode the MVE
     form.  */

  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_DDS,
					  NS_QQS, NS_QQQ, NS_QQR, NS_NULL);
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
      && inst.cond == COND_ALWAYS
      && ((unsigned)inst.instruction) == M_MNEM_vmullt)
    {
      if (rs == NS_QQQ)
	{

	  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
						    N_SUF_32 | N_F64 | N_P8
						    | N_P16 | N_I_MVE | N_KEY);
	  if (((et.type == NT_poly) && et.size == 8
	       && ARM_CPU_IS_ANY (cpu_variant))
	      || (et.type == NT_integer) || (et.type == NT_float))
	    goto neon_vmul;
	}
      else
	goto neon_vmul;
    }

  constraint (rs != NS_QQQ, BAD_FPU);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
					    N_SU_32 | N_P8 | N_P16 | N_KEY);

  /* We are dealing with MVE's vmullt.  */
  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
	  || inst.operands[0].reg == inst.operands[2].reg))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (et.type == NT_poly)
    mve_encode_qqq (neon_logbits (et.size), 64);
  else
    mve_encode_qqq (et.type == NT_unsigned, et.size);

  return;

 neon_vmul:
  /* Re-dispatch the instruction as an ordinary Neon VMUL.  */
  inst.instruction = N_MNEM_vmul;
  inst.cond = 0xb;
  if (thumb_mode)
    inst.pred_insn_type = INSIDE_IT_INSN;
  do_neon_mul ();
}
16110
static void
do_mve_vabav (void)
{
  /* MVE VABAV: GPR destination, two Q-register sources (shape R,Q,Q).  */
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);

  if (rs == NS_NULL)
    return;

  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  struct neon_type_el et = neon_check_type (2, NS_NULL, N_EQK, N_KEY | N_S8
					    | N_S16 | N_S32 | N_U8 | N_U16
					    | N_U32);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rqq (et.type == NT_unsigned, et.size);
}
16133
16134 static void
16135 do_mve_vmladav (void)
16136 {
16137 enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);
16138 struct neon_type_el et = neon_check_type (3, rs,
16139 N_EQK, N_EQK, N_SU_MVE | N_KEY);
16140
16141 if (et.type == NT_unsigned
16142 && (inst.instruction == M_MNEM_vmladavx
16143 || inst.instruction == M_MNEM_vmladavax
16144 || inst.instruction == M_MNEM_vmlsdav
16145 || inst.instruction == M_MNEM_vmlsdava
16146 || inst.instruction == M_MNEM_vmlsdavx
16147 || inst.instruction == M_MNEM_vmlsdavax))
16148 first_error (BAD_SIMD_TYPE);
16149
16150 constraint (inst.operands[2].reg > 14,
16151 _("MVE vector register in the range [Q0..Q7] expected"));
16152
16153 if (inst.cond > COND_ALWAYS)
16154 inst.pred_insn_type = INSIDE_VPT_INSN;
16155 else
16156 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16157
16158 if (inst.instruction == M_MNEM_vmlsdav
16159 || inst.instruction == M_MNEM_vmlsdava
16160 || inst.instruction == M_MNEM_vmlsdavx
16161 || inst.instruction == M_MNEM_vmlsdavax)
16162 inst.instruction |= (et.size == 8) << 28;
16163 else
16164 inst.instruction |= (et.size == 8) << 8;
16165
16166 mve_encode_rqq (et.type == NT_unsigned, 64);
16167 inst.instruction |= (et.size == 32) << 16;
16168 }
16169
static void
do_neon_qrdmlah (void)
{
  /* Rounding doubling multiply accumulate/subtract (ARMv8.1 AdvSIMD
     extension), in by-scalar or three-register form.  */

  /* Check we're on the correct architecture.  */
  if (!mark_feature_used (&fpu_neon_ext_armv8))
    inst.error =
      _("instruction form not available on this architecture.");
  else if (!mark_feature_used (&fpu_neon_ext_v8_1))
    {
      as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
      record_feature_use (&fpu_neon_ext_v8_1);
    }

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
16201
static void
do_neon_fcmp_absolute (void)
{
  /* Absolute float comparison; the U-bit argument of 1 selects the
     "absolute" variant.  Float types only.  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
					    N_F_16_32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
}
16211
static void
do_neon_fcmp_absolute_inv (void)
{
  /* Inverted absolute float comparison: exchange the source operands,
     then encode as the base variant.  */
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
16218
static void
do_neon_step (void)
{
  /* Float "step" operation (three-register, float types only); only the
     FP16 size is encoded explicitly.  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
					    N_F_16_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
}
16227
static void
do_neon_abs_neg (void)
{
  /* VABS/VNEG: try the VFP form first, then encode the two-register
     Neon/MVE form for S32 or F16/F32 elements.  */
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  if (check_simd_pred_availability (et.type == NT_float,
				    NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
16254
16255 static void
16256 do_neon_sli (void)
16257 {
16258 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16259 struct neon_type_el et = neon_check_type (2, rs,
16260 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
16261 int imm = inst.operands[2].imm;
16262 constraint (imm < 0 || (unsigned)imm >= et.size,
16263 _("immediate out of range for insert"));
16264 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
16265 }
16266
16267 static void
16268 do_neon_sri (void)
16269 {
16270 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16271 struct neon_type_el et = neon_check_type (2, rs,
16272 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
16273 int imm = inst.operands[2].imm;
16274 constraint (imm < 1 || (unsigned)imm > et.size,
16275 _("immediate out of range for insert"));
16276 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
16277 }
16278
static void
do_neon_qshlu_imm (void)
{
  /* VQSHLU (immediate): signed input, unsigned saturated result.  */
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
16295
16296 static void
16297 do_neon_qmovn (void)
16298 {
16299 struct neon_type_el et = neon_check_type (2, NS_DQ,
16300 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
16301 /* Saturating move where operands can be signed or unsigned, and the
16302 destination has the same signedness. */
16303 NEON_ENCODE (INTEGER, inst);
16304 if (et.type == NT_unsigned)
16305 inst.instruction |= 0xc0;
16306 else
16307 inst.instruction |= 0x80;
16308 neon_two_same (0, 1, et.size / 2);
16309 }
16310
static void
do_neon_qmovun (void)
{
  /* Saturating narrowing move with unsigned results.  Operands must be
     signed (enforced by the type mask).  */
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
16320
static void
do_neon_rshift_sat_narrow (void)
{
  /* Saturating (rounding) shift right and narrow.  */
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
					    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
16347
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* Saturating (rounding) shift right and narrow with unsigned result.  */
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
16377
static void
do_neon_movn (void)
{
  /* VMOVN: narrowing move, integer types only; size encoded as the
     narrowed (half) width.  */
  struct neon_type_el et = neon_check_type (2, NS_DQ,
					    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
16386
static void
do_neon_rshift_narrow (void)
{
  /* (Rounding) shift right and narrow.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
					    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm> */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
16411
static void
do_neon_shll (void)
{
  /* VSHLL: shift left long.  The maximum-shift form (shift == element
     size) has a dedicated encoding distinct from the immediate form.  */
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
16441
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.

   Each CVT_VAR entry is: name, destination type, source type, register
   flags, and up to three VFP mnemonic stems (bitshift, plain, round-to-zero
   forms; NULL where no such form exists).  The X-macro is expanded once to
   build enum neon_cvt_flavour and again inside get_neon_cvt_flavour and
   do_vfp_nsyn_cvt to drive type checking and mnemonic selection.  */

#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */ \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
16480
/* Expand CVT_FLAVOUR_VAR into enumerator names (neon_cvt_flavour_s32_f32,
   etc.), in table order.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* First flavour that is a VFP (rather than Neon) conversion.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
16493
/* Determine which conversion flavour matches the operand types parsed into
   INST for shape RS, by type-checking every CVT_FLAVOUR_VAR entry in turn.
   Returns neon_cvt_flavour_invalid (and leaves inst.error set by the failed
   checks) when none matches.  */
static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
  /* Each expansion tries one flavour; on a type match it clears the error
     accumulated by earlier failed attempts and returns that flavour.  */
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  /* whole_reg and key are referenced by name inside CVT_FLAVOUR_VAR.  */
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
16519
/* Rounding modes selected by the VCVT{A,N,P,M} / VCVTR / VCVT mnemonic
   suffixes.  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,	/* Round to nearest, ties away from zero.  */
  neon_cvt_mode_n,	/* Round to nearest, ties to even.  */
  neon_cvt_mode_p,	/* Round towards plus infinity.  */
  neon_cvt_mode_m,	/* Round towards minus infinity.  */
  neon_cvt_mode_z,	/* Round towards zero (plain VCVT).  */
  neon_cvt_mode_x,	/* Use FPSCR rounding mode (VCVTR).  */
  neon_cvt_mode_r
};
16530
16531 /* Neon-syntax VFP conversions. */
16532
/* Encode a Neon-syntax VCVT as the corresponding VFP instruction, by looking
   up the legacy VFP mnemonic for FLAVOUR and re-entering the assembler via
   do_vfp_nsyn_opcode.  RS selects between the bitshift (fixed-point) and
   plain variants.  */
static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
	  /* Table indexed by flavour; NULL where no bitshift form exists.  */
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  /* The VFP form is two-operand: drop the duplicated register by
	     shifting the immediate into operand 1.  */
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
16584
/* Encode the round-towards-zero variant of a Neon-syntax VCVT using the
   legacy VFP "...z" mnemonics (e.g. ftosizs).  Silently does nothing when
   no such variant exists for the selected flavour.  */
static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
      /* Table of round-to-zero mnemonics indexed by flavour.  */
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
16601
16602 static void
16603 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
16604 enum neon_cvt_mode mode)
16605 {
16606 int sz, op;
16607 int rm;
16608
16609 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16610 D register operands. */
16611 if (flavour == neon_cvt_flavour_s32_f64
16612 || flavour == neon_cvt_flavour_u32_f64)
16613 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
16614 _(BAD_FPU));
16615
16616 if (flavour == neon_cvt_flavour_s32_f16
16617 || flavour == neon_cvt_flavour_u32_f16)
16618 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
16619 _(BAD_FP16));
16620
16621 set_pred_insn_type (OUTSIDE_PRED_INSN);
16622
16623 switch (flavour)
16624 {
16625 case neon_cvt_flavour_s32_f64:
16626 sz = 1;
16627 op = 1;
16628 break;
16629 case neon_cvt_flavour_s32_f32:
16630 sz = 0;
16631 op = 1;
16632 break;
16633 case neon_cvt_flavour_s32_f16:
16634 sz = 0;
16635 op = 1;
16636 break;
16637 case neon_cvt_flavour_u32_f64:
16638 sz = 1;
16639 op = 0;
16640 break;
16641 case neon_cvt_flavour_u32_f32:
16642 sz = 0;
16643 op = 0;
16644 break;
16645 case neon_cvt_flavour_u32_f16:
16646 sz = 0;
16647 op = 0;
16648 break;
16649 default:
16650 first_error (_("invalid instruction shape"));
16651 return;
16652 }
16653
16654 switch (mode)
16655 {
16656 case neon_cvt_mode_a: rm = 0; break;
16657 case neon_cvt_mode_n: rm = 1; break;
16658 case neon_cvt_mode_p: rm = 2; break;
16659 case neon_cvt_mode_m: rm = 3; break;
16660 default: first_error (_("invalid rounding mode")); return;
16661 }
16662
16663 NEON_ENCODE (FPV8, inst);
16664 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
16665 encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
16666 inst.instruction |= sz << 8;
16667
16668 /* ARMv8.2 fp16 VCVT instruction. */
16669 if (flavour == neon_cvt_flavour_s32_f16
16670 ||flavour == neon_cvt_flavour_u32_f16)
16671 do_scalar_fp16_v82_encode ();
16672 inst.instruction |= op << 7;
16673 inst.instruction |= rm << 16;
16674 inst.instruction |= 0xf0000000;
16675 inst.is_neon = TRUE;
16676 }
16677
/* Common worker for all VCVT variants.  MODE carries the rounding mode
   implied by the mnemonic.  Dispatches between legacy VFP encodings,
   ARMv8 rounding-mode encodings, Neon fixed-point/integer conversions and
   the Advanced SIMD half-precision widen/narrow forms, based on the parsed
   operand shape and type flavour.  */
static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	/* Neon fixed-point conversion (has an immediate fraction-bits
	   operand).  enctab is indexed by flavour.  */
	unsigned immbits;
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* 32-bit elements: immediate encodes 32 - #fbits.  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    /* 16-bit elements: different size field and 16 - #fbits.  */
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* ARMv8 Neon VCVT{A,N,P,M}: unconditional encoding with the
	     rounding mode in bits [9:8].  */
	  NEON_ENCODE (FLOAT, inst);
	  set_pred_insn_type (OUTSIDE_PRED_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Neon integer conversion; enctab is indexed by flavour.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

      /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      /* VCVT.F16.F32 (narrow) vs VCVT.F32.F16 (widen) base opcodes.  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
16871
/* VCVTR: convert using the rounding mode in the FPSCR.  */
static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
16877
/* VCVT: convert with the default round-towards-zero behaviour.  */
static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
16883
/* VCVTA: round to nearest, ties away from zero.  */
static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
16889
/* VCVTN: round to nearest, ties to even.  */
static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
16895
/* VCVTP: round towards plus infinity.  */
static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
16901
/* VCVTM: round towards minus infinity.  */
static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
16907
/* Encode a VCVTB/VCVTT half-precision conversion.  T selects the top (bit 7)
   half of the 32-bit register, TO means converting to f16 (narrowing), and
   IS_DOUBLE selects the f64 forms (which require FP for ARMv8).  */
static void
do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
{
  if (is_double)
    mark_feature_used (&fpu_vfp_ext_armv8);

  /* The D register is on the non-f16 side: Dd when widening from f16,
     Dm when narrowing to f16.  */
  encode_arm_vfp_reg (inst.operands[0].reg,
		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg,
		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= to ? 0x10000 : 0;
  inst.instruction |= t ? 0x80 : 0;
  inst.instruction |= is_double ? 0x100 : 0;
  do_vfp_cond_or_thumb ();
}
16923
/* Shared worker for VCVTB (T = FALSE) and VCVTT (T = TRUE).  Determines the
   direction (to/from f16) and width (f32/f64) from the parsed operand types
   and hands off to do_neon_cvttb_2.  */
static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* f32 -> f16.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* f16 -> f32.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      /* f64 -> f16.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      /* f16 -> f64.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    /* No type combination matched; inst.error is already set.  */
    return;
}
16965
/* VCVTB: convert the bottom half of the 32-bit register.  */
static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
16971
16972
/* VCVTT: convert the top half of the 32-bit register.  */
static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
16978
/* Encode the immediate forms of VMOV/VMVN.  The inverse instruction is
   substituted (by flipping bit 5) when the literal is only representable
   with the opposite operation.  Reports an error when the immediate cannot
   be encoded either way.  */
static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  /* A 64-bit immediate arrives split across imm (low) and reg (high).  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the op bit with the possibly-flipped value.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
17030
17031 static void
17032 do_neon_mvn (void)
17033 {
17034 if (inst.operands[1].isreg)
17035 {
17036 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17037
17038 NEON_ENCODE (INTEGER, inst);
17039 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17040 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17041 inst.instruction |= LOW4 (inst.operands[1].reg);
17042 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
17043 inst.instruction |= neon_quad (rs) << 6;
17044 }
17045 else
17046 {
17047 NEON_ENCODE (IMMED, inst);
17048 neon_move_immediate ();
17049 }
17050
17051 neon_dp_fixup (&inst);
17052 }
17053
17054 /* Encode instructions of form:
17055
17056 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
17057 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
17058
17059 static void
17060 neon_mixed_length (struct neon_type_el et, unsigned size)
17061 {
17062 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17063 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17064 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
17065 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
17066 inst.instruction |= LOW4 (inst.operands[2].reg);
17067 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
17068 inst.instruction |= (et.type == NT_unsigned) << 24;
17069 inst.instruction |= neon_logbits (size) << 20;
17070
17071 neon_dp_fixup (&inst);
17072 }
17073
/* VADDL/VSUBL/VABDL.  For Neon these are Q = D op D lengthening ops.  For
   MVE there are no lengthening forms: "vaddle"/"vsublt" etc. are really the
   base instruction with an LE/LT condition suffix inside a VPT/IT context,
   so remap to vadd/vsub/vabd accordingly.  */
static void
do_neon_dyadic_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_QDD, NS_QQQ, NS_QQR, NS_NULL);
  if (rs == NS_QDD)
    {
      if (vfp_or_neon_is_neon (NEON_CHECK_ARCH | NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (INTEGER, inst);
      /* FIXME: Type checking for lengthening op.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
      neon_mixed_length (et, et.size);
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (inst.cond == 0xf || inst.cond == 0x10))
    {
      /* If parsing for MVE, vaddl/vsubl/vabdl{e,t} can only be vadd/vsub/vabd
	 in an IT block with le/lt conditions.  */

      /* 0xf was the trailing "e" (LE), 0x10 the trailing "t" (LT); rewrite
	 to the real condition codes.  */
      if (inst.cond == 0xf)
	inst.cond = 0xb;
      else if (inst.cond == 0x10)
	inst.cond = 0xd;

      inst.pred_insn_type = INSIDE_IT_INSN;

      /* Re-dispatch as the non-lengthening instruction.  */
      if (inst.instruction == N_MNEM_vaddl)
	{
	  inst.instruction = N_MNEM_vadd;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vsubl)
	{
	  inst.instruction = N_MNEM_vsub;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vabdl)
	{
	  inst.instruction = N_MNEM_vabd;
	  do_neon_dyadic_if_su ();
	}
    }
  else
    first_error (BAD_FPU);
}
17121
/* VABAL: absolute difference and accumulate, long (Q = Q + |D - D|).  */
static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
17129
/* Encode a long multiply(-accumulate) whose third operand may be either a
   scalar (Dm[x]) or a plain D register.  REGTYPES constrains the element
   types accepted in the scalar-index form, SCALARTYPES those of the
   register form.  (NOTE(review): the parameter names read as if swapped
   relative to their use — confirm against callers before renaming.)  */
static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      /* Scalar-indexed form: Qd, Dn, Dm[x].  */
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      /* Three-register form: Qd, Dn, Dm.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
17148
/* VMLAL/VMLSL/VMULL-style long MAC; scalar form restricted to 16/32-bit
   elements (8-bit scalars are not encodable).  */
static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
17154
17155 /* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
17156 internal SCALAR. QUAD_P is 1 if it's for Q format, otherwise it's 0. */
17157
/* Like neon_scalar_for_mul, this function generates the Rm encoding from
   GAS's internal SCALAR for the fp16 vfmal/vfmsl instructions.  QUAD_P is 1
   for the Q form, 0 for the D form.  Reports an error and returns 0 when
   the register or element index is out of range for the chosen form.  */
static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned rm = NEON_SCALAR_REG (scalar);
  unsigned idx = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      /* Q form: 3-bit register, 2-bit index split across bits 3 and 5.  */
      if (rm <= 7 && idx <= 3)
	return ((rm & 0x7)
		| ((idx & 0x1) << 3)
		| (((idx >> 1) & 0x1) << 5));
    }
  else
    {
      /* D form: 4-bit register split across bits 5 and 2:0, 1-bit index.  */
      if (rm <= 15 && idx <= 1)
	return (((rm & 0x1) << 5)
		| ((rm >> 1) & 0x7)
		| ((idx & 0x1) << 3));
    }

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
17187
/* Encode vfmal (SUBTYPE == 0) / vfmsl (SUBTYPE == 1): fp16 multiply with
   widening accumulate into f32, in three-register or scalar-indexed form.
   Requires the ARMv8.2 FP16 FML extension.  */
static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding.  'size"
     field (bits[21:20]) has different meaning.  For scalar index variant, it's
     used to differentiate add and subtract, otherwise it's with fixed value
     0x2.  */
  int size = -1;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      /* Scalar form: high byte 0xfe, S bit (subtype) goes into size.  */
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      /* Register form: high byte 0xfc, subtype selects bit 23.  */
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }

  neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16);

  /* "opcode" from template has included "ubit", so simply pass 0 here.  Also,
     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
     so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup.  Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

#define LOW1(R) ((R) & 0x1)
#define HI4(R) (((R) >> 1) & 0xf)
  /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      /* Replace Rm with the fp16 scalar encoding.  */
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
17269
/* VFMAL: fp16 fused multiply-accumulate, long.  */
static void
do_neon_vfmal (void)
{
  return do_neon_fmac_maybe_scalar_long (0);
}
17275
/* VFMSL: fp16 fused multiply-subtract, long.  */
static void
do_neon_vfmsl (void)
{
  return do_neon_fmac_maybe_scalar_long (1);
}
17281
/* VADDW/VSUBW: wide ops, Q = Q op D with the D operand widened.  */
static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
17289
/* VADDHN/VSUBHN-style narrowing ops: D = high halves of (Q op Q).  */
static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
17300
/* VQDMLAL/VQDMLSL/VQDMULL: saturating doubling long multiply; signed
   16/32-bit elements only, in both scalar and register forms.  */
static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
17306
/* VMULL: long multiply, integer or polynomial, scalar or register form.  */
static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    /* Scalar form shares the generic long-MAC path.  */
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Force the 0b10 size encoding for p64.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
17338
17339 static void
17340 do_neon_ext (void)
17341 {
17342 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
17343 struct neon_type_el et = neon_check_type (3, rs,
17344 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
17345 unsigned imm = (inst.operands[3].imm * et.size) / 8;
17346
17347 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
17348 _("shift out of range"));
17349 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17350 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17351 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
17352 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
17353 inst.instruction |= LOW4 (inst.operands[2].reg);
17354 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
17355 inst.instruction |= neon_quad (rs) << 6;
17356 inst.instruction |= imm << 8;
17357
17358 neon_dp_fixup (&inst);
17359 }
17360
/* VREV16/VREV32/VREV64: reverse elements within regions of a vector.  */
static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  /* The variant is already encoded in the opcode template's op field.  */
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask.  We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
17377
/* VDUP: broadcast either a vector scalar (Dm[x]) or an ARM core register
   into every lane of a D or Q register.  */
static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar source: VDUP.<size> <Dd|Qd>, <Dm[x]>.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Index and size share the imm4 field: index above a leading 1.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* Core-register source: VDUP.<size> <Dd|Qd>, <Rm>.  */
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
17428
17429 /* VMOV has particularly many variations. It can be one of:
17430 0. VMOV<c><q> <Qd>, <Qm>
17431 1. VMOV<c><q> <Dd>, <Dm>
17432 (Register operations, which are VORR with Rm = Rn.)
17433 2. VMOV<c><q>.<dt> <Qd>, #<imm>
17434 3. VMOV<c><q>.<dt> <Dd>, #<imm>
17435 (Immediate loads.)
17436 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
17437 (ARM register to scalar.)
17438 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
17439 (Two ARM registers to vector.)
17440 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
17441 (Scalar to ARM register.)
17442 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
17443 (Vector to two ARM registers.)
17444 8. VMOV.F32 <Sd>, <Sm>
17445 9. VMOV.F64 <Dd>, <Dm>
17446 (VFP register moves.)
17447 10. VMOV.F32 <Sd>, #imm
17448 11. VMOV.F64 <Dd>, #imm
17449 (VFP float immediate load.)
17450 12. VMOV <Rd>, <Sm>
17451 (VFP single to ARM reg.)
17452 13. VMOV <Sd>, <Rm>
17453 (ARM reg to VFP single.)
17454 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
17455 (Two ARM regs to two VFP singles.)
17456 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
17457 (Two VFP singles to two ARM regs.)
17458
17459 These cases can be disambiguated using neon_select_shape, except cases 1/9
17460 and 3/11 which depend on the operand type too.
17461
17462 All the encoded bits are hardcoded by this function.
17463
17464 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
17465 Cases 5, 7 may be used with VFPv2 and above.
17466
17467 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
17468 can specify a type where it doesn't make sense to, and is ignored). */
17469
static void
do_neon_mov (void)
{
  /* Assemble all forms of VMOV: register-register (Neon and VFP),
     immediate loads (VMOV.I / fconsts / fconstd), scalar<->core-register
     transfers, and the two-register fmrrs/fmsrr/fmdrr/fmrrd forms.  The
     case numbers in the comments refer to the table in the block comment
     preceding this function.  */
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
                                          NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
                                          NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
                                          NS_HR, NS_RH, NS_HI, NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      /* An explicit .f64 type selects the VFP copy (case 9); anything
         else falls through to the Neon register move (case 1).  */
      if (et.type == NT_float && et.size == 64)
        {
          do_vfp_nsyn_opcode ("fcpyd");
          break;
        }
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
          return;
        /* The architecture manual I have doesn't explicitly state which
           value the U bit should have for register->register moves, but
           the equivalent VORR instruction has U = 0, so do that.  */
        inst.instruction = 0x0200110;
        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
        /* The source register is encoded in both Vm and Vn, as for
           VORR Dd, Dm, Dm.  */
        inst.instruction |= LOW4 (inst.operands[1].reg);
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
        inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
        inst.instruction |= HI1 (inst.operands[1].reg) << 7;
        inst.instruction |= neon_quad (rs) << 6;

        neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
        {
          /* case 11 (fconstd).  */
          ldconst = "fconstd";
          goto encode_fconstd;
        }
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
        return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
        /* Core register -> scalar (VMOV Dn[x], Rd).  */
        unsigned bcdebits = 0;
        int logsize;
        unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
        unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

        /* .<size> is optional here, defaulting to .32. */
        if (inst.vectype.elems == 0
            && inst.operands[0].vectype.type == NT_invtype
            && inst.operands[1].vectype.type == NT_invtype)
          {
            inst.vectype.el[0].type = NT_untyped;
            inst.vectype.el[0].size = 32;
            inst.vectype.elems = 1;
          }

        et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
        logsize = neon_logbits (et.size);

        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
                    _(BAD_FPU));
        /* Only the 32-bit transfer is available without Neon.  */
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
                    && et.size != 32, _(BAD_FPU));
        constraint (et.type == NT_invtype, _("bad type for scalar"));
        constraint (x >= 64 / et.size, _("scalar index out of range"));

        /* Base opc1:opc2 pattern for the element size; the scalar index
           is then OR-ed into the low bits below.  */
        switch (et.size)
          {
          case 8:  bcdebits = 0x8; break;
          case 16: bcdebits = 0x1; break;
          case 32: bcdebits = 0x0; break;
          default: ;
          }

        bcdebits |= x << logsize;

        inst.instruction = 0xe000b10;
        do_vfp_cond_or_thumb ();
        inst.instruction |= LOW4 (dn) << 16;
        inst.instruction |= HI1 (dn) << 7;
        inst.instruction |= inst.operands[1].reg << 12;
        inst.instruction |= (bcdebits & 3) << 5;
        inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
                  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
        /* Scalar -> core register (VMOV Rd, Dn[x]), optionally sign- or
           zero-extending for the 8- and 16-bit element sizes.  */
        unsigned logsize;
        unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
        unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
        unsigned abcdebits = 0;

        /* .<dt> is optional here, defaulting to .32. */
        if (inst.vectype.elems == 0
            && inst.operands[0].vectype.type == NT_invtype
            && inst.operands[1].vectype.type == NT_invtype)
          {
            inst.vectype.el[0].type = NT_untyped;
            inst.vectype.el[0].size = 32;
            inst.vectype.elems = 1;
          }

        et = neon_check_type (2, NS_NULL,
                              N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
        logsize = neon_logbits (et.size);

        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
                    _(BAD_FPU));
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
                    && et.size != 32, _(BAD_FPU));
        constraint (et.type == NT_invtype, _("bad type for scalar"));
        constraint (x >= 64 / et.size, _("scalar index out of range"));

        /* The U (unsigned) bit is folded into the pattern for sub-word
           element sizes.  */
        switch (et.size)
          {
          case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
          case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
          case 32: abcdebits = 0x00; break;
          default: ;
          }

        abcdebits |= x << logsize;
        inst.instruction = 0xe100b10;
        do_vfp_cond_or_thumb ();
        inst.instruction |= LOW4 (dn) << 16;
        inst.instruction |= HI1 (dn) << 7;
        inst.instruction |= inst.operands[0].reg << 12;
        inst.instruction |= (abcdebits & 3) << 5;
        inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
                  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      if (!inst.operands[1].immisfloat)
        {
          unsigned new_imm;
          /* Immediate has to fit in 8 bits so float is enough.  */
          float imm = (float) inst.operands[1].imm;
          memcpy (&new_imm, &imm, sizeof (float));
          /* But the assembly may have been written to provide an integer
             bit pattern that equates to a float, so check that the
             conversion has worked.  */
          if (is_quarter_float (new_imm))
            {
              if (is_quarter_float (inst.operands[1].imm))
                as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));

              inst.operands[1].imm = new_imm;
              inst.operands[1].immisfloat = 1;
            }
        }

      if (is_quarter_float (inst.operands[1].imm))
        {
          inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
          do_vfp_nsyn_opcode (ldconst);

          /* ARMv8.2 fp16 vmov.f16 instruction.  */
          if (rs == NS_HI)
            do_scalar_fp16_v82_encode ();
        }
      else
        first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
        do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
        do_scalar_fp16_v82_encode ();
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
                  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
                  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
         shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
17737
17738 static void
17739 do_neon_rshift_round_imm (void)
17740 {
17741 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
17742 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
17743 int imm = inst.operands[2].imm;
17744
17745 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
17746 if (imm == 0)
17747 {
17748 inst.operands[2].present = 0;
17749 do_neon_mov ();
17750 return;
17751 }
17752
17753 constraint (imm < 1 || (unsigned)imm > et.size,
17754 _("immediate out of range for shift"));
17755 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
17756 et.size - imm);
17757 }
17758
17759 static void
17760 do_neon_movhf (void)
17761 {
17762 enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
17763 constraint (rs != NS_HH, _("invalid suffix"));
17764
17765 if (inst.cond != COND_ALWAYS)
17766 {
17767 if (thumb_mode)
17768 {
17769 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
17770 " the behaviour is UNPREDICTABLE"));
17771 }
17772 else
17773 {
17774 inst.error = BAD_COND;
17775 return;
17776 }
17777 }
17778
17779 do_vfp_sp_monadic ();
17780
17781 inst.is_neon = 1;
17782 inst.instruction |= 0xf0000000;
17783 }
17784
17785 static void
17786 do_neon_movl (void)
17787 {
17788 struct neon_type_el et = neon_check_type (2, NS_QD,
17789 N_EQK | N_DBL, N_SU_32 | N_KEY);
17790 unsigned sizebits = et.size >> 3;
17791 inst.instruction |= sizebits << 19;
17792 neon_two_same (0, et.type == NT_unsigned, -1);
17793 }
17794
17795 static void
17796 do_neon_trn (void)
17797 {
17798 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17799 struct neon_type_el et = neon_check_type (2, rs,
17800 N_EQK, N_8 | N_16 | N_32 | N_KEY);
17801 NEON_ENCODE (INTEGER, inst);
17802 neon_two_same (neon_quad (rs), 1, et.size);
17803 }
17804
17805 static void
17806 do_neon_zip_uzp (void)
17807 {
17808 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17809 struct neon_type_el et = neon_check_type (2, rs,
17810 N_EQK, N_8 | N_16 | N_32 | N_KEY);
17811 if (rs == NS_DD && et.size == 32)
17812 {
17813 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
17814 inst.instruction = N_MNEM_vtrn;
17815 do_neon_trn ();
17816 return;
17817 }
17818 neon_two_same (neon_quad (rs), 1, et.size);
17819 }
17820
17821 static void
17822 do_neon_sat_abs_neg (void)
17823 {
17824 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17825 struct neon_type_el et = neon_check_type (2, rs,
17826 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
17827 neon_two_same (neon_quad (rs), 1, et.size);
17828 }
17829
17830 static void
17831 do_neon_pair_long (void)
17832 {
17833 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17834 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
17835 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
17836 inst.instruction |= (et.type == NT_unsigned) << 7;
17837 neon_two_same (neon_quad (rs), 1, et.size);
17838 }
17839
17840 static void
17841 do_neon_recip_est (void)
17842 {
17843 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17844 struct neon_type_el et = neon_check_type (2, rs,
17845 N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
17846 inst.instruction |= (et.type == NT_float) << 8;
17847 neon_two_same (neon_quad (rs), 1, et.size);
17848 }
17849
17850 static void
17851 do_neon_cls (void)
17852 {
17853 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17854 struct neon_type_el et = neon_check_type (2, rs,
17855 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
17856 neon_two_same (neon_quad (rs), 1, et.size);
17857 }
17858
17859 static void
17860 do_neon_clz (void)
17861 {
17862 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17863 struct neon_type_el et = neon_check_type (2, rs,
17864 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
17865 neon_two_same (neon_quad (rs), 1, et.size);
17866 }
17867
17868 static void
17869 do_neon_cnt (void)
17870 {
17871 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17872 struct neon_type_el et = neon_check_type (2, rs,
17873 N_EQK | N_INT, N_8 | N_KEY);
17874 neon_two_same (neon_quad (rs), 1, et.size);
17875 }
17876
17877 static void
17878 do_neon_swp (void)
17879 {
17880 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17881 neon_two_same (neon_quad (rs), 1, -1);
17882 }
17883
17884 static void
17885 do_neon_tbl_tbx (void)
17886 {
17887 unsigned listlenbits;
17888 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
17889
17890 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
17891 {
17892 first_error (_("bad list length for table lookup"));
17893 return;
17894 }
17895
17896 listlenbits = inst.operands[1].imm - 1;
17897 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17898 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17899 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
17900 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
17901 inst.instruction |= LOW4 (inst.operands[2].reg);
17902 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
17903 inst.instruction |= listlenbits << 8;
17904
17905 neon_dp_fixup (&inst);
17906 }
17907
static void
do_neon_ldm_stm (void)
{
  /* Assemble VLDM/VSTM (and the DB forms) of D registers; a
     single-precision register list is handed to the VFP encoder
     instead.  */
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register takes two words in the offset field.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
              _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
              _("register list must contain at least 1 and at most 16 "
                "registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
17937
17938 static void
17939 do_neon_ldr_str (void)
17940 {
17941 int is_ldr = (inst.instruction & (1 << 20)) != 0;
17942
17943 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
17944 And is UNPREDICTABLE in thumb mode. */
17945 if (!is_ldr
17946 && inst.operands[1].reg == REG_PC
17947 && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
17948 {
17949 if (thumb_mode)
17950 inst.error = _("Use of PC here is UNPREDICTABLE");
17951 else if (warn_on_deprecated)
17952 as_tsktsk (_("Use of PC here is deprecated"));
17953 }
17954
17955 if (inst.operands[0].issingle)
17956 {
17957 if (is_ldr)
17958 do_vfp_nsyn_opcode ("flds");
17959 else
17960 do_vfp_nsyn_opcode ("fsts");
17961
17962 /* ARMv8.2 vldr.16/vstr.16 instruction. */
17963 if (inst.vectype.el[0].size == 16)
17964 do_scalar_fp16_v82_encode ();
17965 }
17966 else
17967 {
17968 if (is_ldr)
17969 do_vfp_nsyn_opcode ("fldd");
17970 else
17971 do_vfp_nsyn_opcode ("fstd");
17972 }
17973 }
17974
static void
do_t_vldr_vstr_sysreg (void)
{
  /* Assemble the Thumb VLDR/VSTR (system register) encoding.  */
  /* The load/store direction bit is bit 20 in both the incoming FP
     encoding and the outgoing sysreg encoding.  */
  int fp_vldr_bitno = 20, sysreg_vldr_bitno = 20;
  bfd_boolean is_vldr = ((inst.instruction & (1 << fp_vldr_bitno)) != 0);

  /* Use of PC is UNPREDICTABLE.  */
  if (inst.operands[1].reg == REG_PC)
    inst.error = _("Use of PC here is UNPREDICTABLE");

  if (inst.operands[1].immisreg)
    inst.error = _("instruction does not accept register index");

  if (!inst.operands[1].isreg)
    inst.error = _("instruction does not accept PC-relative addressing");

  /* The immediate offset must fit in 7 bits (sign handled by the
     addressing-mode encoder below).  */
  if (abs (inst.operands[1].imm) >= (1 << 7))
    inst.error = _("immediate value out of range");

  inst.instruction = 0xec000f80;
  if (is_vldr)
    inst.instruction |= 1 << sysreg_vldr_bitno;
  encode_arm_cp_address (1, TRUE, FALSE, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM);
  /* The system register number is split: low 3 bits into [15:13],
     bit 3 into bit 22.  */
  inst.instruction |= (inst.operands[0].imm & 0x7) << 13;
  inst.instruction |= (inst.operands[0].imm & 0x8) << 19;
}
18001
18002 static void
18003 do_vldr_vstr (void)
18004 {
18005 bfd_boolean sysreg_op = !inst.operands[0].isreg;
18006
18007 /* VLDR/VSTR (System Register). */
18008 if (sysreg_op)
18009 {
18010 if (!mark_feature_used (&arm_ext_v8_1m_main))
18011 as_bad (_("Instruction not permitted on this architecture"));
18012
18013 do_t_vldr_vstr_sysreg ();
18014 }
18015 /* VLDR/VSTR. */
18016 else
18017 {
18018 if (!mark_feature_used (&fpu_vfp_ext_v1xd))
18019 as_bad (_("Instruction not permitted on this architecture"));
18020 do_neon_ldr_str ();
18021 }
18022 }
18023
18024 /* "interleave" version also handles non-interleaving register VLD1/VST1
18025 instructions. */
18026
static void
do_neon_ld_st_interleave (void)
{
  /* Encode VLD<n>/VST<n> (multiple n-element structures), including the
     plain register-list forms of VLD1/VST1.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL,
                                            N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Map the @64/@128/@256 alignment hint to the 2-bit align field,
     validating it against the register-list length.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
            && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
          goto bad_alignment;
        alignbits = 2;
        break;
      case 256:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
          goto bad_alignment;
        alignbits = 3;
        break;
      default:
      bad_alignment:
        first_error (_("bad alignment"));
        return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
        | (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
              BAD_EL_TYPE);

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
18092
18093 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
18094 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
18095 otherwise. The variable arguments are a list of pairs of legal (size, align)
18096 values, terminated with -1. */
18097
18098 static int
18099 neon_alignment_bit (int size, int align, int *do_alignment, ...)
18100 {
18101 va_list ap;
18102 int result = FAIL, thissize, thisalign;
18103
18104 if (!inst.operands[1].immisalign)
18105 {
18106 *do_alignment = 0;
18107 return SUCCESS;
18108 }
18109
18110 va_start (ap, do_alignment);
18111
18112 do
18113 {
18114 thissize = va_arg (ap, int);
18115 if (thissize == -1)
18116 break;
18117 thisalign = va_arg (ap, int);
18118
18119 if (size == thissize && align == thisalign)
18120 result = SUCCESS;
18121 }
18122 while (result != SUCCESS);
18123
18124 va_end (ap);
18125
18126 if (result == SUCCESS)
18127 *do_alignment = 1;
18128 else
18129 first_error (_("unsupported alignment for instruction"));
18130
18131 return result;
18132 }
18133
static void
do_neon_ld_st_lane (void)
{
  /* Encode VLD<n>/VST<n> (single n-element structure to/from one lane).  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;  /* <n> minus one.  */
  int max_el = 64 / et.size;            /* Lanes per D register.  */

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
              _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
              _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
              && et.size == 8,
              _("stride of 2 unavailable when element size is 8"));

  /* Validate the legal (size, alignment) pairs for this <n> and encode
     the alignment bits.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
                                       32, 32, -1);
      if (align_good == FAIL)
        return;
      if (do_alignment)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 16: alignbits = 0x1; break;
            case 32: alignbits = 0x3; break;
            default: ;
            }
          inst.instruction |= alignbits << 4;
        }
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
                                       16, 32, 32, 64, -1);
      if (align_good == FAIL)
        return;
      if (do_alignment)
        inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
                                       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
        return;
      if (do_alignment)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 8:  alignbits = 0x1; break;
            case 16: alignbits = 0x1; break;
            case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
            default: ;
            }
          inst.instruction |= alignbits << 4;
        }
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
18218
18219 /* Encode single n-element structure to all lanes VLD<n> instructions. */
18220
static void
do_neon_ld_dup (void)
{
  /* Encode single n-element structure to all lanes VLD<n>
     instructions (the "[]" forms).  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  /* Bits [9:8] of the initial bitmask hold <n> minus one.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
        return;
      /* A 2-register list sets bit 5.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
        {
        case 1: break;
        case 2: inst.instruction |= 1 << 5; break;
        default: first_error (_("bad list length")); return;
        }
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_alignment, 8, 16, 16, 32, 32, 64,
                                       -1);
      if (align_good == FAIL)
        return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
                  _("bad list length"));
      /* Register stride of two is encoded in bit 5.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
        int align = inst.operands[1].imm >> 8;
        align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
                                         16, 64, 32, 64, 32, 128, -1);
        if (align_good == FAIL)
          return;
        constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
                    _("bad list length"));
        if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
          inst.instruction |= 1 << 5;
        /* The 32-bit/128-bit-aligned combination has a dedicated size
           field value.  */
        if (et.size == 32 && align == 128)
          inst.instruction |= 0x3 << 6;
        else
          inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_alignment << 4;
}
18293
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
18296
18297 static void
18298 do_neon_ldx_stx (void)
18299 {
18300 if (inst.operands[1].isreg)
18301 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
18302
18303 switch (NEON_LANE (inst.operands[0].imm))
18304 {
18305 case NEON_INTERLEAVE_LANES:
18306 NEON_ENCODE (INTERLV, inst);
18307 do_neon_ld_st_interleave ();
18308 break;
18309
18310 case NEON_ALL_LANES:
18311 NEON_ENCODE (DUP, inst);
18312 if (inst.instruction == N_INV)
18313 {
18314 first_error ("only loads support such operands");
18315 break;
18316 }
18317 do_neon_ld_dup ();
18318 break;
18319
18320 default:
18321 NEON_ENCODE (LANE, inst);
18322 do_neon_ld_st_lane ();
18323 }
18324
18325 /* L bit comes from bit mask. */
18326 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
18327 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
18328 inst.instruction |= inst.operands[1].reg << 16;
18329
18330 if (inst.operands[1].postind)
18331 {
18332 int postreg = inst.operands[1].imm & 0xf;
18333 constraint (!inst.operands[1].immisreg,
18334 _("post-index must be a register"));
18335 constraint (postreg == 0xd || postreg == 0xf,
18336 _("bad register for post-index"));
18337 inst.instruction |= postreg;
18338 }
18339 else
18340 {
18341 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
18342 constraint (inst.relocs[0].exp.X_op != O_constant
18343 || inst.relocs[0].exp.X_add_number != 0,
18344 BAD_ADDR_MODE);
18345
18346 if (inst.operands[1].writeback)
18347 {
18348 inst.instruction |= 0xd;
18349 }
18350 else
18351 inst.instruction |= 0xf;
18352 }
18353
18354 if (thumb_mode)
18355 inst.instruction |= 0xf9000000;
18356 else
18357 inst.instruction |= 0xf4000000;
18358 }
18359
18360 /* FP v8. */
18361 static void
18362 do_vfp_nsyn_fpv8 (enum neon_shape rs)
18363 {
18364 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
18365 D register operands. */
18366 if (neon_shape_class[rs] == SC_DOUBLE)
18367 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
18368 _(BAD_FPU));
18369
18370 NEON_ENCODE (FPV8, inst);
18371
18372 if (rs == NS_FFF || rs == NS_HHH)
18373 {
18374 do_vfp_sp_dyadic ();
18375
18376 /* ARMv8.2 fp16 instruction. */
18377 if (rs == NS_HHH)
18378 do_scalar_fp16_v82_encode ();
18379 }
18380 else
18381 do_vfp_dp_rd_rn_rm ();
18382
18383 if (rs == NS_DDD)
18384 inst.instruction |= 0x100;
18385
18386 inst.instruction |= 0xf0000000;
18387 }
18388
18389 static void
18390 do_vsel (void)
18391 {
18392 set_pred_insn_type (OUTSIDE_PRED_INSN);
18393
18394 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
18395 first_error (_("invalid instruction shape"));
18396 }
18397
18398 static void
18399 do_vmaxnm (void)
18400 {
18401 set_pred_insn_type (OUTSIDE_PRED_INSN);
18402
18403 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
18404 return;
18405
18406 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
18407 return;
18408
18409 neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
18410 }
18411
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  /* Common encoder for the VRINT* family.  MODE selects the rounding
     variant; scalar (VFP) and vector (Neon) encodings are handled
     separately below.  */
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
                _(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
                        | N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The a/n/p/m modes have no condition field and may not appear
         inside an IT block.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
          || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
        set_pred_insn_type (OUTSIDE_PRED_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
        do_vfp_sp_monadic ();
      else
        do_vfp_dp_rd_rm ();

      switch (mode)
        {
        case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
        case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
        case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
        case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
        case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
        case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
        case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
        default: abort ();
        }

      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
        do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
        return;

      set_pred_insn_type (OUTSIDE_PRED_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
        return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
                          | neon_logbits (et.size) << 18);

      /* The rounding mode goes in bits [9:7]; mode 'r' has no Neon
         encoding.  */
      switch (mode)
        {
        case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
        case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
        case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
        case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
        case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
        case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
        case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
        default: abort ();
        }

      if (thumb_mode)
        inst.instruction |= 0xfc000000;
      else
        inst.instruction |= 0xf0000000;
    }
}
18503
static void
do_vrintx (void)
{
  /* VRINTX: dispatch to the common vrint encoder with mode 'x'.  */
  do_vrint_1 (neon_cvt_mode_x);
}
18509
static void
do_vrintz (void)
{
  /* VRINTZ: dispatch to the common vrint encoder with mode 'z'.  */
  do_vrint_1 (neon_cvt_mode_z);
}
18515
static void
do_vrintr (void)
{
  /* VRINTR: dispatch to the common vrint encoder with mode 'r'.  */
  do_vrint_1 (neon_cvt_mode_r);
}
18521
static void
do_vrinta (void)
{
  /* VRINTA: dispatch to the common vrint encoder with mode 'a'.  */
  do_vrint_1 (neon_cvt_mode_a);
}
18527
static void
do_vrintn (void)
{
  /* VRINTN: dispatch to the common vrint encoder with mode 'n'.  */
  do_vrint_1 (neon_cvt_mode_n);
}
18533
static void
do_vrintp (void)
{
  /* VRINTP: dispatch to the common vrint encoder with mode 'p'.  */
  do_vrint_1 (neon_cvt_mode_p);
}
18539
static void
do_vrintm (void)
{
  /* VRINTM: dispatch to the common vrint encoder with mode 'm'.  */
  do_vrint_1 (neon_cvt_mode_m);
}
18545
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  /* Validate and pack a scalar operand (Dn[x]) for VCMLA: for 16-bit
     elements the index goes in bit 4 (register must be D0-D15, index
     0-1); for 32-bit elements only index 0 is encodable.  Raises an
     error and returns 0 otherwise.  */
  unsigned reg = NEON_SCALAR_REG (opnd);
  unsigned idx = NEON_SCALAR_INDEX (opnd);

  if (elsize == 32)
    {
      if (idx == 0)
        return reg;
    }
  else if (elsize == 16 && idx < 2 && reg < 16)
    return reg | (idx << 4);

  first_error (_("scalar out of range"));
  return 0;
}
18560
static void
do_vcmla (void)
{
  /* Assemble VCMLA (complex multiply-accumulate with rotation), in
     both the by-scalar and the three-register vector forms.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
              _(BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
              _("expression too complex"));
  /* The rotation is written as 0/90/180/270 and encoded as 0-3.  */
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
              _("immediate out of range"));
  rot /= 90;
  if (inst.operands[2].isscalar)
    {
      /* By-scalar form: VCMLA <Vd>, <Vn>, <Dm>[x], #<rot>.  */
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
                                       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      /* Vector form: VCMLA <Vd>, <Vn>, <Vm>, #<rot>.  */
      enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
                                       N_KEY | N_F16 | N_F32).size;
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
18602
/* Assemble VCADD (vector complex add, Armv8.3-A).  The rotation
   immediate must be 90 or 270 degrees and is encoded as a single bit
   (24).  */

static void
do_vcadd (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				   N_KEY | N_F16 | N_F32).size;
  /* Reuse the generic three-same encoder, then patch in the
     VCADD-specific top bits, rotation and size.  */
  neon_three_same (neon_quad (rs), 0, -1);
  inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
  inst.instruction |= 0xfc800800;
  inst.instruction |= (rot == 270) << 24;
  inst.instruction |= (size == 32) << 20;
}
18621
/* Dot Product instructions encoding support.  */

/* Encode VSDOT/VUDOT: UNSIGNED_P selects the unsigned variant when
   non-zero.  Supports both the three-same form and the by-scalar
   (indexed) form, which differ in their leading byte.  */

static void
do_neon_dotproduct (int unsigned_p)
{
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
18678
/* VSDOT: Dot Product over signed 8-bit elements.  */

static void
do_neon_dotproduct_s (void)
{
  do_neon_dotproduct (0);
}
18686
/* VUDOT: Dot Product over unsigned 8-bit elements.  */

static void
do_neon_dotproduct_u (void)
{
  do_neon_dotproduct (1);
}
18694
/* Crypto v1 instructions.  */

/* Encode a two-register crypto instruction (AES*, SHA1H, SHA1SU1,
   SHA256SU0).  ELTTYPE is the required element type; OP is the
   per-instruction opcode field, or -1 when the encoding has none.
   Crypto instructions may not be predicated.  */

static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  /* The type check above succeeded; discard any diagnostic it left
     pending.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  /* Select the leading byte for the Thumb or ARM encoding space.  */
  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
18720
/* Encode a three-register crypto instruction (SHA1C/P/M/SU0,
   SHA256H/H2/SU1).  U distinguishes the SHA1 (0) and SHA256 (1)
   groups; OP is the two-bit selector passed through the three-same
   size field.  Crypto instructions may not be predicated.  */

static void
do_crypto_3op_1 (int u, int op)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  /* The type check above succeeded; discard any diagnostic it left
     pending.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
18735
/* AESE: AES single-round encryption.  */

static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}
18741
/* AESD: AES single-round decryption.  */

static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}
18747
/* AESMC: AES mix columns.  */

static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}
18753
/* AESIMC: AES inverse mix columns.  */

static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}
18759
/* SHA1C: SHA1 hash update (choose).  */

static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}
18765
/* SHA1P: SHA1 hash update (parity).  */

static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}
18771
/* SHA1M: SHA1 hash update (majority).  */

static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}
18777
/* SHA1SU0: SHA1 schedule update 0.  */

static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}
18783
/* SHA256H: SHA256 hash update part 1.  */

static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}
18789
/* SHA256H2: SHA256 hash update part 2.  */

static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}
18795
/* SHA256SU1: SHA256 schedule update 1.  */

static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}
18801
/* SHA1H: SHA1 fixed rotate.  */

static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}
18807
/* SHA1SU1: SHA1 schedule update 1.  */

static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}
18813
/* SHA256SU0: SHA256 schedule update 0.  */

static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
18819
/* Encode a CRC32 instruction.  POLY selects the polynomial variant
   (0: CRC32, 1: CRC32C) and SZ the size field (0: byte, 1: halfword,
   2: word).  The bit positions of Rd and of the size/poly fields
   differ between the ARM and Thumb encodings.  Using r15 for any
   operand is UNPREDICTABLE and warned about.  */

static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
}
18837
/* CRC32B: CRC32 over one byte.  */

static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}
18843
/* CRC32H: CRC32 over one halfword.  */

static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}
18849
/* CRC32W: CRC32 over one word.  */

static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}
18855
/* CRC32CB: CRC32C over one byte.  */

static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}
18861
/* CRC32CH: CRC32C over one halfword.  */

static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}
18867
/* CRC32CW: CRC32C over one word.  */

static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
18873
/* VJCVT: Armv8.3-A Javascript conversion, double-precision to signed
   32-bit integer.  */

static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
18883
18884 \f
18885 /* Overall per-instruction processing. */
18886
18887 /* We need to be able to fix up arbitrary expressions in some statements.
18888 This is so that we can handle symbols that are an arbitrary distance from
18889 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
18890 which returns part of an address in a form which will be valid for
18891 a data instruction. We do this by pushing the expression into a symbol
18892 in the expr_section, and creating a fix for that. */
18893
/* FRAG/WHERE/SIZE locate the fixup; EXP is the target expression,
   PC_REL and RELOC are as for fix_new_exp.  */
static void
fix_new_arm (fragS * frag,
	     int where,
	     short int size,
	     expressionS * exp,
	     int pc_rel,
	     int reloc)
{
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Arbitrary expressions are wrapped in an expr_section symbol so
	 the fixup machinery can handle them.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
18947
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the target expression into the symbol/addend pair frag_var
     expects.  */
  switch (inst.relocs[0].exp.X_op)
    {
    case O_symbol:
      sym = inst.relocs[0].exp.X_add_symbol;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.relocs[0].exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
18979
18980 /* Write a 32-bit thumb instruction to buf. */
18981 static void
18982 put_thumb32_insn (char * buf, unsigned long insn)
18983 {
18984 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
18985 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
18986 }
18987
/* Commit the instruction held in INST to the output: report any pending
   diagnostic, emit either a relaxable frag or the encoded bytes, then
   attach fixups and line debug info.  STR is the source line, used only
   for error messages.  */

static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* An 8-byte ARM "instruction" is the same 4-byte word emitted
	 twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  /* Attach a fixup for every relocation the instruction carries.  */
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    {
      if (inst.relocs[r].type != BFD_RELOC_UNUSED)
	fix_new_arm (frag_now, to - frag_now->fr_literal,
		     inst.size, & inst.relocs[r].exp, inst.relocs[r].pc_rel,
		     inst.relocs[r].type);
    }

  dwarf2_emit_insn (inst.size);
}
19038
/* Emit (or rewrite) a Thumb IT instruction with condition COND and mask
   MASK.  If TO is NULL, new space is taken from the current frag;
   otherwise the instruction at TO is overwritten in place, which is how
   an open automatic IT block gets its mask updated.  Returns the write
   location.  */

static char *
output_it_inst (int cond, int mask, char * to)
{
  unsigned long instruction = 0xbf00;	/* IT encoding skeleton.  */

  mask &= 0xf;
  instruction |= mask;
  instruction |= cond << 4;

  if (to == NULL)
    {
      to = frag_more (2);
#ifdef OBJ_ELF
      dwarf2_emit_insn (2);
#endif
    }

  md_number_to_chars (to, instruction, 2);

  return to;
}
19060
/* Tag values used in struct asm_opcode's tag field.  They describe
   where (if anywhere) a condition code may appear within a mnemonic,
   which drives the affix stripping in opcode_lookup ().  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a scalar
			   conditional suffix, others place 0xF where the
			   condition field would be, others take a vector
			   conditional suffix.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
19095
19096 /* Subroutine of md_assemble, responsible for looking up the primary
19097 opcode from the mnemonic the user wrote. STR points to the
19098 beginning of the mnemonic.
19099
19100 This is not simply a hash table lookup, because of conditional
19101 variants. Most instructions have conditional variants, which are
19102 expressed with a _conditional affix_ to the mnemonic. If we were
19103 to encode each conditional variant as a literal string in the opcode
19104 table, it would have approximately 20,000 entries.
19105
19106 Most mnemonics take this affix as a suffix, and in unified syntax,
19107 'most' is upgraded to 'all'. However, in the divided syntax, some
19108 instructions take the affix as an infix, notably the s-variants of
19109 the arithmetic instructions. Of those instructions, all but six
19110 have the infix appear after the third character of the mnemonic.
19111
19112 Accordingly, the algorithm for looking up primary opcodes given
19113 an identifier is:
19114
19115 1. Look up the identifier in the opcode table.
19116 If we find a match, go to step U.
19117
19118 2. Look up the last two characters of the identifier in the
19119 conditions table. If we find a match, look up the first N-2
19120 characters of the identifier in the opcode table. If we
19121 find a match, go to step CE.
19122
19123 3. Look up the fourth and fifth characters of the identifier in
19124 the conditions table. If we find a match, extract those
19125 characters from the identifier, and look up the remaining
19126 characters in the opcode table. If we find a match, go
19127 to step CM.
19128
19129 4. Fail.
19130
19131 U. Examine the tag field of the opcode structure, in case this is
19132 one of the six instructions with its conditional infix in an
19133 unusual place. If it is, the tag tells us where to find the
19134 infix; look it up in the conditions table and set inst.cond
19135 accordingly. Otherwise, this is an unconditional instruction.
19136 Again set inst.cond accordingly. Return the opcode structure.
19137
19138 CE. Examine the tag field to make sure this is an instruction that
19139 should receive a conditional suffix. If it is not, fail.
19140 Otherwise, set inst.cond from the suffix we already looked up,
19141 and return the opcode structure.
19142
19143 CM. Examine the tag field to make sure this is an instruction that
19144 should receive a conditional infix after the third character.
19145 If it is not, fail. Otherwise, undo the edits to the current
19146 line of input and proceed as for case CE. */
19147
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* A vector-predication suffix is a single character, so the
	 mnemonic must be at least two characters long to carry one.  */
      if (end - base < 2)
	return NULL;
      affix = end - 1;
      cond = (const struct asm_cond *) hash_find_n (arm_vcond_hsh, affix, 1);
      opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
							affix - base);
      /* If this opcode can not be vector predicated then don't accept it with a
	 vector predication code.  */
      if (opcode && !opcode->mayBeVecPred)
	opcode = NULL;
    }
  if (!opcode || !cond)
    {
      /* Cannot have a conditional suffix on a mnemonic of less than two
	 characters.  */
      if (end - base < 3)
	return NULL;

      /* Look for suffixed mnemonic.  */
      affix = end - 2;
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
							affix - base);
    }

  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily splice the two infix characters out of the buffer, look
     up the remaining mnemonic, then restore the input line.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
19322
19323 /* This function generates an initial IT instruction, leaving its block
19324 virtually open for the new instructions. Eventually,
19325 the mask will be updated by now_pred_add_mask () each time
19326 a new instruction needs to be included in the IT block.
19327 Finally, the block is closed with close_automatic_it_block ().
19328 The block closure can be requested either from md_assemble (),
19329 a tencode (), or due to a label hook. */
19330
static void
new_automatic_it_block (int cond)
{
  now_pred.state = AUTOMATIC_PRED_BLOCK;
  now_pred.mask = 0x18;		/* Mask for a single-instruction block.  */
  now_pred.cc = cond;
  now_pred.block_length = 1;
  mapping_state (MAP_THUMB);
  /* Emit the IT now and remember where it lives, so the mask can be
     rewritten in place as more instructions join the block.  */
  now_pred.insn = output_it_inst (cond, now_pred.mask, NULL);
  now_pred.warn_deprecated = FALSE;
  now_pred.insn_cond = TRUE;
}
19343
19344 /* Close an automatic IT block.
19345 See comments in new_automatic_it_block (). */
19346
static void
close_automatic_it_block (void)
{
  now_pred.mask = 0x10;		/* No condition slots left pending.  */
  now_pred.block_length = 0;
}
19353
/* Update the mask of the current automatically-generated IT
   instruction.  See comments in new_automatic_it_block ().  */

static void
now_pred_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
					| ((bitvalue) << (nbit)))
  /* The low bit of the condition distinguishes the block condition (T)
     from its inverse (E) in the IT mask.  */
  const int resulting_bit = (cond & 1);

  now_pred.mask &= 0xf;
  /* Record this instruction's T/E bit in its mask slot...  */
  now_pred.mask = SET_BIT_VALUE (now_pred.mask,
				 resulting_bit,
				 (5 - now_pred.block_length));
  /* ... and push the end-of-mask marker one position down.  */
  now_pred.mask = SET_BIT_VALUE (now_pred.mask,
				 1,
				 ((5 - now_pred.block_length) - 1));
  /* Rewrite the IT instruction that opened this block in place.  */
  output_it_inst (now_pred.cc, now_pred.mask, now_pred.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
19377
/* The IT blocks handling machinery is accessed through these functions:
19379 it_fsm_pre_encode () from md_assemble ()
19380 set_pred_insn_type () optional, from the tencode functions
19381 set_pred_insn_type_last () ditto
19382 in_pred_block () ditto
19383 it_fsm_post_encode () from md_assemble ()
19384 force_automatic_it_block_close () from label handling functions
19385
19386 Rationale:
19387 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
19388 initializing the IT insn type with a generic initial value depending
19389 on the inst.condition.
19390 2) During the tencode function, two things may happen:
19391 a) The tencode function overrides the IT insn type by
19392 calling either set_pred_insn_type (type) or
19393 set_pred_insn_type_last ().
19394 b) The tencode function queries the IT block state by
19395 calling in_pred_block () (i.e. to determine narrow/not narrow mode).
19396
19397 Both set_pred_insn_type and in_pred_block run the internal FSM state
19398 handling function (handle_pred_state), because: a) setting the IT insn
19399 type may incur in an invalid state (exiting the function),
19400 and b) querying the state requires the FSM to be updated.
19401 Specifically we want to avoid creating an IT block for conditional
19402 branches, so it_fsm_pre_encode is actually a guess and we can't
19403 determine whether an IT block is required until the tencode () routine
     has decided what type of instruction this actually is.
19405 Because of this, if set_pred_insn_type and in_pred_block have to be
19406 used, set_pred_insn_type has to be called first.
19407
19408 set_pred_insn_type_last () is a wrapper of set_pred_insn_type (type),
19409 that determines the insn IT type depending on the inst.cond code.
19410 When a tencode () routine encodes an instruction that can be
19411 either outside an IT block, or, in the case of being inside, has to be
19412 the last one, set_pred_insn_type_last () will determine the proper
19413 IT instruction type based on the inst.cond code. Otherwise,
19414 set_pred_insn_type can be called for overriding that logic or
19415 for covering other cases.
19416
19417 Calling handle_pred_state () may not transition the IT block state to
19418 OUTSIDE_PRED_BLOCK immediately, since the (current) state could be
19419 still queried. Instead, if the FSM determines that the state should
19420 be transitioned to OUTSIDE_PRED_BLOCK, a flag is marked to be closed
19421 after the tencode () function: that's what it_fsm_post_encode () does.
19422
19423 Since in_pred_block () calls the state handling function to get an
19424 updated state, an error may occur (due to invalid insns combination).
19425 In that case, inst.error is set.
19426 Therefore, inst.error has to be checked after the execution of
19427 the tencode () routine.
19428
19429 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
19430 any pending state change (if any) that didn't take place in
19431 handle_pred_state () as explained above. */
19432
19433 static void
19434 it_fsm_pre_encode (void)
19435 {
19436 if (inst.cond != COND_ALWAYS)
19437 inst.pred_insn_type = INSIDE_IT_INSN;
19438 else
19439 inst.pred_insn_type = OUTSIDE_PRED_INSN;
19440
19441 now_pred.state_handled = 0;
19442 }
19443
19444 /* IT state FSM handling function. */
19445 /* MVE instructions and non-MVE instructions are handled differently because of
19446 the introduction of VPT blocks.
19447 Specifications say that any non-MVE instruction inside a VPT block is
19448 UNPREDICTABLE, with the exception of the BKPT instruction. Whereas most MVE
19449 instructions are deemed to be UNPREDICTABLE if inside an IT block. For the
19450 few exceptions we have MVE_UNPREDICABLE_INSN.
19451 The error messages provided depending on the different combinations possible
19452 are described in the cases below:
19453 For 'most' MVE instructions:
19454 1) In an IT block, with an IT code: syntax error
19455 2) In an IT block, with a VPT code: error: must be in a VPT block
19456 3) In an IT block, with no code: warning: UNPREDICTABLE
19457 4) In a VPT block, with an IT code: syntax error
19458 5) In a VPT block, with a VPT code: OK!
19459 6) In a VPT block, with no code: error: missing code
19460 7) Outside a pred block, with an IT code: error: syntax error
19461 8) Outside a pred block, with a VPT code: error: should be in a VPT block
19462 9) Outside a pred block, with no code: OK!
19463 For non-MVE instructions:
19464 10) In an IT block, with an IT code: OK!
19465 11) In an IT block, with a VPT code: syntax error
19466 12) In an IT block, with no code: error: missing code
19467 13) In a VPT block, with an IT code: error: should be in an IT block
19468 14) In a VPT block, with a VPT code: syntax error
19469 15) In a VPT block, with no code: UNPREDICTABLE
19470 16) Outside a pred block, with an IT code: error: should be in an IT block
19471 17) Outside a pred block, with a VPT code: syntax error
19472 18) Outside a pred block, with no code: OK!
19473 */
19474
19475
19476 static int
19477 handle_pred_state (void)
19478 {
19479 now_pred.state_handled = 1;
19480 now_pred.insn_cond = FALSE;
19481
19482 switch (now_pred.state)
19483 {
19484 case OUTSIDE_PRED_BLOCK:
19485 switch (inst.pred_insn_type)
19486 {
19487 case MVE_UNPREDICABLE_INSN:
19488 case MVE_OUTSIDE_PRED_INSN:
19489 if (inst.cond < COND_ALWAYS)
19490 {
19491 /* Case 7: Outside a pred block, with an IT code: error: syntax
19492 error. */
19493 inst.error = BAD_SYNTAX;
19494 return FAIL;
19495 }
19496 /* Case 9: Outside a pred block, with no code: OK! */
19497 break;
19498 case OUTSIDE_PRED_INSN:
19499 if (inst.cond > COND_ALWAYS)
19500 {
19501 /* Case 17: Outside a pred block, with a VPT code: syntax error.
19502 */
19503 inst.error = BAD_SYNTAX;
19504 return FAIL;
19505 }
19506 /* Case 18: Outside a pred block, with no code: OK! */
19507 break;
19508
19509 case INSIDE_VPT_INSN:
19510 /* Case 8: Outside a pred block, with a VPT code: error: should be in
19511 a VPT block. */
19512 inst.error = BAD_OUT_VPT;
19513 return FAIL;
19514
19515 case INSIDE_IT_INSN:
19516 case INSIDE_IT_LAST_INSN:
19517 if (inst.cond < COND_ALWAYS)
19518 {
19519 /* Case 16: Outside a pred block, with an IT code: error: should
19520 be in an IT block. */
19521 if (thumb_mode == 0)
19522 {
19523 if (unified_syntax
19524 && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
19525 as_tsktsk (_("Warning: conditional outside an IT block"\
19526 " for Thumb."));
19527 }
19528 else
19529 {
19530 if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
19531 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
19532 {
19533 /* Automatically generate the IT instruction. */
19534 new_automatic_it_block (inst.cond);
19535 if (inst.pred_insn_type == INSIDE_IT_LAST_INSN)
19536 close_automatic_it_block ();
19537 }
19538 else
19539 {
19540 inst.error = BAD_OUT_IT;
19541 return FAIL;
19542 }
19543 }
19544 break;
19545 }
19546 else if (inst.cond > COND_ALWAYS)
19547 {
19548 /* Case 17: Outside a pred block, with a VPT code: syntax error.
19549 */
19550 inst.error = BAD_SYNTAX;
19551 return FAIL;
19552 }
19553 else
19554 gas_assert (0);
19555 case IF_INSIDE_IT_LAST_INSN:
19556 case NEUTRAL_IT_INSN:
19557 break;
19558
19559 case VPT_INSN:
19560 if (inst.cond != COND_ALWAYS)
19561 first_error (BAD_SYNTAX);
19562 now_pred.state = MANUAL_PRED_BLOCK;
19563 now_pred.block_length = 0;
19564 now_pred.type = VECTOR_PRED;
19565 now_pred.cc = 0;
19566 break;
19567 case IT_INSN:
19568 now_pred.state = MANUAL_PRED_BLOCK;
19569 now_pred.block_length = 0;
19570 now_pred.type = SCALAR_PRED;
19571 break;
19572 }
19573 break;
19574
19575 case AUTOMATIC_PRED_BLOCK:
19576 /* Three things may happen now:
19577 a) We should increment current it block size;
19578 b) We should close current it block (closing insn or 4 insns);
19579 c) We should close current it block and start a new one (due
19580 to incompatible conditions or
19581 4 insns-length block reached). */
19582
19583 switch (inst.pred_insn_type)
19584 {
19585 case INSIDE_VPT_INSN:
19586 case VPT_INSN:
19587 case MVE_UNPREDICABLE_INSN:
19588 case MVE_OUTSIDE_PRED_INSN:
19589 gas_assert (0);
19590 case OUTSIDE_PRED_INSN:
19591 /* The closure of the block shall happen immediately,
19592 so any in_pred_block () call reports the block as closed. */
19593 force_automatic_it_block_close ();
19594 break;
19595
19596 case INSIDE_IT_INSN:
19597 case INSIDE_IT_LAST_INSN:
19598 case IF_INSIDE_IT_LAST_INSN:
19599 now_pred.block_length++;
19600
19601 if (now_pred.block_length > 4
19602 || !now_pred_compatible (inst.cond))
19603 {
19604 force_automatic_it_block_close ();
19605 if (inst.pred_insn_type != IF_INSIDE_IT_LAST_INSN)
19606 new_automatic_it_block (inst.cond);
19607 }
19608 else
19609 {
19610 now_pred.insn_cond = TRUE;
19611 now_pred_add_mask (inst.cond);
19612 }
19613
19614 if (now_pred.state == AUTOMATIC_PRED_BLOCK
19615 && (inst.pred_insn_type == INSIDE_IT_LAST_INSN
19616 || inst.pred_insn_type == IF_INSIDE_IT_LAST_INSN))
19617 close_automatic_it_block ();
19618 break;
19619
19620 case NEUTRAL_IT_INSN:
19621 now_pred.block_length++;
19622 now_pred.insn_cond = TRUE;
19623
19624 if (now_pred.block_length > 4)
19625 force_automatic_it_block_close ();
19626 else
19627 now_pred_add_mask (now_pred.cc & 1);
19628 break;
19629
19630 case IT_INSN:
19631 close_automatic_it_block ();
19632 now_pred.state = MANUAL_PRED_BLOCK;
19633 break;
19634 }
19635 break;
19636
19637 case MANUAL_PRED_BLOCK:
19638 {
19639 int cond, is_last;
19640 if (now_pred.type == SCALAR_PRED)
19641 {
19642 /* Check conditional suffixes. */
19643 cond = now_pred.cc ^ ((now_pred.mask >> 4) & 1) ^ 1;
19644 now_pred.mask <<= 1;
19645 now_pred.mask &= 0x1f;
19646 is_last = (now_pred.mask == 0x10);
19647 }
19648 else
19649 {
19650 now_pred.cc ^= (now_pred.mask >> 4);
19651 cond = now_pred.cc + 0xf;
19652 now_pred.mask <<= 1;
19653 now_pred.mask &= 0x1f;
19654 is_last = now_pred.mask == 0x10;
19655 }
19656 now_pred.insn_cond = TRUE;
19657
19658 switch (inst.pred_insn_type)
19659 {
19660 case OUTSIDE_PRED_INSN:
19661 if (now_pred.type == SCALAR_PRED)
19662 {
19663 if (inst.cond == COND_ALWAYS)
19664 {
19665 /* Case 12: In an IT block, with no code: error: missing
19666 code. */
19667 inst.error = BAD_NOT_IT;
19668 return FAIL;
19669 }
19670 else if (inst.cond > COND_ALWAYS)
19671 {
19672 /* Case 11: In an IT block, with a VPT code: syntax error.
19673 */
19674 inst.error = BAD_SYNTAX;
19675 return FAIL;
19676 }
19677 else if (thumb_mode)
19678 {
19679 /* This is for some special cases where a non-MVE
19680 instruction is not allowed in an IT block, such as cbz,
19681 but are put into one with a condition code.
19682 You could argue this should be a syntax error, but we
19683 gave the 'not allowed in IT block' diagnostic in the
19684 past so we will keep doing so. */
19685 inst.error = BAD_NOT_IT;
19686 return FAIL;
19687 }
19688 break;
19689 }
19690 else
19691 {
19692 /* Case 15: In a VPT block, with no code: UNPREDICTABLE. */
19693 as_tsktsk (MVE_NOT_VPT);
19694 return SUCCESS;
19695 }
19696 case MVE_OUTSIDE_PRED_INSN:
19697 if (now_pred.type == SCALAR_PRED)
19698 {
19699 if (inst.cond == COND_ALWAYS)
19700 {
19701 /* Case 3: In an IT block, with no code: warning:
19702 UNPREDICTABLE. */
19703 as_tsktsk (MVE_NOT_IT);
19704 return SUCCESS;
19705 }
19706 else if (inst.cond < COND_ALWAYS)
19707 {
19708 /* Case 1: In an IT block, with an IT code: syntax error.
19709 */
19710 inst.error = BAD_SYNTAX;
19711 return FAIL;
19712 }
19713 else
19714 gas_assert (0);
19715 }
19716 else
19717 {
19718 if (inst.cond < COND_ALWAYS)
19719 {
19720 /* Case 4: In a VPT block, with an IT code: syntax error.
19721 */
19722 inst.error = BAD_SYNTAX;
19723 return FAIL;
19724 }
19725 else if (inst.cond == COND_ALWAYS)
19726 {
19727 /* Case 6: In a VPT block, with no code: error: missing
19728 code. */
19729 inst.error = BAD_NOT_VPT;
19730 return FAIL;
19731 }
19732 else
19733 {
19734 gas_assert (0);
19735 }
19736 }
19737 case MVE_UNPREDICABLE_INSN:
19738 as_tsktsk (now_pred.type == SCALAR_PRED ? MVE_NOT_IT : MVE_NOT_VPT);
19739 return SUCCESS;
19740 case INSIDE_IT_INSN:
19741 if (inst.cond > COND_ALWAYS)
19742 {
19743 /* Case 11: In an IT block, with a VPT code: syntax error. */
19744 /* Case 14: In a VPT block, with a VPT code: syntax error. */
19745 inst.error = BAD_SYNTAX;
19746 return FAIL;
19747 }
19748 else if (now_pred.type == SCALAR_PRED)
19749 {
19750 /* Case 10: In an IT block, with an IT code: OK! */
19751 if (cond != inst.cond)
19752 {
19753 inst.error = now_pred.type == SCALAR_PRED ? BAD_IT_COND :
19754 BAD_VPT_COND;
19755 return FAIL;
19756 }
19757 }
19758 else
19759 {
19760 /* Case 13: In a VPT block, with an IT code: error: should be
19761 in an IT block. */
19762 inst.error = BAD_OUT_IT;
19763 return FAIL;
19764 }
19765 break;
19766
19767 case INSIDE_VPT_INSN:
19768 if (now_pred.type == SCALAR_PRED)
19769 {
19770 /* Case 2: In an IT block, with a VPT code: error: must be in a
19771 VPT block. */
19772 inst.error = BAD_OUT_VPT;
19773 return FAIL;
19774 }
19775 /* Case 5: In a VPT block, with a VPT code: OK! */
19776 else if (cond != inst.cond)
19777 {
19778 inst.error = BAD_VPT_COND;
19779 return FAIL;
19780 }
19781 break;
19782 case INSIDE_IT_LAST_INSN:
19783 case IF_INSIDE_IT_LAST_INSN:
19784 if (now_pred.type == VECTOR_PRED || inst.cond > COND_ALWAYS)
19785 {
19786 /* Case 4: In a VPT block, with an IT code: syntax error. */
19787 /* Case 11: In an IT block, with a VPT code: syntax error. */
19788 inst.error = BAD_SYNTAX;
19789 return FAIL;
19790 }
19791 else if (cond != inst.cond)
19792 {
19793 inst.error = BAD_IT_COND;
19794 return FAIL;
19795 }
19796 if (!is_last)
19797 {
19798 inst.error = BAD_BRANCH;
19799 return FAIL;
19800 }
19801 break;
19802
19803 case NEUTRAL_IT_INSN:
19804 /* The BKPT instruction is unconditional even in a IT or VPT
19805 block. */
19806 break;
19807
19808 case IT_INSN:
19809 if (now_pred.type == SCALAR_PRED)
19810 {
19811 inst.error = BAD_IT_IT;
19812 return FAIL;
19813 }
19814 /* fall through. */
19815 case VPT_INSN:
19816 if (inst.cond == COND_ALWAYS)
19817 {
19818 /* Executing a VPT/VPST instruction inside an IT block or a
19819 VPT/VPST/IT instruction inside a VPT block is UNPREDICTABLE.
19820 */
19821 if (now_pred.type == SCALAR_PRED)
19822 as_tsktsk (MVE_NOT_IT);
19823 else
19824 as_tsktsk (MVE_NOT_VPT);
19825 return SUCCESS;
19826 }
19827 else
19828 {
19829 /* VPT/VPST do not accept condition codes. */
19830 inst.error = BAD_SYNTAX;
19831 return FAIL;
19832 }
19833 }
19834 }
19835 break;
19836 }
19837
19838 return SUCCESS;
19839 }
19840
/* Describes one class of 16-bit Thumb instruction encodings, matched by
   comparing (insn & MASK) against PATTERN.  DESCRIPTION names the class
   in diagnostics.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Expected bits after masking.  */
  unsigned long mask;		/* Which encoding bits to compare.  */
  const char* description;	/* Translatable text naming the class.  */
};
19847
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Scanned linearly by it_fsm_post_encode; the all-zero entry
   terminates the list.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
19862
/* Run after an instruction has been encoded: drive the predication FSM
   (if the encoder did not already do so), emit the ARMv8-A/R
   performance-deprecation warnings for instructions inside IT blocks,
   and close the block when its last conditional slot has been used.  */
static void
it_fsm_post_encode (void)
{
  int is_last;

  /* Encoders normally call handle_pred_state themselves; catch up here
     if this one did not.  */
  if (!now_pred.state_handled)
    handle_pred_state ();

  /* Deprecation warnings apply only to conditional instructions, once
     per block, on ARMv8 A/R profiles (not M profile).  */
  if (now_pred.insn_cond
      && !now_pred.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
      && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
    {
      /* Values >= 0x10000 are 32-bit Thumb encodings.  */
      if (inst.instruction >= 0x10000)
	{
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		       "performance deprecated in ARMv8-A and ARMv8-R"));
	  now_pred.warn_deprecated = TRUE;
	}
      else
	{
	  /* 16-bit encoding: check against the table of deprecated
	     instruction classes.  */
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb "
			       "instructions of the following class are "
			       "performance deprecated in ARMv8-A and "
			       "ARMv8-R: %s"), p->description);
		  now_pred.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      if (now_pred.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		       "instruction are performance deprecated in ARMv8-A and "
		       "ARMv8-R"));
	  now_pred.warn_deprecated = TRUE;
	}
    }

  /* A mask of exactly 0x10 means every slot of the block has now been
     consumed; return the FSM to the outside-block state.  */
  is_last = (now_pred.mask == 0x10);
  if (is_last)
    {
      now_pred.state = OUTSIDE_PRED_BLOCK;
      now_pred.mask = 0;
    }
}
19919
19920 static void
19921 force_automatic_it_block_close (void)
19922 {
19923 if (now_pred.state == AUTOMATIC_PRED_BLOCK)
19924 {
19925 close_automatic_it_block ();
19926 now_pred.state = OUTSIDE_PRED_BLOCK;
19927 now_pred.mask = 0;
19928 }
19929 }
19930
19931 static int
19932 in_pred_block (void)
19933 {
19934 if (!now_pred.state_handled)
19935 handle_pred_state ();
19936
19937 return now_pred.state != OUTSIDE_PRED_BLOCK;
19938 }
19939
19940 /* Whether OPCODE only has T32 encoding. Since this function is only used by
19941 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
19942 here, hence the "known" in the function name. */
19943
19944 static bfd_boolean
19945 known_t32_only_insn (const struct asm_opcode *opcode)
19946 {
19947 /* Original Thumb-1 wide instruction. */
19948 if (opcode->tencode == do_t_blx
19949 || opcode->tencode == do_t_branch23
19950 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
19951 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
19952 return TRUE;
19953
19954 /* Wide-only instruction added to ARMv8-M Baseline. */
19955 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
19956 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
19957 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
19958 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
19959 return TRUE;
19960
19961 return FALSE;
19962 }
19963
19964 /* Whether wide instruction variant can be used if available for a valid OPCODE
19965 in ARCH. */
19966
19967 static bfd_boolean
19968 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
19969 {
19970 if (known_t32_only_insn (opcode))
19971 return TRUE;
19972
19973 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
19974 of variant T3 of B.W is checked in do_t_branch. */
19975 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
19976 && opcode->tencode == do_t_branch)
19977 return TRUE;
19978
19979 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
19980 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
19981 && opcode->tencode == do_t_mov_cmp
19982 /* Make sure CMP instruction is not affected. */
19983 && opcode->aencode == do_mov)
19984 return TRUE;
19985
19986 /* Wide instruction variants of all instructions with narrow *and* wide
19987 variants become available with ARMv6t2. Other opcodes are either
19988 narrow-only or wide-only and are thus available if OPCODE is valid. */
19989 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
19990 return TRUE;
19991
19992 /* OPCODE with narrow only instruction variant or wide variant not
19993 available. */
19994 return FALSE;
19995 }
19996
/* Main assembly entry point: assemble the instruction in STR.  Looks up
   the mnemonic, parses the operands, encodes for the current mode (ARM
   or Thumb), runs the predication FSM, and emits the result via
   output_inst.  Errors are reported through inst.error / as_bad.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction scratch state.  */
  memset (&inst, '\0', sizeof (inst));
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    inst.relocs[r].type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the pred_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 0xe800..0xffff is the first halfword of a 32-bit encoding.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
20191
/* At end of assembly, warn about any IT/VPT block left open by a manual
   IT/VPT/VPST instruction whose conditional slots were never filled.  */
static void
check_pred_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  /* Predication state is tracked per section for ELF.  */
  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_pred.state
	== MANUAL_PRED_BLOCK)
      {
	/* NOTE(review): this consults the global now_pred.type even though
	   the open-block test above uses the per-section state — if a
	   different section was assembled last, the IT vs. VPT wording may
	   be wrong for SECT.  Verify whether the section's own pred type
	   should be used here.  */
	if (now_pred.type == SCALAR_PRED)
	  as_warn (_("section '%s' finished with an open IT block."),
		   sect->name);
	else
	  as_warn (_("section '%s' finished with an open VPT/VPST block."),
		   sect->name);
      }
#else
  if (now_pred.state == MANUAL_PRED_BLOCK)
    {
      if (now_pred.type == SCALAR_PRED)
	as_warn (_("file finished with an open IT block."));
      else
	as_warn (_("file finished with an open VPT/VPST block."));
    }
#endif
}
20219
/* Various frobbings of labels and their addresses.  */

/* Called at the start of each input line: forget the label (if any)
   seen on the previous line so md_assemble does not re-align it.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
20227
/* Called whenever a label SYM is defined.  Records it for later
   alignment by md_assemble, tags it with the current Thumb/interwork
   state, closes any implicit IT block, optionally marks it as a Thumb
   function, and emits DWARF line info for it.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  /* Tag the symbol with the current instruction-set state.  */
  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any automatically-generated IT block.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
20286
20287 bfd_boolean
20288 arm_data_in_code (void)
20289 {
20290 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
20291 {
20292 *input_line_pointer = '/';
20293 input_line_pointer += 5;
20294 *input_line_pointer = 0;
20295 return TRUE;
20296 }
20297
20298 return FALSE;
20299 }
20300
20301 char *
20302 arm_canonicalize_symbol_name (char * name)
20303 {
20304 int len;
20305
20306 if (thumb_mode && (len = strlen (name)) > 5
20307 && streq (name + len - 5, "/data"))
20308 *(name + len - 5) = 0;
20309
20310 return name;
20311 }
20312 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.	Some registers
   also have mixed-case names.	*/

/* Helper macros for building reg_names entries; REGDEF maps a literal
   name S to number N of register type T, REGNUM pastes the prefix and
   number into the name, and the REGSET* macros expand whole banks.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
/* REGSETH, REGSET2, REGNUM2 and SPLRBANK intentionally remain defined.  */
#undef REGDEF
#undef REGNUM
#undef REGSET
20462
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every ordering of each combination of the
   f/s/x/c flags is listed explicitly so that any permutation the user
   writes after "_" is accepted.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
20541
20542 /* Table of V7M psr names. */
20543 static const struct asm_psr v7m_psrs[] =
20544 {
20545 {"apsr", 0x0 }, {"APSR", 0x0 },
20546 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
20547 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
20548 {"psr", 0x3 }, {"PSR", 0x3 },
20549 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
20550 {"ipsr", 0x5 }, {"IPSR", 0x5 },
20551 {"epsr", 0x6 }, {"EPSR", 0x6 },
20552 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
20553 {"msp", 0x8 }, {"MSP", 0x8 },
20554 {"psp", 0x9 }, {"PSP", 0x9 },
20555 {"msplim", 0xa }, {"MSPLIM", 0xa },
20556 {"psplim", 0xb }, {"PSPLIM", 0xb },
20557 {"primask", 0x10}, {"PRIMASK", 0x10},
20558 {"basepri", 0x11}, {"BASEPRI", 0x11},
20559 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
20560 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
20561 {"control", 0x14}, {"CONTROL", 0x14},
20562 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
20563 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
20564 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
20565 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
20566 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
20567 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
20568 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
20569 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
20570 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
20571 };
20572
/* Table of all shift-in-operand names.  Each shift kind appears with
   both its lower- and upper-case spelling; "asl"/"ASL" is accepted as
   a synonym for logical-shift-left (both map to SHIFT_LSL).  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
20583
/* Table of all explicit relocation names (the :name: operand prefixes).
   Each relocation is listed with both its lower- and upper-case
   spelling.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32},  { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32},  { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL},  { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
	{ "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
  { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
	{ "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
	{ "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
   { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC },      { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
   { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC },    { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
   /* Fixed typo: was "GOTTPOFF_FDIC", which made the upper-case spelling
      of the FDPIC initial-exec TLS relocation unusable.  */
   { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC },   { "GOTTPOFF_FDPIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
};
#endif
20617
/* Table of all conditional affixes.  The value is the 4-bit COND field
   encoding.  Some encodings have several accepted spellings:
   "cs"/"hs" share 0x2 and "cc"/"ul"/"lo" share 0x3.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
/* Vector condition affixes "t" and "e".  Their values deliberately lie
   outside the 4-bit COND space used by the table above.
   NOTE(review): presumably the VPT-block "then"/"else" suffixes for MVE
   predication -- confirm against the predication handling code.  */
static const struct asm_cond vconds[] =
{
  {"t", 0xf},
  {"e", 0x10}
};
20642
/* UL_BARRIER expands to two table entries for one barrier option: one
   with the lower-case spelling L and one with the upper-case spelling U,
   both carrying the same option CODE and the same required feature
   FEAT.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Table of barrier option names for DMB/DSB/ISB-style operands.  Note
   that several names alias the same code (e.g. "sh"/"ish" -> 0xb) and
   that the load-only variants require ARM_EXT_V8.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

/* Only needed to build the table above.  */
#undef UL_BARRIER
20668
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  Callers must then spell out the full OP_xxx enumerators.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
20693
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  Each expands to one asm_opcode
   initializer: { name, operands, tag, ARM opcode, Thumb opcode,
   ARM variant, Thumb variant, ARM encoder, Thumb encoder, mve_p }.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
/* As TxC3, but the infixed form is marked deprecated
   (OT_cinfix3_deprecated).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en, 0 }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Note: unlike CE, C3 stringizes its mnemonic argument.  */
#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Thumb-only variants of TCE and TUE.  */
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The
   Thumb-2 opcode is the ARM one prefixed with an 0xe (AL) condition
   nibble.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }
20794
/* Build a mnemonic of the form m1<infix>m3 where m2 is a (possibly
   empty) condition infix.  The sizeof expressions compute the operand
   tag: an empty infix gives OT_odd_infix_unc, otherwise the tag records
   the infix position (the length of m1) via OT_odd_infix_0.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Instantiate xCM_ for the bare mnemonic and for every condition
   infix (including the cs/hs and cc/ul/lo synonyms).  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

/* ARM-only unconditional mnemonic (note: stringizes MNEM, unlike TUE).  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* As UE, but tagged OT_unconditionalF (0xF ARM condition field).  */
#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
20826
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  The trailing mve_p flag marks whether the mnemonic may be
   MVE-predicated.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag, mve_p)		\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,	\
    THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define NCE(mnem, op, nops, ops, enc)				\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define NCEF(mnem, op, nops, ops, enc)				\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag, mve_p)		\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define nCE(mnem, op, nops, ops, enc)				\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define nCEF(mnem, op, nops, ops, enc)				\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* MVE mnemonic with conditional suffix (F form); the opcode fields are
   M_MNEM enumerators and the entry is marked MVE-predicable
   (trailing 1).  */
#define mCEF(mnem, op, nops, ops, enc)				\
  { #mnem, OPS##nops ops, OT_csuffixF, M_MNEM##op, M_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }


/* nCEF but for MVE predicated instructions.  */
#define mnCEF(mnem, op, nops, ops, enc)				\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* nCE but for MVE predicated instructions.  */
#define mnCE(mnem, op, nops, ops, enc)				\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NUF but for potentially MVE predicated instructions.  */
#define MNUF(mnem, op, nops, ops, enc)				\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nUF but for potentially MVE predicated instructions.  */
#define mnUF(mnem, op, nops, ops, enc)				\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* ToC but for potentially MVE predicated instructions.  */
#define mToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 1 }

/* NCE but for MVE predicated instructions.  */
#define MNCE(mnem, op, nops, ops, enc)				\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NCEF but for MVE predicated instructions.  */
#define MNCEF(mnem, op, nops, ops, enc)				\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* Allows table entries with no encoding function: "do_##ae" expands to
   do_0 (i.e. 0 / no encoder) when 0 is passed as the encoder name.  */
#define do_0 0
20900
20901 static const struct asm_opcode insns[] =
20902 {
20903 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
20904 #define THUMB_VARIANT & arm_ext_v4t
20905 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
20906 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
20907 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
20908 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
20909 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
20910 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
20911 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
20912 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
20913 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
20914 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
20915 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
20916 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
20917 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
20918 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
20919 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
20920 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
20921
20922 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
20923 for setting PSR flag bits. They are obsolete in V6 and do not
20924 have Thumb equivalents. */
20925 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
20926 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
20927 CL("tstp", 110f000, 2, (RR, SH), cmp),
20928 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
20929 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
20930 CL("cmpp", 150f000, 2, (RR, SH), cmp),
20931 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
20932 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
20933 CL("cmnp", 170f000, 2, (RR, SH), cmp),
20934
20935 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
20936 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
20937 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
20938 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
20939
20940 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
20941 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
20942 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
20943 OP_RRnpc),
20944 OP_ADDRGLDR),ldst, t_ldst),
20945 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
20946
20947 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20948 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20949 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20950 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20951 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20952 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20953
20954 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
20955 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
20956
20957 /* Pseudo ops. */
20958 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
20959 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
20960 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
20961 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
20962
20963 /* Thumb-compatibility pseudo ops. */
20964 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
20965 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
20966 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
20967 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
20968 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
20969 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
20970 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
20971 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
20972 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
20973 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
20974 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
20975 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
20976
20977 /* These may simplify to neg. */
20978 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
20979 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
20980
20981 #undef THUMB_VARIANT
20982 #define THUMB_VARIANT & arm_ext_os
20983
20984 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
20985 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
20986
20987 #undef THUMB_VARIANT
20988 #define THUMB_VARIANT & arm_ext_v6
20989
20990 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
20991
20992 /* V1 instructions with no Thumb analogue prior to V6T2. */
20993 #undef THUMB_VARIANT
20994 #define THUMB_VARIANT & arm_ext_v6t2
20995
20996 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
20997 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
20998 CL("teqp", 130f000, 2, (RR, SH), cmp),
20999
21000 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
21001 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
21002 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
21003 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
21004
21005 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21006 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21007
21008 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21009 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21010
21011 /* V1 instructions with no Thumb analogue at all. */
21012 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
21013 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
21014
21015 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
21016 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
21017 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
21018 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
21019 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
21020 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
21021 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
21022 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
21023
21024 #undef ARM_VARIANT
21025 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
21026 #undef THUMB_VARIANT
21027 #define THUMB_VARIANT & arm_ext_v4t
21028
21029 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
21030 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
21031
21032 #undef THUMB_VARIANT
21033 #define THUMB_VARIANT & arm_ext_v6t2
21034
21035 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
21036 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
21037
21038 /* Generic coprocessor instructions. */
21039 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
21040 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21041 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21042 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21043 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21044 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
21045 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
21046
21047 #undef ARM_VARIANT
21048 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
21049
21050 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
21051 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
21052
21053 #undef ARM_VARIANT
21054 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
21055 #undef THUMB_VARIANT
21056 #define THUMB_VARIANT & arm_ext_msr
21057
21058 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
21059 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
21060
21061 #undef ARM_VARIANT
21062 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
21063 #undef THUMB_VARIANT
21064 #define THUMB_VARIANT & arm_ext_v6t2
21065
21066 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
21067 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
21068 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
21069 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
21070 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
21071 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
21072 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
21073 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
21074
21075 #undef ARM_VARIANT
21076 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
21077 #undef THUMB_VARIANT
21078 #define THUMB_VARIANT & arm_ext_v4t
21079
21080 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21081 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21082 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21083 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21084 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21085 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21086
21087 #undef ARM_VARIANT
21088 #define ARM_VARIANT & arm_ext_v4t_5
21089
21090 /* ARM Architecture 4T. */
21091 /* Note: bx (and blx) are required on V5, even if the processor does
21092 not support Thumb. */
21093 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
21094
21095 #undef ARM_VARIANT
21096 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
21097 #undef THUMB_VARIANT
21098 #define THUMB_VARIANT & arm_ext_v5t
21099
21100 /* Note: blx has 2 variants; the .value coded here is for
21101 BLX(2). Only this variant has conditional execution. */
21102 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
21103 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
21104
21105 #undef THUMB_VARIANT
21106 #define THUMB_VARIANT & arm_ext_v6t2
21107
21108 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
21109 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21110 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21111 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21112 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21113 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
21114 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
21115 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
21116
21117 #undef ARM_VARIANT
21118 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
21119 #undef THUMB_VARIANT
21120 #define THUMB_VARIANT & arm_ext_v5exp
21121
21122 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21123 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21124 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21125 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21126
21127 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21128 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21129
21130 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
21131 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
21132 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
21133 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
21134
21135 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21136 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21137 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21138 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21139
21140 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21141 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21142
21143 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
21144 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
21145 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
21146 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
21147
21148 #undef ARM_VARIANT
21149 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
21150 #undef THUMB_VARIANT
21151 #define THUMB_VARIANT & arm_ext_v6t2
21152
21153 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
21154 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
21155 ldrd, t_ldstd),
21156 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
21157 ADDRGLDRS), ldrd, t_ldstd),
21158
21159 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
21160 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
21161
21162 #undef ARM_VARIANT
21163 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
21164
21165 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
21166
21167 #undef ARM_VARIANT
21168 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
21169 #undef THUMB_VARIANT
21170 #define THUMB_VARIANT & arm_ext_v6
21171
21172 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
21173 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
21174 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
21175 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
21176 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
21177 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21178 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21179 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21180 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21181 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
21182
21183 #undef THUMB_VARIANT
21184 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21185
21186 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
21187 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
21188 strex, t_strex),
21189 #undef THUMB_VARIANT
21190 #define THUMB_VARIANT & arm_ext_v6t2
21191
21192 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
21193 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
21194
21195 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
21196 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
21197
21198 /* ARM V6 not included in V7M. */
21199 #undef THUMB_VARIANT
21200 #define THUMB_VARIANT & arm_ext_v6_notm
21201 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
21202 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
21203 UF(rfeib, 9900a00, 1, (RRw), rfe),
21204 UF(rfeda, 8100a00, 1, (RRw), rfe),
21205 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
21206 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
21207 UF(rfefa, 8100a00, 1, (RRw), rfe),
21208 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
21209 UF(rfeed, 9900a00, 1, (RRw), rfe),
21210 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
21211 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
21212 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
21213 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
21214 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
21215 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
21216 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
21217 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
21218 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
21219 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
21220
21221 /* ARM V6 not included in V7M (eg. integer SIMD). */
21222 #undef THUMB_VARIANT
21223 #define THUMB_VARIANT & arm_ext_v6_dsp
21224 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
21225 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
21226 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21227 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21228 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21229 /* Old name for QASX. */
21230 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21231 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21232 /* Old name for QSAX. */
21233 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21234 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21235 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21236 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21237 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21238 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21239 /* Old name for SASX. */
21240 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21241 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21242 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21243 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21244 /* Old name for SHASX. */
21245 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21246 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21247 /* Old name for SHSAX. */
21248 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21249 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21250 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21251 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21252 /* Old name for SSAX. */
21253 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21254 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21255 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21256 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21257 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21258 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21259 /* Old name for UASX. */
21260 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21261 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21262 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21263 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21264 /* Old name for UHASX. */
21265 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21266 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21267 /* Old name for UHSAX. */
21268 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21269 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21270 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21271 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21272 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21273 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21274 /* Old name for UQASX. */
21275 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21276 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21277 /* Old name for UQSAX. */
21278 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21279 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21280 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21281 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21282 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21283 /* Old name for USAX. */
21284 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21285 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21286 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21287 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21288 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21289 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21290 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21291 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21292 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21293 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21294 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21295 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21296 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21297 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
21298 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
21299 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21300 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21301 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
21302 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
21303 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21304 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21305 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21306 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21307 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21308 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21309 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21310 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21311 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21312 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21313 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
21314 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
21315 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21316 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21317 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
21318
21319 #undef ARM_VARIANT
21320 #define ARM_VARIANT & arm_ext_v6k_v6t2
21321 #undef THUMB_VARIANT
21322 #define THUMB_VARIANT & arm_ext_v6k_v6t2
21323
21324 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
21325 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
21326 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
21327 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
21328
21329 #undef THUMB_VARIANT
21330 #define THUMB_VARIANT & arm_ext_v6_notm
21331 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
21332 ldrexd, t_ldrexd),
21333 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
21334 RRnpcb), strexd, t_strexd),
21335
21336 #undef THUMB_VARIANT
21337 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21338 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
21339 rd_rn, rd_rn),
21340 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
21341 rd_rn, rd_rn),
21342 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
21343 strex, t_strexbh),
21344 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
21345 strex, t_strexbh),
21346 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
21347
21348 #undef ARM_VARIANT
21349 #define ARM_VARIANT & arm_ext_sec
21350 #undef THUMB_VARIANT
21351 #define THUMB_VARIANT & arm_ext_sec
21352
21353 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
21354
21355 #undef ARM_VARIANT
21356 #define ARM_VARIANT & arm_ext_virt
21357 #undef THUMB_VARIANT
21358 #define THUMB_VARIANT & arm_ext_virt
21359
21360 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
21361 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
21362
21363 #undef ARM_VARIANT
21364 #define ARM_VARIANT & arm_ext_pan
21365 #undef THUMB_VARIANT
21366 #define THUMB_VARIANT & arm_ext_pan
21367
21368 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
21369
21370 #undef ARM_VARIANT
21371 #define ARM_VARIANT & arm_ext_v6t2
21372 #undef THUMB_VARIANT
21373 #define THUMB_VARIANT & arm_ext_v6t2
21374
21375 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
21376 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
21377 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
21378 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
21379
21380 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
21381 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
21382
21383 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
21384 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
21385 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
21386 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
21387
21388 #undef ARM_VARIANT
21389 #define ARM_VARIANT & arm_ext_v3
21390 #undef THUMB_VARIANT
21391 #define THUMB_VARIANT & arm_ext_v6t2
21392
21393 TUE("csdb", 320f014, f3af8014, 0, (), noargs, t_csdb),
21394 TUF("ssbb", 57ff040, f3bf8f40, 0, (), noargs, t_csdb),
21395 TUF("pssbb", 57ff044, f3bf8f44, 0, (), noargs, t_csdb),
21396
21397 #undef ARM_VARIANT
21398 #define ARM_VARIANT & arm_ext_v6t2
21399 #undef THUMB_VARIANT
21400 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21401 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
21402 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
21403
21404 /* Thumb-only instructions. */
21405 #undef ARM_VARIANT
21406 #define ARM_VARIANT NULL
21407 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
21408 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
21409
21410 /* ARM does not really have an IT instruction, so always allow it.
21411 The opcode is copied from Thumb in order to allow warnings in
21412 -mimplicit-it=[never | arm] modes. */
21413 #undef ARM_VARIANT
21414 #define ARM_VARIANT & arm_ext_v1
21415 #undef THUMB_VARIANT
21416 #define THUMB_VARIANT & arm_ext_v6t2
21417
21418 TUE("it", bf08, bf08, 1, (COND), it, t_it),
21419 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
21420 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
21421 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
21422 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
21423 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
21424 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
21425 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
21426 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
21427 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
21428 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
21429 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
21430 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
21431 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
21432 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
21433 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
21434 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
21435 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
21436
21437 /* Thumb2 only instructions. */
21438 #undef ARM_VARIANT
21439 #define ARM_VARIANT NULL
21440
21441 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
21442 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
21443 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
21444 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
21445 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
21446 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
21447
21448 /* Hardware division instructions. */
21449 #undef ARM_VARIANT
21450 #define ARM_VARIANT & arm_ext_adiv
21451 #undef THUMB_VARIANT
21452 #define THUMB_VARIANT & arm_ext_div
21453
21454 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
21455 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
21456
21457 /* ARM V6M/V7 instructions. */
21458 #undef ARM_VARIANT
21459 #define ARM_VARIANT & arm_ext_barrier
21460 #undef THUMB_VARIANT
21461 #define THUMB_VARIANT & arm_ext_barrier
21462
21463 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
21464 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
21465 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
21466
21467 /* ARM V7 instructions. */
21468 #undef ARM_VARIANT
21469 #define ARM_VARIANT & arm_ext_v7
21470 #undef THUMB_VARIANT
21471 #define THUMB_VARIANT & arm_ext_v7
21472
21473 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
21474 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
21475
21476 #undef ARM_VARIANT
21477 #define ARM_VARIANT & arm_ext_mp
21478 #undef THUMB_VARIANT
21479 #define THUMB_VARIANT & arm_ext_mp
21480
21481 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
21482
 21483 	/* ARMv8 instructions.  */
21484 #undef ARM_VARIANT
21485 #define ARM_VARIANT & arm_ext_v8
21486
21487 /* Instructions shared between armv8-a and armv8-m. */
21488 #undef THUMB_VARIANT
21489 #define THUMB_VARIANT & arm_ext_atomics
21490
21491 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21492 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21493 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21494 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
21495 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
21496 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
21497 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21498 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
21499 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21500 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
21501 stlex, t_stlex),
21502 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
21503 stlex, t_stlex),
21504 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
21505 stlex, t_stlex),
21506 #undef THUMB_VARIANT
21507 #define THUMB_VARIANT & arm_ext_v8
21508
21509 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
21510 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
21511 ldrexd, t_ldrexd),
21512 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
21513 strexd, t_strexd),
21514
21515 /* Defined in V8 but is in undefined encoding space for earlier
21516 architectures. However earlier architectures are required to treat
 21517 	    this instruction as a semihosting trap as well. Hence while not explicitly
21518 defined as such, it is in fact correct to define the instruction for all
21519 architectures. */
21520 #undef THUMB_VARIANT
21521 #define THUMB_VARIANT & arm_ext_v1
21522 #undef ARM_VARIANT
21523 #define ARM_VARIANT & arm_ext_v1
21524 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
21525
21526 /* ARMv8 T32 only. */
21527 #undef ARM_VARIANT
21528 #define ARM_VARIANT NULL
21529 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
21530 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
21531 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
21532
21533 /* FP for ARMv8. */
21534 #undef ARM_VARIANT
21535 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
21536 #undef THUMB_VARIANT
21537 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
21538
21539 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
21540 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
21541 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
21542 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
21543 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
21544 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
21545 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
21546 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
21547 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
21548 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
21549 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
21550 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
21551 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
21552 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
21553 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
21554 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
21555 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
21556
21557 /* Crypto v1 extensions. */
21558 #undef ARM_VARIANT
21559 #define ARM_VARIANT & fpu_crypto_ext_armv8
21560 #undef THUMB_VARIANT
21561 #define THUMB_VARIANT & fpu_crypto_ext_armv8
21562
21563 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
21564 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
21565 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
21566 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
21567 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
21568 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
21569 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
21570 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
21571 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
21572 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
21573 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
21574 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
21575 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
21576 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
21577
21578 #undef ARM_VARIANT
21579 #define ARM_VARIANT & crc_ext_armv8
21580 #undef THUMB_VARIANT
21581 #define THUMB_VARIANT & crc_ext_armv8
21582 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
21583 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
21584 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
21585 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
21586 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
21587 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
21588
21589 /* ARMv8.2 RAS extension. */
21590 #undef ARM_VARIANT
21591 #define ARM_VARIANT & arm_ext_ras
21592 #undef THUMB_VARIANT
21593 #define THUMB_VARIANT & arm_ext_ras
21594 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
21595
21596 #undef ARM_VARIANT
21597 #define ARM_VARIANT & arm_ext_v8_3
21598 #undef THUMB_VARIANT
21599 #define THUMB_VARIANT & arm_ext_v8_3
21600 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
21601 NUF (vcmla, 0, 4, (RNDQ, RNDQ, RNDQ_RNSC, EXPi), vcmla),
21602 NUF (vcadd, 0, 4, (RNDQ, RNDQ, RNDQ, EXPi), vcadd),
21603
21604 #undef ARM_VARIANT
21605 #define ARM_VARIANT & fpu_neon_ext_dotprod
21606 #undef THUMB_VARIANT
21607 #define THUMB_VARIANT & fpu_neon_ext_dotprod
21608 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
21609 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
21610
21611 #undef ARM_VARIANT
21612 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
21613 #undef THUMB_VARIANT
21614 #define THUMB_VARIANT NULL
21615
21616 cCE("wfs", e200110, 1, (RR), rd),
21617 cCE("rfs", e300110, 1, (RR), rd),
21618 cCE("wfc", e400110, 1, (RR), rd),
21619 cCE("rfc", e500110, 1, (RR), rd),
21620
21621 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
21622 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
21623 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
21624 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
21625
21626 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
21627 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
21628 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
21629 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
21630
21631 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
21632 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
21633 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
21634 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
21635 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
21636 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
21637 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
21638 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
21639 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
21640 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
21641 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
21642 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
21643
21644 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
21645 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
21646 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
21647 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
21648 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
21649 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
21650 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
21651 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
21652 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
21653 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
21654 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
21655 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
21656
21657 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
21658 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
21659 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
21660 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
21661 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
21662 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
21663 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
21664 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
21665 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
21666 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
21667 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
21668 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
21669
21670 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
21671 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
21672 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
21673 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
21674 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
21675 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
21676 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
21677 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
21678 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
21679 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
21680 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
21681 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
21682
21683 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
21684 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
21685 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
21686 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
21687 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
21688 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
21689 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
21690 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
21691 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
21692 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
21693 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
21694 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
21695
21696 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
21697 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
21698 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
21699 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
21700 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
21701 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
21702 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
21703 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
21704 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
21705 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
21706 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
21707 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
21708
21709 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
21710 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
21711 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
21712 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
21713 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
21714 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
21715 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
21716 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
21717 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
21718 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
21719 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
21720 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
21721
21722 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
21723 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
21724 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
21725 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
21726 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
21727 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
21728 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
21729 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
21730 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
21731 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
21732 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
21733 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
21734
21735 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
21736 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
21737 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
21738 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
21739 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
21740 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
21741 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
21742 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
21743 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
21744 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
21745 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
21746 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
21747
21748 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
21749 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
21750 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
21751 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
21752 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
21753 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
21754 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
21755 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
21756 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
21757 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
21758 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
21759 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
21760
21761 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
21762 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
21763 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
21764 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
21765 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
21766 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
21767 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
21768 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
21769 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
21770 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
21771 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
21772 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
21773
21774 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
21775 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
21776 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
21777 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
21778 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
21779 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
21780 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
21781 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
21782 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
21783 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
21784 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
21785 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
21786
21787 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
21788 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
21789 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
21790 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
21791 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
21792 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
21793 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
21794 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
21795 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
21796 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
21797 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
21798 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
21799
21800 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
21801 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
21802 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
21803 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
21804 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
21805 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
21806 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
21807 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
21808 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
21809 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
21810 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
21811 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
21812
21813 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
21814 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
21815 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
21816 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
21817 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
21818 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
21819 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
21820 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
21821 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
21822 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
21823 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
21824 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
21825
21826 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
21827 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
21828 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
21829 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
21830 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
21831 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
21832 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
21833 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
21834 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
21835 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
21836 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
21837 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
21838
21839 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
21840 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
21841 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
21842 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
21843 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
21844 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21845 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21846 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21847 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
21848 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
21849 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
21850 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
21851
21852 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
21853 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
21854 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
21855 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
21856 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
21857 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21858 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21859 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21860 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
21861 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
21862 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
21863 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
21864
21865 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
21866 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
21867 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
21868 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
21869 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
21870 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21871 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21872 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21873 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
21874 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
21875 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
21876 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
21877
21878 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
21879 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
21880 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
21881 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
21882 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
21883 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21884 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21885 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21886 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
21887 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
21888 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
21889 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
21890
21891 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
21892 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
21893 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
21894 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
21895 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
21896 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21897 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21898 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21899 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
21900 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
21901 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
21902 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
21903
21904 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
21905 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
21906 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
21907 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
21908 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
21909 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21910 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21911 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21912 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
21913 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
21914 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
21915 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
21916
21917 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
21918 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
21919 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
21920 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
21921 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
21922 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21923 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21924 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21925 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
21926 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
21927 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
21928 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
21929
21930 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
21931 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
21932 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
21933 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
21934 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
21935 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21936 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21937 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21938 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
21939 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
21940 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
21941 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
21942
21943 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
21944 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
21945 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
21946 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
21947 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
21948 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21949 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21950 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21951 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
21952 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
21953 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
21954 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
21955
21956 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
21957 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
21958 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
21959 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
21960 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
21961 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21962 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21963 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21964 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
21965 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
21966 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
21967 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
21968
21969 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
21970 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
21971 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
21972 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
21973 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
21974 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21975 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21976 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21977 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
21978 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
21979 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
21980 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
21981
21982 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
21983 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
21984 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
21985 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
21986 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
21987 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21988 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21989 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21990 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
21991 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
21992 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
21993 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
21994
21995 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
21996 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
21997 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
21998 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
21999 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
22000 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22001 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22002 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22003 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
22004 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
22005 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
22006 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
22007
22008 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
22009 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
22010 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
22011 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
22012
22013 cCL("flts", e000110, 2, (RF, RR), rn_rd),
22014 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
22015 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
22016 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
22017 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
22018 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
22019 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
22020 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
22021 cCL("flte", e080110, 2, (RF, RR), rn_rd),
22022 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
22023 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
22024 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
22025
22026 /* The implementation of the FIX instruction is broken on some
22027 assemblers, in that it accepts a precision specifier as well as a
22028 rounding specifier, despite the fact that this is meaningless.
22029 To be more compatible, we accept it as well, though of course it
22030 does not set any bits. */
22031 cCE("fix", e100110, 2, (RR, RF), rd_rm),
22032 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
22033 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
22034 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
22035 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
22036 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
22037 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
22038 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
22039 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
22040 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
22041 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
22042 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
22043 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
22044
22045 /* Instructions that were new with the real FPA, call them V2. */
22046 #undef ARM_VARIANT
22047 #define ARM_VARIANT & fpu_fpa_ext_v2
22048
22049 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22050 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22051 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22052 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22053 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22054 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22055
22056 #undef ARM_VARIANT
22057 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
22058
22059 /* Moves and type conversions. */
22060 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
22061 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
22062 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
22063 cCE("fmstat", ef1fa10, 0, (), noargs),
22064 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
22065 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
22066 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
22067 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
22068 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
22069 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
22070 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
22071 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
22072 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
22073 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
22074
22075 /* Memory operations. */
22076 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
22077 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
22078 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
22079 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
22080 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
22081 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
22082 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
22083 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
22084 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
22085 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
22086 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
22087 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
22088 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
22089 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
22090 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
22091 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
22092 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
22093 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
22094
22095 /* Monadic operations. */
22096 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
22097 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
22098 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
22099
22100 /* Dyadic operations. */
22101 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22102 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22103 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22104 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22105 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22106 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22107 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22108 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22109 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22110
22111 /* Comparisons. */
22112 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
22113 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
22114 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
22115 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
22116
22117 /* Double precision load/store are still present on single precision
22118 implementations. */
22119 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
22120 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
22121 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
22122 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
22123 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
22124 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
22125 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
22126 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
22127 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
22128 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
22129
22130 #undef ARM_VARIANT
22131 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
22132
22133 /* Moves and type conversions. */
22134 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
22135 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
22136 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
22137 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
22138 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
22139 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
22140 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
22141 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
22142 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
22143 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
22144 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
22145 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
22146 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
22147
22148 /* Monadic operations. */
22149 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
22150 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
22151 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
22152
22153 /* Dyadic operations. */
22154 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22155 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22156 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22157 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22158 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22159 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22160 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22161 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22162 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22163
22164 /* Comparisons. */
22165 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
22166 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
22167 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
22168 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
22169
22170 #undef ARM_VARIANT
22171 #define ARM_VARIANT & fpu_vfp_ext_v2
22172
22173 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
22174 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
22175 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
22176 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
22177
22178 /* Instructions which may belong to either the Neon or VFP instruction sets.
22179 Individual encoder functions perform additional architecture checks. */
22180 #undef ARM_VARIANT
22181 #define ARM_VARIANT & fpu_vfp_ext_v1xd
22182 #undef THUMB_VARIANT
22183 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
22184
22185 /* These mnemonics are unique to VFP. */
22186 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
22187 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
22188 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22189 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22190 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22191 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
22192 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
22193 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
22194 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
22195 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
22196
22197 /* Mnemonics shared by Neon and VFP. */
22198 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
22199 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
22200 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
22201
22202 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22203 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22204 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22205 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22206 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22207 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22208
22209 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
22210 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
22211 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
22212 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
22213
22214
22215 /* NOTE: All VMOV encoding is special-cased! */
22216 NCE(vmov, 0, 1, (VMOV), neon_mov),
22217 NCE(vmovq, 0, 1, (VMOV), neon_mov),
22218
22219 #undef THUMB_VARIANT
22220 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
22221 by different feature bits. Since we are setting the Thumb guard, we can
22222 require Thumb-1 which makes it a nop guard and set the right feature bit in
22223 do_vldr_vstr (). */
22224 #define THUMB_VARIANT & arm_ext_v4t
22225 NCE(vldr, d100b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
22226 NCE(vstr, d000b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
22227
22228 #undef ARM_VARIANT
22229 #define ARM_VARIANT & arm_ext_fp16
22230 #undef THUMB_VARIANT
22231 #define THUMB_VARIANT & arm_ext_fp16
22232 /* New instructions added from v8.2, allowing the extraction and insertion of
22233 the upper 16 bits of a 32-bit vector register. */
22234 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
22235 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
22236
22237 /* New backported fma/fms instructions optional in v8.2. */
22238 NCE (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
22239 NCE (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
22240
22241 #undef THUMB_VARIANT
22242 #define THUMB_VARIANT & fpu_neon_ext_v1
22243 #undef ARM_VARIANT
22244 #define ARM_VARIANT & fpu_neon_ext_v1
22245
22246 /* Data processing with three registers of the same length. */
22247 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
22248 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
22249 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
22250 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
22251 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
22252 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
22253 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
22254 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
22255 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
22256 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
22257 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
22258 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
22259 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
22260 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
22261 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
22262 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
22263 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
22264 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
22265 /* If not immediate, fall back to neon_dyadic_i64_su.
22266 shl_imm should accept I8 I16 I32 I64,
22267 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
22268 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
22269 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
22270 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
22271 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
22272 /* Logic ops, types optional & ignored. */
22273 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
22274 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
22275 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
22276 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
22277 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
22278 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
22279 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
22280 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
22281 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
22282 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
22283 /* Bitfield ops, untyped. */
22284 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
22285 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
22286 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
22287 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
22288 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
22289 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
22290 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
22291 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
22292 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
22293 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
22294 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
22295 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
22296 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
22297 back to neon_dyadic_if_su. */
22298 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
22299 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
22300 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
22301 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
22302 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
22303 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
22304 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
22305 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
22306 /* Comparison. Type I8 I16 I32 F32. */
22307 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
22308 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
22309 /* As above, D registers only. */
22310 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
22311 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
22312 /* Int and float variants, signedness unimportant. */
22313 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
22314 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
22315 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
22316 /* Add/sub take types I8 I16 I32 I64 F32. */
22317 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
22318 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
22319 /* vtst takes sizes 8, 16, 32. */
22320 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
22321 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
22322 /* VMUL takes I8 I16 I32 F32 P8. */
22323 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
22324 /* VQD{R}MULH takes S16 S32. */
22325 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
22326 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
22327 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
22328 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
22329 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
22330 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
22331 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
22332 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
22333 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
22334 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
22335 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
22336 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
22337 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
22338 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
22339 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
22340 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
22341 /* ARM v8.1 extension. */
22342 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
22343 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
22344 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
22345 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
22346
22347 /* Two address, int/float. Types S8 S16 S32 F32. */
22348 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
22349 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
22350
22351 /* Data processing with two registers and a shift amount. */
22352 /* Right shifts, and variants with rounding.
22353 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
22354 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
22355 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
22356 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
22357 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
22358 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
22359 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
22360 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
22361 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
22362 /* Shift and insert. Sizes accepted 8 16 32 64. */
22363 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
22364 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
22365 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
22366 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
22367 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
22368 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
22369 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
22370 /* Right shift immediate, saturating & narrowing, with rounding variants.
22371 Types accepted S16 S32 S64 U16 U32 U64. */
22372 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
22373 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
22374 /* As above, unsigned. Types accepted S16 S32 S64. */
22375 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
22376 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
22377 /* Right shift narrowing. Types accepted I16 I32 I64. */
22378 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
22379 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
22380 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
22381 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
22382 /* CVT with optional immediate for fixed-point variant. */
22383 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
22384
22385 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
22386 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
22387
22388 /* Data processing, three registers of different lengths. */
22389 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
22390 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
22391 /* If not scalar, fall back to neon_dyadic_long.
22392 Vector types as above, scalar types S16 S32 U16 U32. */
22393 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
22394 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
22395 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
22396 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
22397 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
22398 /* Dyadic, narrowing insns. Types I16 I32 I64. */
22399 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
22400 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
22401 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
22402 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
22403 /* Saturating doubling multiplies. Types S16 S32. */
22404 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
22405 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
22406 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
22407 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
22408 S16 S32 U16 U32. */
22409 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
22410
22411 /* Extract. Size 8. */
22412 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
22413 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
22414
22415 /* Two registers, miscellaneous. */
22416 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
22417 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
22418 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
22419 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
22420 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
22421 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
22422 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
22423 /* Vector replicate. Sizes 8 16 32. */
22424 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
22425 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
22426 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
22427 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
22428 /* VMOVN. Types I16 I32 I64. */
22429 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
22430 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
22431 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
22432 /* VQMOVUN. Types S16 S32 S64. */
22433 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
22434 /* VZIP / VUZP. Sizes 8 16 32. */
22435 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
22436 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
22437 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
22438 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
22439 /* VQABS / VQNEG. Types S8 S16 S32. */
22440 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
22441 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
22442 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
22443 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
22444 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
22445 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
22446 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
22447 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
22448 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
22449 /* Reciprocal estimates. Types U32 F16 F32. */
22450 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
22451 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
22452 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
22453 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
22454 /* VCLS. Types S8 S16 S32. */
22455 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
22456 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
22457 /* VCLZ. Types I8 I16 I32. */
22458 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
22459 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
22460 /* VCNT. Size 8. */
22461 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
22462 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
22463 /* Two address, untyped. */
22464 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
22465 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
22466 /* VTRN. Sizes 8 16 32. */
22467 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
22468 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
22469
22470 /* Table lookup. Size 8. */
22471 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
22472 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
22473
22474 #undef THUMB_VARIANT
22475 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
22476 #undef ARM_VARIANT
22477 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
22478
22479 /* Neon element/structure load/store. */
22480 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
22481 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
22482 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
22483 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
22484 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
22485 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
22486 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
22487 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
22488
22489 #undef THUMB_VARIANT
22490 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
22491 #undef ARM_VARIANT
22492 #define ARM_VARIANT & fpu_vfp_ext_v3xd
22493 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
22494 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
22495 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
22496 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
22497 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
22498 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
22499 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
22500 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
22501 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
22502
22503 #undef THUMB_VARIANT
22504 #define THUMB_VARIANT & fpu_vfp_ext_v3
22505 #undef ARM_VARIANT
22506 #define ARM_VARIANT & fpu_vfp_ext_v3
22507
22508 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
22509 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
22510 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
22511 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
22512 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
22513 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
22514 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
22515 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
22516 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
22517
22518 #undef ARM_VARIANT
22519 #define ARM_VARIANT & fpu_vfp_ext_fma
22520 #undef THUMB_VARIANT
22521 #define THUMB_VARIANT & fpu_vfp_ext_fma
22522 /* Mnemonics shared by Neon and VFP. These are included in the
22523 VFP FMA variant; NEON and VFP FMA always includes the NEON
22524 FMA instructions. */
22525 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
22526 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
22527 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
22528 the v form should always be used. */
22529 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22530 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22531 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22532 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22533 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22534 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22535
22536 #undef THUMB_VARIANT
22537 #undef ARM_VARIANT
22538 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
22539
22540 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22541 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22542 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22543 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22544 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22545 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22546 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
22547 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
22548
22549 #undef ARM_VARIANT
22550 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
22551
22552 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
22553 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
22554 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
22555 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
22556 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
22557 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
22558 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
22559 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
22560 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
22561 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
22562 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
22563 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
22564 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
22565 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
22566 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
22567 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
22568 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
22569 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
22570 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
22571 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
22572 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22573 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22574 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22575 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22576 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22577 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22578 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
22579 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
22580 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
22581 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
22582 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
22583 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
22584 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
22585 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
22586 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
22587 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
22588 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
22589 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22590 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22591 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22592 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22593 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22594 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22595 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22596 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22597 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22598 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
22599 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22600 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22601 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22602 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22603 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22604 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22605 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22606 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22607 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22608 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22609 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22610 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22611 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22612 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22613 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22614 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22615 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22616 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22617 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22618 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
22619 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
22620 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
22621 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
22622 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22623 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22624 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22625 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22626 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22627 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22628 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22629 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22630 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22631 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22632 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22633 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22634 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22635 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22636 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22637 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22638 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22639 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22640 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
22641 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22642 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22643 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22644 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22645 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22646 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22647 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22648 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22649 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22650 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22651 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22652 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22653 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22654 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22655 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22656 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22657 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22658 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22659 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22660 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22661 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22662 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
22663 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22664 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22665 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22666 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22667 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22668 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22669 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22670 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22671 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22672 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22673 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22674 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22675 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22676 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22677 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22678 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22679 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22680 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22681 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
22682 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
22683 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
22684 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
22685 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22686 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22687 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22688 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22689 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22690 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22691 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22692 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22693 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22694 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
22695 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
22696 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
22697 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
22698 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
22699 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
22700 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22701 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22702 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22703 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
22704 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
22705 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
22706 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
22707 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
22708 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
22709 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22710 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22711 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22712 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22713 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
22714
22715 #undef ARM_VARIANT
22716 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
22717
22718 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
22719 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
22720 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
22721 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
22722 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
22723 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
22724 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22725 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22726 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22727 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22728 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22729 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22730 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22731 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22732 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22733 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22734 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22735 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22736 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22737 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22738 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
22739 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22740 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22741 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22742 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22743 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22744 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22745 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22746 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22747 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22748 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22749 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22750 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22751 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22752 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22753 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22754 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22755 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22756 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22757 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22758 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22759 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22760 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22761 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22762 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22763 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22764 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22765 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22766 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22767 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22768 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22769 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22770 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22771 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22772 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22773 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22774 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22775
22776 #undef ARM_VARIANT
22777 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
22778
22779 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
22780 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
22781 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
22782 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
22783 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
22784 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
22785 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
22786 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
22787 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
22788 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
22789 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
22790 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
22791 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
22792 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
22793 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
22794 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
22795 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
22796 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
22797 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
22798 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
22799 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
22800 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
22801 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
22802 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
22803 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
22804 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
22805 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
22806 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
22807 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
22808 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
22809 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
22810 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
22811 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
22812 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
22813 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
22814 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
22815 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
22816 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
22817 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
22818 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
22819 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
22820 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
22821 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
22822 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
22823 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
22824 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
22825 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
22826 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
22827 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
22828 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
22829 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
22830 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
22831 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
22832 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
22833 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
22834 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
22835 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
22836 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
22837 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
22838 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
22839 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
22840 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
22841 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
22842 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
22843 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22844 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
22845 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22846 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
22847 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22848 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
22849 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22850 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22851 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
22852 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
22853 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
22854 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
22855
22856 /* ARMv8.5-A instructions. */
22857 #undef ARM_VARIANT
22858 #define ARM_VARIANT & arm_ext_sb
22859 #undef THUMB_VARIANT
22860 #define THUMB_VARIANT & arm_ext_sb
22861 TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
22862
22863 #undef ARM_VARIANT
22864 #define ARM_VARIANT & arm_ext_predres
22865 #undef THUMB_VARIANT
22866 #define THUMB_VARIANT & arm_ext_predres
22867 CE("cfprctx", e070f93, 1, (RRnpc), rd),
22868 CE("dvprctx", e070fb3, 1, (RRnpc), rd),
22869 CE("cpprctx", e070ff3, 1, (RRnpc), rd),
22870
22871 /* ARMv8-M instructions. */
22872 #undef ARM_VARIANT
22873 #define ARM_VARIANT NULL
22874 #undef THUMB_VARIANT
22875 #define THUMB_VARIANT & arm_ext_v8m
22876 ToU("sg", e97fe97f, 0, (), noargs),
22877 ToC("blxns", 4784, 1, (RRnpc), t_blx),
22878 ToC("bxns", 4704, 1, (RRnpc), t_bx),
22879 ToC("tt", e840f000, 2, (RRnpc, RRnpc), tt),
22880 ToC("ttt", e840f040, 2, (RRnpc, RRnpc), tt),
22881 ToC("tta", e840f080, 2, (RRnpc, RRnpc), tt),
22882 ToC("ttat", e840f0c0, 2, (RRnpc, RRnpc), tt),
22883
22884 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
22885 instructions behave as nop if no VFP is present. */
22886 #undef THUMB_VARIANT
22887 #define THUMB_VARIANT & arm_ext_v8m_main
22888 ToC("vlldm", ec300a00, 1, (RRnpc), rn),
22889 ToC("vlstm", ec200a00, 1, (RRnpc), rn),
22890
22891 /* Armv8.1-M Mainline instructions. */
22892 #undef THUMB_VARIANT
22893 #define THUMB_VARIANT & arm_ext_v8_1m_main
22894 toC("bf", _bf, 2, (EXPs, EXPs), t_branch_future),
22895 toU("bfcsel", _bfcsel, 4, (EXPs, EXPs, EXPs, COND), t_branch_future),
22896 toC("bfx", _bfx, 2, (EXPs, RRnpcsp), t_branch_future),
22897 toC("bfl", _bfl, 2, (EXPs, EXPs), t_branch_future),
22898 toC("bflx", _bflx, 2, (EXPs, RRnpcsp), t_branch_future),
22899
22900 toU("dls", _dls, 2, (LR, RRnpcsp), t_loloop),
22901 toU("wls", _wls, 3, (LR, RRnpcsp, EXP), t_loloop),
22902 toU("le", _le, 2, (oLR, EXP), t_loloop),
22903
22904 ToC("clrm", e89f0000, 1, (CLRMLST), t_clrm),
22905 ToC("vscclrm", ec9f0a00, 1, (VRSDVLST), t_vscclrm),
22906
22907 #undef THUMB_VARIANT
22908 #define THUMB_VARIANT & mve_ext
22909 ToC("vpst", fe710f4d, 0, (), mve_vpt),
22910 ToC("vpstt", fe318f4d, 0, (), mve_vpt),
22911 ToC("vpste", fe718f4d, 0, (), mve_vpt),
22912 ToC("vpsttt", fe314f4d, 0, (), mve_vpt),
22913 ToC("vpstte", fe31cf4d, 0, (), mve_vpt),
22914 ToC("vpstet", fe71cf4d, 0, (), mve_vpt),
22915 ToC("vpstee", fe714f4d, 0, (), mve_vpt),
22916 ToC("vpstttt", fe312f4d, 0, (), mve_vpt),
22917 ToC("vpsttte", fe316f4d, 0, (), mve_vpt),
22918 ToC("vpsttet", fe31ef4d, 0, (), mve_vpt),
22919 ToC("vpsttee", fe31af4d, 0, (), mve_vpt),
22920 ToC("vpstett", fe71af4d, 0, (), mve_vpt),
22921 ToC("vpstete", fe71ef4d, 0, (), mve_vpt),
22922 ToC("vpsteet", fe716f4d, 0, (), mve_vpt),
22923 ToC("vpsteee", fe712f4d, 0, (), mve_vpt),
22924
22925 /* MVE and MVE FP only. */
22926 mCEF(vmullb, _vmullb, 3, (RMQ, RMQ, RMQ), mve_vmull),
22927 mCEF(vabav, _vabav, 3, (RRnpcsp, RMQ, RMQ), mve_vabav),
22928 mCEF(vmladav, _vmladav, 3, (RRe, RMQ, RMQ), mve_vmladav),
22929 mCEF(vmladava, _vmladava, 3, (RRe, RMQ, RMQ), mve_vmladav),
22930 mCEF(vmladavx, _vmladavx, 3, (RRe, RMQ, RMQ), mve_vmladav),
22931 mCEF(vmladavax, _vmladavax, 3, (RRe, RMQ, RMQ), mve_vmladav),
22932 mCEF(vmlav, _vmladav, 3, (RRe, RMQ, RMQ), mve_vmladav),
22933 mCEF(vmlava, _vmladava, 3, (RRe, RMQ, RMQ), mve_vmladav),
22934 mCEF(vmlsdav, _vmlsdav, 3, (RRe, RMQ, RMQ), mve_vmladav),
22935 mCEF(vmlsdava, _vmlsdava, 3, (RRe, RMQ, RMQ), mve_vmladav),
22936 mCEF(vmlsdavx, _vmlsdavx, 3, (RRe, RMQ, RMQ), mve_vmladav),
22937 mCEF(vmlsdavax, _vmlsdavax, 3, (RRe, RMQ, RMQ), mve_vmladav),
22938
22939 mCEF(vst20, _vst20, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
22940 mCEF(vst21, _vst21, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
22941 mCEF(vst40, _vst40, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
22942 mCEF(vst41, _vst41, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
22943 mCEF(vst42, _vst42, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
22944 mCEF(vst43, _vst43, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
22945 mCEF(vld20, _vld20, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
22946 mCEF(vld21, _vld21, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
22947 mCEF(vld40, _vld40, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
22948 mCEF(vld41, _vld41, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
22949 mCEF(vld42, _vld42, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
22950 mCEF(vld43, _vld43, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
22951
22952 #undef ARM_VARIANT
22953 #define ARM_VARIANT & fpu_vfp_ext_v1xd
22954 #undef THUMB_VARIANT
22955 #define THUMB_VARIANT & arm_ext_v6t2
22956
22957 mCEF(vmullt, _vmullt, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ), mve_vmull),
22958 mnCEF(vadd, _vadd, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_addsub_if_i),
22959 mnCEF(vsub, _vsub, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_addsub_if_i),
22960
22961 MNCEF(vabs, 1b10300, 2, (RNSDQMQ, RNSDQMQ), neon_abs_neg),
22962 MNCEF(vneg, 1b10380, 2, (RNSDQMQ, RNSDQMQ), neon_abs_neg),
22963
22964 #undef ARM_VARIANT
22965 #define ARM_VARIANT & fpu_neon_ext_v1
22966 mnUF(vabd, _vabd, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
22967 mnUF(vabdl, _vabdl, 3, (RNQMQ, RNDMQ, RNDMQ), neon_dyadic_long),
22968 mnUF(vaddl, _vaddl, 3, (RNQMQ, RNDMQ, RNDMQR), neon_dyadic_long),
22969 mnUF(vsubl, _vsubl, 3, (RNQMQ, RNDMQ, RNDMQR), neon_dyadic_long),
22970 };
22971 #undef ARM_VARIANT
22972 #undef THUMB_VARIANT
22973 #undef TCE
22974 #undef TUE
22975 #undef TUF
22976 #undef TCC
22977 #undef cCE
22978 #undef cCL
22979 #undef C3E
22980 #undef C3
22981 #undef CE
22982 #undef CM
22983 #undef CL
22984 #undef UE
22985 #undef UF
22986 #undef UT
22987 #undef NUF
22988 #undef nUF
22989 #undef NCE
22990 #undef nCE
22991 #undef OPS0
22992 #undef OPS1
22993 #undef OPS2
22994 #undef OPS3
22995 #undef OPS4
22996 #undef OPS5
22997 #undef OPS6
22998 #undef do_0
22999 #undef ToC
23000 #undef toC
23001 #undef ToU
23002 #undef toU
23003 \f
23004 /* MD interface: bits in the object file. */
23005
23006 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
23007 for use in the a.out file, and stores them in the array pointed to by buf.
23008 This knows about the endian-ness of the target machine and does
23009 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
23010 2 (short) and 4 (long) Floating numbers are put out as a series of
23011 LITTLENUMS (shorts, here at least). */
23012
23013 void
23014 md_number_to_chars (char * buf, valueT val, int n)
23015 {
23016 if (target_big_endian)
23017 number_to_chars_bigendian (buf, val, n);
23018 else
23019 number_to_chars_littleendian (buf, val, n);
23020 }
23021
23022 static valueT
23023 md_chars_to_number (char * buf, int n)
23024 {
23025 valueT result = 0;
23026 unsigned char * where = (unsigned char *) buf;
23027
23028 if (target_big_endian)
23029 {
23030 while (n--)
23031 {
23032 result <<= 8;
23033 result |= (*where++ & 255);
23034 }
23035 }
23036 else
23037 {
23038 while (n--)
23039 {
23040 result <<= 8;
23041 result |= (where[n] & 255);
23042 }
23043 }
23044
23045 return result;
23046 }
23047
23048 /* MD interface: Sections. */
23049
23050 /* Calculate the maximum variable size (i.e., excluding fr_fix)
23051 that an rs_machine_dependent frag may reach. */
23052
23053 unsigned int
23054 arm_frag_max_var (fragS *fragp)
23055 {
23056 /* We only use rs_machine_dependent for variable-size Thumb instructions,
23057 which are either THUMB_SIZE (2) or INSN_SIZE (4).
23058
23059 Note that we generate relaxable instructions even for cases that don't
23060 really need it, like an immediate that's a trivial constant. So we're
23061 overestimating the instruction size for some of those cases. Rather
23062 than putting more intelligence here, it would probably be better to
23063 avoid generating a relaxation frag in the first place when it can be
23064 determined up front that a short instruction will suffice. */
23065
23066 gas_assert (fragp->fr_type == rs_machine_dependent);
23067 return INSN_SIZE;
23068 }
23069
23070 /* Estimate the size of a frag before relaxing. Assume everything fits in
23071 2 bytes. */
23072
23073 int
23074 md_estimate_size_before_relax (fragS * fragp,
23075 segT segtype ATTRIBUTE_UNUSED)
23076 {
23077 fragp->fr_var = 2;
23078 return 2;
23079 }
23080
23081 /* Convert a machine dependent frag. */
23082
/* Convert a machine dependent frag: rewrite the 16-bit Thumb
   instruction at the end of FRAGP's fixed part into its final 16- or
   32-bit encoding (as decided by relaxation, recorded in fr_var) and
   attach the matching fixup/relocation.  */
void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The instruction being converted sits at the end of the fixed
     part of the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  old_op = bfd_get_16(abfd, buf);
  /* Build the expression for the fixup: symbolic if the frag refers
     to a symbol, otherwise a plain constant offset.  */
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  /* fr_subtype holds the T_MNEM_* code recorded at assembly time.  */
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  /* Widen to the 32-bit encoding, moving the register fields
	     from their 16-bit positions.  */
	  insn = THUMB_OP32 (opcode);
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      /* SP-/PC-relative 16-bit forms keep Rd in bits 8-10;
		 presumably top nibble 4/9 selects those forms — the
		 encoding tables elsewhere confirm this.  */
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      /* Register forms: Rd in bits 0-2, Rn in bits 3-5.  */
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the second form of the pc-relative load is pc-relative
	 from the relocation's point of view.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Compensate for the pipeline offset in the narrow form.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs place the destination at bit 8; cmp/cmn place
	     the first operand at bit 16 (8 + 8).  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Carry the condition field across to the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 distinguishes the flag-setting variants, which use
	     a different immediate relocation.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
23254
23255 /* Return the size of a relaxable immediate operand instruction.
23256 SHIFT and SIZE specify the form of the allowable immediate. */
23257 static int
23258 relax_immediate (fragS *fragp, int size, int shift)
23259 {
23260 offsetT offset;
23261 offsetT mask;
23262 offsetT low;
23263
23264 /* ??? Should be able to do better than this. */
23265 if (fragp->fr_symbol)
23266 return 4;
23267
23268 low = (1 << shift) - 1;
23269 mask = (1 << (shift + size)) - (1 << shift);
23270 offset = fragp->fr_offset;
23271 /* Force misaligned offsets to 32-bit variant. */
23272 if (offset & low)
23273 return 4;
23274 if (offset & ~mask)
23275 return 4;
23276 return 2;
23277 }
23278
23279 /* Get the address of a symbol during relaxation. */
/* Get the address of a symbol during relaxation.  STRETCH is the
   cumulative size change of preceding frags in the current pass.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* An intervening alignment frag absorbs part of the
		 stretch; reduce it modulo the alignment boundary,
		 keeping the sign for negative stretch.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Once fully absorbed there is nothing left to apply.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the remaining stretch if the symbol's frag was
	 actually found downstream of FRAGP.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
23328
23329 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
23330 load. */
23331 static int
23332 relax_adr (fragS *fragp, asection *sec, long stretch)
23333 {
23334 addressT addr;
23335 offsetT val;
23336
23337 /* Assume worst case for symbols not known to be in the same section. */
23338 if (fragp->fr_symbol == NULL
23339 || !S_IS_DEFINED (fragp->fr_symbol)
23340 || sec != S_GET_SEGMENT (fragp->fr_symbol)
23341 || S_IS_WEAK (fragp->fr_symbol))
23342 return 4;
23343
23344 val = relaxed_symbol_addr (fragp, stretch);
23345 addr = fragp->fr_address + fragp->fr_fix;
23346 addr = (addr + 4) & ~3;
23347 /* Force misaligned targets to 32-bit variant. */
23348 if (val & 3)
23349 return 4;
23350 val -= addr;
23351 if (val < 0 || val > 1020)
23352 return 4;
23353 return 2;
23354 }
23355
23356 /* Return the size of a relaxable add/sub immediate instruction. */
23357 static int
23358 relax_addsub (fragS *fragp, asection *sec)
23359 {
23360 char *buf;
23361 int op;
23362
23363 buf = fragp->fr_literal + fragp->fr_fix;
23364 op = bfd_get_16(sec->owner, buf);
23365 if ((op & 0xf) == ((op >> 4) & 0xf))
23366 return relax_immediate (fragp, 8, 0);
23367 else
23368 return relax_immediate (fragp, 3, 0);
23369 }
23370
23371 /* Return TRUE iff the definition of symbol S could be pre-empted
23372 (overridden) at link or load time. */
23373 static bfd_boolean
23374 symbol_preemptible (symbolS *s)
23375 {
23376 /* Weak symbols can always be pre-empted. */
23377 if (S_IS_WEAK (s))
23378 return TRUE;
23379
23380 /* Non-global symbols cannot be pre-empted. */
23381 if (! S_IS_EXTERNAL (s))
23382 return FALSE;
23383
23384 #ifdef OBJ_ELF
23385 /* In ELF, a global symbol can be marked protected, or private. In that
23386 case it can't be pre-empted (other definitions in the same link unit
23387 would violate the ODR). */
23388 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
23389 return FALSE;
23390 #endif
23391
23392 /* Other global symbols might be pre-empted. */
23393 return TRUE;
23394 }
23395
23396 /* Return the size of a relaxable branch instruction. BITS is the
23397 size of the offset field in the narrow instruction. */
23398
23399 static int
23400 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
23401 {
23402 addressT addr;
23403 offsetT val;
23404 offsetT limit;
23405
23406 /* Assume worst case for symbols not known to be in the same section. */
23407 if (!S_IS_DEFINED (fragp->fr_symbol)
23408 || sec != S_GET_SEGMENT (fragp->fr_symbol)
23409 || S_IS_WEAK (fragp->fr_symbol))
23410 return 4;
23411
23412 #ifdef OBJ_ELF
23413 /* A branch to a function in ARM state will require interworking. */
23414 if (S_IS_DEFINED (fragp->fr_symbol)
23415 && ARM_IS_FUNC (fragp->fr_symbol))
23416 return 4;
23417 #endif
23418
23419 if (symbol_preemptible (fragp->fr_symbol))
23420 return 4;
23421
23422 val = relaxed_symbol_addr (fragp, stretch);
23423 addr = fragp->fr_address + fragp->fr_fix + 4;
23424 val -= addr;
23425
23426 /* Offset is a signed value *2 */
23427 limit = 1 << bits;
23428 if (val >= limit || val < -limit)
23429 return 4;
23430 return 2;
23431 }
23432
23433
23434 /* Relax a machine dependent frag. This returns the amount by which
23435 the current size of the frag should change. */
23436
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* Dispatch on the mnemonic recorded at assembly time; the second
     and third arguments of relax_immediate are the immediate field
     width in bits and the scale shift of the narrow encoding.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      /* 5-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* 5-bit immediate, halfword-scaled.  */
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* 5-bit immediate, byte-scaled.  */
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
23512
23513 /* Round up a section size to the appropriate boundary. */
23514
23515 valueT
23516 md_section_align (segT segment ATTRIBUTE_UNUSED,
23517 valueT size)
23518 {
23519 return size;
23520 }
23521
23522 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
23523 of an rs_align_code fragment. */
23524
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: pad code sections with no-op
   instructions of the right width for the current ARM/Thumb mode,
   using zero bytes for any sub-instruction remainder.  */
void
arm_handle_align (fragS * fragP)
{
  /* Byte patterns of the no-op for each architecture level, in both
     endiannesses.  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Amount of padding needed to reach the next frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  /* Pick the no-op pattern based on the recorded mode of the frag.  */
  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  /* Thumb-2: narrow no-ops exist, and wide no-ops let us pad
	     four bytes at a time.  */
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Pad any sub-instruction remainder with zero bytes, flagged as
     data via a mapping symbol on ELF.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  /* Fill the rest of the padding with full-width no-ops.  */
  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
23641
23642 /* Called from md_do_align. Used to create an alignment
23643 frag in a code section. */
23644
23645 void
23646 arm_frag_align_code (int n, int max)
23647 {
23648 char * p;
23649
23650 /* We assume that there will never be a requirement
23651 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
23652 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
23653 {
23654 char err_msg[128];
23655
23656 sprintf (err_msg,
23657 _("alignments greater than %d bytes not supported in .text sections."),
23658 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
23659 as_fatal ("%s", err_msg);
23660 }
23661
23662 p = frag_var (rs_align_code,
23663 MAX_MEM_FOR_RS_ALIGN_CODE,
23664 1,
23665 (relax_substateT) max,
23666 (symbolS *) NULL,
23667 (offsetT) n,
23668 (char *) NULL);
23669 *p = 0;
23670 }
23671
23672 /* Perform target specific initialisation of a frag.
23673 Note - despite the name this initialisation is not done when the frag
23674 is created, but only when its type is assigned. A frag can be created
23675 and used a long time before its type is set, so beware of assuming that
23676 this initialisation is performed first. */
23677
23678 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area, tagging
     the value with MODE_RECORDED so later code can tell it was set.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
23685
23686 #else /* OBJ_ELF is defined. */
23687 void
23688 arm_init_frag (fragS * fragP, int max_chars)
23689 {
23690 bfd_boolean frag_thumb_mode;
23691
23692 /* If the current ARM vs THUMB mode has not already
23693 been recorded into this frag then do so now. */
23694 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
23695 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
23696
23697 /* PR 21809: Do not set a mapping state for debug sections
23698 - it just confuses other tools. */
23699 if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
23700 return;
23701
23702 frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;
23703
23704 /* Record a mapping symbol for alignment frags. We will delete this
23705 later if the alignment ends up empty. */
23706 switch (fragP->fr_type)
23707 {
23708 case rs_align:
23709 case rs_align_test:
23710 case rs_fill:
23711 mapping_state_2 (MAP_DATA, max_chars);
23712 break;
23713 case rs_align_code:
23714 mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
23715 break;
23716 default:
23717 break;
23718 }
23719 }
23720
23721 /* When we change sections we need to issue a new mapping symbol. */
23722
23723 void
23724 arm_elf_change_section (void)
23725 {
23726 /* Link an unlinked unwind index table section to the .text section. */
23727 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
23728 && elf_linked_to_section (now_seg) == NULL)
23729 elf_linked_to_section (now_seg) = text_section;
23730 }
23731
23732 int
23733 arm_elf_section_type (const char * str, size_t len)
23734 {
23735 if (len == 5 && strncmp (str, "exidx", 5) == 0)
23736 return SHT_ARM_EXIDX;
23737
23738 return -1;
23739 }
23740 \f
23741 /* Code to deal with unwinding tables. */
23742
23743 static void add_unwind_adjustsp (offsetT);
23744
23745 /* Generate any deferred unwind frame offset. */
23746
23747 static void
23748 flush_pending_unwind (void)
23749 {
23750 offsetT offset;
23751
23752 offset = unwind.pending_offset;
23753 unwind.pending_offset = 0;
23754 if (offset != 0)
23755 add_unwind_adjustsp (offset);
23756 }
23757
23758 /* Add an opcode to this list for this function. Two-byte opcodes should
23759 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
23760 order. */
23761
23762 static void
23763 add_unwind_opcode (valueT op, int length)
23764 {
23765 /* Add any deferred stack adjustment. */
23766 if (unwind.pending_offset)
23767 flush_pending_unwind ();
23768
23769 unwind.sp_restored = 0;
23770
23771 if (unwind.opcode_count + length > unwind.opcode_alloc)
23772 {
23773 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
23774 if (unwind.opcodes)
23775 unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
23776 unwind.opcode_alloc);
23777 else
23778 unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
23779 }
23780 while (length > 0)
23781 {
23782 length--;
23783 unwind.opcodes[unwind.opcode_count] = op & 0xff;
23784 op >>= 8;
23785 unwind.opcode_count++;
23786 }
23787 }
23788
23789 /* Add unwind opcodes to adjust the stack pointer. */
23790
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes
   (positive means sp was decremented and must be popped back).  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      /* A zero uleb128 still needs one byte emitted.  */
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  /* Set the continuation bit on all but the last byte.  */
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  Bytes go in back-to-front because the opcode
	 list is built in reverse order.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit 0x7f ("sp -= 0x200 + 4" steps) until
	 the remainder fits a single short opcode with bit 6 set.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
23850
23851 /* Finish the list of unwind opcodes for this function. */
23852
23853 static void
23854 finish_unwind_opcodes (void)
23855 {
23856 valueT op;
23857
23858 if (unwind.fp_used)
23859 {
23860 /* Adjust sp as necessary. */
23861 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
23862 flush_pending_unwind ();
23863
23864 /* After restoring sp from the frame pointer. */
23865 op = 0x90 | unwind.fp_reg;
23866 add_unwind_opcode (op, 1);
23867 }
23868 else
23869 flush_pending_unwind ();
23870 }
23871
23872
23873 /* Start an exception table entry. If idx is nonzero this is an index table
23874 entry. */
23875
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry.  Switches to the unwind section matching TEXT_SEG, creating it
   if necessary, and links index tables back to the text section.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  /* Index tables go in .ARM.exidx*, unwind data in .ARM.extab*.  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* The unwind section name is the prefix plus the text section name,
     with plain ".text" contributing no suffix.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      /* Link-once text gets the link-once unwind prefix, keeping only
	 the part of the name after ".gnu.linkonce.t.".  */
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, 0, flags, 0, group_name,
			  linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
23940
23941
23942 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
23943 personality routine data. Returns zero, or the index table value for
23944 an inline entry. */
23945
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  /* Switch to the matching unwind-info section.  */
  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  /* Routine 0 only holds three opcode bytes inline; fall back
	     to routine 1 for longer sequences.  */
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      /* Pack the opcodes MSB-first, reversing the
		 reverse-order list.  */
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round the byte count up to whole words; the count field is only
     one byte wide.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      /* Flush each filled word before starting the next.  */
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
24110
24111
24112 /* Initialize the DWARF-2 unwind information for this procedure. */
24113
void
tc_arm_frame_initial_instructions (void)
{
  /* On entry to a function the CFA is the stack pointer with no
     offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
24119 #endif /* OBJ_ELF */
24120
24121 /* Convert REGNAME to a DWARF-2 register number. */
24122
24123 int
24124 tc_arm_regname_to_dw2regnum (char *regname)
24125 {
24126 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
24127 if (reg != FAIL)
24128 return reg;
24129
24130 /* PR 16694: Allow VFP registers as well. */
24131 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
24132 if (reg != FAIL)
24133 return 64 + reg;
24134
24135 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
24136 if (reg != FAIL)
24137 return reg + 256;
24138
24139 return FAIL;
24140 }
24141
24142 #ifdef TE_PE
24143 void
24144 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
24145 {
24146 expressionS exp;
24147
24148 exp.X_op = O_secrel;
24149 exp.X_add_symbol = symbol;
24150 exp.X_add_number = 0;
24151 emit_expr (&exp, size);
24152 }
24153 #endif
24154
24155 /* MD interface: Symbol and relocation handling. */
24156
24157 /* Return the address within the segment that a PC-relative fixup is
24158 relative to. For ARM, PC-relative fixups applied to instructions
24159 are generally relative to the location of the fixup plus 8 bytes.
24160 Thumb branches are offset by 4, and Thumb loads relative to PC
24161 require special handling. */
24162
24163 long
24164 md_pcrel_from_section (fixS * fixP, segT seg)
24165 {
24166 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
24167
24168 /* If this is pc-relative and we are going to emit a relocation
24169 then we just want to put out any pipeline compensation that the linker
24170 will need. Otherwise we want to use the calculated base.
24171 For WinCE we skip the bias for externals as well, since this
24172 is how the MS ARM-CE assembler behaves and we want to be compatible. */
24173 if (fixP->fx_pcrel
24174 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
24175 || (arm_force_relocation (fixP)
24176 #ifdef TE_WINCE
24177 && !S_IS_EXTERNAL (fixP->fx_addsy)
24178 #endif
24179 )))
24180 base = 0;
24181
24182
24183 switch (fixP->fx_r_type)
24184 {
24185 /* PC relative addressing on the Thumb is slightly odd as the
24186 bottom two bits of the PC are forced to zero for the
24187 calculation. This happens *after* application of the
24188 pipeline offset. However, Thumb adrl already adjusts for
24189 this, so we need not do it again. */
24190 case BFD_RELOC_ARM_THUMB_ADD:
24191 return base & ~3;
24192
24193 case BFD_RELOC_ARM_THUMB_OFFSET:
24194 case BFD_RELOC_ARM_T32_OFFSET_IMM:
24195 case BFD_RELOC_ARM_T32_ADD_PC12:
24196 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
24197 return (base + 4) & ~3;
24198
24199 /* Thumb branches are simply offset by +4. */
24200 case BFD_RELOC_THUMB_PCREL_BRANCH5:
24201 case BFD_RELOC_THUMB_PCREL_BRANCH7:
24202 case BFD_RELOC_THUMB_PCREL_BRANCH9:
24203 case BFD_RELOC_THUMB_PCREL_BRANCH12:
24204 case BFD_RELOC_THUMB_PCREL_BRANCH20:
24205 case BFD_RELOC_THUMB_PCREL_BRANCH25:
24206 case BFD_RELOC_THUMB_PCREL_BFCSEL:
24207 case BFD_RELOC_ARM_THUMB_BF17:
24208 case BFD_RELOC_ARM_THUMB_BF19:
24209 case BFD_RELOC_ARM_THUMB_BF13:
24210 case BFD_RELOC_ARM_THUMB_LOOP12:
24211 return base + 4;
24212
24213 case BFD_RELOC_THUMB_PCREL_BRANCH23:
24214 if (fixP->fx_addsy
24215 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24216 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24217 && ARM_IS_FUNC (fixP->fx_addsy)
24218 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
24219 base = fixP->fx_where + fixP->fx_frag->fr_address;
24220 return base + 4;
24221
24222 /* BLX is like branches above, but forces the low two bits of PC to
24223 zero. */
24224 case BFD_RELOC_THUMB_PCREL_BLX:
24225 if (fixP->fx_addsy
24226 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24227 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24228 && THUMB_IS_FUNC (fixP->fx_addsy)
24229 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
24230 base = fixP->fx_where + fixP->fx_frag->fr_address;
24231 return (base + 4) & ~3;
24232
24233 /* ARM mode branches are offset by +8. However, the Windows CE
24234 loader expects the relocation not to take this into account. */
24235 case BFD_RELOC_ARM_PCREL_BLX:
24236 if (fixP->fx_addsy
24237 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24238 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24239 && ARM_IS_FUNC (fixP->fx_addsy)
24240 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
24241 base = fixP->fx_where + fixP->fx_frag->fr_address;
24242 return base + 8;
24243
24244 case BFD_RELOC_ARM_PCREL_CALL:
24245 if (fixP->fx_addsy
24246 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24247 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24248 && THUMB_IS_FUNC (fixP->fx_addsy)
24249 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
24250 base = fixP->fx_where + fixP->fx_frag->fr_address;
24251 return base + 8;
24252
24253 case BFD_RELOC_ARM_PCREL_BRANCH:
24254 case BFD_RELOC_ARM_PCREL_JUMP:
24255 case BFD_RELOC_ARM_PLT32:
24256 #ifdef TE_WINCE
24257 /* When handling fixups immediately, because we have already
24258 discovered the value of a symbol, or the address of the frag involved
24259 we must account for the offset by +8, as the OS loader will never see the reloc.
24260 see fixup_segment() in write.c
24261 The S_IS_EXTERNAL test handles the case of global symbols.
24262 Those need the calculated base, not just the pipe compensation the linker will need. */
24263 if (fixP->fx_pcrel
24264 && fixP->fx_addsy != NULL
24265 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24266 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
24267 return base + 8;
24268 return base;
24269 #else
24270 return base + 8;
24271 #endif
24272
24273
24274 /* ARM mode loads relative to PC are also offset by +8. Unlike
24275 branches, the Windows CE loader *does* expect the relocation
24276 to take this into account. */
24277 case BFD_RELOC_ARM_OFFSET_IMM:
24278 case BFD_RELOC_ARM_OFFSET_IMM8:
24279 case BFD_RELOC_ARM_HWLITERAL:
24280 case BFD_RELOC_ARM_LITERAL:
24281 case BFD_RELOC_ARM_CP_OFF_IMM:
24282 return base + 8;
24283
24284
24285 /* Other PC-relative relocations are un-offset. */
24286 default:
24287 return base;
24288 }
24289 }
24290
/* When TRUE (the default), warn about symbol assignments that shadow an ARM
   instruction mnemonic (see arm_tc_equal_in_insn below).  Presumably cleared
   by a command-line option elsewhere in this file — confirm against
   md_parse_option.  */
static bfd_boolean flag_warn_syms = TRUE;
24292
24293 bfd_boolean
24294 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
24295 {
24296 /* PR 18347 - Warn if the user attempts to create a symbol with the same
24297 name as an ARM instruction. Whilst strictly speaking it is allowed, it
24298 does mean that the resulting code might be very confusing to the reader.
24299 Also this warning can be triggered if the user omits an operand before
24300 an immediate address, eg:
24301
24302 LDR =foo
24303
24304 GAS treats this as an assignment of the value of the symbol foo to a
24305 symbol LDR, and so (without this code) it will not issue any kind of
24306 warning or error message.
24307
24308 Note - ARM instructions are case-insensitive but the strings in the hash
24309 table are all stored in lower case, so we must first ensure that name is
24310 lower case too. */
24311 if (flag_warn_syms && arm_ops_hsh)
24312 {
24313 char * nbuf = strdup (name);
24314 char * p;
24315
24316 for (p = nbuf; *p; p++)
24317 *p = TOLOWER (*p);
24318 if (hash_find (arm_ops_hsh, nbuf) != NULL)
24319 {
24320 static struct hash_control * already_warned = NULL;
24321
24322 if (already_warned == NULL)
24323 already_warned = hash_new ();
24324 /* Only warn about the symbol once. To keep the code
24325 simple we let hash_insert do the lookup for us. */
24326 if (hash_insert (already_warned, nbuf, NULL) == NULL)
24327 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
24328 }
24329 else
24330 free (nbuf);
24331 }
24332
24333 return FALSE;
24334 }
24335
24336 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
24337 Otherwise we have no need to default values of symbols. */
24338
24339 symbolS *
24340 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
24341 {
24342 #ifdef OBJ_ELF
24343 if (name[0] == '_' && name[1] == 'G'
24344 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
24345 {
24346 if (!GOT_symbol)
24347 {
24348 if (symbol_find (name))
24349 as_bad (_("GOT already in the symbol table"));
24350
24351 GOT_symbol = symbol_new (name, undefined_section,
24352 (valueT) 0, & zero_address_frag);
24353 }
24354
24355 return GOT_symbol;
24356 }
24357 #endif
24358
24359 return NULL;
24360 }
24361
24362 /* Subroutine of md_apply_fix. Check to see if an immediate can be
24363 computed as two separate immediate values, added together. We
24364 already know that this value cannot be computed by just one ARM
24365 instruction. */
24366
static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* An ARM modified immediate is an 8-bit value rotated right by an even
     amount; the encoding stores (rotate count / 2) at bit 8, which is why
     the even loop counter I is merged in with "<< 7" below.  Try each even
     left-rotation of VAL, looking for a rotation under which VAL splits
     into exactly two byte-sized fields.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	if (a & 0xff00)
	  {
	    /* A second byte sits at bits 8-15: only usable if nothing is
	       set above bit 15.  */
	    if (a & ~ 0xffff)
	      continue;
	    /* That byte corresponds to a rotate-right count of I + 24
	       in the original value.  */
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    /* Second byte at bits 16-23; bits 24-31 must be clear.  */
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    /* Only remaining possibility: the second byte occupies the
	       top quarter of the rotated value.  */
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	/* Return the low byte together with its rotation field; the
	   caller emits this as the first instruction's immediate and
	   *HIGHPART as the second's.  */
	return (a & 0xff) | (i << 7);
      }

  /* VAL cannot be expressed as the sum of two modified immediates.  */
  return FAIL;
}
24400
24401 static int
24402 validate_offset_imm (unsigned int val, int hwse)
24403 {
24404 if ((hwse && val > 255) || val > 4095)
24405 return FAIL;
24406 return val;
24407 }
24408
24409 /* Subroutine of md_apply_fix. Do those data_ops which can take a
24410 negative immediate constant by altering the instruction. A bit of
24411 a hack really.
24412 MOV <-> MVN
24413 AND <-> BIC
24414 ADC <-> SBC
24415 by inverting the second operand, and
24416 ADD <-> SUB
24417 CMP <-> CMN
24418 by negating the second operand. */
24419
24420 static int
24421 negate_data_op (unsigned long * instruction,
24422 unsigned long value)
24423 {
24424 int op, new_inst;
24425 unsigned long negated, inverted;
24426
24427 negated = encode_arm_immediate (-value);
24428 inverted = encode_arm_immediate (~value);
24429
24430 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
24431 switch (op)
24432 {
24433 /* First negates. */
24434 case OPCODE_SUB: /* ADD <-> SUB */
24435 new_inst = OPCODE_ADD;
24436 value = negated;
24437 break;
24438
24439 case OPCODE_ADD:
24440 new_inst = OPCODE_SUB;
24441 value = negated;
24442 break;
24443
24444 case OPCODE_CMP: /* CMP <-> CMN */
24445 new_inst = OPCODE_CMN;
24446 value = negated;
24447 break;
24448
24449 case OPCODE_CMN:
24450 new_inst = OPCODE_CMP;
24451 value = negated;
24452 break;
24453
24454 /* Now Inverted ops. */
24455 case OPCODE_MOV: /* MOV <-> MVN */
24456 new_inst = OPCODE_MVN;
24457 value = inverted;
24458 break;
24459
24460 case OPCODE_MVN:
24461 new_inst = OPCODE_MOV;
24462 value = inverted;
24463 break;
24464
24465 case OPCODE_AND: /* AND <-> BIC */
24466 new_inst = OPCODE_BIC;
24467 value = inverted;
24468 break;
24469
24470 case OPCODE_BIC:
24471 new_inst = OPCODE_AND;
24472 value = inverted;
24473 break;
24474
24475 case OPCODE_ADC: /* ADC <-> SBC */
24476 new_inst = OPCODE_SBC;
24477 value = inverted;
24478 break;
24479
24480 case OPCODE_SBC:
24481 new_inst = OPCODE_ADC;
24482 value = inverted;
24483 break;
24484
24485 /* We cannot do anything. */
24486 default:
24487 return FAIL;
24488 }
24489
24490 if (value == (unsigned) FAIL)
24491 return FAIL;
24492
24493 *instruction &= OPCODE_MASK;
24494 *instruction |= new_inst << DATA_OP_SHIFT;
24495 return value;
24496 }
24497
24498 /* Like negate_data_op, but for Thumb-2. */
24499
24500 static unsigned int
24501 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
24502 {
24503 int op, new_inst;
24504 int rd;
24505 unsigned int negated, inverted;
24506
24507 negated = encode_thumb32_immediate (-value);
24508 inverted = encode_thumb32_immediate (~value);
24509
24510 rd = (*instruction >> 8) & 0xf;
24511 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
24512 switch (op)
24513 {
24514 /* ADD <-> SUB. Includes CMP <-> CMN. */
24515 case T2_OPCODE_SUB:
24516 new_inst = T2_OPCODE_ADD;
24517 value = negated;
24518 break;
24519
24520 case T2_OPCODE_ADD:
24521 new_inst = T2_OPCODE_SUB;
24522 value = negated;
24523 break;
24524
24525 /* ORR <-> ORN. Includes MOV <-> MVN. */
24526 case T2_OPCODE_ORR:
24527 new_inst = T2_OPCODE_ORN;
24528 value = inverted;
24529 break;
24530
24531 case T2_OPCODE_ORN:
24532 new_inst = T2_OPCODE_ORR;
24533 value = inverted;
24534 break;
24535
24536 /* AND <-> BIC. TST has no inverted equivalent. */
24537 case T2_OPCODE_AND:
24538 new_inst = T2_OPCODE_BIC;
24539 if (rd == 15)
24540 value = FAIL;
24541 else
24542 value = inverted;
24543 break;
24544
24545 case T2_OPCODE_BIC:
24546 new_inst = T2_OPCODE_AND;
24547 value = inverted;
24548 break;
24549
24550 /* ADC <-> SBC */
24551 case T2_OPCODE_ADC:
24552 new_inst = T2_OPCODE_SBC;
24553 value = inverted;
24554 break;
24555
24556 case T2_OPCODE_SBC:
24557 new_inst = T2_OPCODE_ADC;
24558 value = inverted;
24559 break;
24560
24561 /* We cannot do anything. */
24562 default:
24563 return FAIL;
24564 }
24565
24566 if (value == (unsigned int)FAIL)
24567 return FAIL;
24568
24569 *instruction &= T2_OPCODE_MASK;
24570 *instruction |= new_inst << T2_DATA_OP_SHIFT;
24571 return value;
24572 }
24573
24574 /* Read a 32-bit thumb instruction from buf. */
24575
24576 static unsigned long
24577 get_thumb32_insn (char * buf)
24578 {
24579 unsigned long insn;
24580 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
24581 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
24582
24583 return insn;
24584 }
24585
24586 /* We usually want to set the low bit on the address of thumb function
24587 symbols. In particular .word foo - . should have the low bit set.
24588 Generic code tries to fold the difference of two symbols to
24589 a constant. Prevent this and force a relocation when the first symbols
24590 is a thumb function. */
24591
24592 bfd_boolean
24593 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
24594 {
24595 if (op == O_subtract
24596 && l->X_op == O_symbol
24597 && r->X_op == O_symbol
24598 && THUMB_IS_FUNC (l->X_add_symbol))
24599 {
24600 l->X_op = O_subtract;
24601 l->X_op_symbol = r->X_add_symbol;
24602 l->X_add_number -= r->X_add_number;
24603 return TRUE;
24604 }
24605
24606 /* Process as normal. */
24607 return FALSE;
24608 }
24609
24610 /* Encode Thumb2 unconditional branches and calls. The encoding
24611 for the 2 are identical for the immediate values. */
24612
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
  /* Positions of the J1 and J2 bits in the second halfword.  */
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split the branch offset into the T32 B/BL immediate fields:
     sign bit S, I1/I2, imm10 (HI) and imm11 (LO).  Bit 0 of VALUE is
     discarded, as Thumb branch targets are halfword aligned.  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  /* Read the two halfwords already emitted and merge the fields in.  */
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* The instruction stores J1 = I1 EOR (NOT S) and J2 = I2 EOR (NOT S);
     the final XOR with T2I1I2MASK supplies the NOT.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
24634
24635 void
24636 md_apply_fix (fixS * fixP,
24637 valueT * valP,
24638 segT seg)
24639 {
24640 offsetT value = * valP;
24641 offsetT newval;
24642 unsigned int newimm;
24643 unsigned long temp;
24644 int sign;
24645 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
24646
24647 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
24648
24649 /* Note whether this will delete the relocation. */
24650
24651 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
24652 fixP->fx_done = 1;
24653
24654 /* On a 64-bit host, silently truncate 'value' to 32 bits for
24655 consistency with the behaviour on 32-bit hosts. Remember value
24656 for emit_reloc. */
24657 value &= 0xffffffff;
24658 value ^= 0x80000000;
24659 value -= 0x80000000;
24660
24661 *valP = value;
24662 fixP->fx_addnumber = value;
24663
24664 /* Same treatment for fixP->fx_offset. */
24665 fixP->fx_offset &= 0xffffffff;
24666 fixP->fx_offset ^= 0x80000000;
24667 fixP->fx_offset -= 0x80000000;
24668
24669 switch (fixP->fx_r_type)
24670 {
24671 case BFD_RELOC_NONE:
24672 /* This will need to go in the object file. */
24673 fixP->fx_done = 0;
24674 break;
24675
24676 case BFD_RELOC_ARM_IMMEDIATE:
24677 /* We claim that this fixup has been processed here,
24678 even if in fact we generate an error because we do
24679 not have a reloc for it, so tc_gen_reloc will reject it. */
24680 fixP->fx_done = 1;
24681
24682 if (fixP->fx_addsy)
24683 {
24684 const char *msg = 0;
24685
24686 if (! S_IS_DEFINED (fixP->fx_addsy))
24687 msg = _("undefined symbol %s used as an immediate value");
24688 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
24689 msg = _("symbol %s is in a different section");
24690 else if (S_IS_WEAK (fixP->fx_addsy))
24691 msg = _("symbol %s is weak and may be overridden later");
24692
24693 if (msg)
24694 {
24695 as_bad_where (fixP->fx_file, fixP->fx_line,
24696 msg, S_GET_NAME (fixP->fx_addsy));
24697 break;
24698 }
24699 }
24700
24701 temp = md_chars_to_number (buf, INSN_SIZE);
24702
24703 /* If the offset is negative, we should use encoding A2 for ADR. */
24704 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
24705 newimm = negate_data_op (&temp, value);
24706 else
24707 {
24708 newimm = encode_arm_immediate (value);
24709
24710 /* If the instruction will fail, see if we can fix things up by
24711 changing the opcode. */
24712 if (newimm == (unsigned int) FAIL)
24713 newimm = negate_data_op (&temp, value);
24714 /* MOV accepts both ARM modified immediate (A1 encoding) and
24715 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
24716 When disassembling, MOV is preferred when there is no encoding
24717 overlap. */
24718 if (newimm == (unsigned int) FAIL
24719 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
24720 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
24721 && !((temp >> SBIT_SHIFT) & 0x1)
24722 && value >= 0 && value <= 0xffff)
24723 {
24724 /* Clear bits[23:20] to change encoding from A1 to A2. */
24725 temp &= 0xff0fffff;
24726 /* Encoding high 4bits imm. Code below will encode the remaining
24727 low 12bits. */
24728 temp |= (value & 0x0000f000) << 4;
24729 newimm = value & 0x00000fff;
24730 }
24731 }
24732
24733 if (newimm == (unsigned int) FAIL)
24734 {
24735 as_bad_where (fixP->fx_file, fixP->fx_line,
24736 _("invalid constant (%lx) after fixup"),
24737 (unsigned long) value);
24738 break;
24739 }
24740
24741 newimm |= (temp & 0xfffff000);
24742 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
24743 break;
24744
24745 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
24746 {
24747 unsigned int highpart = 0;
24748 unsigned int newinsn = 0xe1a00000; /* nop. */
24749
24750 if (fixP->fx_addsy)
24751 {
24752 const char *msg = 0;
24753
24754 if (! S_IS_DEFINED (fixP->fx_addsy))
24755 msg = _("undefined symbol %s used as an immediate value");
24756 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
24757 msg = _("symbol %s is in a different section");
24758 else if (S_IS_WEAK (fixP->fx_addsy))
24759 msg = _("symbol %s is weak and may be overridden later");
24760
24761 if (msg)
24762 {
24763 as_bad_where (fixP->fx_file, fixP->fx_line,
24764 msg, S_GET_NAME (fixP->fx_addsy));
24765 break;
24766 }
24767 }
24768
24769 newimm = encode_arm_immediate (value);
24770 temp = md_chars_to_number (buf, INSN_SIZE);
24771
24772 /* If the instruction will fail, see if we can fix things up by
24773 changing the opcode. */
24774 if (newimm == (unsigned int) FAIL
24775 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
24776 {
24777 /* No ? OK - try using two ADD instructions to generate
24778 the value. */
24779 newimm = validate_immediate_twopart (value, & highpart);
24780
24781 /* Yes - then make sure that the second instruction is
24782 also an add. */
24783 if (newimm != (unsigned int) FAIL)
24784 newinsn = temp;
24785 /* Still No ? Try using a negated value. */
24786 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
24787 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
24788 /* Otherwise - give up. */
24789 else
24790 {
24791 as_bad_where (fixP->fx_file, fixP->fx_line,
24792 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
24793 (long) value);
24794 break;
24795 }
24796
24797 /* Replace the first operand in the 2nd instruction (which
24798 is the PC) with the destination register. We have
24799 already added in the PC in the first instruction and we
24800 do not want to do it again. */
24801 newinsn &= ~ 0xf0000;
24802 newinsn |= ((newinsn & 0x0f000) << 4);
24803 }
24804
24805 newimm |= (temp & 0xfffff000);
24806 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
24807
24808 highpart |= (newinsn & 0xfffff000);
24809 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
24810 }
24811 break;
24812
24813 case BFD_RELOC_ARM_OFFSET_IMM:
24814 if (!fixP->fx_done && seg->use_rela_p)
24815 value = 0;
24816 /* Fall through. */
24817
24818 case BFD_RELOC_ARM_LITERAL:
24819 sign = value > 0;
24820
24821 if (value < 0)
24822 value = - value;
24823
24824 if (validate_offset_imm (value, 0) == FAIL)
24825 {
24826 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
24827 as_bad_where (fixP->fx_file, fixP->fx_line,
24828 _("invalid literal constant: pool needs to be closer"));
24829 else
24830 as_bad_where (fixP->fx_file, fixP->fx_line,
24831 _("bad immediate value for offset (%ld)"),
24832 (long) value);
24833 break;
24834 }
24835
24836 newval = md_chars_to_number (buf, INSN_SIZE);
24837 if (value == 0)
24838 newval &= 0xfffff000;
24839 else
24840 {
24841 newval &= 0xff7ff000;
24842 newval |= value | (sign ? INDEX_UP : 0);
24843 }
24844 md_number_to_chars (buf, newval, INSN_SIZE);
24845 break;
24846
24847 case BFD_RELOC_ARM_OFFSET_IMM8:
24848 case BFD_RELOC_ARM_HWLITERAL:
24849 sign = value > 0;
24850
24851 if (value < 0)
24852 value = - value;
24853
24854 if (validate_offset_imm (value, 1) == FAIL)
24855 {
24856 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
24857 as_bad_where (fixP->fx_file, fixP->fx_line,
24858 _("invalid literal constant: pool needs to be closer"));
24859 else
24860 as_bad_where (fixP->fx_file, fixP->fx_line,
24861 _("bad immediate value for 8-bit offset (%ld)"),
24862 (long) value);
24863 break;
24864 }
24865
24866 newval = md_chars_to_number (buf, INSN_SIZE);
24867 if (value == 0)
24868 newval &= 0xfffff0f0;
24869 else
24870 {
24871 newval &= 0xff7ff0f0;
24872 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
24873 }
24874 md_number_to_chars (buf, newval, INSN_SIZE);
24875 break;
24876
24877 case BFD_RELOC_ARM_T32_OFFSET_U8:
24878 if (value < 0 || value > 1020 || value % 4 != 0)
24879 as_bad_where (fixP->fx_file, fixP->fx_line,
24880 _("bad immediate value for offset (%ld)"), (long) value);
24881 value /= 4;
24882
24883 newval = md_chars_to_number (buf+2, THUMB_SIZE);
24884 newval |= value;
24885 md_number_to_chars (buf+2, newval, THUMB_SIZE);
24886 break;
24887
24888 case BFD_RELOC_ARM_T32_OFFSET_IMM:
24889 /* This is a complicated relocation used for all varieties of Thumb32
24890 load/store instruction with immediate offset:
24891
24892 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
24893 *4, optional writeback(W)
24894 (doubleword load/store)
24895
24896 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
24897 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
24898 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
24899 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
24900 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
24901
24902 Uppercase letters indicate bits that are already encoded at
24903 this point. Lowercase letters are our problem. For the
24904 second block of instructions, the secondary opcode nybble
24905 (bits 8..11) is present, and bit 23 is zero, even if this is
24906 a PC-relative operation. */
24907 newval = md_chars_to_number (buf, THUMB_SIZE);
24908 newval <<= 16;
24909 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
24910
24911 if ((newval & 0xf0000000) == 0xe0000000)
24912 {
24913 /* Doubleword load/store: 8-bit offset, scaled by 4. */
24914 if (value >= 0)
24915 newval |= (1 << 23);
24916 else
24917 value = -value;
24918 if (value % 4 != 0)
24919 {
24920 as_bad_where (fixP->fx_file, fixP->fx_line,
24921 _("offset not a multiple of 4"));
24922 break;
24923 }
24924 value /= 4;
24925 if (value > 0xff)
24926 {
24927 as_bad_where (fixP->fx_file, fixP->fx_line,
24928 _("offset out of range"));
24929 break;
24930 }
24931 newval &= ~0xff;
24932 }
24933 else if ((newval & 0x000f0000) == 0x000f0000)
24934 {
24935 /* PC-relative, 12-bit offset. */
24936 if (value >= 0)
24937 newval |= (1 << 23);
24938 else
24939 value = -value;
24940 if (value > 0xfff)
24941 {
24942 as_bad_where (fixP->fx_file, fixP->fx_line,
24943 _("offset out of range"));
24944 break;
24945 }
24946 newval &= ~0xfff;
24947 }
24948 else if ((newval & 0x00000100) == 0x00000100)
24949 {
24950 /* Writeback: 8-bit, +/- offset. */
24951 if (value >= 0)
24952 newval |= (1 << 9);
24953 else
24954 value = -value;
24955 if (value > 0xff)
24956 {
24957 as_bad_where (fixP->fx_file, fixP->fx_line,
24958 _("offset out of range"));
24959 break;
24960 }
24961 newval &= ~0xff;
24962 }
24963 else if ((newval & 0x00000f00) == 0x00000e00)
24964 {
24965 /* T-instruction: positive 8-bit offset. */
24966 if (value < 0 || value > 0xff)
24967 {
24968 as_bad_where (fixP->fx_file, fixP->fx_line,
24969 _("offset out of range"));
24970 break;
24971 }
24972 newval &= ~0xff;
24973 newval |= value;
24974 }
24975 else
24976 {
24977 /* Positive 12-bit or negative 8-bit offset. */
24978 int limit;
24979 if (value >= 0)
24980 {
24981 newval |= (1 << 23);
24982 limit = 0xfff;
24983 }
24984 else
24985 {
24986 value = -value;
24987 limit = 0xff;
24988 }
24989 if (value > limit)
24990 {
24991 as_bad_where (fixP->fx_file, fixP->fx_line,
24992 _("offset out of range"));
24993 break;
24994 }
24995 newval &= ~limit;
24996 }
24997
24998 newval |= value;
24999 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
25000 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
25001 break;
25002
25003 case BFD_RELOC_ARM_SHIFT_IMM:
25004 newval = md_chars_to_number (buf, INSN_SIZE);
25005 if (((unsigned long) value) > 32
25006 || (value == 32
25007 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
25008 {
25009 as_bad_where (fixP->fx_file, fixP->fx_line,
25010 _("shift expression is too large"));
25011 break;
25012 }
25013
25014 if (value == 0)
25015 /* Shifts of zero must be done as lsl. */
25016 newval &= ~0x60;
25017 else if (value == 32)
25018 value = 0;
25019 newval &= 0xfffff07f;
25020 newval |= (value & 0x1f) << 7;
25021 md_number_to_chars (buf, newval, INSN_SIZE);
25022 break;
25023
25024 case BFD_RELOC_ARM_T32_IMMEDIATE:
25025 case BFD_RELOC_ARM_T32_ADD_IMM:
25026 case BFD_RELOC_ARM_T32_IMM12:
25027 case BFD_RELOC_ARM_T32_ADD_PC12:
25028 /* We claim that this fixup has been processed here,
25029 even if in fact we generate an error because we do
25030 not have a reloc for it, so tc_gen_reloc will reject it. */
25031 fixP->fx_done = 1;
25032
25033 if (fixP->fx_addsy
25034 && ! S_IS_DEFINED (fixP->fx_addsy))
25035 {
25036 as_bad_where (fixP->fx_file, fixP->fx_line,
25037 _("undefined symbol %s used as an immediate value"),
25038 S_GET_NAME (fixP->fx_addsy));
25039 break;
25040 }
25041
25042 newval = md_chars_to_number (buf, THUMB_SIZE);
25043 newval <<= 16;
25044 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
25045
25046 newimm = FAIL;
25047 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
25048 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
25049 Thumb2 modified immediate encoding (T2). */
25050 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
25051 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
25052 {
25053 newimm = encode_thumb32_immediate (value);
25054 if (newimm == (unsigned int) FAIL)
25055 newimm = thumb32_negate_data_op (&newval, value);
25056 }
25057 if (newimm == (unsigned int) FAIL)
25058 {
25059 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
25060 {
25061 /* Turn add/sum into addw/subw. */
25062 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
25063 newval = (newval & 0xfeffffff) | 0x02000000;
25064 /* No flat 12-bit imm encoding for addsw/subsw. */
25065 if ((newval & 0x00100000) == 0)
25066 {
25067 /* 12 bit immediate for addw/subw. */
25068 if (value < 0)
25069 {
25070 value = -value;
25071 newval ^= 0x00a00000;
25072 }
25073 if (value > 0xfff)
25074 newimm = (unsigned int) FAIL;
25075 else
25076 newimm = value;
25077 }
25078 }
25079 else
25080 {
25081 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
25082 UINT16 (T3 encoding), MOVW only accepts UINT16. When
25083 disassembling, MOV is preferred when there is no encoding
25084 overlap. */
25085 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
25086 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
25087 but with the Rn field [19:16] set to 1111. */
25088 && (((newval >> 16) & 0xf) == 0xf)
25089 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
25090 && !((newval >> T2_SBIT_SHIFT) & 0x1)
25091 && value >= 0 && value <= 0xffff)
25092 {
25093 /* Toggle bit[25] to change encoding from T2 to T3. */
25094 newval ^= 1 << 25;
25095 /* Clear bits[19:16]. */
25096 newval &= 0xfff0ffff;
25097 /* Encoding high 4bits imm. Code below will encode the
25098 remaining low 12bits. */
25099 newval |= (value & 0x0000f000) << 4;
25100 newimm = value & 0x00000fff;
25101 }
25102 }
25103 }
25104
25105 if (newimm == (unsigned int)FAIL)
25106 {
25107 as_bad_where (fixP->fx_file, fixP->fx_line,
25108 _("invalid constant (%lx) after fixup"),
25109 (unsigned long) value);
25110 break;
25111 }
25112
25113 newval |= (newimm & 0x800) << 15;
25114 newval |= (newimm & 0x700) << 4;
25115 newval |= (newimm & 0x0ff);
25116
25117 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
25118 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
25119 break;
25120
25121 case BFD_RELOC_ARM_SMC:
25122 if (((unsigned long) value) > 0xffff)
25123 as_bad_where (fixP->fx_file, fixP->fx_line,
25124 _("invalid smc expression"));
25125 newval = md_chars_to_number (buf, INSN_SIZE);
25126 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
25127 md_number_to_chars (buf, newval, INSN_SIZE);
25128 break;
25129
25130 case BFD_RELOC_ARM_HVC:
25131 if (((unsigned long) value) > 0xffff)
25132 as_bad_where (fixP->fx_file, fixP->fx_line,
25133 _("invalid hvc expression"));
25134 newval = md_chars_to_number (buf, INSN_SIZE);
25135 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
25136 md_number_to_chars (buf, newval, INSN_SIZE);
25137 break;
25138
25139 case BFD_RELOC_ARM_SWI:
25140 if (fixP->tc_fix_data != 0)
25141 {
25142 if (((unsigned long) value) > 0xff)
25143 as_bad_where (fixP->fx_file, fixP->fx_line,
25144 _("invalid swi expression"));
25145 newval = md_chars_to_number (buf, THUMB_SIZE);
25146 newval |= value;
25147 md_number_to_chars (buf, newval, THUMB_SIZE);
25148 }
25149 else
25150 {
25151 if (((unsigned long) value) > 0x00ffffff)
25152 as_bad_where (fixP->fx_file, fixP->fx_line,
25153 _("invalid swi expression"));
25154 newval = md_chars_to_number (buf, INSN_SIZE);
25155 newval |= value;
25156 md_number_to_chars (buf, newval, INSN_SIZE);
25157 }
25158 break;
25159
25160 case BFD_RELOC_ARM_MULTI:
25161 if (((unsigned long) value) > 0xffff)
25162 as_bad_where (fixP->fx_file, fixP->fx_line,
25163 _("invalid expression in load/store multiple"));
25164 newval = value | md_chars_to_number (buf, INSN_SIZE);
25165 md_number_to_chars (buf, newval, INSN_SIZE);
25166 break;
25167
25168 #ifdef OBJ_ELF
25169 case BFD_RELOC_ARM_PCREL_CALL:
25170
25171 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25172 && fixP->fx_addsy
25173 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25174 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25175 && THUMB_IS_FUNC (fixP->fx_addsy))
25176 /* Flip the bl to blx. This is a simple flip
25177 bit here because we generate PCREL_CALL for
25178 unconditional bls. */
25179 {
25180 newval = md_chars_to_number (buf, INSN_SIZE);
25181 newval = newval | 0x10000000;
25182 md_number_to_chars (buf, newval, INSN_SIZE);
25183 temp = 1;
25184 fixP->fx_done = 1;
25185 }
25186 else
25187 temp = 3;
25188 goto arm_branch_common;
25189
25190 case BFD_RELOC_ARM_PCREL_JUMP:
25191 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25192 && fixP->fx_addsy
25193 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25194 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25195 && THUMB_IS_FUNC (fixP->fx_addsy))
25196 {
25197 /* This would map to a bl<cond>, b<cond>,
25198 b<always> to a Thumb function. We
25199 need to force a relocation for this particular
25200 case. */
25201 newval = md_chars_to_number (buf, INSN_SIZE);
25202 fixP->fx_done = 0;
25203 }
25204 /* Fall through. */
25205
25206 case BFD_RELOC_ARM_PLT32:
25207 #endif
25208 case BFD_RELOC_ARM_PCREL_BRANCH:
25209 temp = 3;
25210 goto arm_branch_common;
25211
25212 case BFD_RELOC_ARM_PCREL_BLX:
25213
25214 temp = 1;
25215 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25216 && fixP->fx_addsy
25217 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25218 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25219 && ARM_IS_FUNC (fixP->fx_addsy))
25220 {
25221 /* Flip the blx to a bl and warn. */
25222 const char *name = S_GET_NAME (fixP->fx_addsy);
25223 newval = 0xeb000000;
25224 as_warn_where (fixP->fx_file, fixP->fx_line,
25225 _("blx to '%s' an ARM ISA state function changed to bl"),
25226 name);
25227 md_number_to_chars (buf, newval, INSN_SIZE);
25228 temp = 3;
25229 fixP->fx_done = 1;
25230 }
25231
25232 #ifdef OBJ_ELF
25233 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
25234 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
25235 #endif
25236
25237 arm_branch_common:
25238 /* We are going to store value (shifted right by two) in the
25239 instruction, in a 24 bit, signed field. Bits 26 through 32 either
25240 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
25241 also be clear. */
25242 if (value & temp)
25243 as_bad_where (fixP->fx_file, fixP->fx_line,
25244 _("misaligned branch destination"));
25245 if ((value & (offsetT)0xfe000000) != (offsetT)0
25246 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
25247 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25248
25249 if (fixP->fx_done || !seg->use_rela_p)
25250 {
25251 newval = md_chars_to_number (buf, INSN_SIZE);
25252 newval |= (value >> 2) & 0x00ffffff;
25253 /* Set the H bit on BLX instructions. */
25254 if (temp == 1)
25255 {
25256 if (value & 2)
25257 newval |= 0x01000000;
25258 else
25259 newval &= ~0x01000000;
25260 }
25261 md_number_to_chars (buf, newval, INSN_SIZE);
25262 }
25263 break;
25264
25265 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
25266 /* CBZ can only branch forward. */
25267
25268 /* Attempts to use CBZ to branch to the next instruction
25269 (which, strictly speaking, are prohibited) will be turned into
25270 no-ops.
25271
25272 FIXME: It may be better to remove the instruction completely and
25273 perform relaxation. */
25274 if (value == -2)
25275 {
 /* NOTE(review): the value read here is overwritten on the next
    line, so this md_chars_to_number call looks redundant — confirm
    there is no intended side effect before removing it.  */
25276 newval = md_chars_to_number (buf, THUMB_SIZE);
25277 newval = 0xbf00; /* NOP encoding T1 */
25278 md_number_to_chars (buf, newval, THUMB_SIZE);
25279 }
25280 else
25281 {
 /* Forward-only target: offset must fit in the unsigned even
    range 0..0x7e covered by i:imm5:'0'.  */
25282 if (value & ~0x7e)
25283 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25284
25285 if (fixP->fx_done || !seg->use_rela_p)
25286 {
 /* Merge the offset into the CBZ/CBNZ encoding: bits 1..5 of
    the offset go to insn bits 3..7 (imm5), offset bit 6 goes
    to insn bit 9 (the "i" bit).  */
25287 newval = md_chars_to_number (buf, THUMB_SIZE);
25288 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
25289 md_number_to_chars (buf, newval, THUMB_SIZE);
25290 }
25291 }
25292 break;
25293
25294 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
 /* Accept only offsets representable as a 9-bit signed value:
    either the bits above bit 7 are all clear or all set.  */
25295 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
25296 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25297
25298 if (fixP->fx_done || !seg->use_rela_p)
25299 {
 /* Store the halfword offset (value/2) in the low 8 bits of the
    16-bit conditional-branch encoding.  */
25300 newval = md_chars_to_number (buf, THUMB_SIZE);
25301 newval |= (value & 0x1ff) >> 1;
25302 md_number_to_chars (buf, newval, THUMB_SIZE);
25303 }
25304 break;
25305
25306 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
 /* Accept only offsets representable as a 12-bit signed value:
    either the bits above bit 10 are all clear or all set.  */
25307 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
25308 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25309
25310 if (fixP->fx_done || !seg->use_rela_p)
25311 {
 /* Store the halfword offset (value/2) in the low 11 bits of the
    16-bit unconditional-branch encoding.  */
25312 newval = md_chars_to_number (buf, THUMB_SIZE);
25313 newval |= (value & 0xfff) >> 1;
25314 md_number_to_chars (buf, newval, THUMB_SIZE);
25315 }
25316 break;
25317
25318 case BFD_RELOC_THUMB_PCREL_BRANCH20:
25319 if (fixP->fx_addsy
25320 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25321 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25322 && ARM_IS_FUNC (fixP->fx_addsy)
25323 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
25324 {
25325 /* Force a relocation for a branch 20 bits wide. */
25326 fixP->fx_done = 0;
25327 }
25328 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
25329 as_bad_where (fixP->fx_file, fixP->fx_line,
25330 _("conditional branch out of range"));
25331
25332 if (fixP->fx_done || !seg->use_rela_p)
25333 {
25334 offsetT newval2;
25335 addressT S, J1, J2, lo, hi;
25336
25337 S = (value & 0x00100000) >> 20;
25338 J2 = (value & 0x00080000) >> 19;
25339 J1 = (value & 0x00040000) >> 18;
25340 hi = (value & 0x0003f000) >> 12;
25341 lo = (value & 0x00000ffe) >> 1;
25342
25343 newval = md_chars_to_number (buf, THUMB_SIZE);
25344 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25345 newval |= (S << 10) | hi;
25346 newval2 |= (J1 << 13) | (J2 << 11) | lo;
25347 md_number_to_chars (buf, newval, THUMB_SIZE);
25348 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
25349 }
25350 break;
25351
25352 case BFD_RELOC_THUMB_PCREL_BLX:
25353 /* If there is a blx from a thumb state function to
25354 another thumb function flip this to a bl and warn
25355 about it. */
25356
25357 if (fixP->fx_addsy
25358 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25359 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25360 && THUMB_IS_FUNC (fixP->fx_addsy))
25361 {
25362 const char *name = S_GET_NAME (fixP->fx_addsy);
25363 as_warn_where (fixP->fx_file, fixP->fx_line,
25364 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
25365 name);
25366 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25367 newval = newval | 0x1000;
25368 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
25369 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
25370 fixP->fx_done = 1;
25371 }
25372
25373
25374 goto thumb_bl_common;
25375
25376 case BFD_RELOC_THUMB_PCREL_BRANCH23:
25377 /* A bl from Thumb state ISA to an internal ARM state function
25378 is converted to a blx. */
25379 if (fixP->fx_addsy
25380 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25381 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25382 && ARM_IS_FUNC (fixP->fx_addsy)
25383 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
25384 {
25385 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25386 newval = newval & ~0x1000;
25387 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
25388 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
25389 fixP->fx_done = 1;
25390 }
25391
25392 thumb_bl_common:
25393
25394 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
25395 /* For a BLX instruction, make sure that the relocation is rounded up
25396 to a word boundary. This follows the semantics of the instruction
25397 which specifies that bit 1 of the target address will come from bit
25398 1 of the base address. */
25399 value = (value + 3) & ~ 3;
25400
25401 #ifdef OBJ_ELF
25402 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
25403 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
25404 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
25405 #endif
25406
25407 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
25408 {
25409 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
25410 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25411 else if ((value & ~0x1ffffff)
25412 && ((value & ~0x1ffffff) != ~0x1ffffff))
25413 as_bad_where (fixP->fx_file, fixP->fx_line,
25414 _("Thumb2 branch out of range"));
25415 }
25416
25417 if (fixP->fx_done || !seg->use_rela_p)
25418 encode_thumb2_b_bl_offset (buf, value);
25419
25420 break;
25421
25422 case BFD_RELOC_THUMB_PCREL_BRANCH25:
25423 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
25424 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25425
25426 if (fixP->fx_done || !seg->use_rela_p)
25427 encode_thumb2_b_bl_offset (buf, value);
25428
25429 break;
25430
 /* Plain one-byte data relocation: write the value directly when
    the fixup is resolved or the target uses REL relocations.  */
25431 case BFD_RELOC_8:
25432 if (fixP->fx_done || !seg->use_rela_p)
25433 *buf = value;
25434 break;
25435
 /* Plain two-byte data relocation, same resolution rule.  */
25436 case BFD_RELOC_16:
25437 if (fixP->fx_done || !seg->use_rela_p)
25438 md_number_to_chars (buf, value, 2);
25439 break;
25440
25441 #ifdef OBJ_ELF
25442 case BFD_RELOC_ARM_TLS_CALL:
25443 case BFD_RELOC_ARM_THM_TLS_CALL:
25444 case BFD_RELOC_ARM_TLS_DESCSEQ:
25445 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
25446 case BFD_RELOC_ARM_TLS_GOTDESC:
25447 case BFD_RELOC_ARM_TLS_GD32:
25448 case BFD_RELOC_ARM_TLS_LE32:
25449 case BFD_RELOC_ARM_TLS_IE32:
25450 case BFD_RELOC_ARM_TLS_LDM32:
25451 case BFD_RELOC_ARM_TLS_LDO32:
25452 S_SET_THREAD_LOCAL (fixP->fx_addsy);
25453 break;
25454
25455 /* Same handling as above, but with the arm_fdpic guard. */
25456 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
25457 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
25458 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
25459 if (arm_fdpic)
25460 {
25461 S_SET_THREAD_LOCAL (fixP->fx_addsy);
25462 }
25463 else
25464 {
25465 as_bad_where (fixP->fx_file, fixP->fx_line,
25466 _("Relocation supported only in FDPIC mode"));
25467 }
25468 break;
25469
25470 case BFD_RELOC_ARM_GOT32:
25471 case BFD_RELOC_ARM_GOTOFF:
25472 break;
25473
25474 case BFD_RELOC_ARM_GOT_PREL:
25475 if (fixP->fx_done || !seg->use_rela_p)
25476 md_number_to_chars (buf, value, 4);
25477 break;
25478
25479 case BFD_RELOC_ARM_TARGET2:
25480 /* TARGET2 is not partial-inplace, so we need to write the
25481 addend here for REL targets, because it won't be written out
25482 during reloc processing later. */
25483 if (fixP->fx_done || !seg->use_rela_p)
25484 md_number_to_chars (buf, fixP->fx_offset, 4);
25485 break;
25486
25487 /* Relocations for FDPIC. */
25488 case BFD_RELOC_ARM_GOTFUNCDESC:
25489 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
25490 case BFD_RELOC_ARM_FUNCDESC:
25491 if (arm_fdpic)
25492 {
25493 if (fixP->fx_done || !seg->use_rela_p)
25494 md_number_to_chars (buf, 0, 4);
25495 }
25496 else
25497 {
25498 as_bad_where (fixP->fx_file, fixP->fx_line,
25499 _("Relocation supported only in FDPIC mode"));
25500 }
25501 break;
25502 #endif
25503
25504 case BFD_RELOC_RVA:
25505 case BFD_RELOC_32:
25506 case BFD_RELOC_ARM_TARGET1:
25507 case BFD_RELOC_ARM_ROSEGREL32:
25508 case BFD_RELOC_ARM_SBREL32:
25509 case BFD_RELOC_32_PCREL:
25510 #ifdef TE_PE
25511 case BFD_RELOC_32_SECREL:
25512 #endif
25513 if (fixP->fx_done || !seg->use_rela_p)
25514 #ifdef TE_WINCE
25515 /* For WinCE we only do this for pcrel fixups. */
25516 if (fixP->fx_done || fixP->fx_pcrel)
25517 #endif
25518 md_number_to_chars (buf, value, 4);
25519 break;
25520
25521 #ifdef OBJ_ELF
25522 case BFD_RELOC_ARM_PREL31:
25523 if (fixP->fx_done || !seg->use_rela_p)
25524 {
25525 newval = md_chars_to_number (buf, 4) & 0x80000000;
25526 if ((value ^ (value >> 1)) & 0x40000000)
25527 {
25528 as_bad_where (fixP->fx_file, fixP->fx_line,
25529 _("rel31 relocation overflow"));
25530 }
25531 newval |= value & 0x7fffffff;
25532 md_number_to_chars (buf, newval, 4);
25533 }
25534 break;
25535 #endif
25536
25537 case BFD_RELOC_ARM_CP_OFF_IMM:
25538 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
25539 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM:
25540 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
25541 newval = md_chars_to_number (buf, INSN_SIZE);
25542 else
25543 newval = get_thumb32_insn (buf);
25544 if ((newval & 0x0f200f00) == 0x0d000900)
25545 {
25546 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
25547 has permitted values that are multiples of 2, in the range 0
25548 to 510. */
25549 if (value < -510 || value > 510 || (value & 1))
25550 as_bad_where (fixP->fx_file, fixP->fx_line,
25551 _("co-processor offset out of range"));
25552 }
25553 else if ((newval & 0xfe001f80) == 0xec000f80)
25554 {
25555 if (value < -511 || value > 512 || (value & 3))
25556 as_bad_where (fixP->fx_file, fixP->fx_line,
25557 _("co-processor offset out of range"));
25558 }
25559 else if (value < -1023 || value > 1023 || (value & 3))
25560 as_bad_where (fixP->fx_file, fixP->fx_line,
25561 _("co-processor offset out of range"));
25562 cp_off_common:
25563 sign = value > 0;
25564 if (value < 0)
25565 value = -value;
25566 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
25567 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
25568 newval = md_chars_to_number (buf, INSN_SIZE);
25569 else
25570 newval = get_thumb32_insn (buf);
25571 if (value == 0)
25572 {
25573 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
25574 newval &= 0xffffff80;
25575 else
25576 newval &= 0xffffff00;
25577 }
25578 else
25579 {
25580 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
25581 newval &= 0xff7fff80;
25582 else
25583 newval &= 0xff7fff00;
25584 if ((newval & 0x0f200f00) == 0x0d000900)
25585 {
25586 /* This is a fp16 vstr/vldr.
25587
25588 It requires the immediate offset in the instruction is shifted
25589 left by 1 to be a half-word offset.
25590
25591 Here, left shift by 1 first, and later right shift by 2
25592 should get the right offset. */
25593 value <<= 1;
25594 }
25595 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
25596 }
25597 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
25598 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
25599 md_number_to_chars (buf, newval, INSN_SIZE);
25600 else
25601 put_thumb32_insn (buf, newval);
25602 break;
25603
 /* Coprocessor offset with an implicit scale of 4: the source
    operand is a word count in -255..255.  */
25604 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
25605 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
25606 if (value < -255 || value > 255)
25607 as_bad_where (fixP->fx_file, fixP->fx_line,
25608 _("co-processor offset out of range"));
 /* Convert to a byte offset; the shared cp_off_common path above
    shifts right by 2 again when inserting it into the insn.  */
25609 value *= 4;
25610 goto cp_off_common;
25611
25612 case BFD_RELOC_ARM_THUMB_OFFSET:
25613 newval = md_chars_to_number (buf, THUMB_SIZE);
25614 /* Exactly what ranges, and where the offset is inserted depends
25615 on the type of instruction, we can establish this from the
25616 top 4 bits. */
25617 switch (newval >> 12)
25618 {
25619 case 4: /* PC load. */
25620 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
25621 forced to zero for these loads; md_pcrel_from has already
25622 compensated for this. */
25623 if (value & 3)
25624 as_bad_where (fixP->fx_file, fixP->fx_line,
25625 _("invalid offset, target not word aligned (0x%08lX)"),
25626 (((unsigned long) fixP->fx_frag->fr_address
25627 + (unsigned long) fixP->fx_where) & ~3)
25628 + (unsigned long) value);
25629
25630 if (value & ~0x3fc)
25631 as_bad_where (fixP->fx_file, fixP->fx_line,
25632 _("invalid offset, value too big (0x%08lX)"),
25633 (long) value);
25634
25635 newval |= value >> 2;
25636 break;
25637
25638 case 9: /* SP load/store. */
25639 if (value & ~0x3fc)
25640 as_bad_where (fixP->fx_file, fixP->fx_line,
25641 _("invalid offset, value too big (0x%08lX)"),
25642 (long) value);
25643 newval |= value >> 2;
25644 break;
25645
25646 case 6: /* Word load/store. */
25647 if (value & ~0x7c)
25648 as_bad_where (fixP->fx_file, fixP->fx_line,
25649 _("invalid offset, value too big (0x%08lX)"),
25650 (long) value);
25651 newval |= value << 4; /* 6 - 2. */
25652 break;
25653
25654 case 7: /* Byte load/store. */
25655 if (value & ~0x1f)
25656 as_bad_where (fixP->fx_file, fixP->fx_line,
25657 _("invalid offset, value too big (0x%08lX)"),
25658 (long) value);
25659 newval |= value << 6;
25660 break;
25661
25662 case 8: /* Halfword load/store. */
25663 if (value & ~0x3e)
25664 as_bad_where (fixP->fx_file, fixP->fx_line,
25665 _("invalid offset, value too big (0x%08lX)"),
25666 (long) value);
25667 newval |= value << 5; /* 6 - 1. */
25668 break;
25669
25670 default:
25671 as_bad_where (fixP->fx_file, fixP->fx_line,
25672 "Unable to process relocation for thumb opcode: %lx",
25673 (unsigned long) newval);
25674 break;
25675 }
25676 md_number_to_chars (buf, newval, THUMB_SIZE);
25677 break;
25678
25679 case BFD_RELOC_ARM_THUMB_ADD:
25680 /* This is a complicated relocation, since we use it for all of
25681 the following immediate relocations:
25682
25683 3bit ADD/SUB
25684 8bit ADD/SUB
25685 9bit ADD/SUB SP word-aligned
25686 10bit ADD PC/SP word-aligned
25687
25688 The type of instruction being processed is encoded in the
25689 instruction field:
25690
25691 0x8000 SUB
25692 0x00F0 Rd
25693 0x000F Rs
25694 */
25695 newval = md_chars_to_number (buf, THUMB_SIZE);
25696 {
25697 int rd = (newval >> 4) & 0xf;
25698 int rs = newval & 0xf;
25699 int subtract = !!(newval & 0x8000);
25700
25701 /* Check for HI regs, only very restricted cases allowed:
25702 Adjusting SP, and using PC or SP to get an address. */
25703 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
25704 || (rs > 7 && rs != REG_SP && rs != REG_PC))
25705 as_bad_where (fixP->fx_file, fixP->fx_line,
25706 _("invalid Hi register with immediate"));
25707
25708 /* If value is negative, choose the opposite instruction. */
25709 if (value < 0)
25710 {
25711 value = -value;
25712 subtract = !subtract;
25713 if (value < 0)
25714 as_bad_where (fixP->fx_file, fixP->fx_line,
25715 _("immediate value out of range"));
25716 }
25717
25718 if (rd == REG_SP)
25719 {
25720 if (value & ~0x1fc)
25721 as_bad_where (fixP->fx_file, fixP->fx_line,
25722 _("invalid immediate for stack address calculation"));
25723 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
25724 newval |= value >> 2;
25725 }
25726 else if (rs == REG_PC || rs == REG_SP)
25727 {
25728 /* PR gas/18541. If the addition is for a defined symbol
25729 within range of an ADR instruction then accept it. */
25730 if (subtract
25731 && value == 4
25732 && fixP->fx_addsy != NULL)
25733 {
25734 subtract = 0;
25735
25736 if (! S_IS_DEFINED (fixP->fx_addsy)
25737 || S_GET_SEGMENT (fixP->fx_addsy) != seg
25738 || S_IS_WEAK (fixP->fx_addsy))
25739 {
25740 as_bad_where (fixP->fx_file, fixP->fx_line,
25741 _("address calculation needs a strongly defined nearby symbol"));
25742 }
25743 else
25744 {
25745 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
25746
25747 /* Round up to the next 4-byte boundary. */
25748 if (v & 3)
25749 v = (v + 3) & ~ 3;
25750 else
25751 v += 4;
25752 v = S_GET_VALUE (fixP->fx_addsy) - v;
25753
25754 if (v & ~0x3fc)
25755 {
25756 as_bad_where (fixP->fx_file, fixP->fx_line,
25757 _("symbol too far away"));
25758 }
25759 else
25760 {
25761 fixP->fx_done = 1;
25762 value = v;
25763 }
25764 }
25765 }
25766
25767 if (subtract || value & ~0x3fc)
25768 as_bad_where (fixP->fx_file, fixP->fx_line,
25769 _("invalid immediate for address calculation (value = 0x%08lX)"),
25770 (unsigned long) (subtract ? - value : value));
25771 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
25772 newval |= rd << 8;
25773 newval |= value >> 2;
25774 }
25775 else if (rs == rd)
25776 {
25777 if (value & ~0xff)
25778 as_bad_where (fixP->fx_file, fixP->fx_line,
25779 _("immediate value out of range"));
25780 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
25781 newval |= (rd << 8) | value;
25782 }
25783 else
25784 {
25785 if (value & ~0x7)
25786 as_bad_where (fixP->fx_file, fixP->fx_line,
25787 _("immediate value out of range"));
25788 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
25789 newval |= rd | (rs << 3) | (value << 6);
25790 }
25791 }
25792 md_number_to_chars (buf, newval, THUMB_SIZE);
25793 break;
25794
 /* 8-bit unsigned immediate for Thumb-1 instructions: merged into
    the low byte of the 16-bit encoding.  */
25795 case BFD_RELOC_ARM_THUMB_IMM:
25796 newval = md_chars_to_number (buf, THUMB_SIZE);
25797 if (value < 0 || value > 255)
25798 as_bad_where (fixP->fx_file, fixP->fx_line,
25799 _("invalid immediate: %ld is out of range"),
25800 (long) value);
25801 newval |= value;
25802 md_number_to_chars (buf, newval, THUMB_SIZE);
25803 break;
25804
25805 case BFD_RELOC_ARM_THUMB_SHIFT:
25806 /* 5bit shift value (0..32). LSL cannot take 32. */
 /* Mask keeps opcode (bits 11-15) and Rd/Rs (bits 0-5); clears the
    imm5 field (bits 6-10) so it can be re-inserted below.  */
25807 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
25808 temp = newval & 0xf800;
25809 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
25810 as_bad_where (fixP->fx_file, fixP->fx_line,
25811 _("invalid shift value: %ld"), (long) value);
25812 /* Shifts of zero must be encoded as LSL. */
25813 if (value == 0)
25814 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
25815 /* Shifts of 32 are encoded as zero. */
25816 else if (value == 32)
25817 value = 0;
 /* Insert the shift amount into the imm5 field at bits 6-10.  */
25818 newval |= value << 6;
25819 md_number_to_chars (buf, newval, THUMB_SIZE);
25820 break;
25821
 /* GC-of-vtables bookkeeping relocations: never resolved here —
    mark the fixup undone so they are emitted for the linker.  */
25822 case BFD_RELOC_VTABLE_INHERIT:
25823 case BFD_RELOC_VTABLE_ENTRY:
25824 fixP->fx_done = 0;
25825 return;
25826
25827 case BFD_RELOC_ARM_MOVW:
25828 case BFD_RELOC_ARM_MOVT:
25829 case BFD_RELOC_ARM_THUMB_MOVW:
25830 case BFD_RELOC_ARM_THUMB_MOVT:
25831 if (fixP->fx_done || !seg->use_rela_p)
25832 {
25833 /* REL format relocations are limited to a 16-bit addend. */
25834 if (!fixP->fx_done)
25835 {
25836 if (value < -0x8000 || value > 0x7fff)
25837 as_bad_where (fixP->fx_file, fixP->fx_line,
25838 _("offset out of range"));
25839 }
25840 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
25841 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
25842 {
25843 value >>= 16;
25844 }
25845
25846 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
25847 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
25848 {
25849 newval = get_thumb32_insn (buf);
25850 newval &= 0xfbf08f00;
25851 newval |= (value & 0xf000) << 4;
25852 newval |= (value & 0x0800) << 15;
25853 newval |= (value & 0x0700) << 4;
25854 newval |= (value & 0x00ff);
25855 put_thumb32_insn (buf, newval);
25856 }
25857 else
25858 {
25859 newval = md_chars_to_number (buf, 4);
25860 newval &= 0xfff0f000;
25861 newval |= value & 0x0fff;
25862 newval |= (value & 0xf000) << 4;
25863 md_number_to_chars (buf, newval, 4);
25864 }
25865 }
25866 return;
25867
25868 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
25869 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
25870 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
25871 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
25872 gas_assert (!fixP->fx_done);
25873 {
25874 bfd_vma insn;
25875 bfd_boolean is_mov;
25876 bfd_vma encoded_addend = value;
25877
25878 /* Check that addend can be encoded in instruction. */
25879 if (!seg->use_rela_p && (value < 0 || value > 255))
25880 as_bad_where (fixP->fx_file, fixP->fx_line,
25881 _("the offset 0x%08lX is not representable"),
25882 (unsigned long) encoded_addend);
25883
25884 /* Extract the instruction. */
25885 insn = md_chars_to_number (buf, THUMB_SIZE);
25886 is_mov = (insn & 0xf800) == 0x2000;
25887
25888 /* Encode insn. */
25889 if (is_mov)
25890 {
25891 if (!seg->use_rela_p)
25892 insn |= encoded_addend;
25893 }
25894 else
25895 {
25896 int rd, rs;
25897
25898 /* Extract the instruction. */
25899 /* Encoding is the following
25900 0x8000 SUB
25901 0x00F0 Rd
25902 0x000F Rs
25903 */
25904 /* The following conditions must be true :
25905 - ADD
25906 - Rd == Rs
25907 - Rd <= 7
25908 */
25909 rd = (insn >> 4) & 0xf;
25910 rs = insn & 0xf;
25911 if ((insn & 0x8000) || (rd != rs) || rd > 7)
25912 as_bad_where (fixP->fx_file, fixP->fx_line,
25913 _("Unable to process relocation for thumb opcode: %lx"),
25914 (unsigned long) insn);
25915
25916 /* Encode as ADD immediate8 thumb 1 code. */
25917 insn = 0x3000 | (rd << 8);
25918
25919 /* Place the encoded addend into the first 8 bits of the
25920 instruction. */
25921 if (!seg->use_rela_p)
25922 insn |= encoded_addend;
25923 }
25924
25925 /* Update the instruction. */
25926 md_number_to_chars (buf, insn, THUMB_SIZE);
25927 }
25928 break;
25929
25930 case BFD_RELOC_ARM_ALU_PC_G0_NC:
25931 case BFD_RELOC_ARM_ALU_PC_G0:
25932 case BFD_RELOC_ARM_ALU_PC_G1_NC:
25933 case BFD_RELOC_ARM_ALU_PC_G1:
25934 case BFD_RELOC_ARM_ALU_PC_G2:
25935 case BFD_RELOC_ARM_ALU_SB_G0_NC:
25936 case BFD_RELOC_ARM_ALU_SB_G0:
25937 case BFD_RELOC_ARM_ALU_SB_G1_NC:
25938 case BFD_RELOC_ARM_ALU_SB_G1:
25939 case BFD_RELOC_ARM_ALU_SB_G2:
25940 gas_assert (!fixP->fx_done);
25941 if (!seg->use_rela_p)
25942 {
25943 bfd_vma insn;
25944 bfd_vma encoded_addend;
25945 bfd_vma addend_abs = llabs (value);
25946
25947 /* Check that the absolute value of the addend can be
25948 expressed as an 8-bit constant plus a rotation. */
25949 encoded_addend = encode_arm_immediate (addend_abs);
25950 if (encoded_addend == (unsigned int) FAIL)
25951 as_bad_where (fixP->fx_file, fixP->fx_line,
25952 _("the offset 0x%08lX is not representable"),
25953 (unsigned long) addend_abs);
25954
25955 /* Extract the instruction. */
25956 insn = md_chars_to_number (buf, INSN_SIZE);
25957
25958 /* If the addend is positive, use an ADD instruction.
25959 Otherwise use a SUB. Take care not to destroy the S bit. */
25960 insn &= 0xff1fffff;
25961 if (value < 0)
25962 insn |= 1 << 22;
25963 else
25964 insn |= 1 << 23;
25965
25966 /* Place the encoded addend into the first 12 bits of the
25967 instruction. */
25968 insn &= 0xfffff000;
25969 insn |= encoded_addend;
25970
25971 /* Update the instruction. */
25972 md_number_to_chars (buf, insn, INSN_SIZE);
25973 }
25974 break;
25975
25976 case BFD_RELOC_ARM_LDR_PC_G0:
25977 case BFD_RELOC_ARM_LDR_PC_G1:
25978 case BFD_RELOC_ARM_LDR_PC_G2:
25979 case BFD_RELOC_ARM_LDR_SB_G0:
25980 case BFD_RELOC_ARM_LDR_SB_G1:
25981 case BFD_RELOC_ARM_LDR_SB_G2:
25982 gas_assert (!fixP->fx_done);
25983 if (!seg->use_rela_p)
25984 {
25985 bfd_vma insn;
25986 bfd_vma addend_abs = llabs (value);
25987
25988 /* Check that the absolute value of the addend can be
25989 encoded in 12 bits. */
25990 if (addend_abs >= 0x1000)
25991 as_bad_where (fixP->fx_file, fixP->fx_line,
25992 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
25993 (unsigned long) addend_abs);
25994
25995 /* Extract the instruction. */
25996 insn = md_chars_to_number (buf, INSN_SIZE);
25997
25998 /* If the addend is negative, clear bit 23 of the instruction.
25999 Otherwise set it. */
26000 if (value < 0)
26001 insn &= ~(1 << 23);
26002 else
26003 insn |= 1 << 23;
26004
26005 /* Place the absolute value of the addend into the first 12 bits
26006 of the instruction. */
26007 insn &= 0xfffff000;
26008 insn |= addend_abs;
26009
26010 /* Update the instruction. */
26011 md_number_to_chars (buf, insn, INSN_SIZE);
26012 }
26013 break;
26014
26015 case BFD_RELOC_ARM_LDRS_PC_G0:
26016 case BFD_RELOC_ARM_LDRS_PC_G1:
26017 case BFD_RELOC_ARM_LDRS_PC_G2:
26018 case BFD_RELOC_ARM_LDRS_SB_G0:
26019 case BFD_RELOC_ARM_LDRS_SB_G1:
26020 case BFD_RELOC_ARM_LDRS_SB_G2:
26021 gas_assert (!fixP->fx_done);
26022 if (!seg->use_rela_p)
26023 {
26024 bfd_vma insn;
26025 bfd_vma addend_abs = llabs (value);
26026
26027 /* Check that the absolute value of the addend can be
26028 encoded in 8 bits. */
26029 if (addend_abs >= 0x100)
26030 as_bad_where (fixP->fx_file, fixP->fx_line,
26031 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
26032 (unsigned long) addend_abs);
26033
26034 /* Extract the instruction. */
26035 insn = md_chars_to_number (buf, INSN_SIZE);
26036
26037 /* If the addend is negative, clear bit 23 of the instruction.
26038 Otherwise set it. */
26039 if (value < 0)
26040 insn &= ~(1 << 23);
26041 else
26042 insn |= 1 << 23;
26043
26044 /* Place the first four bits of the absolute value of the addend
26045 into the first 4 bits of the instruction, and the remaining
26046 four into bits 8 .. 11. */
26047 insn &= 0xfffff0f0;
26048 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
26049
26050 /* Update the instruction. */
26051 md_number_to_chars (buf, insn, INSN_SIZE);
26052 }
26053 break;
26054
26055 case BFD_RELOC_ARM_LDC_PC_G0:
26056 case BFD_RELOC_ARM_LDC_PC_G1:
26057 case BFD_RELOC_ARM_LDC_PC_G2:
26058 case BFD_RELOC_ARM_LDC_SB_G0:
26059 case BFD_RELOC_ARM_LDC_SB_G1:
26060 case BFD_RELOC_ARM_LDC_SB_G2:
26061 gas_assert (!fixP->fx_done);
26062 if (!seg->use_rela_p)
26063 {
26064 bfd_vma insn;
26065 bfd_vma addend_abs = llabs (value);
26066
26067 /* Check that the absolute value of the addend is a multiple of
26068 four and, when divided by four, fits in 8 bits. */
26069 if (addend_abs & 0x3)
26070 as_bad_where (fixP->fx_file, fixP->fx_line,
26071 _("bad offset 0x%08lX (must be word-aligned)"),
26072 (unsigned long) addend_abs);
26073
26074 if ((addend_abs >> 2) > 0xff)
26075 as_bad_where (fixP->fx_file, fixP->fx_line,
26076 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
26077 (unsigned long) addend_abs);
26078
26079 /* Extract the instruction. */
26080 insn = md_chars_to_number (buf, INSN_SIZE);
26081
26082 /* If the addend is negative, clear bit 23 of the instruction.
26083 Otherwise set it. */
26084 if (value < 0)
26085 insn &= ~(1 << 23);
26086 else
26087 insn |= 1 << 23;
26088
26089 /* Place the addend (divided by four) into the first eight
26090 bits of the instruction. */
26091 insn &= 0xfffffff0;
26092 insn |= addend_abs >> 2;
26093
26094 /* Update the instruction. */
26095 md_number_to_chars (buf, insn, INSN_SIZE);
26096 }
26097 break;
26098
 /* 5-bit PC-relative branch used by Armv8.1-M Mainline low-overhead
    branch instructions.  */
26099 case BFD_RELOC_THUMB_PCREL_BRANCH5:
26100 if (fixP->fx_addsy
26101 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26102 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26103 && ARM_IS_FUNC (fixP->fx_addsy)
26104 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26105 {
26106 /* Force a relocation for a branch 5 bits wide. */
26107 fixP->fx_done = 0;
26108 }
26109 if (v8_1_branch_value_check (value, 5, FALSE) == FAIL)
26110 as_bad_where (fixP->fx_file, fixP->fx_line,
26111 BAD_BRANCH_OFF);
26112
26113 if (fixP->fx_done || !seg->use_rela_p)
26114 {
 /* Insert the halfword offset (value/2) at bits 7 and up of the
    16-bit encoding.  */
26115 addressT boff = value >> 1;
26116
26117 newval = md_chars_to_number (buf, THUMB_SIZE);
26118 newval |= (boff << 7);
26119 md_number_to_chars (buf, newval, THUMB_SIZE);
26120 }
26121 break;
26122
26123 case BFD_RELOC_THUMB_PCREL_BFCSEL:
26124 if (fixP->fx_addsy
26125 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26126 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26127 && ARM_IS_FUNC (fixP->fx_addsy)
26128 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26129 {
26130 fixP->fx_done = 0;
26131 }
26132 if ((value & ~0x7f) && ((value & ~0x3f) != ~0x3f))
26133 as_bad_where (fixP->fx_file, fixP->fx_line,
26134 _("branch out of range"));
26135
26136 if (fixP->fx_done || !seg->use_rela_p)
26137 {
26138 newval = md_chars_to_number (buf, THUMB_SIZE);
26139
26140 addressT boff = ((newval & 0x0780) >> 7) << 1;
26141 addressT diff = value - boff;
26142
26143 if (diff == 4)
26144 {
26145 newval |= 1 << 1; /* T bit. */
26146 }
26147 else if (diff != 2)
26148 {
26149 as_bad_where (fixP->fx_file, fixP->fx_line,
26150 _("out of range label-relative fixup value"));
26151 }
26152 md_number_to_chars (buf, newval, THUMB_SIZE);
26153 }
26154 break;
26155
26156 case BFD_RELOC_ARM_THUMB_BF17:
26157 if (fixP->fx_addsy
26158 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26159 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26160 && ARM_IS_FUNC (fixP->fx_addsy)
26161 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26162 {
26163 /* Force a relocation for a branch 17 bits wide. */
26164 fixP->fx_done = 0;
26165 }
26166
26167 if (v8_1_branch_value_check (value, 17, TRUE) == FAIL)
26168 as_bad_where (fixP->fx_file, fixP->fx_line,
26169 BAD_BRANCH_OFF);
26170
26171 if (fixP->fx_done || !seg->use_rela_p)
26172 {
26173 offsetT newval2;
26174 addressT immA, immB, immC;
26175
26176 immA = (value & 0x0001f000) >> 12;
26177 immB = (value & 0x00000ffc) >> 2;
26178 immC = (value & 0x00000002) >> 1;
26179
26180 newval = md_chars_to_number (buf, THUMB_SIZE);
26181 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
26182 newval |= immA;
26183 newval2 |= (immC << 11) | (immB << 1);
26184 md_number_to_chars (buf, newval, THUMB_SIZE);
26185 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
26186 }
26187 break;
26188
26189 case BFD_RELOC_ARM_THUMB_BF19:
26190 if (fixP->fx_addsy
26191 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26192 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26193 && ARM_IS_FUNC (fixP->fx_addsy)
26194 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26195 {
26196 /* Force a relocation for a branch 19 bits wide. */
26197 fixP->fx_done = 0;
26198 }
26199
26200 if (v8_1_branch_value_check (value, 19, TRUE) == FAIL)
26201 as_bad_where (fixP->fx_file, fixP->fx_line,
26202 BAD_BRANCH_OFF);
26203
26204 if (fixP->fx_done || !seg->use_rela_p)
26205 {
26206 offsetT newval2;
26207 addressT immA, immB, immC;
26208
26209 immA = (value & 0x0007f000) >> 12;
26210 immB = (value & 0x00000ffc) >> 2;
26211 immC = (value & 0x00000002) >> 1;
26212
26213 newval = md_chars_to_number (buf, THUMB_SIZE);
26214 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
26215 newval |= immA;
26216 newval2 |= (immC << 11) | (immB << 1);
26217 md_number_to_chars (buf, newval, THUMB_SIZE);
26218 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
26219 }
26220 break;
26221
26222 case BFD_RELOC_ARM_THUMB_BF13:
26223 if (fixP->fx_addsy
26224 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26225 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26226 && ARM_IS_FUNC (fixP->fx_addsy)
26227 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26228 {
26229 /* Force a relocation for a branch 13 bits wide. */
26230 fixP->fx_done = 0;
26231 }
26232
26233 if (v8_1_branch_value_check (value, 13, TRUE) == FAIL)
26234 as_bad_where (fixP->fx_file, fixP->fx_line,
26235 BAD_BRANCH_OFF);
26236
26237 if (fixP->fx_done || !seg->use_rela_p)
26238 {
26239 offsetT newval2;
26240 addressT immA, immB, immC;
26241
26242 immA = (value & 0x00001000) >> 12;
26243 immB = (value & 0x00000ffc) >> 2;
26244 immC = (value & 0x00000002) >> 1;
26245
26246 newval = md_chars_to_number (buf, THUMB_SIZE);
26247 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
26248 newval |= immA;
26249 newval2 |= (immC << 11) | (immB << 1);
26250 md_number_to_chars (buf, newval, THUMB_SIZE);
26251 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
26252 }
26253 break;
26254
26255 case BFD_RELOC_ARM_THUMB_LOOP12:
26256 if (fixP->fx_addsy
26257 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26258 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26259 && ARM_IS_FUNC (fixP->fx_addsy)
26260 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26261 {
26262 /* Force a relocation for a branch 12 bits wide. */
26263 fixP->fx_done = 0;
26264 }
26265
26266 bfd_vma insn = get_thumb32_insn (buf);
26267 /* le lr, <label> or le <label> */
26268 if (((insn & 0xffffffff) == 0xf00fc001)
26269 || ((insn & 0xffffffff) == 0xf02fc001))
26270 value = -value;
26271
26272 if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
26273 as_bad_where (fixP->fx_file, fixP->fx_line,
26274 BAD_BRANCH_OFF);
26275 if (fixP->fx_done || !seg->use_rela_p)
26276 {
26277 addressT imml, immh;
26278
26279 immh = (value & 0x00000ffc) >> 2;
26280 imml = (value & 0x00000002) >> 1;
26281
26282 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
26283 newval |= (imml << 11) | (immh << 1);
26284 md_number_to_chars (buf + THUMB_SIZE, newval, THUMB_SIZE);
26285 }
26286 break;
26287
 /* BX-instruction marker for ARMv4 interworking fix-ups: nothing to
    patch here; keep the relocation so the linker can rewrite it.  */
26288 case BFD_RELOC_ARM_V4BX:
26289 /* This will need to go in the object file. */
26290 fixP->fx_done = 0;
26291 break;
26292
26293 case BFD_RELOC_UNUSED:
26294 default:
26295 as_bad_where (fixP->fx_file, fixP->fx_line,
26296 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
26297 }
26298 }
26299
/* Translate internal representation of relocation info to BFD target
   format.  Returns a freshly allocated arelent describing the
   relocation, or NULL (after issuing a diagnostic) when the fixup
   cannot be represented in the output object file format.  */

arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = XNEW (arelent);

  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For PC-relative fixups the addend depends on whether the output
     section uses RELA (explicit addend) or REL (addend stored in the
     place being relocated).  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type onto a BFD relocation code.  The
     data relocations below select a _PCREL variant when the fixup is
     PC-relative, otherwise fall through to the plain form.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These pass through to the object file unchanged.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* From EABI v4 on, the BLX reloc is expressed as a plain
	 Thumb branch; the linker handles mode switching.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
    case BFD_RELOC_ARM_GOTFUNCDESC:
    case BFD_RELOC_ARM_GOTOFFFUNCDESC:
    case BFD_RELOC_ARM_FUNCDESC:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_GD32_FDPIC:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_IE32_FDPIC:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("%s used for a symbol not defined in the same file"),
		    bfd_get_reloc_code_name (fixp->fx_r_type));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to the GOT symbol becomes a GOTPC reloc.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
26586
26587 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
26588
26589 void
26590 cons_fix_new_arm (fragS * frag,
26591 int where,
26592 int size,
26593 expressionS * exp,
26594 bfd_reloc_code_real_type reloc)
26595 {
26596 int pcrel = 0;
26597
26598 /* Pick a reloc.
26599 FIXME: @@ Should look at CPU word size. */
26600 switch (size)
26601 {
26602 case 1:
26603 reloc = BFD_RELOC_8;
26604 break;
26605 case 2:
26606 reloc = BFD_RELOC_16;
26607 break;
26608 case 4:
26609 default:
26610 reloc = BFD_RELOC_32;
26611 break;
26612 case 8:
26613 reloc = BFD_RELOC_64;
26614 break;
26615 }
26616
26617 #ifdef TE_PE
26618 if (exp->X_op == O_secrel)
26619 {
26620 exp->X_op = O_symbol;
26621 reloc = BFD_RELOC_32_SECREL;
26622 }
26623 #endif
26624
26625 fix_new_exp (frag, where, size, exp, pcrel, reloc);
26626 }
26627
#if defined (OBJ_COFF)
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.
     Bail out early for every fixup that does not match that pattern.  */
  if (fixP->fx_r_type != BFD_RELOC_THUMB_PCREL_BRANCH23
      || fixP->fx_addsy == NULL
      || ! S_IS_DEFINED (fixP->fx_addsy)
      || THUMB_IS_FUNC (fixP->fx_addsy))
    return;

  fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
26645
26646
26647 int
26648 arm_force_relocation (struct fix * fixp)
26649 {
26650 #if defined (OBJ_COFF) && defined (TE_PE)
26651 if (fixp->fx_r_type == BFD_RELOC_RVA)
26652 return 1;
26653 #endif
26654
26655 /* In case we have a call or a branch to a function in ARM ISA mode from
26656 a thumb function or vice-versa force the relocation. These relocations
26657 are cleared off for some cores that might have blx and simple transformations
26658 are possible. */
26659
26660 #ifdef OBJ_ELF
26661 switch (fixp->fx_r_type)
26662 {
26663 case BFD_RELOC_ARM_PCREL_JUMP:
26664 case BFD_RELOC_ARM_PCREL_CALL:
26665 case BFD_RELOC_THUMB_PCREL_BLX:
26666 if (THUMB_IS_FUNC (fixp->fx_addsy))
26667 return 1;
26668 break;
26669
26670 case BFD_RELOC_ARM_PCREL_BLX:
26671 case BFD_RELOC_THUMB_PCREL_BRANCH25:
26672 case BFD_RELOC_THUMB_PCREL_BRANCH20:
26673 case BFD_RELOC_THUMB_PCREL_BRANCH23:
26674 if (ARM_IS_FUNC (fixp->fx_addsy))
26675 return 1;
26676 break;
26677
26678 default:
26679 break;
26680 }
26681 #endif
26682
26683 /* Resolve these relocations even if the symbol is extern or weak.
26684 Technically this is probably wrong due to symbol preemption.
26685 In practice these relocations do not have enough range to be useful
26686 at dynamic link time, and some code (e.g. in the Linux kernel)
26687 expects these references to be resolved. */
26688 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
26689 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
26690 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
26691 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
26692 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
26693 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
26694 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
26695 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
26696 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
26697 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
26698 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
26699 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
26700 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
26701 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
26702 return 0;
26703
26704 /* Always leave these relocations for the linker. */
26705 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
26706 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
26707 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
26708 return 1;
26709
26710 /* Always generate relocations against function symbols. */
26711 if (fixp->fx_r_type == BFD_RELOC_32
26712 && fixp->fx_addsy
26713 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
26714 return 1;
26715
26716 return generic_force_reloc (fixp);
26717 }
26718
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not:  Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.

   Returns TRUE if it is safe to adjust FIXP to be against a section
   symbol, FALSE if the original symbol must be preserved.  */

bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    /* Consistently use the bfd_boolean constants on every path:
       the original returned the bare literal 1 here.  */
    return TRUE;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return FALSE;

  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return FALSE;

  /* We need the symbol name for the VTABLE entries.  */
  if (	fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return FALSE;

  /* Don't allow symbols to be discarded on GOT related relocs.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return FALSE;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return FALSE;

  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
    return FALSE;

  /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
     offsets, so keep these symbols.  */
  if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
      && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
    return FALSE;

  return TRUE;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
26798
26799 #ifdef OBJ_ELF
26800 const char *
26801 elf32_arm_target_format (void)
26802 {
26803 #ifdef TE_SYMBIAN
26804 return (target_big_endian
26805 ? "elf32-bigarm-symbian"
26806 : "elf32-littlearm-symbian");
26807 #elif defined (TE_VXWORKS)
26808 return (target_big_endian
26809 ? "elf32-bigarm-vxworks"
26810 : "elf32-littlearm-vxworks");
26811 #elif defined (TE_NACL)
26812 return (target_big_endian
26813 ? "elf32-bigarm-nacl"
26814 : "elf32-littlearm-nacl");
26815 #else
26816 if (arm_fdpic)
26817 {
26818 if (target_big_endian)
26819 return "elf32-bigarm-fdpic";
26820 else
26821 return "elf32-littlearm-fdpic";
26822 }
26823 else
26824 {
26825 if (target_big_endian)
26826 return "elf32-bigarm";
26827 else
26828 return "elf32-littlearm";
26829 }
26830 #endif
26831 }
26832
/* Hook called for each symbol at write-out time; simply defers to the
   generic ELF symbol frobbing.  */
void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
26839 #endif
26840
26841 /* MD interface: Finalization. */
26842
26843 void
26844 arm_cleanup (void)
26845 {
26846 literal_pool * pool;
26847
26848 /* Ensure that all the predication blocks are properly closed. */
26849 check_pred_blocks_finished ();
26850
26851 for (pool = list_of_pools; pool; pool = pool->next)
26852 {
26853 /* Put it at the end of the relevant section. */
26854 subseg_set (pool->section, pool->sub_section);
26855 #ifdef OBJ_ELF
26856 arm_elf_change_section ();
26857 #endif
26858 s_ltorg (0);
26859 }
26860 }
26861
26862 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections gas never emitted anything into.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* The last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary to NEXT; scan forward over
	 empty frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
26927 #endif
26928
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  On COFF this is done via storage classes; on ELF via
   st_info / st_target_internal.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (	 S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	      /* Non-function Thumb symbols: translate each ordinary
		 storage class to its Thumb counterpart.  */
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a, $t, $d, ...) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
27010
27011 /* MD interface: Initialization. */
27012
27013 static void
27014 set_constant_flonums (void)
27015 {
27016 int i;
27017
27018 for (i = 0; i < NUM_FLOAT_VALS; i++)
27019 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
27020 abort ();
27021 }
27022
/* Auto-select Thumb mode if it's the only available instruction set for the
   given architecture.  (Absence of the v1 ARM feature implies a
   Thumb-only core such as Cortex-M.)  */

static void
autoselect_thumb_from_cpu_variant (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
27032
/* MD interface entry point: one-time assembler initialization.  Builds
   the opcode/operand hash tables, resolves the CPU/FPU selection from
   the command line, records object-file flags and the BFD machine
   number.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Build the lookup tables used by the parser.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_vcond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (vconds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_vcond_hsh, vconds[i].template_name, (void *) (vconds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      selected_arch = *legacy_cpu;
    }
  else if (mcpu_cpu_opt)
    {
      selected_arch = *mcpu_cpu_opt;
      selected_ext = *mcpu_ext_opt;
    }
  else if (march_cpu_opt)
    {
      selected_arch = *march_cpu_opt;
      selected_ext = *march_ext_opt;
    }
  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      selected_fpu = *legacy_fpu;
    }
  else if (mfpu_opt)
    selected_fpu = *mfpu_opt;
  else
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	selected_fpu = *mcpu_fpu_opt;
      else if (march_fpu_opt)
	selected_fpu = *march_fpu_opt;
#else
      selected_fpu = fpu_default;
#endif
    }

  if (ARM_FEATURE_ZERO (selected_fpu))
    {
      if (!no_cpu_selected ())
	selected_fpu = fpu_default;
      else
	selected_fpu = fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (ARM_FEATURE_ZERO (selected_arch))
    {
      selected_arch = cpu_default;
      selected_cpu = selected_arch;
    }
  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#else
  /* Autodetection of feature mode: allow all features in cpu_variant but leave
     selected_cpu unset.  It will be set in aeabi_set_public_attributes ()
     after all instruction have been processed and we can decide what CPU
     should be selected.  */
  if (ARM_FEATURE_ZERO (selected_arch))
    ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
  else
    ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#endif

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).	*/
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	    flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Tested from most to least specific
     feature so the most capable matching machine number wins.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
27272
27273 /* Command line processing. */
27274
27275 /* md_parse_option
27276 Invocation line includes a switch not recognized by the base assembler.
27277 See if it's a processor-specific option.
27278
27279 This routine is somewhat complicated by the need for backwards
27280 compatibility (since older releases of gcc can't be changed).
27281 The new options try to make the interface as compatible as
27282 possible with GCC.
27283
27284 New options (supported) are:
27285
27286 -mcpu=<cpu name> Assemble for selected processor
27287 -march=<architecture name> Assemble for selected architecture
27288 -mfpu=<fpu architecture> Assemble for selected FPU.
27289 -EB/-mbig-endian Big-endian
27290 -EL/-mlittle-endian Little-endian
27291 -k Generate PIC code
27292 -mthumb Start in Thumb mode
27293 -mthumb-interwork Code supports ARM/Thumb interworking
27294
27295 -m[no-]warn-deprecated Warn about deprecated features
27296 -m[no-]warn-syms Warn when symbols match instructions
27297
27298 For now we will also provide support for:
27299
27300 -mapcs-32 32-bit Program counter
27301 -mapcs-26 26-bit Program counter
27302 -mapcs-float Floats passed in FP registers
27303 -mapcs-reentrant Reentrant code
27304 -matpcs
27305 (sometime these will probably be replaced with -mapcs=<list of options>
27306 and -matpcs=<list of options>)
27307
27308 The remaining options are only supported for backwards compatibility.
27309 Cpu variants, the arm part is optional:
27310 -m[arm]1 Currently not supported.
27311 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
27312 -m[arm]3 Arm 3 processor
27313 -m[arm]6[xx], Arm 6 processors
27314 -m[arm]7[xx][t][[d]m] Arm 7 processors
27315 -m[arm]8[10] Arm 8 processors
27316 -m[arm]9[20][tdmi] Arm 9 processors
27317 -mstrongarm[110[0]] StrongARM processors
27318 -mxscale XScale processors
27319 -m[arm]v[2345[t[e]]] Arm architectures
27320 -mall All (except the ARM1)
27321 FP variants:
27322 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
27323 -mfpe-old (No float load/store multiples)
27324 -mvfpxd VFP Single precision
27325 -mvfp All VFP
27326 -mno-fpu Disable all floating point instructions
27327
27328 The following CPU names are recognized:
27329 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
27330 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
27331 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
27332 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
27333 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
27334 arm10t arm10e, arm1020t, arm1020e, arm10200e,
27335 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
27336
27337 */
27338
27339 const char * md_shortopts = "m:k";
27340
27341 #ifdef ARM_BI_ENDIAN
27342 #define OPTION_EB (OPTION_MD_BASE + 0)
27343 #define OPTION_EL (OPTION_MD_BASE + 1)
27344 #else
27345 #if TARGET_BYTES_BIG_ENDIAN
27346 #define OPTION_EB (OPTION_MD_BASE + 0)
27347 #else
27348 #define OPTION_EL (OPTION_MD_BASE + 1)
27349 #endif
27350 #endif
27351 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
27352 #define OPTION_FDPIC (OPTION_MD_BASE + 3)
27353
27354 struct option md_longopts[] =
27355 {
27356 #ifdef OPTION_EB
27357 {"EB", no_argument, NULL, OPTION_EB},
27358 #endif
27359 #ifdef OPTION_EL
27360 {"EL", no_argument, NULL, OPTION_EL},
27361 #endif
27362 {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
27363 #ifdef OBJ_ELF
27364 {"fdpic", no_argument, NULL, OPTION_FDPIC},
27365 #endif
27366 {NULL, no_argument, NULL, 0}
27367 };
27368
27369 size_t md_longopts_size = sizeof (md_longopts);
27370
27371 struct arm_option_table
27372 {
27373 const char * option; /* Option name to match. */
27374 const char * help; /* Help information. */
27375 int * var; /* Variable to change. */
27376 int value; /* What to change it to. */
27377 const char * deprecated; /* If non-null, print this message. */
27378 };
27379
27380 struct arm_option_table arm_opts[] =
27381 {
27382 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
27383 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
27384 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
27385 &support_interwork, 1, NULL},
27386 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
27387 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
27388 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
27389 1, NULL},
27390 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
27391 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
27392 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
27393 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
27394 NULL},
27395
27396 /* These are recognized by the assembler, but have no affect on code. */
27397 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
27398 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
27399
27400 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
27401 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
27402 &warn_on_deprecated, 0, NULL},
27403 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
27404 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
27405 {NULL, NULL, NULL, 0, NULL}
27406 };
27407
27408 struct arm_legacy_option_table
27409 {
27410 const char * option; /* Option name to match. */
27411 const arm_feature_set ** var; /* Variable to change. */
27412 const arm_feature_set value; /* What to change it to. */
27413 const char * deprecated; /* If non-null, print this message. */
27414 };
27415
27416 const struct arm_legacy_option_table arm_legacy_opts[] =
27417 {
27418 /* DON'T add any new processors to this list -- we want the whole list
27419 to go away... Add them to the processors table instead. */
27420 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
27421 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
27422 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
27423 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
27424 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
27425 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
27426 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
27427 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
27428 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
27429 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
27430 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
27431 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
27432 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
27433 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
27434 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
27435 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
27436 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
27437 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
27438 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
27439 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
27440 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
27441 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
27442 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
27443 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
27444 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
27445 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
27446 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
27447 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
27448 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
27449 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
27450 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
27451 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
27452 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
27453 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
27454 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
27455 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
27456 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
27457 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
27458 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
27459 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
27460 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
27461 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
27462 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
27463 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
27464 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
27465 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
27466 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
27467 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
27468 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
27469 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
27470 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
27471 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
27472 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
27473 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
27474 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
27475 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
27476 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
27477 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
27478 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
27479 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
27480 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
27481 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
27482 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
27483 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
27484 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
27485 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
27486 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
27487 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
27488 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
27489 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
27490 N_("use -mcpu=strongarm110")},
27491 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
27492 N_("use -mcpu=strongarm1100")},
27493 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
27494 N_("use -mcpu=strongarm1110")},
27495 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
27496 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
27497 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
27498
27499 /* Architecture variants -- don't add any more to this list either. */
27500 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
27501 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
27502 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
27503 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
27504 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
27505 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
27506 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
27507 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
27508 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
27509 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
27510 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
27511 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
27512 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
27513 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
27514 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
27515 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
27516 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
27517 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
27518
27519 /* Floating point variants -- don't add any more to this list either. */
27520 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
27521 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
27522 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
27523 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
27524 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
27525
27526 {NULL, NULL, ARM_ARCH_NONE, NULL}
27527 };
27528
27529 struct arm_cpu_option_table
27530 {
27531 const char * name;
27532 size_t name_len;
27533 const arm_feature_set value;
27534 const arm_feature_set ext;
27535 /* For some CPUs we assume an FPU unless the user explicitly sets
27536 -mfpu=... */
27537 const arm_feature_set default_fpu;
27538 /* The canonical name of the CPU, or NULL to use NAME converted to upper
27539 case. */
27540 const char * canonical_name;
27541 };
27542
27543 /* This list should, at a minimum, contain all the cpu names
27544 recognized by GCC. */
27545 #define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
27546
27547 static const struct arm_cpu_option_table arm_cpus[] =
27548 {
27549 ARM_CPU_OPT ("all", NULL, ARM_ANY,
27550 ARM_ARCH_NONE,
27551 FPU_ARCH_FPA),
27552 ARM_CPU_OPT ("arm1", NULL, ARM_ARCH_V1,
27553 ARM_ARCH_NONE,
27554 FPU_ARCH_FPA),
27555 ARM_CPU_OPT ("arm2", NULL, ARM_ARCH_V2,
27556 ARM_ARCH_NONE,
27557 FPU_ARCH_FPA),
27558 ARM_CPU_OPT ("arm250", NULL, ARM_ARCH_V2S,
27559 ARM_ARCH_NONE,
27560 FPU_ARCH_FPA),
27561 ARM_CPU_OPT ("arm3", NULL, ARM_ARCH_V2S,
27562 ARM_ARCH_NONE,
27563 FPU_ARCH_FPA),
27564 ARM_CPU_OPT ("arm6", NULL, ARM_ARCH_V3,
27565 ARM_ARCH_NONE,
27566 FPU_ARCH_FPA),
27567 ARM_CPU_OPT ("arm60", NULL, ARM_ARCH_V3,
27568 ARM_ARCH_NONE,
27569 FPU_ARCH_FPA),
27570 ARM_CPU_OPT ("arm600", NULL, ARM_ARCH_V3,
27571 ARM_ARCH_NONE,
27572 FPU_ARCH_FPA),
27573 ARM_CPU_OPT ("arm610", NULL, ARM_ARCH_V3,
27574 ARM_ARCH_NONE,
27575 FPU_ARCH_FPA),
27576 ARM_CPU_OPT ("arm620", NULL, ARM_ARCH_V3,
27577 ARM_ARCH_NONE,
27578 FPU_ARCH_FPA),
27579 ARM_CPU_OPT ("arm7", NULL, ARM_ARCH_V3,
27580 ARM_ARCH_NONE,
27581 FPU_ARCH_FPA),
27582 ARM_CPU_OPT ("arm7m", NULL, ARM_ARCH_V3M,
27583 ARM_ARCH_NONE,
27584 FPU_ARCH_FPA),
27585 ARM_CPU_OPT ("arm7d", NULL, ARM_ARCH_V3,
27586 ARM_ARCH_NONE,
27587 FPU_ARCH_FPA),
27588 ARM_CPU_OPT ("arm7dm", NULL, ARM_ARCH_V3M,
27589 ARM_ARCH_NONE,
27590 FPU_ARCH_FPA),
27591 ARM_CPU_OPT ("arm7di", NULL, ARM_ARCH_V3,
27592 ARM_ARCH_NONE,
27593 FPU_ARCH_FPA),
27594 ARM_CPU_OPT ("arm7dmi", NULL, ARM_ARCH_V3M,
27595 ARM_ARCH_NONE,
27596 FPU_ARCH_FPA),
27597 ARM_CPU_OPT ("arm70", NULL, ARM_ARCH_V3,
27598 ARM_ARCH_NONE,
27599 FPU_ARCH_FPA),
27600 ARM_CPU_OPT ("arm700", NULL, ARM_ARCH_V3,
27601 ARM_ARCH_NONE,
27602 FPU_ARCH_FPA),
27603 ARM_CPU_OPT ("arm700i", NULL, ARM_ARCH_V3,
27604 ARM_ARCH_NONE,
27605 FPU_ARCH_FPA),
27606 ARM_CPU_OPT ("arm710", NULL, ARM_ARCH_V3,
27607 ARM_ARCH_NONE,
27608 FPU_ARCH_FPA),
27609 ARM_CPU_OPT ("arm710t", NULL, ARM_ARCH_V4T,
27610 ARM_ARCH_NONE,
27611 FPU_ARCH_FPA),
27612 ARM_CPU_OPT ("arm720", NULL, ARM_ARCH_V3,
27613 ARM_ARCH_NONE,
27614 FPU_ARCH_FPA),
27615 ARM_CPU_OPT ("arm720t", NULL, ARM_ARCH_V4T,
27616 ARM_ARCH_NONE,
27617 FPU_ARCH_FPA),
27618 ARM_CPU_OPT ("arm740t", NULL, ARM_ARCH_V4T,
27619 ARM_ARCH_NONE,
27620 FPU_ARCH_FPA),
27621 ARM_CPU_OPT ("arm710c", NULL, ARM_ARCH_V3,
27622 ARM_ARCH_NONE,
27623 FPU_ARCH_FPA),
27624 ARM_CPU_OPT ("arm7100", NULL, ARM_ARCH_V3,
27625 ARM_ARCH_NONE,
27626 FPU_ARCH_FPA),
27627 ARM_CPU_OPT ("arm7500", NULL, ARM_ARCH_V3,
27628 ARM_ARCH_NONE,
27629 FPU_ARCH_FPA),
27630 ARM_CPU_OPT ("arm7500fe", NULL, ARM_ARCH_V3,
27631 ARM_ARCH_NONE,
27632 FPU_ARCH_FPA),
27633 ARM_CPU_OPT ("arm7t", NULL, ARM_ARCH_V4T,
27634 ARM_ARCH_NONE,
27635 FPU_ARCH_FPA),
27636 ARM_CPU_OPT ("arm7tdmi", NULL, ARM_ARCH_V4T,
27637 ARM_ARCH_NONE,
27638 FPU_ARCH_FPA),
27639 ARM_CPU_OPT ("arm7tdmi-s", NULL, ARM_ARCH_V4T,
27640 ARM_ARCH_NONE,
27641 FPU_ARCH_FPA),
27642 ARM_CPU_OPT ("arm8", NULL, ARM_ARCH_V4,
27643 ARM_ARCH_NONE,
27644 FPU_ARCH_FPA),
27645 ARM_CPU_OPT ("arm810", NULL, ARM_ARCH_V4,
27646 ARM_ARCH_NONE,
27647 FPU_ARCH_FPA),
27648 ARM_CPU_OPT ("strongarm", NULL, ARM_ARCH_V4,
27649 ARM_ARCH_NONE,
27650 FPU_ARCH_FPA),
27651 ARM_CPU_OPT ("strongarm1", NULL, ARM_ARCH_V4,
27652 ARM_ARCH_NONE,
27653 FPU_ARCH_FPA),
27654 ARM_CPU_OPT ("strongarm110", NULL, ARM_ARCH_V4,
27655 ARM_ARCH_NONE,
27656 FPU_ARCH_FPA),
27657 ARM_CPU_OPT ("strongarm1100", NULL, ARM_ARCH_V4,
27658 ARM_ARCH_NONE,
27659 FPU_ARCH_FPA),
27660 ARM_CPU_OPT ("strongarm1110", NULL, ARM_ARCH_V4,
27661 ARM_ARCH_NONE,
27662 FPU_ARCH_FPA),
27663 ARM_CPU_OPT ("arm9", NULL, ARM_ARCH_V4T,
27664 ARM_ARCH_NONE,
27665 FPU_ARCH_FPA),
27666 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T,
27667 ARM_ARCH_NONE,
27668 FPU_ARCH_FPA),
27669 ARM_CPU_OPT ("arm920t", NULL, ARM_ARCH_V4T,
27670 ARM_ARCH_NONE,
27671 FPU_ARCH_FPA),
27672 ARM_CPU_OPT ("arm922t", NULL, ARM_ARCH_V4T,
27673 ARM_ARCH_NONE,
27674 FPU_ARCH_FPA),
27675 ARM_CPU_OPT ("arm940t", NULL, ARM_ARCH_V4T,
27676 ARM_ARCH_NONE,
27677 FPU_ARCH_FPA),
27678 ARM_CPU_OPT ("arm9tdmi", NULL, ARM_ARCH_V4T,
27679 ARM_ARCH_NONE,
27680 FPU_ARCH_FPA),
27681 ARM_CPU_OPT ("fa526", NULL, ARM_ARCH_V4,
27682 ARM_ARCH_NONE,
27683 FPU_ARCH_FPA),
27684 ARM_CPU_OPT ("fa626", NULL, ARM_ARCH_V4,
27685 ARM_ARCH_NONE,
27686 FPU_ARCH_FPA),
27687
27688 /* For V5 or later processors we default to using VFP; but the user
27689 should really set the FPU type explicitly. */
27690 ARM_CPU_OPT ("arm9e-r0", NULL, ARM_ARCH_V5TExP,
27691 ARM_ARCH_NONE,
27692 FPU_ARCH_VFP_V2),
27693 ARM_CPU_OPT ("arm9e", NULL, ARM_ARCH_V5TE,
27694 ARM_ARCH_NONE,
27695 FPU_ARCH_VFP_V2),
27696 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ,
27697 ARM_ARCH_NONE,
27698 FPU_ARCH_VFP_V2),
27699 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ,
27700 ARM_ARCH_NONE,
27701 FPU_ARCH_VFP_V2),
27702 ARM_CPU_OPT ("arm926ej-s", NULL, ARM_ARCH_V5TEJ,
27703 ARM_ARCH_NONE,
27704 FPU_ARCH_VFP_V2),
27705 ARM_CPU_OPT ("arm946e-r0", NULL, ARM_ARCH_V5TExP,
27706 ARM_ARCH_NONE,
27707 FPU_ARCH_VFP_V2),
27708 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE,
27709 ARM_ARCH_NONE,
27710 FPU_ARCH_VFP_V2),
27711 ARM_CPU_OPT ("arm946e-s", NULL, ARM_ARCH_V5TE,
27712 ARM_ARCH_NONE,
27713 FPU_ARCH_VFP_V2),
27714 ARM_CPU_OPT ("arm966e-r0", NULL, ARM_ARCH_V5TExP,
27715 ARM_ARCH_NONE,
27716 FPU_ARCH_VFP_V2),
27717 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE,
27718 ARM_ARCH_NONE,
27719 FPU_ARCH_VFP_V2),
27720 ARM_CPU_OPT ("arm966e-s", NULL, ARM_ARCH_V5TE,
27721 ARM_ARCH_NONE,
27722 FPU_ARCH_VFP_V2),
27723 ARM_CPU_OPT ("arm968e-s", NULL, ARM_ARCH_V5TE,
27724 ARM_ARCH_NONE,
27725 FPU_ARCH_VFP_V2),
27726 ARM_CPU_OPT ("arm10t", NULL, ARM_ARCH_V5T,
27727 ARM_ARCH_NONE,
27728 FPU_ARCH_VFP_V1),
27729 ARM_CPU_OPT ("arm10tdmi", NULL, ARM_ARCH_V5T,
27730 ARM_ARCH_NONE,
27731 FPU_ARCH_VFP_V1),
27732 ARM_CPU_OPT ("arm10e", NULL, ARM_ARCH_V5TE,
27733 ARM_ARCH_NONE,
27734 FPU_ARCH_VFP_V2),
27735 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE,
27736 ARM_ARCH_NONE,
27737 FPU_ARCH_VFP_V2),
27738 ARM_CPU_OPT ("arm1020t", NULL, ARM_ARCH_V5T,
27739 ARM_ARCH_NONE,
27740 FPU_ARCH_VFP_V1),
27741 ARM_CPU_OPT ("arm1020e", NULL, ARM_ARCH_V5TE,
27742 ARM_ARCH_NONE,
27743 FPU_ARCH_VFP_V2),
27744 ARM_CPU_OPT ("arm1022e", NULL, ARM_ARCH_V5TE,
27745 ARM_ARCH_NONE,
27746 FPU_ARCH_VFP_V2),
27747 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ,
27748 ARM_ARCH_NONE,
27749 FPU_ARCH_VFP_V2),
27750 ARM_CPU_OPT ("arm1026ej-s", NULL, ARM_ARCH_V5TEJ,
27751 ARM_ARCH_NONE,
27752 FPU_ARCH_VFP_V2),
27753 ARM_CPU_OPT ("fa606te", NULL, ARM_ARCH_V5TE,
27754 ARM_ARCH_NONE,
27755 FPU_ARCH_VFP_V2),
27756 ARM_CPU_OPT ("fa616te", NULL, ARM_ARCH_V5TE,
27757 ARM_ARCH_NONE,
27758 FPU_ARCH_VFP_V2),
27759 ARM_CPU_OPT ("fa626te", NULL, ARM_ARCH_V5TE,
27760 ARM_ARCH_NONE,
27761 FPU_ARCH_VFP_V2),
27762 ARM_CPU_OPT ("fmp626", NULL, ARM_ARCH_V5TE,
27763 ARM_ARCH_NONE,
27764 FPU_ARCH_VFP_V2),
27765 ARM_CPU_OPT ("fa726te", NULL, ARM_ARCH_V5TE,
27766 ARM_ARCH_NONE,
27767 FPU_ARCH_VFP_V2),
27768 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6,
27769 ARM_ARCH_NONE,
27770 FPU_NONE),
27771 ARM_CPU_OPT ("arm1136j-s", NULL, ARM_ARCH_V6,
27772 ARM_ARCH_NONE,
27773 FPU_NONE),
27774 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6,
27775 ARM_ARCH_NONE,
27776 FPU_ARCH_VFP_V2),
27777 ARM_CPU_OPT ("arm1136jf-s", NULL, ARM_ARCH_V6,
27778 ARM_ARCH_NONE,
27779 FPU_ARCH_VFP_V2),
27780 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K,
27781 ARM_ARCH_NONE,
27782 FPU_ARCH_VFP_V2),
27783 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K,
27784 ARM_ARCH_NONE,
27785 FPU_NONE),
27786 ARM_CPU_OPT ("arm1156t2-s", NULL, ARM_ARCH_V6T2,
27787 ARM_ARCH_NONE,
27788 FPU_NONE),
27789 ARM_CPU_OPT ("arm1156t2f-s", NULL, ARM_ARCH_V6T2,
27790 ARM_ARCH_NONE,
27791 FPU_ARCH_VFP_V2),
27792 ARM_CPU_OPT ("arm1176jz-s", NULL, ARM_ARCH_V6KZ,
27793 ARM_ARCH_NONE,
27794 FPU_NONE),
27795 ARM_CPU_OPT ("arm1176jzf-s", NULL, ARM_ARCH_V6KZ,
27796 ARM_ARCH_NONE,
27797 FPU_ARCH_VFP_V2),
27798 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A,
27799 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
27800 FPU_NONE),
27801 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE,
27802 ARM_ARCH_NONE,
27803 FPU_ARCH_NEON_VFP_V4),
27804 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A,
27805 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
27806 ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
27807 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A,
27808 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
27809 ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
27810 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE,
27811 ARM_ARCH_NONE,
27812 FPU_ARCH_NEON_VFP_V4),
27813 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE,
27814 ARM_ARCH_NONE,
27815 FPU_ARCH_NEON_VFP_V4),
27816 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE,
27817 ARM_ARCH_NONE,
27818 FPU_ARCH_NEON_VFP_V4),
27819 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A,
27820 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
27821 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
27822 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A,
27823 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
27824 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
27825 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A,
27826 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
27827 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
27828 ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A,
27829 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
27830 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
27831 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A,
27832 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
27833 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
27834 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A,
27835 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
27836 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
27837 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A,
27838 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
27839 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
27840 ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A,
27841 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
27842 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
27843 ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A,
27844 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
27845 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
27846 ARM_CPU_OPT ("ares", "Ares", ARM_ARCH_V8_2A,
27847 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
27848 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
27849 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R,
27850 ARM_ARCH_NONE,
27851 FPU_NONE),
27852 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R,
27853 ARM_ARCH_NONE,
27854 FPU_ARCH_VFP_V3D16),
27855 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R,
27856 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
27857 FPU_NONE),
27858 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R,
27859 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
27860 FPU_ARCH_VFP_V3D16),
27861 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R,
27862 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
27863 FPU_ARCH_VFP_V3D16),
27864 ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R,
27865 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
27866 FPU_ARCH_NEON_VFP_ARMV8),
27867 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN,
27868 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
27869 FPU_NONE),
27870 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE,
27871 ARM_ARCH_NONE,
27872 FPU_NONE),
27873 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM,
27874 ARM_ARCH_NONE,
27875 FPU_NONE),
27876 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM,
27877 ARM_ARCH_NONE,
27878 FPU_NONE),
27879 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M,
27880 ARM_ARCH_NONE,
27881 FPU_NONE),
27882 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM,
27883 ARM_ARCH_NONE,
27884 FPU_NONE),
27885 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM,
27886 ARM_ARCH_NONE,
27887 FPU_NONE),
27888 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM,
27889 ARM_ARCH_NONE,
27890 FPU_NONE),
27891 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A,
27892 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
27893 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
27894 ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A,
27895 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
27896 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
27897 /* ??? XSCALE is really an architecture. */
27898 ARM_CPU_OPT ("xscale", NULL, ARM_ARCH_XSCALE,
27899 ARM_ARCH_NONE,
27900 FPU_ARCH_VFP_V2),
27901
27902 /* ??? iwmmxt is not a processor. */
27903 ARM_CPU_OPT ("iwmmxt", NULL, ARM_ARCH_IWMMXT,
27904 ARM_ARCH_NONE,
27905 FPU_ARCH_VFP_V2),
27906 ARM_CPU_OPT ("iwmmxt2", NULL, ARM_ARCH_IWMMXT2,
27907 ARM_ARCH_NONE,
27908 FPU_ARCH_VFP_V2),
27909 ARM_CPU_OPT ("i80200", NULL, ARM_ARCH_XSCALE,
27910 ARM_ARCH_NONE,
27911 FPU_ARCH_VFP_V2),
27912
27913 /* Maverick. */
27914 ARM_CPU_OPT ("ep9312", "ARM920T",
27915 ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
27916 ARM_ARCH_NONE, FPU_ARCH_MAVERICK),
27917
27918 /* Marvell processors. */
27919 ARM_CPU_OPT ("marvell-pj4", NULL, ARM_ARCH_V7A,
27920 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
27921 FPU_ARCH_VFP_V3D16),
27922 ARM_CPU_OPT ("marvell-whitney", NULL, ARM_ARCH_V7A,
27923 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
27924 FPU_ARCH_NEON_VFP_V4),
27925
27926 /* APM X-Gene family. */
27927 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A,
27928 ARM_ARCH_NONE,
27929 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
27930 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A,
27931 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
27932 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
27933
27934 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
27935 };
27936 #undef ARM_CPU_OPT
27937
27938 struct arm_ext_table
27939 {
27940 const char * name;
27941 size_t name_len;
27942 const arm_feature_set merge;
27943 const arm_feature_set clear;
27944 };
27945
27946 struct arm_arch_option_table
27947 {
27948 const char * name;
27949 size_t name_len;
27950 const arm_feature_set value;
27951 const arm_feature_set default_fpu;
27952 const struct arm_ext_table * ext_table;
27953 };
27954
27955 /* Used to add support for +E and +noE extension. */
27956 #define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
27957 /* Used to add support for a +E extension. */
27958 #define ARM_ADD(E, M) { E, sizeof(E) - 1, M, ARM_ARCH_NONE }
27959 /* Used to add support for a +noE extension. */
27960 #define ARM_REMOVE(E, C) { E, sizeof(E) -1, ARM_ARCH_NONE, C }
27961
27962 #define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
27963 ~0 & ~FPU_ENDIAN_PURE)
27964
27965 static const struct arm_ext_table armv5te_ext_table[] =
27966 {
27967 ARM_EXT ("fp", FPU_ARCH_VFP_V2, ALL_FP),
27968 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
27969 };
27970
27971 static const struct arm_ext_table armv7_ext_table[] =
27972 {
27973 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
27974 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
27975 };
27976
27977 static const struct arm_ext_table armv7ve_ext_table[] =
27978 {
27979 ARM_EXT ("fp", FPU_ARCH_VFP_V4D16, ALL_FP),
27980 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),
27981 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
27982 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
27983 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
27984 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16), /* Alias for +fp. */
27985 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),
27986
27987 ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4,
27988 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),
27989
27990 /* Aliases for +simd. */
27991 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),
27992
27993 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
27994 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
27995 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
27996
27997 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
27998 };
27999
28000 static const struct arm_ext_table armv7a_ext_table[] =
28001 {
28002 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
28003 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp. */
28004 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
28005 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
28006 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
28007 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),
28008 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),
28009
28010 ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1,
28011 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),
28012
28013 /* Aliases for +simd. */
28014 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
28015 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
28016
28017 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
28018 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),
28019
28020 ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP)),
28021 ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC)),
28022 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
28023 };
28024
28025 static const struct arm_ext_table armv7r_ext_table[] =
28026 {
28027 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD),
28028 ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD), /* Alias for +fp.sp. */
28029 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
28030 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp. */
28031 ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16),
28032 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
28033 ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
28034 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV)),
28035 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
28036 };
28037
28038 static const struct arm_ext_table armv7em_ext_table[] =
28039 {
28040 ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16, ALL_FP),
28041 /* Alias for +fp, used to be known as fpv4-sp-d16. */
28042 ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16),
28043 ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16),
28044 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
28045 ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16),
28046 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
28047 };
28048
28049 static const struct arm_ext_table armv8a_ext_table[] =
28050 {
28051 ARM_ADD ("crc", ARCH_CRC_ARMV8),
28052 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
28053 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
28054 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
28055
28056 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28057 should use the +simd option to turn on FP. */
28058 ARM_REMOVE ("fp", ALL_FP),
28059 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
28060 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
28061 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
28062 };
28063
28064
28065 static const struct arm_ext_table armv81a_ext_table[] =
28066 {
28067 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
28068 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
28069 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
28070
28071 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28072 should use the +simd option to turn on FP. */
28073 ARM_REMOVE ("fp", ALL_FP),
28074 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
28075 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
28076 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
28077 };
28078
/* Architecture extensions accepted after -march=armv8.2-a (also reused for
   armv8.3-a in the arm_archs table below).  Adds the v8.2 half-precision
   and dot-product options on top of the v8.1 SIMD/crypto sets.  */
static const struct arm_ext_table armv82a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16),
  ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML),
  /* +nocrypto clears only the crypto bits, not the underlying SIMD/FP.  */
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
28095
/* Architecture extensions accepted after -march=armv8.4-a.  Dot-product is
   folded into the base +simd set here, and +fp16 implies FML.  */
static const struct arm_ext_table armv84a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  /* +nocrypto clears only the crypto bits, not the underlying SIMD/FP.  */
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
28110
/* Architecture extensions accepted after -march=armv8.5-a.
   NOTE(review): unlike the v8.4 table there are no +sb/+predres entries —
   presumably because Armv8.5-A includes those features in the base
   architecture; confirm against ARM_ARCH_V8_5A in include/opcode/arm.h.  */
static const struct arm_ext_table armv85a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  /* +nocrypto clears only the crypto bits, not the underlying SIMD/FP.  */
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
28123
/* Architecture extensions accepted after -march=armv8-m.main.  The M-profile
   +fp is single-precision only; +fp.dp upgrades to double precision.  */
static const struct arm_ext_table armv8m_main_ext_table[] =
{
  /* +dsp and +nodsp toggle the same core feature bits.  */
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
  ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16, ALL_FP),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
28132
/* Architecture extensions accepted after -march=armv8.1-m.main.  This is
   where the MVE (Helium) options live: +mve enables the integer MVE unit,
   +mve.fp additionally enables MVE floating point together with the scalar
   FP16-capable VFP; +nomve clears both MVE and MVE-FP bits.  */
static const struct arm_ext_table armv8_1m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
  /* Scalar FP here always includes FP16 and FMA support.  */
  ARM_EXT ("fp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA),
	   ALL_FP),
  ARM_ADD ("fp.dp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  ARM_EXT ("mve", ARM_FEATURE_COPROC (FPU_MVE),
	   ARM_FEATURE_COPROC (FPU_MVE | FPU_MVE_FP)),
  ARM_ADD ("mve.fp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_MVE | FPU_MVE_FP | FPU_VFP_V5_SP_D16 |
			FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
28152
/* Architecture extensions accepted after -march=armv8-r.  R-profile allows a
   single-precision-only FP unit, exposed here as +fp.sp.  */
static const struct arm_ext_table armv8r_ext_table[] =
{
  ARM_ADD ("crc", ARCH_CRC_ARMV8),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  /* +nocrypto clears only the crypto bits, not the underlying SIMD/FP.  */
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
28163
28164 /* This list should, at a minimum, contain all the architecture names
28165 recognized by GCC. */
28166 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
28167 #define ARM_ARCH_OPT2(N, V, DF, ext) \
28168 { N, sizeof (N) - 1, V, DF, ext##_ext_table }
28169
28170 static const struct arm_arch_option_table arm_archs[] =
28171 {
28172 ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
28173 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
28174 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
28175 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
28176 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
28177 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
28178 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
28179 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
28180 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
28181 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
28182 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
28183 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
28184 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
28185 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
28186 ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP, armv5te),
28187 ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP, armv5te),
28188 ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP, armv5te),
28189 ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
28190 ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
28191 ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP, armv5te),
28192 ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP, armv5te),
28193 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
28194 kept to preserve existing behaviour. */
28195 ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
28196 ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
28197 ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP, armv5te),
28198 ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP, armv5te),
28199 ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP, armv5te),
28200 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
28201 kept to preserve existing behaviour. */
28202 ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
28203 ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
28204 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
28205 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
28206 ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP, armv7),
28207 /* The official spelling of the ARMv7 profile variants is the dashed form.
28208 Accept the non-dashed form for compatibility with old toolchains. */
28209 ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
28210 ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP, armv7ve),
28211 ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
28212 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
28213 ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
28214 ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
28215 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
28216 ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP, armv7em),
28217 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
28218 ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP,
28219 armv8m_main),
28220 ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN, FPU_ARCH_VFP,
28221 armv8_1m_main),
28222 ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP, armv8a),
28223 ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP, armv81a),
28224 ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP, armv82a),
28225 ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP, armv82a),
28226 ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP, armv8r),
28227 ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A, FPU_ARCH_VFP, armv84a),
28228 ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A, FPU_ARCH_VFP, armv85a),
28229 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
28230 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
28231 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2, FPU_ARCH_VFP),
28232 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
28233 };
28234 #undef ARM_ARCH_OPT
28235
/* ISA extensions in the co-processor and main instruction set space.  */

struct arm_option_extension_value_table
{
  const char * name;			/* Extension name, e.g. "crc".  */
  size_t name_len;			/* strlen (name), precomputed.  */
  const arm_feature_set merge_value;	/* Features enabled by "+name".  */
  const arm_feature_set clear_value;	/* Features removed by "+noname".  */
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};
28249
28250 /* The following table must be in alphabetical order with a NULL last entry. */
28251
28252 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
28253 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
28254
28255 /* DEPRECATED: Refrain from using this table to add any new extensions, instead
28256 use the context sensitive approach using arm_ext_table's. */
28257 static const struct arm_option_extension_value_table arm_extensions[] =
28258 {
28259 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28260 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
28261 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
28262 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
28263 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
28264 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
28265 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
28266 ARM_ARCH_V8_2A),
28267 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
28268 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
28269 ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
28270 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
28271 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
28272 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
28273 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
28274 ARM_ARCH_V8_2A),
28275 ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
28276 | ARM_EXT2_FP16_FML),
28277 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
28278 | ARM_EXT2_FP16_FML),
28279 ARM_ARCH_V8_2A),
28280 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
28281 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
28282 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
28283 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
28284 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
28285 Thumb divide instruction. Due to this having the same name as the
28286 previous entry, this will be ignored when doing command-line parsing and
28287 only considered by build attribute selection code. */
28288 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
28289 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
28290 ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
28291 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
28292 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
28293 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
28294 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
28295 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
28296 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
28297 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
28298 ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
28299 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
28300 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
28301 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
28302 ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
28303 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
28304 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
28305 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
28306 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
28307 ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
28308 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
28309 ARM_ARCH_V8A),
28310 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
28311 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
28312 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
28313 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1,
28314 ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
28315 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
28316 ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
28317 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
28318 ARM_ARCH_V8A),
28319 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
28320 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
28321 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
28322 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
28323 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
28324 ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
28325 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
28326 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
28327 | ARM_EXT_DIV),
28328 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
28329 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
28330 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
28331 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
28332 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
28333 };
28334 #undef ARM_EXT_OPT
28335
/* ISA floating-point and Advanced SIMD extensions.  */
struct arm_option_fpu_value_table
{
  const char * name;		/* FPU name accepted by -mfpu=.  */
  const arm_feature_set value;	/* Feature set selected by that name.  */
};
28342
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Matched by exact string compare in arm_parse_fpu;
   several historical CPU names (arm1020t, arm1136jf-s, ...) are accepted
   here as aliases for the VFP version those parts implemented.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}
};
28393
/* Generic name -> integer mapping used for -mfloat-abi= and -meabi=.  */
struct arm_option_value_table
{
  const char *name;	/* Option value spelling.  */
  long value;		/* Value stored when NAME matches.  */
};
28399
/* Accepted arguments for -mfloat-abi=, matched in arm_parse_float_abi.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
28407
#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
28418
/* Descriptor for a long option such as -mcpu= that carries an argument and
   is dispatched to a dedicated sub-option parser.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
28426
/* Parse STR, a sequence of "+ext" / "+noext" tokens following a CPU or
   architecture name, applying each extension to *EXT_SET.  OPT_SET is the
   base feature set used to validate that an extension applies to the chosen
   architecture.  If EXT_TABLE is non-NULL it is the architecture-specific
   extension table and is consulted before the legacy global arm_extensions
   table.  Returns TRUE on success; on failure issues a diagnostic via
   as_bad and returns FALSE.  */

static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
		     arm_feature_set *ext_set,
		     const struct arm_ext_table *ext_table)
{
  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      /* Every token must begin with '+'.  */
      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN is the length of the current token, up to the next '+' or the
	 end of the string.  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  /* "no" prefix: switch (irreversibly) into removal mode and strip
	     the prefix before matching the table.  */
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      /* A plain "+ext" after a "+noext" violates the required
		 add-before-remove ordering.  */
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* First try the architecture-specific table, if one was supplied.  A
	 hit here applies the extension and skips the legacy table.  */
      if (ext_table != NULL)
	{
	  const struct arm_ext_table * ext_opt = ext_table;
	  bfd_boolean found = FALSE;
	  for (; ext_opt->name != NULL; ext_opt++)
	    if (ext_opt->name_len == len
		&& strncmp (ext_opt->name, str, len) == 0)
	      {
		if (adding_value)
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->merge))
		      /* TODO: Option not supported.  When we remove the
			 legacy table this case should error out.  */
		      continue;

		    ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, ext_opt->merge);
		  }
		else
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->clear))
		      /* TODO: Option not supported.  When we remove the
			 legacy table this case should error out.  */
		      continue;
		    ARM_CLEAR_FEATURE (*ext_set, *ext_set, ext_opt->clear);
		  }
		found = TRUE;
		break;
	      }
	  if (found)
	    {
	      str = ext;
	      continue;
	    }
	}

      /* Scan over the options table trying to find an exact match.  OPT is
	 not reset here: it resumes from just past the previous match, which
	 is what enforces alphabetical ordering.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    /* Allowing Thumb division instructions for ARMv7 in autodetection
	       rely on this break so that duplicate extensions (extensions
	       with the same name as a previous extension in the list) are not
	       considered for command-line parsing.  */
	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  Rescan from
	     the top of the table to tell the two apart.  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return TRUE;
}
28594
28595 static bfd_boolean
28596 arm_parse_cpu (const char *str)
28597 {
28598 const struct arm_cpu_option_table *opt;
28599 const char *ext = strchr (str, '+');
28600 size_t len;
28601
28602 if (ext != NULL)
28603 len = ext - str;
28604 else
28605 len = strlen (str);
28606
28607 if (len == 0)
28608 {
28609 as_bad (_("missing cpu name `%s'"), str);
28610 return FALSE;
28611 }
28612
28613 for (opt = arm_cpus; opt->name != NULL; opt++)
28614 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
28615 {
28616 mcpu_cpu_opt = &opt->value;
28617 if (mcpu_ext_opt == NULL)
28618 mcpu_ext_opt = XNEW (arm_feature_set);
28619 *mcpu_ext_opt = opt->ext;
28620 mcpu_fpu_opt = &opt->default_fpu;
28621 if (opt->canonical_name)
28622 {
28623 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
28624 strcpy (selected_cpu_name, opt->canonical_name);
28625 }
28626 else
28627 {
28628 size_t i;
28629
28630 if (len >= sizeof selected_cpu_name)
28631 len = (sizeof selected_cpu_name) - 1;
28632
28633 for (i = 0; i < len; i++)
28634 selected_cpu_name[i] = TOUPPER (opt->name[i]);
28635 selected_cpu_name[i] = 0;
28636 }
28637
28638 if (ext != NULL)
28639 return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt, NULL);
28640
28641 return TRUE;
28642 }
28643
28644 as_bad (_("unknown cpu `%s'"), str);
28645 return FALSE;
28646 }
28647
28648 static bfd_boolean
28649 arm_parse_arch (const char *str)
28650 {
28651 const struct arm_arch_option_table *opt;
28652 const char *ext = strchr (str, '+');
28653 size_t len;
28654
28655 if (ext != NULL)
28656 len = ext - str;
28657 else
28658 len = strlen (str);
28659
28660 if (len == 0)
28661 {
28662 as_bad (_("missing architecture name `%s'"), str);
28663 return FALSE;
28664 }
28665
28666 for (opt = arm_archs; opt->name != NULL; opt++)
28667 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
28668 {
28669 march_cpu_opt = &opt->value;
28670 if (march_ext_opt == NULL)
28671 march_ext_opt = XNEW (arm_feature_set);
28672 *march_ext_opt = arm_arch_none;
28673 march_fpu_opt = &opt->default_fpu;
28674 strcpy (selected_cpu_name, opt->name);
28675
28676 if (ext != NULL)
28677 return arm_parse_extension (ext, march_cpu_opt, march_ext_opt,
28678 opt->ext_table);
28679
28680 return TRUE;
28681 }
28682
28683 as_bad (_("unknown architecture `%s'\n"), str);
28684 return FALSE;
28685 }
28686
28687 static bfd_boolean
28688 arm_parse_fpu (const char * str)
28689 {
28690 const struct arm_option_fpu_value_table * opt;
28691
28692 for (opt = arm_fpus; opt->name != NULL; opt++)
28693 if (streq (opt->name, str))
28694 {
28695 mfpu_opt = &opt->value;
28696 return TRUE;
28697 }
28698
28699 as_bad (_("unknown floating point format `%s'\n"), str);
28700 return FALSE;
28701 }
28702
28703 static bfd_boolean
28704 arm_parse_float_abi (const char * str)
28705 {
28706 const struct arm_option_value_table * opt;
28707
28708 for (opt = arm_float_abis; opt->name != NULL; opt++)
28709 if (streq (opt->name, str))
28710 {
28711 mfloat_abi_opt = opt->value;
28712 return TRUE;
28713 }
28714
28715 as_bad (_("unknown floating point abi `%s'\n"), str);
28716 return FALSE;
28717 }
28718
#ifdef OBJ_ELF
/* Handle -meabi=<ver>: exact-match lookup in arm_eabis, storing the matched
   ELF flag value in meabi_flags.  Returns TRUE on success.  */

static bfd_boolean
arm_parse_eabi (const char * str)
{
  const struct arm_option_value_table *entry;

  for (entry = arm_eabis; entry->name != NULL; entry++)
    {
      if (!streq (entry->name, str))
	continue;

      meabi_flags = entry->value;
      return TRUE;
    }

  as_bad (_("unknown EABI `%s'\n"), str);
  return FALSE;
}
#endif
28735
28736 static bfd_boolean
28737 arm_parse_it_mode (const char * str)
28738 {
28739 bfd_boolean ret = TRUE;
28740
28741 if (streq ("arm", str))
28742 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
28743 else if (streq ("thumb", str))
28744 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
28745 else if (streq ("always", str))
28746 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
28747 else if (streq ("never", str))
28748 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
28749 else
28750 {
28751 as_bad (_("unknown implicit IT mode `%s', should be "\
28752 "arm, thumb, always, or never."), str);
28753 ret = FALSE;
28754 }
28755
28756 return ret;
28757 }
28758
28759 static bfd_boolean
28760 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
28761 {
28762 codecomposer_syntax = TRUE;
28763 arm_comment_chars[0] = ';';
28764 arm_line_separator_chars[0] = 0;
28765 return TRUE;
28766 }
28767
/* Long options dispatched by md_parse_option; each entry's func receives the
   text following the '=' (or the option name for flag-style options like
   -mccs).  The table is terminated by a NULL option.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
28788
/* GAS hook: decode target-specific command-line option C with argument ARG.
   Fixed options (-EB/-EL/--fix-v4bx/--fdpic/-a) are handled directly; all
   others are looked up in, successively, the arm_opts, arm_legacy_opts and
   arm_long_opts tables.  Returns 1 if the option was consumed, 0 if it is
   unknown (letting generic option handling take over).  */

int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

#ifdef OBJ_ELF
    case OPTION_FDPIC:
      arm_fdpic = TRUE;
      break;
#endif /* OBJ_ELF */

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Simple flag options: exact match on the option text, optionally
	 setting *var to a fixed value.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options: like the above but *var points at a feature set.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  C is the first
	     letter of the option; ARG carries the rest, so compare ARG
	     against the option text minus its leading character.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with the text that follows the
		 matched prefix (e.g. the "<cpu>" part of -mcpu=<cpu>).  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
28885
/* GAS hook: print the ARM-specific option summary to FP, covering both the
   short-option tables and the long options, plus the conditionally-compiled
   endianness and ELF options.  */

void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  /* Entries with a NULL help string are undocumented on purpose.  */
  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));

#ifdef OBJ_ELF
  fprintf (fp, _("\
  --fdpic                 generate an FDPIC object file\n"));
#endif /* OBJ_ELF */
}
28920
28921 #ifdef OBJ_ELF
28922
/* Pairs an EABI Tag_CPU_arch value with the feature set of the architecture
   it describes; used to pick build attributes from assembled features.  */
typedef struct
{
  int val;		/* Tag_CPU_arch value, -1 terminates the table.  */
  arm_feature_set flags; /* Feature set of the corresponding architecture.  */
} cpu_arch_ver_table;
28928
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   chronologically for architectures, with an exception for ARMv6-M and
   ARMv6S-M due to legacy reasons.  No new architecture should have a
   special case.  This allows for build attribute selection results to be
   stable when new architectures are added.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V1},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2S},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3M},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4xM},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4TxM},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4T},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5xM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5TxM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5T},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TExP},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TE},
    {TAG_CPU_ARCH_V5TEJ,      ARM_ARCH_V5TEJ},
    {TAG_CPU_ARCH_V6,	      ARM_ARCH_V6},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6Z},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6KZ},
    {TAG_CPU_ARCH_V6K,	      ARM_ARCH_V6K},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6T2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6ZT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KZT2},

    /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
       always selected build attributes to match those of ARMv6-M
       (resp. ARMv6S-M).  However, due to these architectures being a strict
       subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
       would be selected when fully respecting chronology of architectures.
       It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
       move them before ARMv7 architectures.  */
    {TAG_CPU_ARCH_V6_M,	      ARM_ARCH_V6M},
    {TAG_CPU_ARCH_V6S_M,      ARM_ARCH_V6SM},

    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7A},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7R},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7M},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7VE},
    {TAG_CPU_ARCH_V7E_M,      ARM_ARCH_V7EM},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_1A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_2A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_3A},
    {TAG_CPU_ARCH_V8M_BASE,   ARM_ARCH_V8M_BASE},
    {TAG_CPU_ARCH_V8M_MAIN,   ARM_ARCH_V8M_MAIN},
    {TAG_CPU_ARCH_V8R,	      ARM_ARCH_V8R},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_4A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_5A},
    {TAG_CPU_ARCH_V8_1M_MAIN, ARM_ARCH_V8_1M_MAIN},
    {-1,		      ARM_ARCH_NONE}
};
28989
28990 /* Set an attribute if it has not already been set by the user. */
28991
28992 static void
28993 aeabi_set_attribute_int (int tag, int value)
28994 {
28995 if (tag < 1
28996 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
28997 || !attributes_set_explicitly[tag])
28998 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
28999 }
29000
29001 static void
29002 aeabi_set_attribute_string (int tag, const char *value)
29003 {
29004 if (tag < 1
29005 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
29006 || !attributes_set_explicitly[tag])
29007 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
29008 }
29009
/* Return whether features in the *NEEDED feature set are available via
   extensions for the architecture whose feature set is *ARCH_FSET.  */

static bfd_boolean
have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
			    const arm_feature_set *needed)
{
  int i, nb_allowed_archs;
  arm_feature_set ext_fset;
  const struct arm_option_extension_value_table *opt;

  /* Accumulate into EXT_FSET the features contributed by every extension
     that both provides something in *NEEDED and is allowed for *ARCH_FSET.  */
  ext_fset = arm_arch_none;
  for (opt = arm_extensions; opt->name != NULL; opt++)
    {
      /* Extension does not provide any feature we need.  */
      if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
	continue;

      nb_allowed_archs =
	sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
      for (i = 0; i < nb_allowed_archs; i++)
	{
	  /* Empty entry.  */
	  if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
	    break;

	  /* Extension is available, add it.  */
	  if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
	    ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
	}
    }

  /* Can we enable all features in *needed?  */
  return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
}
29045
/* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
   a given architecture feature set *ARCH_EXT_FSET including extension feature
   set *EXT_FSET.  Selection logic used depend on EXACT_MATCH:
   - if true, check for an exact match of the architecture modulo extensions;
   - otherwise, select build attribute value of the first superset
     architecture released so that results remains stable when new architectures
     are added.
   For -march/-mcpu=all the build attribute value of the most featureful
   architecture is returned.  Tag_CPU_arch_profile result is returned in
   PROFILE.  Returns -1 (leaving *PROFILE untouched) when no entry of
   cpu_arch_ver covers the requested features.  */

static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8_1M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* ARCH_FSET is the base architecture features, with extensions removed.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  /* Walk cpu_arch_ver in table order (roughly chronological; see the
     ARMv6-M ordering note in the table itself).  */
  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      /* Compare architecture bits only; ignore any FPU feature bits.  */
      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  if (p_ver_ret == NULL)
    return -1;

found:
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
29157
/* Set the public EABI object attributes.  Run at the end of assembly
   (arm_md_end) and again after relaxation (arm_md_post_relax).  */

static void
aeabi_set_public_attributes (void)
{
  char profile = '\0';
  int arch = -1;
  int virt_sec = 0;
  int fp16_optional = 0;
  int skip_exact_match = 0;
  arm_feature_set flags, flags_arch, flags_ext;

  /* Autodetection mode, choose the architecture based the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* Code run during relaxation relies on selected_cpu being set.  */
      ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
      flags_ext = arm_arch_none;
      ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
      selected_ext = flags_ext;
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    {
      ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
      ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
      flags_ext = selected_ext;
      flags = selected_cpu;
    }
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);

  /* Allow the user to override the reported architecture.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch))
    {
      ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
      flags_ext = arm_arch_none;
    }
  else
    skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);

  /* When this function is run again after relaxation has happened there is no
     way to determine whether an architecture or CPU was specified by the user:
     - selected_cpu is set above for relaxation to work;
     - march_cpu_opt is not set if only -mcpu or .cpu is used;
     - mcpu_cpu_opt is set to arm_arch_any for autodetection.
     Therefore, if not in -march=all case we first try an exact match and fall
     back to autodetection.  */
  if (!skip_exact_match)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
  if (arch == -1)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
  if (arch == -1)
    as_bad (_("no architecture contains all the instructions used\n"));

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* An "armv..." name denotes an architecture rather than a CPU:
	 drop the "armv" prefix and upper-case the remainder.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || ARM_FEATURE_ZERO (flags_arch))
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || ARM_FEATURE_ZERO (flags_arch))
    {
      int thumb_isa_use;

      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_MVE_arch: 2 when floating-point MVE is present, 1 for integer-only
     MVE.  */
  if (ARM_CPU_HAS_FEATURE (flags, mve_fp_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, mve_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 1);

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use: bit 0 for the Security Extensions (smc),
     bit 1 for the Virtualization Extensions (hvc/eret).  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
29368
29369 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
29370 finished and free extension feature bits which will not be used anymore. */
29371
29372 void
29373 arm_md_post_relax (void)
29374 {
29375 aeabi_set_public_attributes ();
29376 XDELETE (mcpu_ext_opt);
29377 mcpu_ext_opt = NULL;
29378 XDELETE (march_ext_opt);
29379 march_ext_opt = NULL;
29380 }
29381
29382 /* Add the default contents for the .ARM.attributes section. */
29383
29384 void
29385 arm_md_end (void)
29386 {
29387 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
29388 return;
29389
29390 aeabi_set_public_attributes ();
29391 }
29392 #endif /* OBJ_ELF */
29393
/* Parse a .cpu directive.  Selects the target CPU, updating the selected
   architecture, extension and CPU feature sets and the recorded CPU name.  */

static void
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_cpu_option_table *opt;
  char *name;
  char saved_char;

  /* NUL-terminate the CPU name in place, remembering the overwritten
     character so the input line can be restored afterwards.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	selected_arch = opt->value;
	selected_ext = opt->ext;
	ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
	/* Record the CPU name (used for Tag_CPU_name), preferring the
	   table's canonical spelling; otherwise upper-case the operand.  */
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;
	    for (i = 0; opt->name[i]; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);

	    selected_cpu_name[i] = 0;
	  }
	ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
29436
29437 /* Parse a .arch directive. */
29438
29439 static void
29440 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
29441 {
29442 const struct arm_arch_option_table *opt;
29443 char saved_char;
29444 char *name;
29445
29446 name = input_line_pointer;
29447 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
29448 input_line_pointer++;
29449 saved_char = *input_line_pointer;
29450 *input_line_pointer = 0;
29451
29452 /* Skip the first "all" entry. */
29453 for (opt = arm_archs + 1; opt->name != NULL; opt++)
29454 if (streq (opt->name, name))
29455 {
29456 selected_arch = opt->value;
29457 selected_ext = arm_arch_none;
29458 selected_cpu = selected_arch;
29459 strcpy (selected_cpu_name, opt->name);
29460 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
29461 *input_line_pointer = saved_char;
29462 demand_empty_rest_of_line ();
29463 return;
29464 }
29465
29466 as_bad (_("unknown architecture `%s'\n"), name);
29467 *input_line_pointer = saved_char;
29468 ignore_rest_of_line ();
29469 }
29470
29471 /* Parse a .object_arch directive. */
29472
29473 static void
29474 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
29475 {
29476 const struct arm_arch_option_table *opt;
29477 char saved_char;
29478 char *name;
29479
29480 name = input_line_pointer;
29481 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
29482 input_line_pointer++;
29483 saved_char = *input_line_pointer;
29484 *input_line_pointer = 0;
29485
29486 /* Skip the first "all" entry. */
29487 for (opt = arm_archs + 1; opt->name != NULL; opt++)
29488 if (streq (opt->name, name))
29489 {
29490 selected_object_arch = opt->value;
29491 *input_line_pointer = saved_char;
29492 demand_empty_rest_of_line ();
29493 return;
29494 }
29495
29496 as_bad (_("unknown architecture `%s'\n"), name);
29497 *input_line_pointer = saved_char;
29498 ignore_rest_of_line ();
29499 }
29500
/* Parse a .arch_extension directive.  The operand is an extension name,
   optionally prefixed with "no" to remove that extension's feature bits
   instead of adding them.  */

static void
s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_option_extension_value_table *opt;
  char saved_char;
  char *name;
  int adding_value = 1;

  /* NUL-terminate the extension name in place, remembering the overwritten
     character so the input line can be restored afterwards.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* A "no" prefix (eg. "nofp") requests removal of the extension.  */
  if (strlen (name) >= 2
      && strncmp (name, "no", 2) == 0)
    {
      adding_value = 0;
      name += 2;
    }

  for (opt = arm_extensions; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	/* The operand of sizeof is unevaluated, so the use of the
	   uninitialized I below is harmless: it only supplies a type.  */
	int i, nb_allowed_archs =
	  sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
	/* Check the extension is valid for the selected base architecture.  */
	for (i = 0; i < nb_allowed_archs; i++)
	  {
	    /* Empty entry.  NOTE(review): this treats an "any" entry as an
	       empty slot and skips it, whereas have_ext_for_needed_feat_p
	       stops scanning at such an entry -- confirm which sentinel
	       value the allowed_archs arrays actually use.  */
	    if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
	      continue;
	    if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
	      break;
	  }

	if (i == nb_allowed_archs)
	  {
	    as_bad (_("architectural extension `%s' is not allowed for the "
		      "current base architecture"), name);
	    break;
	  }

	if (adding_value)
	  ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
				  opt->merge_value);
	else
	  ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);

	/* Recompute the full CPU feature set from arch + extensions.  */
	ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
	ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	/* Allowing Thumb division instructions for ARMv7 in autodetection rely
	   on this return so that duplicate extensions (extensions with the
	   same name as a previous extension in the list) are not considered
	   for command-line parsing.  */
	return;
      }

  if (opt->name == NULL)
    as_bad (_("unknown architecture extension `%s'\n"), name);

  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
29568
/* Parse a .fpu directive.  Selects the floating point format and merges it
   into the active CPU feature set.  */

static void
s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_option_fpu_value_table *opt;
  char saved_char;
  char *name;

  /* NUL-terminate the FPU name in place, remembering the overwritten
     character so the input line can be restored afterwards.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	selected_fpu = opt->value;
#ifndef CPU_DEFAULT
	/* With no built-in default CPU and none selected yet, accept any
	   architecture's instructions alongside the chosen FPU.  */
	if (no_cpu_selected ())
	  ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
	else
#endif
	  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown floating point format `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
29603
/* Copy symbol information.  Propagates the ARM-specific per-symbol flag
   bits (accessed via ARM_GET_FLAG) from SRC to DEST.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
29611
29612 #ifdef OBJ_ELF
29613 /* Given a symbolic attribute NAME, return the proper integer value.
29614 Returns -1 if the attribute is not known. */
29615
29616 int
29617 arm_convert_symbolic_attribute (const char *name)
29618 {
29619 static const struct
29620 {
29621 const char * name;
29622 const int tag;
29623 }
29624 attribute_table[] =
29625 {
29626 /* When you modify this table you should
29627 also modify the list in doc/c-arm.texi. */
29628 #define T(tag) {#tag, tag}
29629 T (Tag_CPU_raw_name),
29630 T (Tag_CPU_name),
29631 T (Tag_CPU_arch),
29632 T (Tag_CPU_arch_profile),
29633 T (Tag_ARM_ISA_use),
29634 T (Tag_THUMB_ISA_use),
29635 T (Tag_FP_arch),
29636 T (Tag_VFP_arch),
29637 T (Tag_WMMX_arch),
29638 T (Tag_Advanced_SIMD_arch),
29639 T (Tag_PCS_config),
29640 T (Tag_ABI_PCS_R9_use),
29641 T (Tag_ABI_PCS_RW_data),
29642 T (Tag_ABI_PCS_RO_data),
29643 T (Tag_ABI_PCS_GOT_use),
29644 T (Tag_ABI_PCS_wchar_t),
29645 T (Tag_ABI_FP_rounding),
29646 T (Tag_ABI_FP_denormal),
29647 T (Tag_ABI_FP_exceptions),
29648 T (Tag_ABI_FP_user_exceptions),
29649 T (Tag_ABI_FP_number_model),
29650 T (Tag_ABI_align_needed),
29651 T (Tag_ABI_align8_needed),
29652 T (Tag_ABI_align_preserved),
29653 T (Tag_ABI_align8_preserved),
29654 T (Tag_ABI_enum_size),
29655 T (Tag_ABI_HardFP_use),
29656 T (Tag_ABI_VFP_args),
29657 T (Tag_ABI_WMMX_args),
29658 T (Tag_ABI_optimization_goals),
29659 T (Tag_ABI_FP_optimization_goals),
29660 T (Tag_compatibility),
29661 T (Tag_CPU_unaligned_access),
29662 T (Tag_FP_HP_extension),
29663 T (Tag_VFP_HP_extension),
29664 T (Tag_ABI_FP_16bit_format),
29665 T (Tag_MPextension_use),
29666 T (Tag_DIV_use),
29667 T (Tag_nodefaults),
29668 T (Tag_also_compatible_with),
29669 T (Tag_conformance),
29670 T (Tag_T2EE_use),
29671 T (Tag_Virtualization_use),
29672 T (Tag_DSP_extension),
29673 T (Tag_MVE_arch),
29674 /* We deliberately do not include Tag_MPextension_use_legacy. */
29675 #undef T
29676 };
29677 unsigned int i;
29678
29679 if (name == NULL)
29680 return -1;
29681
29682 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
29683 if (streq (name, attribute_table[i].name))
29684 return attribute_table[i].tag;
29685
29686 return -1;
29687 }
29688
29689 /* Apply sym value for relocations only in the case that they are for
29690 local symbols in the same segment as the fixup and you have the
29691 respective architectural feature for blx and simple switches. */
29692
29693 int
29694 arm_apply_sym_value (struct fix * fixP, segT this_seg)
29695 {
29696 if (fixP->fx_addsy
29697 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
29698 /* PR 17444: If the local symbol is in a different section then a reloc
29699 will always be generated for it, so applying the symbol value now
29700 will result in a double offset being stored in the relocation. */
29701 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
29702 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
29703 {
29704 switch (fixP->fx_r_type)
29705 {
29706 case BFD_RELOC_ARM_PCREL_BLX:
29707 case BFD_RELOC_THUMB_PCREL_BRANCH23:
29708 if (ARM_IS_FUNC (fixP->fx_addsy))
29709 return 1;
29710 break;
29711
29712 case BFD_RELOC_ARM_PCREL_CALL:
29713 case BFD_RELOC_THUMB_PCREL_BLX:
29714 if (THUMB_IS_FUNC (fixP->fx_addsy))
29715 return 1;
29716 break;
29717
29718 default:
29719 break;
29720 }
29721
29722 }
29723 return 0;
29724 }
29725 #endif /* OBJ_ELF */