]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-arm.c
This patch similarly to the AArch64 one enables Dot Product support by default for...
[thirdparty/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2017 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
49 static struct
50 {
51 symbolS * proc_start;
52 symbolS * table_entry;
53 symbolS * personality_routine;
54 int personality_index;
55 /* The segment containing the function. */
56 segT saved_seg;
57 subsegT saved_subseg;
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes;
60 int opcode_count;
61 int opcode_alloc;
62 /* The number of bytes pushed to the stack. */
63 offsetT frame_size;
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
70 offsetT fp_offset;
71 int fp_reg;
72 /* Nonzero if an unwind_setfp directive has been seen. */
73 unsigned fp_used:1;
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored:1;
76 } unwind;
77
78 #endif /* OBJ_ELF */
79
80 /* Results from operand parsing worker functions. */
81
82 typedef enum
83 {
84 PARSE_OPERAND_SUCCESS,
85 PARSE_OPERAND_FAIL,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result;
88
89 enum arm_float_abi
90 {
91 ARM_FLOAT_ABI_HARD,
92 ARM_FLOAT_ABI_SOFTFP,
93 ARM_FLOAT_ABI_SOFT
94 };
95
96 /* Types of processor to assemble for. */
97 #ifndef CPU_DEFAULT
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
101
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
104 #endif
105
106 #ifndef FPU_DEFAULT
107 # ifdef TE_LINUX
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
110 # ifdef OBJ_ELF
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
112 # else
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
115 # endif
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
118 # else
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
121 # endif
122 #endif /* ifndef FPU_DEFAULT */
123
124 #define streq(a, b) (strcmp (a, b) == 0)
125
126 static arm_feature_set cpu_variant;
127 static arm_feature_set arm_arch_used;
128 static arm_feature_set thumb_arch_used;
129
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26 = FALSE;
132 static int atpcs = FALSE;
133 static int support_interwork = FALSE;
134 static int uses_apcs_float = FALSE;
135 static int pic_code = FALSE;
136 static int fix_v4bx = FALSE;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated = TRUE;
139
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax = FALSE;
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set * legacy_cpu = NULL;
147 static const arm_feature_set * legacy_fpu = NULL;
148
149 static const arm_feature_set * mcpu_cpu_opt = NULL;
150 static arm_feature_set * dyn_mcpu_ext_opt = NULL;
151 static const arm_feature_set * mcpu_fpu_opt = NULL;
152 static const arm_feature_set * march_cpu_opt = NULL;
153 static arm_feature_set * dyn_march_ext_opt = NULL;
154 static const arm_feature_set * march_fpu_opt = NULL;
155 static const arm_feature_set * mfpu_opt = NULL;
156 static const arm_feature_set * object_arch = NULL;
157
158 /* Constants for known architecture features. */
159 static const arm_feature_set fpu_default = FPU_DEFAULT;
160 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
161 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
162 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
163 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
164 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
165 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
166 #ifdef OBJ_ELF
167 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
168 #endif
169 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
170
171 #ifdef CPU_DEFAULT
172 static const arm_feature_set cpu_default = CPU_DEFAULT;
173 #endif
174
175 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
176 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
177 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
178 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
179 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
180 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
181 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
182 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
183 static const arm_feature_set arm_ext_v4t_5 =
184 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
185 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
186 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
187 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
188 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
189 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
190 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
191 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
192 static const arm_feature_set arm_ext_v6_notm =
193 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
194 static const arm_feature_set arm_ext_v6_dsp =
195 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
196 static const arm_feature_set arm_ext_barrier =
197 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
198 static const arm_feature_set arm_ext_msr =
199 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
200 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
201 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
202 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
203 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
204 #ifdef OBJ_ELF
205 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
206 #endif
207 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
208 static const arm_feature_set arm_ext_m =
209 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
210 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
211 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
212 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
213 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
214 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
215 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
216 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
217 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
218 static const arm_feature_set arm_ext_v8m_main =
219 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
220 /* Instructions in ARMv8-M only found in M profile architectures. */
221 static const arm_feature_set arm_ext_v8m_m_only =
222 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
223 static const arm_feature_set arm_ext_v6t2_v8m =
224 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
225 /* Instructions shared between ARMv8-A and ARMv8-M. */
226 static const arm_feature_set arm_ext_atomics =
227 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
228 #ifdef OBJ_ELF
229 /* DSP instructions Tag_DSP_extension refers to. */
230 static const arm_feature_set arm_ext_dsp =
231 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
232 #endif
233 static const arm_feature_set arm_ext_ras =
234 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
235 /* FP16 instructions. */
236 static const arm_feature_set arm_ext_fp16 =
237 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
238 static const arm_feature_set arm_ext_v8_3 =
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
240
241 static const arm_feature_set arm_arch_any = ARM_ANY;
242 #ifdef OBJ_ELF
243 static const arm_feature_set fpu_any = FPU_ANY;
244 #endif
245 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
246 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
247 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
248
249 static const arm_feature_set arm_cext_iwmmxt2 =
250 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
251 static const arm_feature_set arm_cext_iwmmxt =
252 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
253 static const arm_feature_set arm_cext_xscale =
254 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
255 static const arm_feature_set arm_cext_maverick =
256 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
257 static const arm_feature_set fpu_fpa_ext_v1 =
258 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
259 static const arm_feature_set fpu_fpa_ext_v2 =
260 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
261 static const arm_feature_set fpu_vfp_ext_v1xd =
262 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
263 static const arm_feature_set fpu_vfp_ext_v1 =
264 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
265 static const arm_feature_set fpu_vfp_ext_v2 =
266 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
267 static const arm_feature_set fpu_vfp_ext_v3xd =
268 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
269 static const arm_feature_set fpu_vfp_ext_v3 =
270 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
271 static const arm_feature_set fpu_vfp_ext_d32 =
272 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
273 static const arm_feature_set fpu_neon_ext_v1 =
274 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
275 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
276 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
277 #ifdef OBJ_ELF
278 static const arm_feature_set fpu_vfp_fp16 =
279 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
280 static const arm_feature_set fpu_neon_ext_fma =
281 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
282 #endif
283 static const arm_feature_set fpu_vfp_ext_fma =
284 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
285 static const arm_feature_set fpu_vfp_ext_armv8 =
286 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
287 static const arm_feature_set fpu_vfp_ext_armv8xd =
288 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
289 static const arm_feature_set fpu_neon_ext_armv8 =
290 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
291 static const arm_feature_set fpu_crypto_ext_armv8 =
292 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
293 static const arm_feature_set crc_ext_armv8 =
294 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
295 static const arm_feature_set fpu_neon_ext_v8_1 =
296 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
297 static const arm_feature_set fpu_neon_ext_dotprod =
298 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
299
300 static int mfloat_abi_opt = -1;
301 /* Record user cpu selection for object attributes. */
302 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
303 /* Must be long enough to hold any of the names in arm_cpus. */
304 static char selected_cpu_name[20];
305
306 extern FLONUM_TYPE generic_floating_point_number;
307
308 /* Return if no cpu was selected on command-line. */
309 static bfd_boolean
310 no_cpu_selected (void)
311 {
312 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
313 }
314
315 #ifdef OBJ_ELF
316 # ifdef EABI_DEFAULT
317 static int meabi_flags = EABI_DEFAULT;
318 # else
319 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
320 # endif
321
322 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
323
324 bfd_boolean
325 arm_is_eabi (void)
326 {
327 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
328 }
329 #endif
330
331 #ifdef OBJ_ELF
332 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
333 symbolS * GOT_symbol;
334 #endif
335
336 /* 0: assemble for ARM,
337 1: assemble for Thumb,
338 2: assemble for Thumb even though target CPU does not support thumb
339 instructions. */
340 static int thumb_mode = 0;
341 /* A value distinct from the possible values for thumb_mode that we
342 can use to record whether thumb_mode has been copied into the
343 tc_frag_data field of a frag. */
344 #define MODE_RECORDED (1 << 4)
345
346 /* Specifies the intrinsic IT insn behavior mode. */
347 enum implicit_it_mode
348 {
349 IMPLICIT_IT_MODE_NEVER = 0x00,
350 IMPLICIT_IT_MODE_ARM = 0x01,
351 IMPLICIT_IT_MODE_THUMB = 0x02,
352 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
353 };
354 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
355
356 /* If unified_syntax is true, we are processing the new unified
357 ARM/Thumb syntax. Important differences from the old ARM mode:
358
359 - Immediate operands do not require a # prefix.
360 - Conditional affixes always appear at the end of the
361 instruction. (For backward compatibility, those instructions
362 that formerly had them in the middle, continue to accept them
363 there.)
364 - The IT instruction may appear, and if it does is validated
365 against subsequent conditional affixes. It does not generate
366 machine code.
367
368 Important differences from the old Thumb mode:
369
370 - Immediate operands do not require a # prefix.
371 - Most of the V6T2 instructions are only available in unified mode.
372 - The .N and .W suffixes are recognized and honored (it is an error
373 if they cannot be honored).
374 - All instructions set the flags if and only if they have an 's' affix.
375 - Conditional affixes may be used. They are validated against
376 preceding IT instructions. Unlike ARM mode, you cannot use a
377 conditional affix except in the scope of an IT instruction. */
378
379 static bfd_boolean unified_syntax = FALSE;
380
381 /* An immediate operand can start with #, and ld*, st*, pld operands
382 can contain [ and ]. We need to tell APP not to elide whitespace
383 before a [, which can appear as the first operand for pld.
384 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
385 const char arm_symbol_chars[] = "#[]{}";
386
387 enum neon_el_type
388 {
389 NT_invtype,
390 NT_untyped,
391 NT_integer,
392 NT_float,
393 NT_poly,
394 NT_signed,
395 NT_unsigned
396 };
397
398 struct neon_type_el
399 {
400 enum neon_el_type type;
401 unsigned size;
402 };
403
404 #define NEON_MAX_TYPE_ELS 4
405
406 struct neon_type
407 {
408 struct neon_type_el el[NEON_MAX_TYPE_ELS];
409 unsigned elems;
410 };
411
412 enum it_instruction_type
413 {
414 OUTSIDE_IT_INSN,
415 INSIDE_IT_INSN,
416 INSIDE_IT_LAST_INSN,
417 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
418 if inside, should be the last one. */
419 NEUTRAL_IT_INSN, /* This could be either inside or outside,
420 i.e. BKPT and NOP. */
421 IT_INSN /* The IT insn has been parsed. */
422 };
423
424 /* The maximum number of operands we need. */
425 #define ARM_IT_MAX_OPERANDS 6
426
427 struct arm_it
428 {
429 const char * error;
430 unsigned long instruction;
431 int size;
432 int size_req;
433 int cond;
434 /* "uncond_value" is set to the value in place of the conditional field in
435 unconditional versions of the instruction, or -1 if nothing is
436 appropriate. */
437 int uncond_value;
438 struct neon_type vectype;
439 /* This does not indicate an actual NEON instruction, only that
440 the mnemonic accepts neon-style type suffixes. */
441 int is_neon;
442 /* Set to the opcode if the instruction needs relaxation.
443 Zero if the instruction is not relaxed. */
444 unsigned long relax;
445 struct
446 {
447 bfd_reloc_code_real_type type;
448 expressionS exp;
449 int pc_rel;
450 } reloc;
451
452 enum it_instruction_type it_insn_type;
453
454 struct
455 {
456 unsigned reg;
457 signed int imm;
458 struct neon_type_el vectype;
459 unsigned present : 1; /* Operand present. */
460 unsigned isreg : 1; /* Operand was a register. */
461 unsigned immisreg : 1; /* .imm field is a second register. */
462 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
463 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
464 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
465 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
466 instructions. This allows us to disambiguate ARM <-> vector insns. */
467 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
468 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
469 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
470 unsigned issingle : 1; /* Operand is VFP single-precision register. */
471 unsigned hasreloc : 1; /* Operand has relocation suffix. */
472 unsigned writeback : 1; /* Operand has trailing ! */
473 unsigned preind : 1; /* Preindexed address. */
474 unsigned postind : 1; /* Postindexed address. */
475 unsigned negative : 1; /* Index register was negated. */
476 unsigned shifted : 1; /* Shift applied to operation. */
477 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
478 } operands[ARM_IT_MAX_OPERANDS];
479 };
480
481 static struct arm_it inst;
482
483 #define NUM_FLOAT_VALS 8
484
485 const char * fp_const[] =
486 {
487 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
488 };
489
490 /* Number of littlenums required to hold an extended precision number. */
491 #define MAX_LITTLENUMS 6
492
493 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
494
495 #define FAIL (-1)
496 #define SUCCESS (0)
497
498 #define SUFF_S 1
499 #define SUFF_D 2
500 #define SUFF_E 3
501 #define SUFF_P 4
502
503 #define CP_T_X 0x00008000
504 #define CP_T_Y 0x00400000
505
506 #define CONDS_BIT 0x00100000
507 #define LOAD_BIT 0x00100000
508
509 #define DOUBLE_LOAD_FLAG 0x00000001
510
511 struct asm_cond
512 {
513 const char * template_name;
514 unsigned long value;
515 };
516
517 #define COND_ALWAYS 0xE
518
519 struct asm_psr
520 {
521 const char * template_name;
522 unsigned long field;
523 };
524
525 struct asm_barrier_opt
526 {
527 const char * template_name;
528 unsigned long value;
529 const arm_feature_set arch;
530 };
531
532 /* The bit that distinguishes CPSR and SPSR. */
533 #define SPSR_BIT (1 << 22)
534
535 /* The individual PSR flag bits. */
536 #define PSR_c (1 << 16)
537 #define PSR_x (1 << 17)
538 #define PSR_s (1 << 18)
539 #define PSR_f (1 << 19)
540
541 struct reloc_entry
542 {
543 const char * name;
544 bfd_reloc_code_real_type reloc;
545 };
546
547 enum vfp_reg_pos
548 {
549 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
550 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
551 };
552
553 enum vfp_ldstm_type
554 {
555 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
556 };
557
558 /* Bits for DEFINED field in neon_typed_alias. */
559 #define NTA_HASTYPE 1
560 #define NTA_HASINDEX 2
561
562 struct neon_typed_alias
563 {
564 unsigned char defined;
565 unsigned char index;
566 struct neon_type_el eltype;
567 };
568
569 /* ARM register categories. This includes coprocessor numbers and various
570 architecture extensions' registers. */
571 enum arm_reg_type
572 {
573 REG_TYPE_RN,
574 REG_TYPE_CP,
575 REG_TYPE_CN,
576 REG_TYPE_FN,
577 REG_TYPE_VFS,
578 REG_TYPE_VFD,
579 REG_TYPE_NQ,
580 REG_TYPE_VFSD,
581 REG_TYPE_NDQ,
582 REG_TYPE_NSDQ,
583 REG_TYPE_VFC,
584 REG_TYPE_MVF,
585 REG_TYPE_MVD,
586 REG_TYPE_MVFX,
587 REG_TYPE_MVDX,
588 REG_TYPE_MVAX,
589 REG_TYPE_DSPSC,
590 REG_TYPE_MMXWR,
591 REG_TYPE_MMXWC,
592 REG_TYPE_MMXWCG,
593 REG_TYPE_XSCALE,
594 REG_TYPE_RNB
595 };
596
597 /* Structure for a hash table entry for a register.
598 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
599 information which states whether a vector type or index is specified (for a
600 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
601 struct reg_entry
602 {
603 const char * name;
604 unsigned int number;
605 unsigned char type;
606 unsigned char builtin;
607 struct neon_typed_alias * neon;
608 };
609
610 /* Diagnostics used when we don't get a register of the expected type. */
611 const char * const reg_expected_msgs[] =
612 {
613 N_("ARM register expected"),
614 N_("bad or missing co-processor number"),
615 N_("co-processor register expected"),
616 N_("FPA register expected"),
617 N_("VFP single precision register expected"),
618 N_("VFP/Neon double precision register expected"),
619 N_("Neon quad precision register expected"),
620 N_("VFP single or double precision register expected"),
621 N_("Neon double or quad precision register expected"),
622 N_("VFP single, double or Neon quad precision register expected"),
623 N_("VFP system register expected"),
624 N_("Maverick MVF register expected"),
625 N_("Maverick MVD register expected"),
626 N_("Maverick MVFX register expected"),
627 N_("Maverick MVDX register expected"),
628 N_("Maverick MVAX register expected"),
629 N_("Maverick DSPSC register expected"),
630 N_("iWMMXt data register expected"),
631 N_("iWMMXt control register expected"),
632 N_("iWMMXt scalar register expected"),
633 N_("XScale accumulator register expected"),
634 };
635
636 /* Some well known registers that we refer to directly elsewhere. */
637 #define REG_R12 12
638 #define REG_SP 13
639 #define REG_LR 14
640 #define REG_PC 15
641
642 /* ARM instructions take 4bytes in the object file, Thumb instructions
643 take 2: */
644 #define INSN_SIZE 4
645
646 struct asm_opcode
647 {
648 /* Basic string to match. */
649 const char * template_name;
650
651 /* Parameters to instruction. */
652 unsigned int operands[8];
653
654 /* Conditional tag - see opcode_lookup. */
655 unsigned int tag : 4;
656
657 /* Basic instruction code. */
658 unsigned int avalue : 28;
659
660 /* Thumb-format instruction code. */
661 unsigned int tvalue;
662
663 /* Which architecture variant provides this instruction. */
664 const arm_feature_set * avariant;
665 const arm_feature_set * tvariant;
666
667 /* Function to call to encode instruction in ARM format. */
668 void (* aencode) (void);
669
670 /* Function to call to encode instruction in Thumb format. */
671 void (* tencode) (void);
672 };
673
674 /* Defines for various bits that we will want to toggle. */
675 #define INST_IMMEDIATE 0x02000000
676 #define OFFSET_REG 0x02000000
677 #define HWOFFSET_IMM 0x00400000
678 #define SHIFT_BY_REG 0x00000010
679 #define PRE_INDEX 0x01000000
680 #define INDEX_UP 0x00800000
681 #define WRITE_BACK 0x00200000
682 #define LDM_TYPE_2_OR_3 0x00400000
683 #define CPSI_MMOD 0x00020000
684
685 #define LITERAL_MASK 0xf000f000
686 #define OPCODE_MASK 0xfe1fffff
687 #define V4_STR_BIT 0x00000020
688 #define VLDR_VMOV_SAME 0x0040f000
689
690 #define T2_SUBS_PC_LR 0xf3de8f00
691
692 #define DATA_OP_SHIFT 21
693 #define SBIT_SHIFT 20
694
695 #define T2_OPCODE_MASK 0xfe1fffff
696 #define T2_DATA_OP_SHIFT 21
697 #define T2_SBIT_SHIFT 20
698
699 #define A_COND_MASK 0xf0000000
700 #define A_PUSH_POP_OP_MASK 0x0fff0000
701
702 /* Opcodes for pushing/poping registers to/from the stack. */
703 #define A1_OPCODE_PUSH 0x092d0000
704 #define A2_OPCODE_PUSH 0x052d0004
705 #define A2_OPCODE_POP 0x049d0004
706
707 /* Codes to distinguish the arithmetic instructions. */
708 #define OPCODE_AND 0
709 #define OPCODE_EOR 1
710 #define OPCODE_SUB 2
711 #define OPCODE_RSB 3
712 #define OPCODE_ADD 4
713 #define OPCODE_ADC 5
714 #define OPCODE_SBC 6
715 #define OPCODE_RSC 7
716 #define OPCODE_TST 8
717 #define OPCODE_TEQ 9
718 #define OPCODE_CMP 10
719 #define OPCODE_CMN 11
720 #define OPCODE_ORR 12
721 #define OPCODE_MOV 13
722 #define OPCODE_BIC 14
723 #define OPCODE_MVN 15
724
725 #define T2_OPCODE_AND 0
726 #define T2_OPCODE_BIC 1
727 #define T2_OPCODE_ORR 2
728 #define T2_OPCODE_ORN 3
729 #define T2_OPCODE_EOR 4
730 #define T2_OPCODE_ADD 8
731 #define T2_OPCODE_ADC 10
732 #define T2_OPCODE_SBC 11
733 #define T2_OPCODE_SUB 13
734 #define T2_OPCODE_RSB 14
735
736 #define T_OPCODE_MUL 0x4340
737 #define T_OPCODE_TST 0x4200
738 #define T_OPCODE_CMN 0x42c0
739 #define T_OPCODE_NEG 0x4240
740 #define T_OPCODE_MVN 0x43c0
741
742 #define T_OPCODE_ADD_R3 0x1800
743 #define T_OPCODE_SUB_R3 0x1a00
744 #define T_OPCODE_ADD_HI 0x4400
745 #define T_OPCODE_ADD_ST 0xb000
746 #define T_OPCODE_SUB_ST 0xb080
747 #define T_OPCODE_ADD_SP 0xa800
748 #define T_OPCODE_ADD_PC 0xa000
749 #define T_OPCODE_ADD_I8 0x3000
750 #define T_OPCODE_SUB_I8 0x3800
751 #define T_OPCODE_ADD_I3 0x1c00
752 #define T_OPCODE_SUB_I3 0x1e00
753
754 #define T_OPCODE_ASR_R 0x4100
755 #define T_OPCODE_LSL_R 0x4080
756 #define T_OPCODE_LSR_R 0x40c0
757 #define T_OPCODE_ROR_R 0x41c0
758 #define T_OPCODE_ASR_I 0x1000
759 #define T_OPCODE_LSL_I 0x0000
760 #define T_OPCODE_LSR_I 0x0800
761
762 #define T_OPCODE_MOV_I8 0x2000
763 #define T_OPCODE_CMP_I8 0x2800
764 #define T_OPCODE_CMP_LR 0x4280
765 #define T_OPCODE_MOV_HR 0x4600
766 #define T_OPCODE_CMP_HR 0x4500
767
768 #define T_OPCODE_LDR_PC 0x4800
769 #define T_OPCODE_LDR_SP 0x9800
770 #define T_OPCODE_STR_SP 0x9000
771 #define T_OPCODE_LDR_IW 0x6800
772 #define T_OPCODE_STR_IW 0x6000
773 #define T_OPCODE_LDR_IH 0x8800
774 #define T_OPCODE_STR_IH 0x8000
775 #define T_OPCODE_LDR_IB 0x7800
776 #define T_OPCODE_STR_IB 0x7000
777 #define T_OPCODE_LDR_RW 0x5800
778 #define T_OPCODE_STR_RW 0x5000
779 #define T_OPCODE_LDR_RH 0x5a00
780 #define T_OPCODE_STR_RH 0x5200
781 #define T_OPCODE_LDR_RB 0x5c00
782 #define T_OPCODE_STR_RB 0x5400
783
784 #define T_OPCODE_PUSH 0xb400
785 #define T_OPCODE_POP 0xbc00
786
787 #define T_OPCODE_BRANCH 0xe000
788
789 #define THUMB_SIZE 2 /* Size of thumb instruction. */
790 #define THUMB_PP_PC_LR 0x0100
791 #define THUMB_LOAD_BIT 0x0800
792 #define THUMB2_LOAD_BIT 0x00100000
793
/* Diagnostic messages for common operand/encoding errors.  Each macro
   expands to a translated string suitable for assignment to
   inst.error.  Note: no macro may carry a trailing semicolon -- they
   are used as expressions, so the semicolon belongs at the use site.
   (BAD_ADDR_MODE previously had a stray ';' which inserted an empty
   statement at every expansion and broke unbraced if/else uses.)  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY	_("relocation valid in thumb1 code only")
817
818 static struct hash_control * arm_ops_hsh;
819 static struct hash_control * arm_cond_hsh;
820 static struct hash_control * arm_shift_hsh;
821 static struct hash_control * arm_psr_hsh;
822 static struct hash_control * arm_v7m_psr_hsh;
823 static struct hash_control * arm_reg_hsh;
824 static struct hash_control * arm_reloc_hsh;
825 static struct hash_control * arm_barrier_opt_hsh;
826
827 /* Stuff needed to resolve the label ambiguity
828 As:
829 ...
830 label: <insn>
831 may differ from:
832 ...
833 label:
834 <insn> */
835
836 symbolS * last_label_seen;
837 static int label_is_thumb_function_name = FALSE;
838
839 /* Literal pool structure. Held on a per-section
840 and per-sub-section basis. */
841
842 #define MAX_LITERAL_POOL_SIZE 1024
843 typedef struct literal_pool
844 {
845 expressionS literals [MAX_LITERAL_POOL_SIZE];
846 unsigned int next_free_entry;
847 unsigned int id;
848 symbolS * symbol;
849 segT section;
850 subsegT sub_section;
851 #ifdef OBJ_ELF
852 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
853 #endif
854 struct literal_pool * next;
855 unsigned int alignment;
856 } literal_pool;
857
858 /* Pointer to a linked list of literal pools. */
859 literal_pool * list_of_pools = NULL;
860
861 typedef enum asmfunc_states
862 {
863 OUTSIDE_ASMFUNC,
864 WAITING_ASMFUNC_NAME,
865 WAITING_ENDASMFUNC
866 } asmfunc_states;
867
868 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
869
870 #ifdef OBJ_ELF
871 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
872 #else
873 static struct current_it now_it;
874 #endif
875
876 static inline int
877 now_it_compatible (int cond)
878 {
879 return (cond & ~1) == (now_it.cc & ~1);
880 }
881
882 static inline int
883 conditional_insn (void)
884 {
885 return inst.cond != COND_ALWAYS;
886 }
887
888 static int in_it_block (void);
889
890 static int handle_it_state (void);
891
892 static void force_automatic_it_block_close (void);
893
894 static void it_fsm_post_encode (void);
895
896 #define set_it_insn_type(type) \
897 do \
898 { \
899 inst.it_insn_type = type; \
900 if (handle_it_state () == FAIL) \
901 return; \
902 } \
903 while (0)
904
905 #define set_it_insn_type_nonvoid(type, failret) \
906 do \
907 { \
908 inst.it_insn_type = type; \
909 if (handle_it_state () == FAIL) \
910 return failret; \
911 } \
912 while(0)
913
914 #define set_it_insn_type_last() \
915 do \
916 { \
917 if (inst.cond == COND_ALWAYS) \
918 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
919 else \
920 set_it_insn_type (INSIDE_IT_LAST_INSN); \
921 } \
922 while (0)
923
924 /* Pure syntax. */
925
926 /* This array holds the chars that always start a comment. If the
927 pre-processor is disabled, these aren't very useful. */
928 char arm_comment_chars[] = "@";
929
930 /* This array holds the chars that only start a comment at the beginning of
931 a line. If the line seems to have the form '# 123 filename'
932 .line and .file directives will appear in the pre-processed output. */
933 /* Note that input_file.c hand checks for '#' at the beginning of the
934 first line of the input file. This is because the compiler outputs
935 #NO_APP at the beginning of its output. */
936 /* Also note that comments like this one will always work. */
937 const char line_comment_chars[] = "#";
938
939 char arm_line_separator_chars[] = ";";
940
941 /* Chars that can be used to separate mant
942 from exp in floating point numbers. */
943 const char EXP_CHARS[] = "eE";
944
945 /* Chars that mean this number is a floating point constant. */
946 /* As in 0f12.456 */
947 /* or 0d1.2345e12 */
948
949 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
950
951 /* Prefix characters that indicate the start of an immediate
952 value. */
953 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
954
955 /* Separator character handling. */
956
957 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
958
959 static inline int
960 skip_past_char (char ** str, char c)
961 {
962 /* PR gas/14987: Allow for whitespace before the expected character. */
963 skip_whitespace (*str);
964
965 if (**str == c)
966 {
967 (*str)++;
968 return SUCCESS;
969 }
970 else
971 return FAIL;
972 }
973
974 #define skip_past_comma(str) skip_past_char (str, ',')
975
976 /* Arithmetic expressions (possibly involving symbols). */
977
978 /* Return TRUE if anything in the expression is a bignum. */
979
980 static bfd_boolean
981 walk_no_bignums (symbolS * sp)
982 {
983 if (symbol_get_value_expression (sp)->X_op == O_big)
984 return TRUE;
985
986 if (symbol_get_value_expression (sp)->X_add_symbol)
987 {
988 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
989 || (symbol_get_value_expression (sp)->X_op_symbol
990 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
991 }
992
993 return FALSE;
994 }
995
996 static bfd_boolean in_my_get_expression = FALSE;
997
998 /* Third argument to my_get_expression. */
999 #define GE_NO_PREFIX 0
1000 #define GE_IMM_PREFIX 1
1001 #define GE_OPT_PREFIX 2
1002 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1003 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1004 #define GE_OPT_PREFIX_BIG 3
1005
/* Parse an expression at *STR into EP, advancing *STR past it.
   PREFIX_MODE is one of the GE_* values above: it controls whether an
   immediate prefix (# or $) is required, optional or absent, and
   whether bignums are accepted (GE_OPT_PREFIX_BIG).  Returns SUCCESS,
   or non-zero on failure with inst.error set.
   NOTE(review): most failure paths return 1 while the missing-prefix
   path returns FAIL; callers appear to test for non-zero only.  */

static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Point the generic expression parser at our string.  The
     in_my_get_expression flag lets md_operand flag bad expressions
     back to us (by setting X_op to O_illegal).  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = TRUE;
  seg = expression (ep);
  in_my_get_expression = FALSE;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  /* a.out only supports expressions over a fixed set of sections.  */
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1092
1093 /* Turn a string in input_line_pointer into a floating point constant
1094 of type TYPE, and store the appropriate bytes in *LITP. The number
1095 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1096 returned, or NULL on OK.
1097
1098 Note that fp constants aren't represent in the normal way on the ARM.
1099 In big endian mode, things are as expected. However, in little endian
1100 mode fp constants are big-endian word-wise, and little-endian byte-wise
1101 within the words. For example, (double) 1.1 in big endian mode is
1102 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1103 the byte sequence 99 99 f1 3f 9a 99 99 99.
1104
1105 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1106
const char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;			/* Result size, in LITTLENUM units.  */
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  /* Map the type letter to a littlenum count.  */
  switch (type)
    {
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    case 'x':
    case 'X':
      prec = 5;
      break;

    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
    {
      /* Big endian: emit the littlenums in order, most significant
	 first.  */
      for (i = 0; i < prec; i++)
	{
	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	  litP += sizeof (LITTLENUM_TYPE);
	}
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	/* Pure little-endian FP format: reverse the littlenum order.  */
	for (i = prec - 1; i >= 0; i--)
	  {
	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += sizeof (LITTLENUM_TYPE);
	  }
      else
	/* For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2.  */
	for (i = 0; i < prec; i += 2)
	  {
	    md_number_to_chars (litP, (valueT) words[i + 1],
				sizeof (LITTLENUM_TYPE));
	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
				(valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += 2 * sizeof (LITTLENUM_TYPE);
	  }
    }

  return NULL;
}
1182
1183 /* We handle all bad expressions here, so that we can report the faulty
1184 instruction in the error message. */
1185
1186 void
1187 md_operand (expressionS * exp)
1188 {
1189 if (in_my_get_expression)
1190 exp->X_op = O_illegal;
1191 }
1192
1193 /* Immediate values. */
1194
1195 #ifdef OBJ_ELF
1196 /* Generic immediate-value read function for use in directives.
1197 Accepts anything that 'expression' can fold to a constant.
1198 *val receives the number. */
1199
1200 static int
1201 immediate_for_directive (int *val)
1202 {
1203 expressionS exp;
1204 exp.X_op = O_illegal;
1205
1206 if (is_immediate_prefix (*input_line_pointer))
1207 {
1208 input_line_pointer++;
1209 expression (&exp);
1210 }
1211
1212 if (exp.X_op != O_constant)
1213 {
1214 as_bad (_("expected #constant"));
1215 ignore_rest_of_line ();
1216 return FAIL;
1217 }
1218 *val = exp.X_add_number;
1219 return SUCCESS;
1220 }
1221 #endif
1222
1223 /* Register parsing. */
1224
1225 /* Generic register parser. CCP points to what should be the
1226 beginning of a register name. If it is indeed a valid register
1227 name, advance CCP over it and return the reg_entry structure;
1228 otherwise return NULL. Does not issue diagnostics. */
1229
1230 static struct reg_entry *
1231 arm_reg_parse_multi (char **ccp)
1232 {
1233 char *start = *ccp;
1234 char *p;
1235 struct reg_entry *reg;
1236
1237 skip_whitespace (start);
1238
1239 #ifdef REGISTER_PREFIX
1240 if (*start != REGISTER_PREFIX)
1241 return NULL;
1242 start++;
1243 #endif
1244 #ifdef OPTIONAL_REGISTER_PREFIX
1245 if (*start == OPTIONAL_REGISTER_PREFIX)
1246 start++;
1247 #endif
1248
1249 p = start;
1250 if (!ISALPHA (*p) || !is_name_beginner (*p))
1251 return NULL;
1252
1253 do
1254 p++;
1255 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1256
1257 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1258
1259 if (!reg)
1260 return NULL;
1261
1262 *ccp = p;
1263 return reg;
1264 }
1265
1266 static int
1267 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1268 enum arm_reg_type type)
1269 {
1270 /* Alternative syntaxes are accepted for a few register classes. */
1271 switch (type)
1272 {
1273 case REG_TYPE_MVF:
1274 case REG_TYPE_MVD:
1275 case REG_TYPE_MVFX:
1276 case REG_TYPE_MVDX:
1277 /* Generic coprocessor register names are allowed for these. */
1278 if (reg && reg->type == REG_TYPE_CN)
1279 return reg->number;
1280 break;
1281
1282 case REG_TYPE_CP:
1283 /* For backward compatibility, a bare number is valid here. */
1284 {
1285 unsigned long processor = strtoul (start, ccp, 10);
1286 if (*ccp != start && processor <= 15)
1287 return processor;
1288 }
1289 /* Fall through. */
1290
1291 case REG_TYPE_MMXWC:
1292 /* WC includes WCG. ??? I'm not sure this is true for all
1293 instructions that take WC registers. */
1294 if (reg && reg->type == REG_TYPE_MMXWCG)
1295 return reg->number;
1296 break;
1297
1298 default:
1299 break;
1300 }
1301
1302 return FAIL;
1303 }
1304
1305 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1306 return value is the register number or FAIL. */
1307
1308 static int
1309 arm_reg_parse (char **ccp, enum arm_reg_type type)
1310 {
1311 char *start = *ccp;
1312 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1313 int ret;
1314
1315 /* Do not allow a scalar (reg+index) to parse as a register. */
1316 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1317 return FAIL;
1318
1319 if (reg && reg->type == type)
1320 return reg->number;
1321
1322 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1323 return ret;
1324
1325 *ccp = start;
1326 return FAIL;
1327 }
1328
1329 /* Parse a Neon type specifier. *STR should point at the leading '.'
1330 character. Does no verification at this stage that the type fits the opcode
1331 properly. E.g.,
1332
1333 .i32.i32.s16
1334 .s32.f32
1335 .u16
1336
1337 Can all be legally parsed by this function.
1338
1339 Fills in neon_type struct pointer with parsed information, and updates STR
1340 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1341 type, FAIL if not. */
1342
1343 static int
1344 parse_neon_type (struct neon_type *type, char **str)
1345 {
1346 char *ptr = *str;
1347
1348 if (type)
1349 type->elems = 0;
1350
1351 while (type->elems < NEON_MAX_TYPE_ELS)
1352 {
1353 enum neon_el_type thistype = NT_untyped;
1354 unsigned thissize = -1u;
1355
1356 if (*ptr != '.')
1357 break;
1358
1359 ptr++;
1360
1361 /* Just a size without an explicit type. */
1362 if (ISDIGIT (*ptr))
1363 goto parsesize;
1364
1365 switch (TOLOWER (*ptr))
1366 {
1367 case 'i': thistype = NT_integer; break;
1368 case 'f': thistype = NT_float; break;
1369 case 'p': thistype = NT_poly; break;
1370 case 's': thistype = NT_signed; break;
1371 case 'u': thistype = NT_unsigned; break;
1372 case 'd':
1373 thistype = NT_float;
1374 thissize = 64;
1375 ptr++;
1376 goto done;
1377 default:
1378 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1379 return FAIL;
1380 }
1381
1382 ptr++;
1383
1384 /* .f is an abbreviation for .f32. */
1385 if (thistype == NT_float && !ISDIGIT (*ptr))
1386 thissize = 32;
1387 else
1388 {
1389 parsesize:
1390 thissize = strtoul (ptr, &ptr, 10);
1391
1392 if (thissize != 8 && thissize != 16 && thissize != 32
1393 && thissize != 64)
1394 {
1395 as_bad (_("bad size %d in type specifier"), thissize);
1396 return FAIL;
1397 }
1398 }
1399
1400 done:
1401 if (type)
1402 {
1403 type->el[type->elems].type = thistype;
1404 type->el[type->elems].size = thissize;
1405 type->elems++;
1406 }
1407 }
1408
1409 /* Empty/missing type is not a successful parse. */
1410 if (type->elems == 0)
1411 return FAIL;
1412
1413 *str = ptr;
1414
1415 return SUCCESS;
1416 }
1417
1418 /* Errors may be set multiple times during parsing or bit encoding
1419 (particularly in the Neon bits), but usually the earliest error which is set
1420 will be the most meaningful. Avoid overwriting it with later (cascading)
1421 errors by calling this function. */
1422
1423 static void
1424 first_error (const char *err)
1425 {
1426 if (!inst.error)
1427 inst.error = err;
1428 }
1429
1430 /* Parse a single type, e.g. ".s32", leading period included. */
1431 static int
1432 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1433 {
1434 char *str = *ccp;
1435 struct neon_type optype;
1436
1437 if (*str == '.')
1438 {
1439 if (parse_neon_type (&optype, &str) == SUCCESS)
1440 {
1441 if (optype.elems == 1)
1442 *vectype = optype.el[0];
1443 else
1444 {
1445 first_error (_("only one type should be specified for operand"));
1446 return FAIL;
1447 }
1448 }
1449 else
1450 {
1451 first_error (_("vector type expected"));
1452 return FAIL;
1453 }
1454 }
1455 else
1456 return FAIL;
1457
1458 *ccp = str;
1459
1460 return SUCCESS;
1461 }
1462
1463 /* Special meanings for indices (which have a range of 0-7), which will fit into
1464 a 4-bit integer. */
1465
1466 #define NEON_ALL_LANES 15
1467 #define NEON_INTERLEAVE_LANES 14
1468
1469 /* Parse either a register or a scalar, with an optional type. Return the
1470 register number, and optionally fill in the actual type of the register
1471 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1472 type/index information in *TYPEINFO. */
1473
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with an empty type/index annotation.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* Inherit any type/index attached to the register by a typed alias.  */
  if (reg->neon)
    atype = *reg->neon;

  /* Parse an optional ".type" suffix, e.g. ".s32".  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Parse an optional "[index]" or "[]" scalar suffix.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* "[]" selects all lanes.  */
      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1577
1578 /* Like arm_reg_parse, but allow allow the following extra features:
1579 - If RTYPE is non-zero, return the (possibly restricted) type of the
1580 register (e.g. Neon double or quad reg when either has been requested).
1581 - If this is a Neon vector type with additional type information, fill
1582 in the struct pointed to by VECTYPE (if non-NULL).
1583 This function will fault on encountering a scalar. */
1584
1585 static int
1586 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1587 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1588 {
1589 struct neon_typed_alias atype;
1590 char *str = *ccp;
1591 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1592
1593 if (reg == FAIL)
1594 return FAIL;
1595
1596 /* Do not allow regname(... to parse as a register. */
1597 if (*str == '(')
1598 return FAIL;
1599
1600 /* Do not allow a scalar (reg+index) to parse as a register. */
1601 if ((atype.defined & NTA_HASINDEX) != 0)
1602 {
1603 first_error (_("register operand expected, but got scalar"));
1604 return FAIL;
1605 }
1606
1607 if (vectype)
1608 *vectype = atype.eltype;
1609
1610 *ccp = str;
1611
1612 return reg;
1613 }
1614
1615 #define NEON_SCALAR_REG(X) ((X) >> 4)
1616 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1617
1618 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1619 have enough information to be able to do a good job bounds-checking. So, we
1620 just do easy checks here, and do further checks later. */
1621
1622 static int
1623 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1624 {
1625 int reg;
1626 char *str = *ccp;
1627 struct neon_typed_alias atype;
1628
1629 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1630
1631 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1632 return FAIL;
1633
1634 if (atype.index == NEON_ALL_LANES)
1635 {
1636 first_error (_("scalar must have an index"));
1637 return FAIL;
1638 }
1639 else if (atype.index >= 64 / elsize)
1640 {
1641 first_error (_("scalar index out of range"));
1642 return FAIL;
1643 }
1644
1645 if (type)
1646 *type = atype.eltype;
1647
1648 *ccp = str;
1649
1650 return reg * 16 + atype.index;
1651 }
1652
1653 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1654
1655 static long
1656 parse_reg_list (char ** strp)
1657 {
1658 char * str = * strp;
1659 long range = 0;
1660 int another_range;
1661
1662 /* We come back here if we get ranges concatenated by '+' or '|'. */
1663 do
1664 {
1665 skip_whitespace (str);
1666
1667 another_range = 0;
1668
1669 if (*str == '{')
1670 {
1671 int in_range = 0;
1672 int cur_reg = -1;
1673
1674 str++;
1675 do
1676 {
1677 int reg;
1678
1679 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1680 {
1681 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1682 return FAIL;
1683 }
1684
1685 if (in_range)
1686 {
1687 int i;
1688
1689 if (reg <= cur_reg)
1690 {
1691 first_error (_("bad range in register list"));
1692 return FAIL;
1693 }
1694
1695 for (i = cur_reg + 1; i < reg; i++)
1696 {
1697 if (range & (1 << i))
1698 as_tsktsk
1699 (_("Warning: duplicated register (r%d) in register list"),
1700 i);
1701 else
1702 range |= 1 << i;
1703 }
1704 in_range = 0;
1705 }
1706
1707 if (range & (1 << reg))
1708 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1709 reg);
1710 else if (reg <= cur_reg)
1711 as_tsktsk (_("Warning: register range not in ascending order"));
1712
1713 range |= 1 << reg;
1714 cur_reg = reg;
1715 }
1716 while (skip_past_comma (&str) != FAIL
1717 || (in_range = 1, *str++ == '-'));
1718 str--;
1719
1720 if (skip_past_char (&str, '}') == FAIL)
1721 {
1722 first_error (_("missing `}'"));
1723 return FAIL;
1724 }
1725 }
1726 else
1727 {
1728 expressionS exp;
1729
1730 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1731 return FAIL;
1732
1733 if (exp.X_op == O_constant)
1734 {
1735 if (exp.X_add_number
1736 != (exp.X_add_number & 0x0000ffff))
1737 {
1738 inst.error = _("invalid register mask");
1739 return FAIL;
1740 }
1741
1742 if ((range & exp.X_add_number) != 0)
1743 {
1744 int regno = range & exp.X_add_number;
1745
1746 regno &= -regno;
1747 regno = (1 << regno) - 1;
1748 as_tsktsk
1749 (_("Warning: duplicated register (r%d) in register list"),
1750 regno);
1751 }
1752
1753 range |= exp.X_add_number;
1754 }
1755 else
1756 {
1757 if (inst.reloc.type != 0)
1758 {
1759 inst.error = _("expression too complex");
1760 return FAIL;
1761 }
1762
1763 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1764 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1765 inst.reloc.pc_rel = 0;
1766 }
1767 }
1768
1769 if (*str == '|' || *str == '+')
1770 {
1771 str++;
1772 another_range = 1;
1773 }
1774 }
1775 while (another_range);
1776
1777 *strp = str;
1778 return range;
1779 }
1780
1781 /* Types of registers in a list. */
1782
enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision registers (S0-S31).  */
  REGLIST_VFP_D,	/* Double-precision (D) registers.  */
  REGLIST_NEON_D	/* Neon D registers; Q syntax also accepted.  */
};
1789
1790 /* Parse a VFP register list. If the string is invalid return FAIL.
1791 Otherwise return the number of registers, and set PBASE to the first
1792 register. Parses registers of type ETYPE.
1793 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1794 - Q registers can be used to specify pairs of D registers
1795 - { } can be omitted from around a singleton register list
1796 FIXME: This is not implemented, as it would require backtracking in
1797 some cases, e.g.:
1798 vtbl.8 d3,d4,d5
1799 This could be done (the meaning isn't really ambiguous), but doesn't
1800 fit in well with the current parsing framework.
1801 - 32 D registers may be used (also true for VFPv3).
1802 FIXME: Types are ignored in these register lists, which is probably a
1803 bug. */
1804
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;		/* Highest register number allowed + 1.  */
  int count = 0;		/* Registers accumulated so far.  */
  int warned = 0;		/* Already warned about ordering?  */
  unsigned long mask = 0;	/* Bitmask of registers in the list.  */
  int i;

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Select the register class; S registers always allow 32.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record use of the D16-D31 extension in the arch-used bits.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  base_reg = max_regs;

  do
    {
      /* setmask/addregs cover one S/D register, or the two D slots
	 occupied by a Q register.  */
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* For Q registers the upper bound covers both D halves.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Mark every register in the range.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): this steps over what should be the closing '}'
     without verifying that it is one -- confirm a malformed list
     cannot reach here.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1968
1969 /* True if two alias types are the same. */
1970
1971 static bfd_boolean
1972 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1973 {
1974 if (!a && !b)
1975 return TRUE;
1976
1977 if (!a || !b)
1978 return FALSE;
1979
1980 if (a->defined != b->defined)
1981 return FALSE;
1982
1983 if ((a->defined & NTA_HASTYPE) != 0
1984 && (a->eltype.type != b->eltype.type
1985 || a->eltype.size != b->eltype.size))
1986 return FALSE;
1987
1988 if ((a->defined & NTA_HASINDEX) != 0
1989 && (a->index != b->index))
1990 return FALSE;
1991
1992 return TRUE;
1993 }
1994
1995 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1996 The base register is put in *PBASE.
1997 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1998 the return value.
1999 The register stride (minus one) is put in bit 4 of the return value.
2000 Bits [6:5] encode the list length (minus one).
2001 The type of the list elements is put in *ELTYPE, if non-NULL. */
2002
2003 #define NEON_LANE(X) ((X) & 0xf)
2004 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2005 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2006
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;		/* First register in the list.  */
  int reg_incr = -1;		/* Stride between registers; -1 = unknown.  */
  int count = 0;		/* Registers seen, in D-register units.  */
  int lane = -1;		/* Lane index or a NEON_*_LANES value.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: fixes the reference type; Q registers imply
	     unit stride.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  first_error (_(incr_error));
	  return FAIL;
	}

      /* All elements must carry the same type annotation as the
	 first.  */
      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every register in the range, in D units.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All elements must use the same lane index.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as described in the comment above.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2163
2164 /* Parse an explicit relocation suffix on an expression. This is
2165 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2166 arm_reloc_hsh contains no entries, so this function can only
2167 succeed if there is no () after the word. Returns -1 on error,
2168 BFD_RELOC_UNUSED if there wasn't any suffix. */
2169
2170 static int
2171 parse_reloc (char **str)
2172 {
2173 struct reloc_entry *r;
2174 char *p, *q;
2175
2176 if (**str != '(')
2177 return BFD_RELOC_UNUSED;
2178
2179 p = *str + 1;
2180 q = p;
2181
2182 while (*q && *q != ')' && *q != ',')
2183 q++;
2184 if (*q != ')')
2185 return -1;
2186
2187 if ((r = (struct reloc_entry *)
2188 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2189 return -1;
2190
2191 *str = q + 1;
2192 return r->reloc;
2193 }
2194
2195 /* Directives: register aliases. */
2196
2197 static struct reg_entry *
2198 insert_reg_alias (char *str, unsigned number, int type)
2199 {
2200 struct reg_entry *new_reg;
2201 const char *name;
2202
2203 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2204 {
2205 if (new_reg->builtin)
2206 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2207
2208 /* Only warn about a redefinition if it's not defined as the
2209 same register. */
2210 else if (new_reg->number != number || new_reg->type != type)
2211 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2212
2213 return NULL;
2214 }
2215
2216 name = xstrdup (str);
2217 new_reg = XNEW (struct reg_entry);
2218
2219 new_reg->name = name;
2220 new_reg->number = number;
2221 new_reg->type = type;
2222 new_reg->builtin = FALSE;
2223 new_reg->neon = NULL;
2224
2225 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2226 abort ();
2227
2228 return new_reg;
2229 }
2230
2231 static void
2232 insert_neon_reg_alias (char *str, int number, int type,
2233 struct neon_typed_alias *atype)
2234 {
2235 struct reg_entry *reg = insert_reg_alias (str, number, type);
2236
2237 if (!reg)
2238 {
2239 first_error (_("attempt to redefine typed alias"));
2240 return;
2241 }
2242
2243 if (atype)
2244 {
2245 reg->neon = XNEW (struct neon_typed_alias);
2246 *reg->neon = *atype;
2247 }
2248 }
2249
2250 /* Look for the .req directive. This is of the form:
2251
2252 new_register_name .req existing_register_name
2253
2254 If we find one, or if it looks sufficiently like one that we want to
2255 handle any error here, return TRUE. Otherwise return FALSE. */
2256
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must name an already-known register/alias.  */
  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Still return TRUE: this was recognizably a .req statement, so
	 the caller should not attempt to parse it as anything else.  */
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the uppercase variant if it differs from the alias as
	 written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise, only add the lowercase variant when distinct.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2329
2330 /* Create a Neon typed/indexed register alias using directives, e.g.:
2331 X .dn d5.s32[1]
2332 Y .qn 6.s16
2333 Z .dn d7
2334 T .dn Z[0]
2335 These typed registers can be used instead of the types specified after the
2336 Neon mnemonic, so long as all operands given have types. Types can also be
2337 specified directly, e.g.:
2338 vadd d0.s32, d1.s32, d2.s32 */
2339
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  /* Start with no type and no index attached.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* .dn defines a D-register (64-bit) alias, .qn a Q-register
     (128-bit) alias; anything else is not ours to handle.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      /* Build a fake base register entry from the constant.  Q
	 registers are numbered in D-register units, hence the * 2.  */
      basereg = &mybasereg;
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Aliasing an already-typed alias inherits its type/index info.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  /* Skip if identical to the name as written.  */
  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2478
2479 /* Should never be called, as .req goes between the alias and the
2480 register name, not at the beginning of the line. */
2481
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* ".req" must follow the alias name ("foo .req r0"); reaching this
     handler means it started the statement, which is invalid.  */
  as_bad (_("invalid syntax for .req directive"));
}
2487
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  /* Like .req, ".dn" must follow the alias name, never start a line.  */
  as_bad (_("invalid syntax for .dn directive"));
}
2493
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  /* Like .req, ".qn" must follow the alias name, never start a line.  */
  as_bad (_("invalid syntax for .qn directive"));
}
2499
2500 /* The .unreq directive deletes an alias which was previously defined
2501 by .req. For example:
2502
2503 my_alias .req r11
2504 .unreq my_alias */
2505
2506 static void
2507 s_unreq (int a ATTRIBUTE_UNUSED)
2508 {
2509 char * name;
2510 char saved_char;
2511
2512 name = input_line_pointer;
2513
2514 while (*input_line_pointer != 0
2515 && *input_line_pointer != ' '
2516 && *input_line_pointer != '\n')
2517 ++input_line_pointer;
2518
2519 saved_char = *input_line_pointer;
2520 *input_line_pointer = 0;
2521
2522 if (!*name)
2523 as_bad (_("invalid syntax for .unreq directive"));
2524 else
2525 {
2526 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2527 name);
2528
2529 if (!reg)
2530 as_bad (_("unknown register alias '%s'"), name);
2531 else if (reg->builtin)
2532 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2533 name);
2534 else
2535 {
2536 char * p;
2537 char * nbuf;
2538
2539 hash_delete (arm_reg_hsh, name, FALSE);
2540 free ((char *) reg->name);
2541 if (reg->neon)
2542 free (reg->neon);
2543 free (reg);
2544
2545 /* Also locate the all upper case and all lower case versions.
2546 Do not complain if we cannot find one or the other as it
2547 was probably deleted above. */
2548
2549 nbuf = strdup (name);
2550 for (p = nbuf; *p; p++)
2551 *p = TOUPPER (*p);
2552 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2553 if (reg)
2554 {
2555 hash_delete (arm_reg_hsh, nbuf, FALSE);
2556 free ((char *) reg->name);
2557 if (reg->neon)
2558 free (reg->neon);
2559 free (reg);
2560 }
2561
2562 for (p = nbuf; *p; p++)
2563 *p = TOLOWER (*p);
2564 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2565 if (reg)
2566 {
2567 hash_delete (arm_reg_hsh, nbuf, FALSE);
2568 free ((char *) reg->name);
2569 if (reg->neon)
2570 free (reg->neon);
2571 free (reg);
2572 }
2573
2574 free (nbuf);
2575 }
2576 }
2577
2578 *input_line_pointer = saved_char;
2579 demand_empty_rest_of_line ();
2580 }
2581
2582 /* Directives: Instruction set selection. */
2583
2584 #ifdef OBJ_ELF
2585 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2586 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2587 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2588 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2589
2590 /* Create a new mapping symbol for the transition to STATE. */
2591
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the AAELF mapping-symbol name for the new state:
     $a = ARM code, $t = Thumb code, $d = data.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark code symbols with the appropriate ARM/Thumb attributes.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Same offset as the previous mapping symbol in this frag:
	 the new one supersedes it.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2665
2666 /* We must sometimes convert a region marked as code to data during
2667 code alignment, if an odd number of bytes have to be padded. The
2668 code mapping symbol is pushed to an aligned address. */
2669
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* If it was also the first mapping symbol of the frag, clear
	 that slot too so the bookkeeping stays consistent.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the BYTES of padding as data, then resume STATE after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2692
2693 static void mapping_state_2 (enum mstate state, int max_chars);
2694
2695 /* Set the mapping state to STATE. Only call this when about to
2696 emit some STATE bytes to the file. */
2697
#define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to be marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  mapping_state_2 (state, 0);
}
2731
2732 /* Same as mapping_state, but MAX_CHARS bytes have already been
2733 allocated. Put the mapping symbol that far back. */
2734
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only normal (BFD-backed) sections get mapping symbols.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* First code emitted in this section: if anything (e.g. zero-fill)
     already precedes it, mark that prefix as data.  */
  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* Place the symbol MAX_CHARS before the current position, i.e. at
     the start of the bytes the caller already allocated.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2761 #undef TRANSITION
2762 #else
2763 #define mapping_state(x) ((void)0)
2764 #define mapping_state_2(x, y) ((void)0)
2765 #endif
2766
2767 /* Find the real, Thumb encoded start of a Thumb function. */
2768
2769 #ifdef OBJ_COFF
2770 static symbolS *
2771 find_real_start (symbolS * symbolP)
2772 {
2773 char * real_start;
2774 const char * name = S_GET_NAME (symbolP);
2775 symbolS * new_target;
2776
2777 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2778 #define STUB_NAME ".real_start_of"
2779
2780 if (name == NULL)
2781 abort ();
2782
2783 /* The compiler may generate BL instructions to local labels because
2784 it needs to perform a branch to a far away location. These labels
2785 do not have a corresponding ".real_start_of" label. We check
2786 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2787 the ".real_start_of" convention for nonlocal branches. */
2788 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2789 return symbolP;
2790
2791 real_start = concat (STUB_NAME, name, NULL);
2792 new_target = symbol_find (real_start);
2793 free (real_start);
2794
2795 if (new_target == NULL)
2796 {
2797 as_warn (_("Failed to find real start of function: %s\n"), name);
2798 new_target = symbolP;
2799 }
2800
2801 return new_target;
2802 }
2803 #endif
2804
2805 static void
2806 opcode_select (int width)
2807 {
2808 switch (width)
2809 {
2810 case 16:
2811 if (! thumb_mode)
2812 {
2813 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2814 as_bad (_("selected processor does not support THUMB opcodes"));
2815
2816 thumb_mode = 1;
2817 /* No need to force the alignment, since we will have been
2818 coming from ARM mode, which is word-aligned. */
2819 record_alignment (now_seg, 1);
2820 }
2821 break;
2822
2823 case 32:
2824 if (thumb_mode)
2825 {
2826 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2827 as_bad (_("selected processor does not support ARM opcodes"));
2828
2829 thumb_mode = 0;
2830
2831 if (!need_pass_2)
2832 frag_align (2, 0, 0);
2833
2834 record_alignment (now_seg, 1);
2835 }
2836 break;
2837
2838 default:
2839 as_bad (_("invalid instruction size selected (%d)"), width);
2840 }
2841 }
2842
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  /* Handle the .arm directive: switch to 32-bit ARM encoding.  */
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2849
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* Handle the .thumb directive: switch to 16-bit Thumb encoding.  */
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2856
2857 static void
2858 s_code (int unused ATTRIBUTE_UNUSED)
2859 {
2860 int temp;
2861
2862 temp = get_absolute_expression ();
2863 switch (temp)
2864 {
2865 case 16:
2866 case 32:
2867 opcode_select (temp);
2868 break;
2869
2870 default:
2871 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2872 }
2873 }
2874
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* 2 (rather than 1) marks "forced" Thumb mode.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2891
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  /* .thumb_func implies .thumb.  */
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2901
2902 /* Perform a .set directive, but also mark the alias as
2903 being a thumb function. */
2904
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily NUL-terminate the name for the diagnostic.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  /* Find the symbol being set, or create it if it does not exist.  */
  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Put back the delimiter we overwrote above.  */
  * end_name = delim;

  /* For .thumb_equiv (EQUIV nonzero), redefinition is an error.  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  /* Mark the symbol as a Thumb function entry point.  */
  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2990
2991 /* Directives: Mode selection. */
2992
2993 /* .syntax [unified|divided] - choose the new unified syntax
2994 (same for Arm and Thumb encoding, modulo slight differences in what
2995 can be represented) or the old divergent syntax for each mode. */
static void
s_syntax (int unused ATTRIBUTE_UNUSED)
{
  char *name, delim;

  delim = get_symbol_name (& name);

  if (!strcasecmp (name, "unified"))
    unified_syntax = TRUE;
  else if (!strcasecmp (name, "divided"))
    unified_syntax = FALSE;
  else
    {
      as_bad (_("unrecognized syntax mode \"%s\""), name);
      /* NOTE(review): this path returns without restore_line_pointer,
	 leaving the delimiter overwritten and the rest of the line
	 unconsumed -- looks intentional-but-fragile; confirm.  */
      return;
    }
  (void) restore_line_pointer (delim);
  demand_empty_rest_of_line ();
}
3015
3016 /* Directives: sectioning and alignment. */
3017
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3030
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Handle .even: align to a 2-byte boundary.  */
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3042
3043 /* Directives: CodeComposer Studio. */
3044
3045 /* .ref (for CodeComposer Studio syntax only). */
3046 static void
3047 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3048 {
3049 if (codecomposer_syntax)
3050 ignore_rest_of_line ();
3051 else
3052 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3053 }
3054
3055 /* If name is not NULL, then it is used for marking the beginning of a
3056 function, whereas if it is NULL then it means the function end. */
3057 static void
3058 asmfunc_debug (const char * name)
3059 {
3060 static const char * last_name = NULL;
3061
3062 if (name != NULL)
3063 {
3064 gas_assert (last_name == NULL);
3065 last_name = name;
3066
3067 if (debug_type == DEBUG_STABS)
3068 stabs_generate_asm_func (name, name);
3069 }
3070 else
3071 {
3072 gas_assert (last_name != NULL);
3073
3074 if (debug_type == DEBUG_STABS)
3075 stabs_generate_asm_endfunc (last_name, last_name);
3076
3077 last_name = NULL;
3078 }
3079 }
3080
3081 static void
3082 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3083 {
3084 if (codecomposer_syntax)
3085 {
3086 switch (asmfunc_state)
3087 {
3088 case OUTSIDE_ASMFUNC:
3089 asmfunc_state = WAITING_ASMFUNC_NAME;
3090 break;
3091
3092 case WAITING_ASMFUNC_NAME:
3093 as_bad (_(".asmfunc repeated."));
3094 break;
3095
3096 case WAITING_ENDASMFUNC:
3097 as_bad (_(".asmfunc without function."));
3098 break;
3099 }
3100 demand_empty_rest_of_line ();
3101 }
3102 else
3103 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3104 }
3105
3106 static void
3107 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3108 {
3109 if (codecomposer_syntax)
3110 {
3111 switch (asmfunc_state)
3112 {
3113 case OUTSIDE_ASMFUNC:
3114 as_bad (_(".endasmfunc without a .asmfunc."));
3115 break;
3116
3117 case WAITING_ASMFUNC_NAME:
3118 as_bad (_(".endasmfunc without function."));
3119 break;
3120
3121 case WAITING_ENDASMFUNC:
3122 asmfunc_state = OUTSIDE_ASMFUNC;
3123 asmfunc_debug (NULL);
3124 break;
3125 }
3126 demand_empty_rest_of_line ();
3127 }
3128 else
3129 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3130 }
3131
3132 static void
3133 s_ccs_def (int name)
3134 {
3135 if (codecomposer_syntax)
3136 s_globl (name);
3137 else
3138 as_bad (_(".def pseudo-op only available with -mccs flag."));
3139 }
3140
3141 /* Directives: Literal pools. */
3142
3143 static literal_pool *
3144 find_literal_pool (void)
3145 {
3146 literal_pool * pool;
3147
3148 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3149 {
3150 if (pool->section == now_seg
3151 && pool->sub_section == now_subseg)
3152 break;
3153 }
3154
3155 return pool;
3156 }
3157
3158 static literal_pool *
3159 find_or_make_literal_pool (void)
3160 {
3161 /* Next literal pool ID number. */
3162 static unsigned int latest_pool_num = 1;
3163 literal_pool * pool;
3164
3165 pool = find_literal_pool ();
3166
3167 if (pool == NULL)
3168 {
3169 /* Create a new pool. */
3170 pool = XNEW (literal_pool);
3171 if (! pool)
3172 return NULL;
3173
3174 pool->next_free_entry = 0;
3175 pool->section = now_seg;
3176 pool->sub_section = now_subseg;
3177 pool->next = list_of_pools;
3178 pool->symbol = NULL;
3179 pool->alignment = 2;
3180
3181 /* Add it to the list. */
3182 list_of_pools = pool;
3183 }
3184
3185 /* New pools, and emptied pools, will have a NULL symbol. */
3186 if (pool->symbol == NULL)
3187 {
3188 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3189 (valueT) 0, &zero_address_frag);
3190 pool->id = latest_pool_num ++;
3191 }
3192
3193 /* Done. */
3194 return pool;
3195 }
3196
3197 /* Add the literal in the global 'inst'
3198 structure to the relevant literal pool. */
3199
static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  /* An 8-byte literal is stored as two 4-byte halves; compute them in
     memory order, swapping for big-endian targets.  */
  if (nbytes == 8)
    {
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.reloc.exp.X_unsigned ? 0
	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match an existing 4-byte constant...  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.reloc.exp.X_unsigned))
	    break;

	  /* ... or an existing symbolic entry.  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.reloc.exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.reloc.exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte literal matches a pair of consecutive 4-byte
	 entries starting on an 8-byte boundary.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.reloc.exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.reloc.exp.X_unsigned))
	break;

      /* A padding slot left by a previous 8-byte alignment can be
	 reused for a 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.reloc.exp.X_op == O_constant
		|| inst.reloc.exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Pool currently ends on a 4-byte boundary: insert a
		 padding slot so the 8-byte value is aligned.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.reloc.exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 4-byte halves as consecutive entries.  */
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Reuse the padding slot found above for this 4-byte literal.  */
      pool->literals[entry] = inst.reloc.exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's operand as a reference to the pool
     symbol at the entry's byte offset.  */
  inst.reloc.exp.X_op	      = O_symbol;
  inst.reloc.exp.X_add_number = pool_size;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3361
3362 bfd_boolean
3363 tc_start_label_without_colon (void)
3364 {
3365 bfd_boolean ret = TRUE;
3366
3367 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3368 {
3369 const char *label = input_line_pointer;
3370
3371 while (!is_end_of_line[(int) label[-1]])
3372 --label;
3373
3374 if (*label == '.')
3375 {
3376 as_bad (_("Invalid label '%s'"), label);
3377 ret = FALSE;
3378 }
3379
3380 asmfunc_debug (label);
3381
3382 asmfunc_state = WAITING_ENDASMFUNC;
3383 }
3384
3385 return ret;
3386 }
3387
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   symbol_locate fills in the already-allocated SYMBOLP: it copies
   NAME into the notes obstack (so the caller may reuse its buffer),
   optionally canonicalizes it, records segment, value and owning
   fragment, appends the symbol to the global symbol chain and runs
   the object-format and target new-symbol hooks.  */

static void
symbol_locate (symbolS * symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT segment,		/* Segment identifier (SEG_<something>).  */
	       valueT valu,		/* Symbol value.  */
	       fragS * frag)		/* Associated fragment.  */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Include the terminating NUL in the copy.  */
  name_length = strlen (name) + 1; /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending to a frozen symbol table would invalidate output
       already produced from it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3438
/* Implement the .ltorg/.pool directives: dump the pending literal
   pool at the current output position, aligned as the pool requires,
   under a freshly located pool symbol.  A no-op when there is no pool
   or the pool is empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Literal data follows code here, so switch the mapping state.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The embedded \002 byte cannot appear in user-written symbol names,
     presumably guaranteeing uniqueness of the pool symbol.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.
	 The low bits of X_md hold the entry size in bytes.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3490
3491 #ifdef OBJ_ELF
3492 /* Forward declarations for functions below, in the MD interface
3493 section. */
3494 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3495 static valueT create_unwind_entry (int);
3496 static void start_unwind_section (const segT, int);
3497 static void add_unwind_opcode (valueT, int);
3498 static void flush_pending_unwind (void);
3499
3500 /* Directives: Data. */
3501
/* Implement .word/.long (NBYTES == 4): like the generic cons, but
   each expression may carry an ARM relocation suffix such as (got) or
   (plt), in which case the value is emitted with the requested
   relocation instead of a plain data reloc.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* This directive emits data, not instructions.  */
  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbolic expression may be followed by a relocation
	     suffix; -1 means an unknown suffix, BFD_RELOC_UNUSED means
	     no suffix at all.  */
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.

		     The input buffer is rewritten in place: the text of
		     the reloc suffix is moved to the front and the
		     expression text re-parsed after it; SAVE_BUF holds
		     the original bytes so they can be restored.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the SIZE-byte reloc at the end of the
		     NBYTES-wide, zero-filled field.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3596
3597 /* Emit an expression containing a 32-bit thumb instruction.
3598 Implementation based on put_thumb32_insn. */
3599
3600 static void
3601 emit_thumb32_expr (expressionS * exp)
3602 {
3603 expressionS exp_high = *exp;
3604
3605 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3606 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3607 exp->X_add_number &= 0xffff;
3608 emit_expr (exp, (unsigned int) THUMB_SIZE);
3609 }
3610
/* Guess the instruction size based on the opcode.  Thumb opcode
   values below 0xe800 are 16-bit encodings; values whose top halfword
   is at or above 0xe800 (i.e. >= 0xe8000000) are 32-bit encodings;
   anything in between is ambiguous, signalled by returning 0.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int uop = (unsigned int) opcode;

  if (uop < 0xe800u)
    return 2;
  if (uop >= 0xe8000000u)
    return 4;
  return 0;
}
3623
/* Emit one .inst operand.  EXP must be a constant expression.
   NBYTES is the explicit size from .inst.n/.inst.w, or 0 for plain
   .inst, in which case the size is guessed from the opcode value.
   Returns TRUE iff an instruction was emitted.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width suffix: infer 2 or 4 bytes from the opcode;
	 0 means the width is ambiguous.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block state machine in step with this
		 hand-assembled instruction.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* A 32-bit Thumb instruction is emitted as two halfwords
		 on little-endian targets.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3668
/* Like s_arm_elf_cons but do not use md_cons_align and
   set the mapping state to MAP_ARM/MAP_THUMB.

   Implements .inst/.inst.n/.inst.w: NBYTES is 2 or 4 for the
   suffixed forms, 0 for plain .inst.  */

static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      /* ARM instructions are always 4 bytes, so width suffixes are
	 meaningless there.  */
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  /* Each comma-separated operand encodes one instruction.  */
  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3718
/* Parse a .rel31 directive: ".rel31 <highbit>, <expression>".  Emits
   a 32-bit word whose top bit is HIGHBIT (literal 0 or 1) and whose
   low 31 bits receive a BFD_RELOC_ARM_PREL31 (pc-relative) relocation
   against EXPRESSION.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  /* First operand must be the literal character '0' or '1'.  */
  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* This directive emits data, not instructions.  */
  mapping_state (MAP_DATA);

  expression (&exp);

  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3758
3759 /* Directives: AEABI stack-unwind tables. */
3760
3761 /* Parse an unwind_fnstart directive. Simply records the current location. */
3762
3763 static void
3764 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3765 {
3766 demand_empty_rest_of_line ();
3767 if (unwind.proc_start)
3768 {
3769 as_bad (_("duplicate .fnstart directive"));
3770 return;
3771 }
3772
3773 /* Mark the start of the function. */
3774 unwind.proc_start = expr_build_dot ();
3775
3776 /* Reset the rest of the unwind info. */
3777 unwind.opcode_count = 0;
3778 unwind.table_entry = NULL;
3779 unwind.personality_routine = NULL;
3780 unwind.personality_index = -1;
3781 unwind.frame_size = 0;
3782 unwind.fp_offset = 0;
3783 unwind.fp_reg = REG_SP;
3784 unwind.fp_used = 0;
3785 unwind.sp_restored = 0;
3786 }
3787
3788
3789 /* Parse a handlerdata directive. Creates the exception handling table entry
3790 for the function. */
3791
3792 static void
3793 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3794 {
3795 demand_empty_rest_of_line ();
3796 if (!unwind.proc_start)
3797 as_bad (MISSING_FNSTART);
3798
3799 if (unwind.table_entry)
3800 as_bad (_("duplicate .handlerdata directive"));
3801
3802 create_unwind_entry (1);
3803 }
3804
/* Parse an unwind_fnend directive.  Generates the index table entry:
   two words in the index section, the first a PREL31 reference to the
   function start, the second either an inline unwind entry or a
   PREL31 reference to the table entry built by create_unwind_entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  A non-zero VAL means the entry is inline
     (no .handlerdata was seen).  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size BFD_RELOC_NONE fix records the reference without
	 modifying any output bytes.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3874
3875
3876 /* Parse an unwind_cantunwind directive. */
3877
3878 static void
3879 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3880 {
3881 demand_empty_rest_of_line ();
3882 if (!unwind.proc_start)
3883 as_bad (MISSING_FNSTART);
3884
3885 if (unwind.personality_routine || unwind.personality_index != -1)
3886 as_bad (_("personality routine specified for cantunwind frame"));
3887
3888 unwind.personality_index = -2;
3889 }
3890
3891
3892 /* Parse a personalityindex directive. */
3893
3894 static void
3895 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3896 {
3897 expressionS exp;
3898
3899 if (!unwind.proc_start)
3900 as_bad (MISSING_FNSTART);
3901
3902 if (unwind.personality_routine || unwind.personality_index != -1)
3903 as_bad (_("duplicate .personalityindex directive"));
3904
3905 expression (&exp);
3906
3907 if (exp.X_op != O_constant
3908 || exp.X_add_number < 0 || exp.X_add_number > 15)
3909 {
3910 as_bad (_("bad personality routine number"));
3911 ignore_rest_of_line ();
3912 return;
3913 }
3914
3915 unwind.personality_index = exp.X_add_number;
3916
3917 demand_empty_rest_of_line ();
3918 }
3919
3920
/* Parse a personality directive.  Records the named symbol as the
   frame's personality routine.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* get_symbol_name NUL-terminates the name in place; C keeps the
     overwritten byte so it can be put back after the lookup.  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  *p = c;
  demand_empty_rest_of_line ();
}
3942
3943
/* Parse a directive saving core registers.  The register list is
   converted to pop opcodes: a short 1-byte form when the saved
   registers are a contiguous run starting at r4 (optionally plus
   r14), otherwise 2-byte long-form opcodes.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set (the short form cannot
	 represent registers outside the r4..r4+n-1 / r14 pattern).  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4019
4020
4021 /* Parse a directive saving FPA registers. */
4022
4023 static void
4024 s_arm_unwind_save_fpa (int reg)
4025 {
4026 expressionS exp;
4027 int num_regs;
4028 valueT op;
4029
4030 /* Get Number of registers to transfer. */
4031 if (skip_past_comma (&input_line_pointer) != FAIL)
4032 expression (&exp);
4033 else
4034 exp.X_op = O_illegal;
4035
4036 if (exp.X_op != O_constant)
4037 {
4038 as_bad (_("expected , <constant>"));
4039 ignore_rest_of_line ();
4040 return;
4041 }
4042
4043 num_regs = exp.X_add_number;
4044
4045 if (num_regs < 1 || num_regs > 4)
4046 {
4047 as_bad (_("number of registers must be in the range [1:4]"));
4048 ignore_rest_of_line ();
4049 return;
4050 }
4051
4052 demand_empty_rest_of_line ();
4053
4054 if (reg == 4)
4055 {
4056 /* Short form. */
4057 op = 0xb4 | (num_regs - 1);
4058 add_unwind_opcode (op, 1);
4059 }
4060 else
4061 {
4062 /* Long form. */
4063 op = 0xc800 | (reg << 4) | (num_regs - 1);
4064 add_unwind_opcode (op, 2);
4065 }
4066 unwind.frame_size += num_regs * 12;
4067 }
4068
4069
/* Parse a directive saving VFP registers for ARMv6 and above.  The
   list may span D0..D31; registers at or above D16 (VFPv3) need a
   separate opcode from those below D16.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* The opcode encodes the first register relative to D16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes on the stack.  */
  unwind.frame_size += count * 8;
}
4118
4119
4120 /* Parse a directive saving VFP registers for pre-ARMv6. */
4121
4122 static void
4123 s_arm_unwind_save_vfp (void)
4124 {
4125 int count;
4126 unsigned int reg;
4127 valueT op;
4128
4129 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4130 if (count == FAIL)
4131 {
4132 as_bad (_("expected register list"));
4133 ignore_rest_of_line ();
4134 return;
4135 }
4136
4137 demand_empty_rest_of_line ();
4138
4139 if (reg == 8)
4140 {
4141 /* Short form. */
4142 op = 0xb8 | (count - 1);
4143 add_unwind_opcode (op, 1);
4144 }
4145 else
4146 {
4147 /* Long form. */
4148 op = 0xb300 | (reg << 4) | (count - 1);
4149 add_unwind_opcode (op, 2);
4150 }
4151 unwind.frame_size += count * 8 + 4;
4152 }
4153
4154
4155 /* Parse a directive saving iWMMXt data registers. */
4156
4157 static void
4158 s_arm_unwind_save_mmxwr (void)
4159 {
4160 int reg;
4161 int hi_reg;
4162 int i;
4163 unsigned mask = 0;
4164 valueT op;
4165
4166 if (*input_line_pointer == '{')
4167 input_line_pointer++;
4168
4169 do
4170 {
4171 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4172
4173 if (reg == FAIL)
4174 {
4175 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4176 goto error;
4177 }
4178
4179 if (mask >> reg)
4180 as_tsktsk (_("register list not in ascending order"));
4181 mask |= 1 << reg;
4182
4183 if (*input_line_pointer == '-')
4184 {
4185 input_line_pointer++;
4186 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4187 if (hi_reg == FAIL)
4188 {
4189 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4190 goto error;
4191 }
4192 else if (reg >= hi_reg)
4193 {
4194 as_bad (_("bad register range"));
4195 goto error;
4196 }
4197 for (; reg < hi_reg; reg++)
4198 mask |= 1 << reg;
4199 }
4200 }
4201 while (skip_past_comma (&input_line_pointer) != FAIL);
4202
4203 skip_past_char (&input_line_pointer, '}');
4204
4205 demand_empty_rest_of_line ();
4206
4207 /* Generate any deferred opcodes because we're going to be looking at
4208 the list. */
4209 flush_pending_unwind ();
4210
4211 for (i = 0; i < 16; i++)
4212 {
4213 if (mask & (1 << i))
4214 unwind.frame_size += 8;
4215 }
4216
4217 /* Attempt to combine with a previous opcode. We do this because gcc
4218 likes to output separate unwind directives for a single block of
4219 registers. */
4220 if (unwind.opcode_count > 0)
4221 {
4222 i = unwind.opcodes[unwind.opcode_count - 1];
4223 if ((i & 0xf8) == 0xc0)
4224 {
4225 i &= 7;
4226 /* Only merge if the blocks are contiguous. */
4227 if (i < 6)
4228 {
4229 if ((mask & 0xfe00) == (1 << 9))
4230 {
4231 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4232 unwind.opcode_count--;
4233 }
4234 }
4235 else if (i == 6 && unwind.opcode_count >= 2)
4236 {
4237 i = unwind.opcodes[unwind.opcode_count - 2];
4238 reg = i >> 4;
4239 i &= 0xf;
4240
4241 op = 0xffff << (reg - 1);
4242 if (reg > 0
4243 && ((mask & op) == (1u << (reg - 1))))
4244 {
4245 op = (1 << (reg + i + 1)) - 1;
4246 op &= ~((1 << reg) - 1);
4247 mask |= op;
4248 unwind.opcode_count -= 2;
4249 }
4250 }
4251 }
4252 }
4253
4254 hi_reg = 15;
4255 /* We want to generate opcodes in the order the registers have been
4256 saved, ie. descending order. */
4257 for (reg = 15; reg >= -1; reg--)
4258 {
4259 /* Save registers in blocks. */
4260 if (reg < 0
4261 || !(mask & (1 << reg)))
4262 {
4263 /* We found an unsaved reg. Generate opcodes to save the
4264 preceding block. */
4265 if (reg != hi_reg)
4266 {
4267 if (reg == 9)
4268 {
4269 /* Short form. */
4270 op = 0xc0 | (hi_reg - 10);
4271 add_unwind_opcode (op, 1);
4272 }
4273 else
4274 {
4275 /* Long form. */
4276 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4277 add_unwind_opcode (op, 2);
4278 }
4279 }
4280 hi_reg = reg - 1;
4281 }
4282 }
4283
4284 return;
4285 error:
4286 ignore_rest_of_line ();
4287 }
4288
/* Parse a directive saving iWMMXt control registers (wCGR0..wCGR3,
   register numbers 8..11, stored in MASK rebased to bit 0).  Emits a
   single two-byte 0xc7 opcode carrying the mask.  */

static void
s_arm_unwind_save_mmxwcg (void)
{
  int reg;
  int hi_reg;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  skip_whitespace (input_line_pointer);

  /* Parse the {wCGRn, wCGRn-wCGRm, ...} register list.  */
  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	  goto error;
	}

      /* Rebase wCGR register numbers (8..11) to bit positions 0..3.  */
      reg -= 8;
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
  flush_pending_unwind ();

  /* Each control register occupies 4 bytes.  */
  for (reg = 0; reg < 16; reg++)
    {
      if (mask & (1 << reg))
	unwind.frame_size += 4;
    }
  op = 0xc700 | mask;
  add_unwind_opcode (op, 2);
  return;
error:
  ignore_rest_of_line ();
}
4356
4357
4358 /* Parse an unwind_save directive.
4359 If the argument is non-zero, this is a .vsave directive. */
4360
4361 static void
4362 s_arm_unwind_save (int arch_v6)
4363 {
4364 char *peek;
4365 struct reg_entry *reg;
4366 bfd_boolean had_brace = FALSE;
4367
4368 if (!unwind.proc_start)
4369 as_bad (MISSING_FNSTART);
4370
4371 /* Figure out what sort of save we have. */
4372 peek = input_line_pointer;
4373
4374 if (*peek == '{')
4375 {
4376 had_brace = TRUE;
4377 peek++;
4378 }
4379
4380 reg = arm_reg_parse_multi (&peek);
4381
4382 if (!reg)
4383 {
4384 as_bad (_("register expected"));
4385 ignore_rest_of_line ();
4386 return;
4387 }
4388
4389 switch (reg->type)
4390 {
4391 case REG_TYPE_FN:
4392 if (had_brace)
4393 {
4394 as_bad (_("FPA .unwind_save does not take a register list"));
4395 ignore_rest_of_line ();
4396 return;
4397 }
4398 input_line_pointer = peek;
4399 s_arm_unwind_save_fpa (reg->number);
4400 return;
4401
4402 case REG_TYPE_RN:
4403 s_arm_unwind_save_core ();
4404 return;
4405
4406 case REG_TYPE_VFD:
4407 if (arch_v6)
4408 s_arm_unwind_save_vfp_armv6 ();
4409 else
4410 s_arm_unwind_save_vfp ();
4411 return;
4412
4413 case REG_TYPE_MMXWR:
4414 s_arm_unwind_save_mmxwr ();
4415 return;
4416
4417 case REG_TYPE_MMXWCG:
4418 s_arm_unwind_save_mmxwcg ();
4419 return;
4420
4421 default:
4422 as_bad (_(".unwind_save does not support this kind of register"));
4423 ignore_rest_of_line ();
4424 }
4425 }
4426
4427
/* Parse an unwind_movsp directive: ".unwind_movsp <reg> [, <offset>]"
   records that the stack pointer was copied from REG, emitting the
   0x90|reg unwind opcode and remembering REG as the frame pointer.  */

static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  /* A movsp only makes sense while sp is still the frame pointer.  */
  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value.  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later.  */
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
4477
4478 /* Parse an unwind_pad directive. */
4479
4480 static void
4481 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4482 {
4483 int offset;
4484
4485 if (!unwind.proc_start)
4486 as_bad (MISSING_FNSTART);
4487
4488 if (immediate_for_directive (&offset) == FAIL)
4489 return;
4490
4491 if (offset & 3)
4492 {
4493 as_bad (_("stack increment must be multiple of 4"));
4494 ignore_rest_of_line ();
4495 return;
4496 }
4497
4498 /* Don't generate any opcodes, just record the details for later. */
4499 unwind.frame_size += offset;
4500 unwind.pending_offset += offset;
4501
4502 demand_empty_rest_of_line ();
4503 }
4504
4505 /* Parse an unwind_setfp directive. */
4506
4507 static void
4508 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4509 {
4510 int sp_reg;
4511 int fp_reg;
4512 int offset;
4513
4514 if (!unwind.proc_start)
4515 as_bad (MISSING_FNSTART);
4516
4517 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4518 if (skip_past_comma (&input_line_pointer) == FAIL)
4519 sp_reg = FAIL;
4520 else
4521 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4522
4523 if (fp_reg == FAIL || sp_reg == FAIL)
4524 {
4525 as_bad (_("expected <reg>, <reg>"));
4526 ignore_rest_of_line ();
4527 return;
4528 }
4529
4530 /* Optional constant. */
4531 if (skip_past_comma (&input_line_pointer) != FAIL)
4532 {
4533 if (immediate_for_directive (&offset) == FAIL)
4534 return;
4535 }
4536 else
4537 offset = 0;
4538
4539 demand_empty_rest_of_line ();
4540
4541 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4542 {
4543 as_bad (_("register must be either sp or set by a previous"
4544 "unwind_movsp directive"));
4545 return;
4546 }
4547
4548 /* Don't generate any opcodes, just record the information for later. */
4549 unwind.fp_reg = fp_reg;
4550 unwind.fp_used = 1;
4551 if (sp_reg == REG_SP)
4552 unwind.fp_offset = unwind.frame_size - offset;
4553 else
4554 unwind.fp_offset -= offset;
4555 }
4556
4557 /* Parse an unwind_raw directive. */
4558
4559 static void
4560 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4561 {
4562 expressionS exp;
4563 /* This is an arbitrary limit. */
4564 unsigned char op[16];
4565 int count;
4566
4567 if (!unwind.proc_start)
4568 as_bad (MISSING_FNSTART);
4569
4570 expression (&exp);
4571 if (exp.X_op == O_constant
4572 && skip_past_comma (&input_line_pointer) != FAIL)
4573 {
4574 unwind.frame_size += exp.X_add_number;
4575 expression (&exp);
4576 }
4577 else
4578 exp.X_op = O_illegal;
4579
4580 if (exp.X_op != O_constant)
4581 {
4582 as_bad (_("expected <offset>, <opcode>"));
4583 ignore_rest_of_line ();
4584 return;
4585 }
4586
4587 count = 0;
4588
4589 /* Parse the opcode. */
4590 for (;;)
4591 {
4592 if (count >= 16)
4593 {
4594 as_bad (_("unwind opcode too long"));
4595 ignore_rest_of_line ();
4596 }
4597 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4598 {
4599 as_bad (_("invalid unwind opcode"));
4600 ignore_rest_of_line ();
4601 return;
4602 }
4603 op[count++] = exp.X_add_number;
4604
4605 /* Parse the next byte. */
4606 if (skip_past_comma (&input_line_pointer) == FAIL)
4607 break;
4608
4609 expression (&exp);
4610 }
4611
4612 /* Add the opcode bytes in reverse order. */
4613 while (count--)
4614 add_unwind_opcode (op[count], 1);
4615
4616 demand_empty_rest_of_line ();
4617 }
4618
4619
4620 /* Parse a .eabi_attribute directive. */
4621
4622 static void
4623 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4624 {
4625 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4626
4627 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4628 attributes_set_explicitly[tag] = 1;
4629 }
4630
/* Emit a tls fix for the symbol: attach a TLS_DESCSEQ relocation
   (ARM or Thumb flavour) at the current output position without
   emitting any bytes.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* P is the current end of the frag; the fix covers the next 4 bytes
     of code but no data is written here.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4654 #endif /* OBJ_ELF */
4655
4656 static void s_arm_arch (int);
4657 static void s_arm_object_arch (int);
4658 static void s_arm_cpu (int);
4659 static void s_arm_fpu (int);
4660 static void s_arm_arch_extension (int);
4661
4662 #ifdef TE_PE
4663
4664 static void
4665 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4666 {
4667 expressionS exp;
4668
4669 do
4670 {
4671 expression (&exp);
4672 if (exp.X_op == O_symbol)
4673 exp.X_op = O_secrel;
4674
4675 emit_expr (&exp, 4);
4676 }
4677 while (*input_line_pointer++ == ',');
4678
4679 input_line_pointer--;
4680 demand_empty_rest_of_line ();
4681 }
4682 #endif /* TE_PE */
4683
4684 /* This table describes all the machine specific pseudo-ops the assembler
4685 has to support. The fields are:
4686 pseudo-op name without dot
4687 function to call to execute this pseudo-op
4688 Integer arg to pass to the function. */
4689
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req", s_req, 0 },
  /* Following two are likewise never called.  */
  { "dn", s_dn, 0 },
  { "qn", s_qn, 0 },
  { "unreq", s_unreq, 0 },
  { "bss", s_bss, 0 },
  { "align", s_align_ptwo, 2 },
  /* Instruction-set state and section directives.  */
  { "arm", s_arm, 0 },
  { "thumb", s_thumb, 0 },
  { "code", s_code, 0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func", s_thumb_func, 0 },
  { "thumb_set", s_thumb_set, 0 },
  { "even", s_even, 0 },
  { "ltorg", s_ltorg, 0 },
  { "pool", s_ltorg, 0 },
  { "syntax", s_syntax, 0 },
  /* Target-selection directives.  */
  { "cpu", s_arm_cpu, 0 },
  { "arch", s_arm_arch, 0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu", s_arm_fpu, 0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  /* ELF data directives, plus EABI exception-unwinding and
     build-attribute directives.  */
  { "word", s_arm_elf_cons, 4 },
  { "long", s_arm_elf_cons, 4 },
  { "inst.n", s_arm_elf_inst, 2 },
  { "inst.w", s_arm_elf_inst, 4 },
  { "inst", s_arm_elf_inst, 0 },
  { "rel31", s_arm_rel31, 0 },
  { "fnstart", s_arm_unwind_fnstart, 0 },
  { "fnend", s_arm_unwind_fnend, 0 },
  { "cantunwind", s_arm_unwind_cantunwind, 0 },
  { "personality", s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata", s_arm_unwind_handlerdata, 0 },
  { "save", s_arm_unwind_save, 0 },
  { "vsave", s_arm_unwind_save, 1 },
  { "movsp", s_arm_unwind_movsp, 0 },
  { "pad", s_arm_unwind_pad, 0 },
  { "setfp", s_arm_unwind_setfp, 0 },
  { "unwind_raw", s_arm_unwind_raw, 0 },
  { "eabi_attribute", s_arm_eabi_attribute, 0 },
  { "tlsdescseq", s_arm_tls_descseq, 0 },
#else
  { "word", cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc", dwarf2_directive_loc, 0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend", float_cons, 'x' },
  { "ldouble", float_cons, 'x' },
  { "packed", float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref", s_ccs_ref, 0},
  {"def", s_ccs_def, 0},
  {"asmfunc", s_ccs_asmfunc, 0},
  {"endasmfunc", s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4763 \f
4764 /* Parser functions used exclusively in instruction operands. */
4765
4766 /* Generic immediate-value read function for use in insn parsing.
4767 STR points to the beginning of the immediate (the leading #);
4768 VAL receives the value; if the value is outside [MIN, MAX]
4769 issue an error. PREFIX_OPT is true if the immediate prefix is
4770 optional. */
4771
4772 static int
4773 parse_immediate (char **str, int *val, int min, int max,
4774 bfd_boolean prefix_opt)
4775 {
4776 expressionS exp;
4777
4778 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4779 if (exp.X_op != O_constant)
4780 {
4781 inst.error = _("constant expression required");
4782 return FAIL;
4783 }
4784
4785 if (exp.X_add_number < min || exp.X_add_number > max)
4786 {
4787 inst.error = _("immediate value out of range");
4788 return FAIL;
4789 }
4790
4791 *val = exp.X_add_number;
4792 return SUCCESS;
4793 }
4794
4795 /* Less-generic immediate-value read function with the possibility of loading a
4796 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4797 instructions. Puts the result directly in inst.operands[i]. */
4798
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  /* Parse a (possibly 64-bit) immediate into inst.operands[i]: the low
     32 bits go in .imm, the high 32 bits (if any) in .reg with
     .regisimm set.  IN_EXP, if non-NULL, receives the parsed
     expression.  Returns SUCCESS or FAIL.  */
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm and the next 32 into .reg,
	 littlenum by littlenum.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4867
4868 /* Returns the pseudo-register number of an FPA immediate constant,
4869 or FAIL if there isn't a valid constant here. */
4870
static int
parse_fpa_immediate (char ** str)
{
  /* Match *STR against the set of FPA immediate constants; on success
     return I + 8 for matching entry I (presumably the FPA pseudo-register
     number -- the callers treat it as such), or FAIL with inst.error
     set.  */
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char * save_in;
  expressionS exp;
  int i;
  int j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Matched a prefix but not up to end of line: back out.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").
     NOTE: input_line_pointer is saved and temporarily redirected at
     *STR so expression () can be used; it is restored on every exit
     path below.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4960
4961 /* Returns 1 if a number has "quarter-precision" float format
4962 0baBbbbbbc defgh000 00000000 00000000. */
4963
static int
is_quarter_float (unsigned imm)
{
  /* A "quarter-precision" value 0baBbbbbbc defgh000 00000000 00000000
     has its low 19 bits clear, and bits 25-30 equal either to
     0x3e000000 (when bit 29 is set) or 0x40000000 (when it is clear).  */
  unsigned exp_bits = imm & 0x7e000000;
  unsigned expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;

  return exp_bits == expected;
}
4970
4971
4972 /* Detect the presence of a floating point or integer zero constant,
4973 i.e. #0.0 or #0. */
4974
4975 static bfd_boolean
4976 parse_ifimm_zero (char **in)
4977 {
4978 int error_code;
4979
4980 if (!is_immediate_prefix (**in))
4981 {
4982 /* In unified syntax, all prefixes are optional. */
4983 if (!unified_syntax)
4984 return FALSE;
4985 }
4986 else
4987 ++*in;
4988
4989 /* Accept #0x0 as a synonym for #0. */
4990 if (strncmp (*in, "0x", 2) == 0)
4991 {
4992 int val;
4993 if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
4994 return FALSE;
4995 return TRUE;
4996 }
4997
4998 error_code = atof_generic (in, ".", EXP_CHARS,
4999 &generic_floating_point_number);
5000
5001 if (!error_code
5002 && generic_floating_point_number.sign == '+'
5003 && (generic_floating_point_number.low
5004 > generic_floating_point_number.leader))
5005 return TRUE;
5006
5007 return FALSE;
5008 }
5009
5010 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5011 0baBbbbbbc defgh000 00000000 00000000.
5012 The zero and minus-zero cases need special handling, since they can't be
5013 encoded in the "quarter-precision" float format, but can nonetheless be
5014 loaded as integer constants. */
5015
static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  /* Parse an 8-bit "quarter-precision" float operand; on SUCCESS store
     the 32-bit single-precision bit pattern in *IMMED and advance *CCP.
     Zero and minus-zero are also accepted (loadable as integers).  */
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan the token for a '.' or exponent character.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept an encodable quarter-precision pattern, or +/-0.0.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5073
/* Shift operands.  The shift kinds that can appear in a shifted
   operand.  Note that ASL is assimilated to LSL and RRX to ROR #0 by
   the parser/encoder (see parse_shift below).  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};
5079
/* Maps a textual shift mnemonic onto its shift_kind; instances are
   looked up through the arm_shift_hsh hash table in parse_shift.  */
struct asm_shift_name
{
  const char *name;		/* Shift mnemonic as written in source.  */
  enum shift_kind kind;		/* Canonical shift operation.  */
};
5085
/* Third argument to parse_shift: restricts which shift kinds/operand
   forms the caller will accept.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
};
5095
5096 /* Parse a <shift> specifier on an ARM data processing instruction.
5097 This has three forms:
5098
5099 (LSL|LSR|ASL|ASR|ROR) Rs
5100 (LSL|LSR|ASL|ASR|ROR) #imm
5101 RRX
5102
5103 Note that ASL is assimilated to LSL in the instruction encoding, and
5104 RRX to ROR #0 (which cannot be written as such). */
5105
5106 static int
5107 parse_shift (char **str, int i, enum parse_shift_mode mode)
5108 {
5109 const struct asm_shift_name *shift_name;
5110 enum shift_kind shift;
5111 char *s = *str;
5112 char *p = s;
5113 int reg;
5114
5115 for (p = *str; ISALPHA (*p); p++)
5116 ;
5117
5118 if (p == *str)
5119 {
5120 inst.error = _("shift expression expected");
5121 return FAIL;
5122 }
5123
5124 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5125 p - *str);
5126
5127 if (shift_name == NULL)
5128 {
5129 inst.error = _("shift expression expected");
5130 return FAIL;
5131 }
5132
5133 shift = shift_name->kind;
5134
5135 switch (mode)
5136 {
5137 case NO_SHIFT_RESTRICT:
5138 case SHIFT_IMMEDIATE: break;
5139
5140 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5141 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5142 {
5143 inst.error = _("'LSL' or 'ASR' required");
5144 return FAIL;
5145 }
5146 break;
5147
5148 case SHIFT_LSL_IMMEDIATE:
5149 if (shift != SHIFT_LSL)
5150 {
5151 inst.error = _("'LSL' required");
5152 return FAIL;
5153 }
5154 break;
5155
5156 case SHIFT_ASR_IMMEDIATE:
5157 if (shift != SHIFT_ASR)
5158 {
5159 inst.error = _("'ASR' required");
5160 return FAIL;
5161 }
5162 break;
5163
5164 default: abort ();
5165 }
5166
5167 if (shift != SHIFT_RRX)
5168 {
5169 /* Whitespace can appear here if the next thing is a bare digit. */
5170 skip_whitespace (p);
5171
5172 if (mode == NO_SHIFT_RESTRICT
5173 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5174 {
5175 inst.operands[i].imm = reg;
5176 inst.operands[i].immisreg = 1;
5177 }
5178 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5179 return FAIL;
5180 }
5181 inst.operands[i].shift_kind = shift;
5182 inst.operands[i].shifted = 1;
5183 *str = p;
5184 return SUCCESS;
5185 }
5186
5187 /* Parse a <shifter_operand> for an ARM data processing instruction:
5188
5189 #<immediate>
5190 #<immediate>, <rotate>
5191 <Rm>
5192 <Rm>, <shift>
5193
5194 where <shift> is defined by parse_shift above, and <rotate> is a
5195 multiple of 2 between 0 and 30. Validation of immediate operands
5196 is deferred to md_apply_fix. */
5197
static int
parse_shifter_operand (char **str, int i)
{
  /* Parse an ARM data-processing <shifter_operand>: a register with
     optional shift, or an immediate with optional explicit rotation.
     Returns SUCCESS or FAIL with inst.error set.  */
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even value in [0, 30] and the rotated
	 constant an 8-bit value, matching the immediate encoding.  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  */
      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain immediate: leave encoding/validation to md_apply_fix.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
5256
5257 /* Group relocation information. Each entry in the table contains the
5258 textual name of the relocation as may appear in assembler source
5259 and must end with a colon.
5260 Along with this textual name are the relocation codes to be used if
5261 the corresponding instruction is an ALU instruction (ADD or SUB only),
5262 an LDR, an LDRS, or an LDC. */
5263
struct group_reloc_table_entry
{
  /* Relocation name as written in assembler source, without the
     trailing colon.  */
  const char *name;
  /* Relocation code when used on an ALU instruction (ADD or SUB only);
     0 means the relocation is not allowed for that instruction class.  */
  int alu_code;
  /* Likewise for LDR instructions.  */
  int ldr_code;
  /* Likewise for LDRS instructions.  */
  int ldrs_code;
  /* Likewise for LDC instructions.  */
  int ldc_code;
};
5272
typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,	/* Select the ldr_code column of group_reloc_table.  */
  GROUP_LDRS,	/* Select the ldrs_code column.  */
  GROUP_LDC	/* Select the ldc_code column.	*/
} group_reloc_type;
5281
/* A zero relocation code in any column means that relocation is not
   permitted for the corresponding instruction class.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5356
5357 /* Given the address of a pointer pointing to the textual name of a group
5358 relocation as may appear in assembler source, attempt to find its details
5359 in group_reloc_table. The pointer will be updated to the character after
5360 the trailing colon. On failure, FAIL will be returned; SUCCESS
5361 otherwise. On success, *entry will be updated to point at the relevant
5362 group_reloc_table entry. */
5363
5364 static int
5365 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5366 {
5367 unsigned int i;
5368 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5369 {
5370 int length = strlen (group_reloc_table[i].name);
5371
5372 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5373 && (*str)[length] == ':')
5374 {
5375 *out = &group_reloc_table[i];
5376 *str += (length + 1);
5377 return SUCCESS;
5378 }
5379 }
5380
5381 return FAIL;
5382 }
5383
5384 /* Parse a <shifter_operand> for an ARM data processing instruction
5385 (as for parse_shifter_operand) where group relocations are allowed:
5386
5387 #<immediate>
5388 #<immediate>, <rotate>
5389 #:<group_reloc>:<expression>
5390 <Rm>
5391 <Rm>, <shift>
5392
5393 where <group_reloc> is one of the strings defined in group_reloc_table.
5394 The hashes are optional.
5395
5396 Everything else is as for parse_shifter_operand. */
5397
5398 static parse_operand_result
5399 parse_shifter_operand_group_reloc (char **str, int i)
5400 {
5401 /* Determine if we have the sequence of characters #: or just :
5402 coming next. If we do, then we check for a group relocation.
5403 If we don't, punt the whole lot to parse_shifter_operand. */
5404
5405 if (((*str)[0] == '#' && (*str)[1] == ':')
5406 || (*str)[0] == ':')
5407 {
5408 struct group_reloc_table_entry *entry;
5409
5410 if ((*str)[0] == '#')
5411 (*str) += 2;
5412 else
5413 (*str)++;
5414
5415 /* Try to parse a group relocation. Anything else is an error. */
5416 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5417 {
5418 inst.error = _("unknown group relocation");
5419 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5420 }
5421
5422 /* We now have the group relocation table entry corresponding to
5423 the name in the assembler source. Next, we parse the expression. */
5424 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5425 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5426
5427 /* Record the relocation type (always the ALU variant here). */
5428 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5429 gas_assert (inst.reloc.type != 0);
5430
5431 return PARSE_OPERAND_SUCCESS;
5432 }
5433 else
5434 return parse_shifter_operand (str, i) == SUCCESS
5435 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5436
5437 /* Never reached. */
5438 }
5439
5440 /* Parse a Neon alignment expression. Information is written to
5441 inst.operands[i]. We assume the initial ':' has been skipped.
5442
5443 align .imm = align << 8, .immisalign=1, .preind=0 */
5444 static parse_operand_result
5445 parse_neon_alignment (char **str, int i)
5446 {
5447 char *p = *str;
5448 expressionS exp;
5449
5450 my_get_expression (&exp, &p, GE_NO_PREFIX);
5451
5452 if (exp.X_op != O_constant)
5453 {
5454 inst.error = _("alignment must be constant");
5455 return PARSE_OPERAND_FAIL;
5456 }
5457
5458 inst.operands[i].imm = exp.X_add_number << 8;
5459 inst.operands[i].immisalign = 1;
5460 /* Alignments are not pre-indexes. */
5461 inst.operands[i].preind = 0;
5462
5463 *str = p;
5464 return PARSE_OPERAND_SUCCESS;
5465 }
5466
5467 /* Parse all forms of an ARM address expression. Information is written
5468 to inst.operands[i] and/or inst.reloc.
5469
5470 Preindexed addressing (.preind=1):
5471
5472 [Rn, #offset] .reg=Rn .reloc.exp=offset
5473 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5474 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5475 .shift_kind=shift .reloc.exp=shift_imm
5476
5477 These three may have a trailing ! which causes .writeback to be set also.
5478
5479 Postindexed addressing (.postind=1, .writeback=1):
5480
5481 [Rn], #offset .reg=Rn .reloc.exp=offset
5482 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5483 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5484 .shift_kind=shift .reloc.exp=shift_imm
5485
5486 Unindexed addressing (.preind=0, .postind=0):
5487
5488 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5489
5490 Other:
5491
5492 [Rn]{!} shorthand for [Rn,#0]{!}
5493 =immediate .isreg=0 .reloc.exp=immediate
5494 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5495
5496 It is the caller's responsibility to check for addressing modes not
5497 supported by the instruction, and to set inst.reloc.type. */
5498
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* A bracketed form always starts with a base register.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm {, shift}] -- register offset form.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Not a register: the '-' we may have consumed belongs to an
	     immediate expression, so back up and re-parse it.	*/
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		case GROUP_LDR:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		  break;

		case GROUP_LDRS:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		  break;

		case GROUP_LDC:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		  break;

		default:
		  gas_assert (0);
		}

	      /* A zero code in the table means this relocation is not
		 permitted for this instruction class.	*/
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;

	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* [Rn], offset -- post-indexed form, implies writeback.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;

	      /* As above: the consumed '-' belongs to the immediate.  */
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5752
5753 static int
5754 parse_address (char **str, int i)
5755 {
5756 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5757 ? SUCCESS : FAIL;
5758 }
5759
5760 static parse_operand_result
5761 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5762 {
5763 return parse_address_main (str, i, 1, type);
5764 }
5765
5766 /* Parse an operand for a MOVW or MOVT instruction. */
static int
parse_half (char **str)
{
  /* Parse a MOVW/MOVT operand: an optional ":lower16:"/":upper16:"
     prefix followed by an expression.	Assumes inst.reloc.type is
     BFD_RELOC_UNUSED on entry; a bare constant is range-checked here,
     a prefixed expression is left for the relocation machinery.  */
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      /* Both prefixes are 9 characters long.  */
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* No prefix: the operand must be a plain 16-bit constant.  */
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
5805
5806 /* Miscellaneous. */
5807
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS appears to be TRUE when the PSR is the destination of an msr
   (i.e. it is being written) — it gates the PSR_f defaulting below.  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: look the whole register name up in the v7m PSR table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* These names all end in 'r'/'R'; trim anything that follows it
	 (e.g. a "_nzcvq" style suffix) before the hash lookup.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  p += 4;
 check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* A repeated flag letter sets the 0x20 "error" bit (0x2 for g),
	     which is diagnosed after the loop.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v, q given exactly once -> PSR_f.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicates and partial nzcvq sets (only all-or-none
	     is representable).  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* A/R-profile: the suffix names a field set (e.g. "_cxsf").  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6004
6005 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6006 value suitable for splatting into the AIF field of the instruction. */
6007
6008 static int
6009 parse_cps_flags (char **str)
6010 {
6011 int val = 0;
6012 int saw_a_flag = 0;
6013 char *s = *str;
6014
6015 for (;;)
6016 switch (*s++)
6017 {
6018 case '\0': case ',':
6019 goto done;
6020
6021 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6022 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6023 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6024
6025 default:
6026 inst.error = _("unrecognized CPS flag");
6027 return FAIL;
6028 }
6029
6030 done:
6031 if (saw_a_flag == 0)
6032 {
6033 inst.error = _("missing CPS flags");
6034 return FAIL;
6035 }
6036
6037 *str = s - 1;
6038 return val;
6039 }
6040
6041 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6042 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6043
6044 static int
6045 parse_endian_specifier (char **str)
6046 {
6047 int little_endian;
6048 char *s = *str;
6049
6050 if (strncasecmp (s, "BE", 2))
6051 little_endian = 0;
6052 else if (strncasecmp (s, "LE", 2))
6053 little_endian = 1;
6054 else
6055 {
6056 inst.error = _("valid endian specifiers are be or le");
6057 return FAIL;
6058 }
6059
6060 if (ISALNUM (s[2]) || s[2] == '_')
6061 {
6062 inst.error = _("valid endian specifiers are be or le");
6063 return FAIL;
6064 }
6065
6066 *str = s + 2;
6067 return little_endian;
6068 }
6069
6070 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6071 value suitable for poking into the rotate field of an sxt or sxta
6072 instruction, or FAIL on error. */
6073
6074 static int
6075 parse_ror (char **str)
6076 {
6077 int rot;
6078 char *s = *str;
6079
6080 if (strncasecmp (s, "ROR", 3) == 0)
6081 s += 3;
6082 else
6083 {
6084 inst.error = _("missing rotation field after comma");
6085 return FAIL;
6086 }
6087
6088 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6089 return FAIL;
6090
6091 switch (rot)
6092 {
6093 case 0: *str = s; return 0x0;
6094 case 8: *str = s; return 0x1;
6095 case 16: *str = s; return 0x2;
6096 case 24: *str = s; return 0x3;
6097
6098 default:
6099 inst.error = _("rotation can only be 0, 8, 16, or 24");
6100 return FAIL;
6101 }
6102 }
6103
6104 /* Parse a conditional code (from conds[] below). The value returned is in the
6105 range 0 .. 14, or FAIL. */
6106 static int
6107 parse_cond (char **str)
6108 {
6109 char *q;
6110 const struct asm_cond *c;
6111 int n;
6112 /* Condition codes are always 2 characters, so matching up to
6113 3 characters is sufficient. */
6114 char cond[3];
6115
6116 q = *str;
6117 n = 0;
6118 while (ISALPHA (*q) && n < 3)
6119 {
6120 cond[n] = TOLOWER (*q);
6121 q++;
6122 n++;
6123 }
6124
6125 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6126 if (!c)
6127 {
6128 inst.error = _("condition required");
6129 return FAIL;
6130 }
6131
6132 *str = q;
6133 return c->value;
6134 }
6135
/* Record a use of the given feature.  The use is merged into the
   accumulated feature set for the instruction set currently being
   assembled: thumb_arch_used when in Thumb mode, arm_arch_used
   otherwise.  */
static void
record_feature_use (const arm_feature_set *feature)
{
  if (thumb_mode)
    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
  else
    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
}
6145
6146 /* If the given feature available in the selected CPU, mark it as used.
6147 Returns TRUE iff feature is available. */
6148 static bfd_boolean
6149 mark_feature_used (const arm_feature_set *feature)
6150 {
6151 /* Ensure the option is valid on the current architecture. */
6152 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6153 return FALSE;
6154
6155 /* Add the appropriate architecture feature for the barrier option used.
6156 */
6157 record_feature_use (feature);
6158
6159 return TRUE;
6160 }
6161
6162 /* Parse an option for a barrier instruction. Returns the encoding for the
6163 option, or FAIL. */
6164 static int
6165 parse_barrier (char **str)
6166 {
6167 char *p, *q;
6168 const struct asm_barrier_opt *o;
6169
6170 p = q = *str;
6171 while (ISALPHA (*q))
6172 q++;
6173
6174 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6175 q - p);
6176 if (!o)
6177 return FAIL;
6178
6179 if (!mark_feature_used (&o->arch))
6180 return FAIL;
6181
6182 *str = q;
6183 return o->value;
6184 }
6185
6186 /* Parse the operands of a table branch instruction. Similar to a memory
6187 operand. */
6188 static int
6189 parse_tb (char **str)
6190 {
6191 char * p = *str;
6192 int reg;
6193
6194 if (skip_past_char (&p, '[') == FAIL)
6195 {
6196 inst.error = _("'[' expected");
6197 return FAIL;
6198 }
6199
6200 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6201 {
6202 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6203 return FAIL;
6204 }
6205 inst.operands[0].reg = reg;
6206
6207 if (skip_past_comma (&p) == FAIL)
6208 {
6209 inst.error = _("',' expected");
6210 return FAIL;
6211 }
6212
6213 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6214 {
6215 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6216 return FAIL;
6217 }
6218 inst.operands[0].imm = reg;
6219
6220 if (skip_past_comma (&p) == SUCCESS)
6221 {
6222 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6223 return FAIL;
6224 if (inst.reloc.exp.X_add_number != 1)
6225 {
6226 inst.error = _("invalid shift");
6227 return FAIL;
6228 }
6229 inst.operands[0].shifted = 1;
6230 }
6231
6232 if (skip_past_char (&p, ']') == FAIL)
6233 {
6234 inst.error = _("']' expected");
6235 return FAIL;
6236 }
6237 *str = p;
6238 return SUCCESS;
6239 }
6240
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.

   NOTE: the local operand index I is post-incremented as each operand is
   consumed, so inst.operands[] entries are filled in order and the final
   value of I is handed back through WHICH_OPERAND.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* The first operand discriminates between the VMOV variants: a Neon
     scalar, a vector register, or an ARM core register.  */
  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D register first: a second core register must follow.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two further core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is an ARM core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second S register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6463
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM code lives in the low 16 bits
   and the Thumb code in the high 16; parse_operands splits them apart.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff.  */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,	 /*				1 .. 32 */
  OP_oI32z,	 /*				0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,	 /* Optional Neon double precision register */
  OP_oRNQ,	 /* Optional Neon quad precision register */
  OP_oRNDQ,	 /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* All codes from here on are optional; parse_operands may backtrack
     past them.  Keep OP_FIRST_OPTIONAL pointing at the first one.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
6601
6602 /* Generic instruction operand parser. This does no encoding and no
6603 semantic validation; it merely squirrels values away in the inst
6604 structure. Returns SUCCESS or FAIL depending on whether the
6605 specified grammar matched. */
6606 static int
6607 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6608 {
6609 unsigned const int *upat = pattern;
6610 char *backtrack_pos = 0;
6611 const char *backtrack_error = 0;
6612 int i, val = 0, backtrack_index = 0;
6613 enum arm_reg_type rtype;
6614 parse_operand_result result;
6615 unsigned int op_parse_code;
6616
6617 #define po_char_or_fail(chr) \
6618 do \
6619 { \
6620 if (skip_past_char (&str, chr) == FAIL) \
6621 goto bad_args; \
6622 } \
6623 while (0)
6624
6625 #define po_reg_or_fail(regtype) \
6626 do \
6627 { \
6628 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6629 & inst.operands[i].vectype); \
6630 if (val == FAIL) \
6631 { \
6632 first_error (_(reg_expected_msgs[regtype])); \
6633 goto failure; \
6634 } \
6635 inst.operands[i].reg = val; \
6636 inst.operands[i].isreg = 1; \
6637 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6638 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6639 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6640 || rtype == REG_TYPE_VFD \
6641 || rtype == REG_TYPE_NQ); \
6642 } \
6643 while (0)
6644
6645 #define po_reg_or_goto(regtype, label) \
6646 do \
6647 { \
6648 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6649 & inst.operands[i].vectype); \
6650 if (val == FAIL) \
6651 goto label; \
6652 \
6653 inst.operands[i].reg = val; \
6654 inst.operands[i].isreg = 1; \
6655 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6656 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6657 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6658 || rtype == REG_TYPE_VFD \
6659 || rtype == REG_TYPE_NQ); \
6660 } \
6661 while (0)
6662
6663 #define po_imm_or_fail(min, max, popt) \
6664 do \
6665 { \
6666 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6667 goto failure; \
6668 inst.operands[i].imm = val; \
6669 } \
6670 while (0)
6671
6672 #define po_scalar_or_goto(elsz, label) \
6673 do \
6674 { \
6675 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6676 if (val == FAIL) \
6677 goto label; \
6678 inst.operands[i].reg = val; \
6679 inst.operands[i].isscalar = 1; \
6680 } \
6681 while (0)
6682
6683 #define po_misc_or_fail(expr) \
6684 do \
6685 { \
6686 if (expr) \
6687 goto failure; \
6688 } \
6689 while (0)
6690
6691 #define po_misc_or_fail_no_backtrack(expr) \
6692 do \
6693 { \
6694 result = expr; \
6695 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6696 backtrack_pos = 0; \
6697 if (result != PARSE_OPERAND_SUCCESS) \
6698 goto failure; \
6699 } \
6700 while (0)
6701
6702 #define po_barrier_or_imm(str) \
6703 do \
6704 { \
6705 val = parse_barrier (&str); \
6706 if (val == FAIL && ! ISALPHA (*str)) \
6707 goto immediate; \
6708 if (val == FAIL \
6709 /* ISB can only take SY as an option. */ \
6710 || ((inst.instruction & 0xf0) == 0x60 \
6711 && val != 0xf)) \
6712 { \
6713 inst.error = _("invalid barrier type"); \
6714 backtrack_pos = 0; \
6715 goto failure; \
6716 } \
6717 } \
6718 while (0)
6719
6720 skip_whitespace (str);
6721
6722 for (i = 0; upat[i] != OP_stop; i++)
6723 {
6724 op_parse_code = upat[i];
6725 if (op_parse_code >= 1<<16)
6726 op_parse_code = thumb ? (op_parse_code >> 16)
6727 : (op_parse_code & ((1<<16)-1));
6728
6729 if (op_parse_code >= OP_FIRST_OPTIONAL)
6730 {
6731 /* Remember where we are in case we need to backtrack. */
6732 gas_assert (!backtrack_pos);
6733 backtrack_pos = str;
6734 backtrack_error = inst.error;
6735 backtrack_index = i;
6736 }
6737
6738 if (i > 0 && (i > 1 || inst.operands[0].present))
6739 po_char_or_fail (',');
6740
6741 switch (op_parse_code)
6742 {
6743 /* Registers */
6744 case OP_oRRnpc:
6745 case OP_oRRnpcsp:
6746 case OP_RRnpc:
6747 case OP_RRnpcsp:
6748 case OP_oRR:
6749 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6750 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6751 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6752 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6753 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6754 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6755 case OP_oRND:
6756 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6757 case OP_RVC:
6758 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6759 break;
6760 /* Also accept generic coprocessor regs for unknown registers. */
6761 coproc_reg:
6762 po_reg_or_fail (REG_TYPE_CN);
6763 break;
6764 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6765 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6766 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6767 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6768 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6769 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6770 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6771 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6772 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6773 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6774 case OP_oRNQ:
6775 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6776 case OP_oRNDQ:
6777 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6778 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6779 case OP_oRNSDQ:
6780 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6781
6782 /* Neon scalar. Using an element size of 8 means that some invalid
6783 scalars are accepted here, so deal with those in later code. */
6784 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6785
6786 case OP_RNDQ_I0:
6787 {
6788 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6789 break;
6790 try_imm0:
6791 po_imm_or_fail (0, 0, TRUE);
6792 }
6793 break;
6794
6795 case OP_RVSD_I0:
6796 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6797 break;
6798
6799 case OP_RSVD_FI0:
6800 {
6801 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6802 break;
6803 try_ifimm0:
6804 if (parse_ifimm_zero (&str))
6805 inst.operands[i].imm = 0;
6806 else
6807 {
6808 inst.error
6809 = _("only floating point zero is allowed as immediate value");
6810 goto failure;
6811 }
6812 }
6813 break;
6814
6815 case OP_RR_RNSC:
6816 {
6817 po_scalar_or_goto (8, try_rr);
6818 break;
6819 try_rr:
6820 po_reg_or_fail (REG_TYPE_RN);
6821 }
6822 break;
6823
6824 case OP_RNSDQ_RNSC:
6825 {
6826 po_scalar_or_goto (8, try_nsdq);
6827 break;
6828 try_nsdq:
6829 po_reg_or_fail (REG_TYPE_NSDQ);
6830 }
6831 break;
6832
6833 case OP_RNDQ_RNSC:
6834 {
6835 po_scalar_or_goto (8, try_ndq);
6836 break;
6837 try_ndq:
6838 po_reg_or_fail (REG_TYPE_NDQ);
6839 }
6840 break;
6841
6842 case OP_RND_RNSC:
6843 {
6844 po_scalar_or_goto (8, try_vfd);
6845 break;
6846 try_vfd:
6847 po_reg_or_fail (REG_TYPE_VFD);
6848 }
6849 break;
6850
6851 case OP_VMOV:
6852 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6853 not careful then bad things might happen. */
6854 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6855 break;
6856
6857 case OP_RNDQ_Ibig:
6858 {
6859 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6860 break;
6861 try_immbig:
6862 /* There's a possibility of getting a 64-bit immediate here, so
6863 we need special handling. */
6864 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6865 == FAIL)
6866 {
6867 inst.error = _("immediate value is out of range");
6868 goto failure;
6869 }
6870 }
6871 break;
6872
6873 case OP_RNDQ_I63b:
6874 {
6875 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6876 break;
6877 try_shimm:
6878 po_imm_or_fail (0, 63, TRUE);
6879 }
6880 break;
6881
6882 case OP_RRnpcb:
6883 po_char_or_fail ('[');
6884 po_reg_or_fail (REG_TYPE_RN);
6885 po_char_or_fail (']');
6886 break;
6887
6888 case OP_RRnpctw:
6889 case OP_RRw:
6890 case OP_oRRw:
6891 po_reg_or_fail (REG_TYPE_RN);
6892 if (skip_past_char (&str, '!') == SUCCESS)
6893 inst.operands[i].writeback = 1;
6894 break;
6895
6896 /* Immediates */
6897 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6898 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6899 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6900 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6901 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6902 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6903 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6904 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6905 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6906 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6907 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6908 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6909
6910 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6911 case OP_oI7b:
6912 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6913 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6914 case OP_oI31b:
6915 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6916 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6917 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6918 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6919
6920 /* Immediate variants */
6921 case OP_oI255c:
6922 po_char_or_fail ('{');
6923 po_imm_or_fail (0, 255, TRUE);
6924 po_char_or_fail ('}');
6925 break;
6926
6927 case OP_I31w:
6928 /* The expression parser chokes on a trailing !, so we have
6929 to find it first and zap it. */
6930 {
6931 char *s = str;
6932 while (*s && *s != ',')
6933 s++;
6934 if (s[-1] == '!')
6935 {
6936 s[-1] = '\0';
6937 inst.operands[i].writeback = 1;
6938 }
6939 po_imm_or_fail (0, 31, TRUE);
6940 if (str == s - 1)
6941 str = s;
6942 }
6943 break;
6944
6945 /* Expressions */
6946 case OP_EXPi: EXPi:
6947 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6948 GE_OPT_PREFIX));
6949 break;
6950
6951 case OP_EXP:
6952 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6953 GE_NO_PREFIX));
6954 break;
6955
6956 case OP_EXPr: EXPr:
6957 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6958 GE_NO_PREFIX));
6959 if (inst.reloc.exp.X_op == O_symbol)
6960 {
6961 val = parse_reloc (&str);
6962 if (val == -1)
6963 {
6964 inst.error = _("unrecognized relocation suffix");
6965 goto failure;
6966 }
6967 else if (val != BFD_RELOC_UNUSED)
6968 {
6969 inst.operands[i].imm = val;
6970 inst.operands[i].hasreloc = 1;
6971 }
6972 }
6973 break;
6974
6975 /* Operand for MOVW or MOVT. */
6976 case OP_HALF:
6977 po_misc_or_fail (parse_half (&str));
6978 break;
6979
6980 /* Register or expression. */
6981 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6982 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6983
6984 /* Register or immediate. */
6985 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6986 I0: po_imm_or_fail (0, 0, FALSE); break;
6987
6988 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6989 IF:
6990 if (!is_immediate_prefix (*str))
6991 goto bad_args;
6992 str++;
6993 val = parse_fpa_immediate (&str);
6994 if (val == FAIL)
6995 goto failure;
6996 /* FPA immediates are encoded as registers 8-15.
6997 parse_fpa_immediate has already applied the offset. */
6998 inst.operands[i].reg = val;
6999 inst.operands[i].isreg = 1;
7000 break;
7001
7002 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
7003 I32z: po_imm_or_fail (0, 32, FALSE); break;
7004
7005 /* Two kinds of register. */
7006 case OP_RIWR_RIWC:
7007 {
7008 struct reg_entry *rege = arm_reg_parse_multi (&str);
7009 if (!rege
7010 || (rege->type != REG_TYPE_MMXWR
7011 && rege->type != REG_TYPE_MMXWC
7012 && rege->type != REG_TYPE_MMXWCG))
7013 {
7014 inst.error = _("iWMMXt data or control register expected");
7015 goto failure;
7016 }
7017 inst.operands[i].reg = rege->number;
7018 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7019 }
7020 break;
7021
7022 case OP_RIWC_RIWG:
7023 {
7024 struct reg_entry *rege = arm_reg_parse_multi (&str);
7025 if (!rege
7026 || (rege->type != REG_TYPE_MMXWC
7027 && rege->type != REG_TYPE_MMXWCG))
7028 {
7029 inst.error = _("iWMMXt control register expected");
7030 goto failure;
7031 }
7032 inst.operands[i].reg = rege->number;
7033 inst.operands[i].isreg = 1;
7034 }
7035 break;
7036
7037 /* Misc */
7038 case OP_CPSF: val = parse_cps_flags (&str); break;
7039 case OP_ENDI: val = parse_endian_specifier (&str); break;
7040 case OP_oROR: val = parse_ror (&str); break;
7041 case OP_COND: val = parse_cond (&str); break;
7042 case OP_oBARRIER_I15:
7043 po_barrier_or_imm (str); break;
7044 immediate:
7045 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7046 goto failure;
7047 break;
7048
7049 case OP_wPSR:
7050 case OP_rPSR:
7051 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7052 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7053 {
7054 inst.error = _("Banked registers are not available with this "
7055 "architecture.");
7056 goto failure;
7057 }
7058 break;
7059 try_psr:
7060 val = parse_psr (&str, op_parse_code == OP_wPSR);
7061 break;
7062
7063 case OP_APSR_RR:
7064 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7065 break;
7066 try_apsr:
7067 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7068 instruction). */
7069 if (strncasecmp (str, "APSR_", 5) == 0)
7070 {
7071 unsigned found = 0;
7072 str += 5;
7073 while (found < 15)
7074 switch (*str++)
7075 {
7076 case 'c': found = (found & 1) ? 16 : found | 1; break;
7077 case 'n': found = (found & 2) ? 16 : found | 2; break;
7078 case 'z': found = (found & 4) ? 16 : found | 4; break;
7079 case 'v': found = (found & 8) ? 16 : found | 8; break;
7080 default: found = 16;
7081 }
7082 if (found != 15)
7083 goto failure;
7084 inst.operands[i].isvec = 1;
7085 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7086 inst.operands[i].reg = REG_PC;
7087 }
7088 else
7089 goto failure;
7090 break;
7091
7092 case OP_TB:
7093 po_misc_or_fail (parse_tb (&str));
7094 break;
7095
7096 /* Register lists. */
7097 case OP_REGLST:
7098 val = parse_reg_list (&str);
7099 if (*str == '^')
7100 {
7101 inst.operands[i].writeback = 1;
7102 str++;
7103 }
7104 break;
7105
7106 case OP_VRSLST:
7107 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7108 break;
7109
7110 case OP_VRDLST:
7111 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7112 break;
7113
7114 case OP_VRSDLST:
7115 /* Allow Q registers too. */
7116 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7117 REGLIST_NEON_D);
7118 if (val == FAIL)
7119 {
7120 inst.error = NULL;
7121 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7122 REGLIST_VFP_S);
7123 inst.operands[i].issingle = 1;
7124 }
7125 break;
7126
7127 case OP_NRDLST:
7128 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7129 REGLIST_NEON_D);
7130 break;
7131
7132 case OP_NSTRLST:
7133 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7134 &inst.operands[i].vectype);
7135 break;
7136
7137 /* Addressing modes */
7138 case OP_ADDR:
7139 po_misc_or_fail (parse_address (&str, i));
7140 break;
7141
7142 case OP_ADDRGLDR:
7143 po_misc_or_fail_no_backtrack (
7144 parse_address_group_reloc (&str, i, GROUP_LDR));
7145 break;
7146
7147 case OP_ADDRGLDRS:
7148 po_misc_or_fail_no_backtrack (
7149 parse_address_group_reloc (&str, i, GROUP_LDRS));
7150 break;
7151
7152 case OP_ADDRGLDC:
7153 po_misc_or_fail_no_backtrack (
7154 parse_address_group_reloc (&str, i, GROUP_LDC));
7155 break;
7156
7157 case OP_SH:
7158 po_misc_or_fail (parse_shifter_operand (&str, i));
7159 break;
7160
7161 case OP_SHG:
7162 po_misc_or_fail_no_backtrack (
7163 parse_shifter_operand_group_reloc (&str, i));
7164 break;
7165
7166 case OP_oSHll:
7167 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7168 break;
7169
7170 case OP_oSHar:
7171 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7172 break;
7173
7174 case OP_oSHllar:
7175 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7176 break;
7177
7178 default:
7179 as_fatal (_("unhandled operand code %d"), op_parse_code);
7180 }
7181
7182 /* Various value-based sanity checks and shared operations. We
7183 do not signal immediate failures for the register constraints;
7184 this allows a syntax error to take precedence. */
7185 switch (op_parse_code)
7186 {
7187 case OP_oRRnpc:
7188 case OP_RRnpc:
7189 case OP_RRnpcb:
7190 case OP_RRw:
7191 case OP_oRRw:
7192 case OP_RRnpc_I0:
7193 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7194 inst.error = BAD_PC;
7195 break;
7196
7197 case OP_oRRnpcsp:
7198 case OP_RRnpcsp:
7199 if (inst.operands[i].isreg)
7200 {
7201 if (inst.operands[i].reg == REG_PC)
7202 inst.error = BAD_PC;
7203 else if (inst.operands[i].reg == REG_SP
7204 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7205 relaxed since ARMv8-A. */
7206 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
7207 {
7208 gas_assert (thumb);
7209 inst.error = BAD_SP;
7210 }
7211 }
7212 break;
7213
7214 case OP_RRnpctw:
7215 if (inst.operands[i].isreg
7216 && inst.operands[i].reg == REG_PC
7217 && (inst.operands[i].writeback || thumb))
7218 inst.error = BAD_PC;
7219 break;
7220
7221 case OP_CPSF:
7222 case OP_ENDI:
7223 case OP_oROR:
7224 case OP_wPSR:
7225 case OP_rPSR:
7226 case OP_COND:
7227 case OP_oBARRIER_I15:
7228 case OP_REGLST:
7229 case OP_VRSLST:
7230 case OP_VRDLST:
7231 case OP_VRSDLST:
7232 case OP_NRDLST:
7233 case OP_NSTRLST:
7234 if (val == FAIL)
7235 goto failure;
7236 inst.operands[i].imm = val;
7237 break;
7238
7239 default:
7240 break;
7241 }
7242
7243 /* If we get here, this operand was successfully parsed. */
7244 inst.operands[i].present = 1;
7245 continue;
7246
7247 bad_args:
7248 inst.error = BAD_ARGS;
7249
7250 failure:
7251 if (!backtrack_pos)
7252 {
7253 /* The parse routine should already have set inst.error, but set a
7254 default here just in case. */
7255 if (!inst.error)
7256 inst.error = _("syntax error");
7257 return FAIL;
7258 }
7259
7260 /* Do not backtrack over a trailing optional argument that
7261 absorbed some text. We will only fail again, with the
7262 'garbage following instruction' error message, which is
7263 probably less helpful than the current one. */
7264 if (backtrack_index == i && backtrack_pos != str
7265 && upat[i+1] == OP_stop)
7266 {
7267 if (!inst.error)
7268 inst.error = _("syntax error");
7269 return FAIL;
7270 }
7271
7272 /* Try again, skipping the optional argument at backtrack_pos. */
7273 str = backtrack_pos;
7274 inst.error = backtrack_error;
7275 inst.operands[backtrack_index].present = 0;
7276 i = backtrack_index;
7277 backtrack_pos = 0;
7278 }
7279
7280 /* Check that we have parsed all the arguments. */
7281 if (*str != '\0' && !inst.error)
7282 inst.error = _("garbage following instruction");
7283
7284 return inst.error ? FAIL : SUCCESS;
7285 }
7286
7287 #undef po_char_or_fail
7288 #undef po_reg_or_fail
7289 #undef po_reg_or_goto
7290 #undef po_imm_or_fail
7291 #undef po_scalar_or_fail
7292 #undef po_barrier_or_imm
7293
7294 /* Shorthand macro for instruction encoding functions issuing errors. */
7295 #define constraint(expr, err) \
7296 do \
7297 { \
7298 if (expr) \
7299 { \
7300 inst.error = err; \
7301 return; \
7302 } \
7303 } \
7304 while (0)
7305
7306 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7307 instructions are unpredictable if these registers are used. This
7308 is the BadReg predicate in ARM's Thumb-2 documentation.
7309
7310 Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
7311 places, while the restriction on REG_SP was relaxed since ARMv8-A. */
7312 #define reject_bad_reg(reg) \
7313 do \
7314 if (reg == REG_PC) \
7315 { \
7316 inst.error = BAD_PC; \
7317 return; \
7318 } \
7319 else if (reg == REG_SP \
7320 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
7321 { \
7322 inst.error = BAD_SP; \
7323 return; \
7324 } \
7325 while (0)
7326
7327 /* If REG is R13 (the stack pointer), warn that its use is
7328 deprecated. */
7329 #define warn_deprecated_sp(reg) \
7330 do \
7331 if (warn_on_deprecated && reg == REG_SP) \
7332 as_tsktsk (_("use of r13 is deprecated")); \
7333 while (0)
7334
7335 /* Functions for operand encoding. ARM, then Thumb. */
7336
7337 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7338
7339 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7340
7341 The only binary encoding difference is the Coprocessor number. Coprocessor
7342 9 is used for half-precision calculations or conversions. The format of the
7343 instruction is the same as the equivalent Coprocessor 10 instruction that
7344 exists for Single-Precision operation. */
7345
static void
do_scalar_fp16_v82_encode (void)
{
  /* Conditional execution of these instructions is UNPREDICTABLE, so warn
     (rather than error) to let pre-existing sources still assemble.  */
  if (inst.cond != COND_ALWAYS)
    as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
	       " the behaviour is UNPREDICTABLE"));
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
	      _(BAD_FP16));

  /* Rewrite the coprocessor number field (bits 11-8) to 9, the
     half-precision coprocessor; all other bits are unchanged.  */
  inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
  mark_feature_used (&arm_ext_fp16);
}
7358
7359 /* If VAL can be encoded in the immediate field of an ARM instruction,
7360 return the encoded form. Otherwise, return FAIL. */
7361
7362 static unsigned int
7363 encode_arm_immediate (unsigned int val)
7364 {
7365 unsigned int a, i;
7366
7367 if (val <= 0xff)
7368 return val;
7369
7370 for (i = 2; i < 32; i += 2)
7371 if ((a = rotate_left (val, i)) <= 0xff)
7372 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7373
7374 return FAIL;
7375 }
7376
7377 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7378 return the encoded form. Otherwise, return FAIL. */
7379 static unsigned int
7380 encode_thumb32_immediate (unsigned int val)
7381 {
7382 unsigned int a, i;
7383
7384 if (val <= 0xff)
7385 return val;
7386
7387 for (i = 1; i <= 24; i++)
7388 {
7389 a = val >> i;
7390 if ((val & ~(0xff << i)) == 0)
7391 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7392 }
7393
7394 a = val & 0xff;
7395 if (val == ((a << 16) | a))
7396 return 0x100 | a;
7397 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7398 return 0x300 | a;
7399
7400 a = val & 0xff00;
7401 if (val == ((a << 16) | a))
7402 return 0x200 | (a >> 8);
7403
7404 return FAIL;
7405 }
7406 /* Encode a VFP SP or DP register number into inst.instruction. */
7407
7408 static void
7409 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
7410 {
7411 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
7412 && reg > 15)
7413 {
7414 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7415 {
7416 if (thumb_mode)
7417 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7418 fpu_vfp_ext_d32);
7419 else
7420 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7421 fpu_vfp_ext_d32);
7422 }
7423 else
7424 {
7425 first_error (_("D register out of range for selected VFP version"));
7426 return;
7427 }
7428 }
7429
7430 switch (pos)
7431 {
7432 case VFP_REG_Sd:
7433 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7434 break;
7435
7436 case VFP_REG_Sn:
7437 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7438 break;
7439
7440 case VFP_REG_Sm:
7441 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7442 break;
7443
7444 case VFP_REG_Dd:
7445 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7446 break;
7447
7448 case VFP_REG_Dn:
7449 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7450 break;
7451
7452 case VFP_REG_Dm:
7453 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7454 break;
7455
7456 default:
7457 abort ();
7458 }
7459 }
7460
7461 /* Encode a <shift> in an ARM-format instruction. The immediate,
7462 if any, is handled by md_apply_fix. */
7463 static void
7464 encode_arm_shift (int i)
7465 {
7466 /* register-shifted register. */
7467 if (inst.operands[i].immisreg)
7468 {
7469 int op_index;
7470 for (op_index = 0; op_index <= i; ++op_index)
7471 {
7472 /* Check the operand only when it's presented. In pre-UAL syntax,
7473 if the destination register is the same as the first operand, two
7474 register form of the instruction can be used. */
7475 if (inst.operands[op_index].present && inst.operands[op_index].isreg
7476 && inst.operands[op_index].reg == REG_PC)
7477 as_warn (UNPRED_REG ("r15"));
7478 }
7479
7480 if (inst.operands[i].imm == REG_PC)
7481 as_warn (UNPRED_REG ("r15"));
7482 }
7483
7484 if (inst.operands[i].shift_kind == SHIFT_RRX)
7485 inst.instruction |= SHIFT_ROR << 5;
7486 else
7487 {
7488 inst.instruction |= inst.operands[i].shift_kind << 5;
7489 if (inst.operands[i].immisreg)
7490 {
7491 inst.instruction |= SHIFT_BY_REG;
7492 inst.instruction |= inst.operands[i].imm << 8;
7493 }
7494 else
7495 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7496 }
7497 }
7498
7499 static void
7500 encode_arm_shifter_operand (int i)
7501 {
7502 if (inst.operands[i].isreg)
7503 {
7504 inst.instruction |= inst.operands[i].reg;
7505 encode_arm_shift (i);
7506 }
7507 else
7508 {
7509 inst.instruction |= INST_IMMEDIATE;
7510 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7511 inst.instruction |= inst.operands[i].imm;
7512 }
7513 }
7514
7515 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register goes in bits 19-16 (Rn).  */
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      /* T-variants (is_t set, e.g. ldrt/strt) only accept post-indexed
	 forms.  */
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      /* parse_address always flags post-indexed forms as writeback.  */
      gas_assert (inst.operands[i].writeback);
      /* For the T-variants the W bit is set here rather than from the
	 writeback flag.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the base register (bits 19-16) will be updated and it is
     the same as Rd/Rt (bits 15-12): the result is base-dependent.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7557
7558 /* inst.operands[i] was set up by parse_address. Encode it into an
7559 ARM-format mode 2 load or store instruction. If is_t is true,
7560 reject forms that cannot be used with a T instruction (i.e. not
7561 post-indexed). */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, possibly scaled: Rm in bits 3-0, shift in
	 bits 11-5.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;	/* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  /* RRX encodes as ROR with a zero shift amount; other shift
	     amounts are filled in by md_apply_fix.  */
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      /* The 12-bit offset itself is filled in later via the
	 relocation.  */
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7617
7618 /* inst.operands[i] was set up by parse_address. Encode it into an
7619 ARM-format mode 3 load or store instruction. Reject forms that
7620 cannot be used with such instructions. If is_t is true, reject
7621 forms that cannot be used with a T instruction (i.e. not
7622 post-indexed). */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifter, so a scaled register index is rejected up
     front.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: Rm in bits 3-0.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      /* Immediate offset form: the offset value itself is filled in
	 later via the BFD_RELOC_ARM_OFFSET_IMM8 fixup.  */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7661
7662 /* Write immediate bits [7:0] to the following locations:
7663
7664 |28/24|23 19|18 16|15 4|3 0|
7665 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7666
7667 This function is used by VMOV/VMVN/VORR/VBIC. */
7668
7669 static void
7670 neon_write_immbits (unsigned immbits)
7671 {
7672 inst.instruction |= immbits & 0xf;
7673 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7674 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7675 }
7676
7677 /* Invert low-order SIZE bits of XHI:XLO. */
7678
static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo_mask;

  /* Pick the mask for the low word; only SIZE == 64 touches the high
     word.  */
  switch (size)
    {
    case 8:
      lo_mask = 0xff;
      break;
    case 16:
      lo_mask = 0xffff;
      break;
    case 32:
    case 64:
      lo_mask = 0xffffffff;
      break;
    default:
      abort ();
    }

  if (size == 64 && xhi)
    *xhi = (~*xhi) & 0xffffffff;

  if (xlo)
    *xlo = (~*xlo) & lo_mask;
}
7713
7714 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
7715 A, B, C, D. */
7716
static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  /* Each byte must be either all-zeros or all-ones.  */
  for (byte = 0; byte < 4; byte++)
    {
      unsigned mask = 0xffu << (byte * 8);
      unsigned field = imm & mask;

      if (field != 0 && field != mask)
	return 0;
    }

  return 1;
}
7725
7726 /* For immediate of above form, return 0bABCD. */
7727
static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  /* Collect the low bit of each byte into result bits 3..0.  */
  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1u) << byte;

  return result;
}
7734
7735 /* Compress quarter-float representation to 0b...000 abcdefgh. */
7736
static unsigned
neon_qfloat_bits (unsigned imm)
{
  /* Low 7 bits: bits 25..19 of the single-precision pattern
     (one exponent bit plus the top six mantissa bits).  */
  unsigned low = (imm >> 19) & 0x7f;
  /* Top bit: the sign, moved from bit 31 down to bit 7.  */
  unsigned sign = (imm >> 24) & 0x80;

  return sign | low;
}
7742
7743 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7744 the instruction. *OP is passed as the initial value of the op field, and
7745 may be set to a different value depending on the constant (i.e.
7746 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7747 MVN). If the immediate looks like a repeated pattern then also
7748 try smaller element sizes. */
7749
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float immediate: cmode 0xf, 32-bit elements only,
     and never for MVN (*op == 1).  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit pattern where every byte is all-zeros or all-ones:
	 cmode 0xe with op forced to 1 (VMOV even though the caller may
	 have asked for MVN -- see the function comment above).  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise only a 64-bit value that repeats its 32-bit halves
	 can be retried at a smaller element size.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* One byte somewhere in a 32-bit element: cmodes 0x0/0x2/0x4/0x6.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* One byte with trailing ones ("shifted-ones" forms):
	 cmodes 0xc/0xd.  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* If the 32-bit value repeats its 16-bit halves, fall through and
	 retry as a 16-bit element.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* One byte in a 16-bit element: cmodes 0x8/0xa.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* If the 16-bit value repeats its bytes, fall through and retry
	 as an 8-bit element.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7852
7853 #if defined BFD_HOST_64_BIT
7854 /* Returns TRUE if double precision value V may be cast
7855 to single precision without loss of accuracy. */
7856
static bfd_boolean
is_double_a_single (bfd_int64_t v)
{
  int exp = (int)((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  /* Convertible iff the exponent is zero or all-ones (zero/denormal,
     inf/NaN) or fits in single precision's biased range, AND the low 29
     mantissa bits that truncation would discard are all zero.
     NOTE(review): exp == 0 with a non-zero upper mantissa is a double
     denormal, which double_to_single flushes to zero -- confirm callers
     intend that to count as "no loss of accuracy".  */
  return (exp == 0 || exp == 0x7FF
	  || (exp >= 1023 - 126 && exp <= 1023 + 127))
	 && (mantissa & 0x1FFFFFFFl) == 0;
}
7867
7868 /* Returns a double precision value casted to single precision
7869 (ignoring the least significant bits in exponent and mantissa). */
7870
7871 static int
7872 double_to_single (bfd_int64_t v)
7873 {
7874 int sign = (int) ((v >> 63) & 1l);
7875 int exp = (int) ((v >> 52) & 0x7FF);
7876 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7877
7878 if (exp == 0x7FF)
7879 exp = 0xFF;
7880 else
7881 {
7882 exp = exp - 1023 + 127;
7883 if (exp >= 0xFF)
7884 {
7885 /* Infinity. */
7886 exp = 0x7F;
7887 mantissa = 0;
7888 }
7889 else if (exp < 0)
7890 {
7891 /* No denormalized numbers. */
7892 exp = 0;
7893 mantissa = 0;
7894 }
7895 }
7896 mantissa >>= 29;
7897 return (sign << 31) | (exp << 23) | mantissa;
7898 }
7899 #endif /* BFD_HOST_64_BIT */
7900
7901 enum lit_type
7902 {
7903 CONST_THUMB,
7904 CONST_ARM,
7905 CONST_VEC
7906 };
7907
7908 static void do_vfp_nsyn_opcode (const char *);
7909
7910 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7911 Determine whether it can be performed with a move instruction; if
7912 it can, convert inst.instruction to that move instruction and
7913 return TRUE; if it can't, convert inst.instruction to a literal-pool
7914 load and return FALSE. If this is not a valid thing to do in the
7915 current context, set inst.error and return TRUE.
7916
7917 inst.operands[i] describes the destination register. */
7918
7919 static bfd_boolean
7920 move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
7921 {
7922 unsigned long tbit;
7923 bfd_boolean thumb_p = (t == CONST_THUMB);
7924 bfd_boolean arm_p = (t == CONST_ARM);
7925
7926 if (thumb_p)
7927 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
7928 else
7929 tbit = LOAD_BIT;
7930
7931 if ((inst.instruction & tbit) == 0)
7932 {
7933 inst.error = _("invalid pseudo operation");
7934 return TRUE;
7935 }
7936
7937 if (inst.reloc.exp.X_op != O_constant
7938 && inst.reloc.exp.X_op != O_symbol
7939 && inst.reloc.exp.X_op != O_big)
7940 {
7941 inst.error = _("constant expression expected");
7942 return TRUE;
7943 }
7944
7945 if (inst.reloc.exp.X_op == O_constant
7946 || inst.reloc.exp.X_op == O_big)
7947 {
7948 #if defined BFD_HOST_64_BIT
7949 bfd_int64_t v;
7950 #else
7951 offsetT v;
7952 #endif
7953 if (inst.reloc.exp.X_op == O_big)
7954 {
7955 LITTLENUM_TYPE w[X_PRECISION];
7956 LITTLENUM_TYPE * l;
7957
7958 if (inst.reloc.exp.X_add_number == -1)
7959 {
7960 gen_to_words (w, X_PRECISION, E_PRECISION);
7961 l = w;
7962 /* FIXME: Should we check words w[2..5] ? */
7963 }
7964 else
7965 l = generic_bignum;
7966
7967 #if defined BFD_HOST_64_BIT
7968 v =
7969 ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
7970 << LITTLENUM_NUMBER_OF_BITS)
7971 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
7972 << LITTLENUM_NUMBER_OF_BITS)
7973 | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
7974 << LITTLENUM_NUMBER_OF_BITS)
7975 | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
7976 #else
7977 v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
7978 | (l[0] & LITTLENUM_MASK);
7979 #endif
7980 }
7981 else
7982 v = inst.reloc.exp.X_add_number;
7983
7984 if (!inst.operands[i].issingle)
7985 {
7986 if (thumb_p)
7987 {
7988 /* LDR should not use lead in a flag-setting instruction being
7989 chosen so we do not check whether movs can be used. */
7990
7991 if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
7992 || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
7993 && inst.operands[i].reg != 13
7994 && inst.operands[i].reg != 15)
7995 {
7996 /* Check if on thumb2 it can be done with a mov.w, mvn or
7997 movw instruction. */
7998 unsigned int newimm;
7999 bfd_boolean isNegated;
8000
8001 newimm = encode_thumb32_immediate (v);
8002 if (newimm != (unsigned int) FAIL)
8003 isNegated = FALSE;
8004 else
8005 {
8006 newimm = encode_thumb32_immediate (~v);
8007 if (newimm != (unsigned int) FAIL)
8008 isNegated = TRUE;
8009 }
8010
8011 /* The number can be loaded with a mov.w or mvn
8012 instruction. */
8013 if (newimm != (unsigned int) FAIL
8014 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
8015 {
8016 inst.instruction = (0xf04f0000 /* MOV.W. */
8017 | (inst.operands[i].reg << 8));
8018 /* Change to MOVN. */
8019 inst.instruction |= (isNegated ? 0x200000 : 0);
8020 inst.instruction |= (newimm & 0x800) << 15;
8021 inst.instruction |= (newimm & 0x700) << 4;
8022 inst.instruction |= (newimm & 0x0ff);
8023 return TRUE;
8024 }
8025 /* The number can be loaded with a movw instruction. */
8026 else if ((v & ~0xFFFF) == 0
8027 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
8028 {
8029 int imm = v & 0xFFFF;
8030
8031 inst.instruction = 0xf2400000; /* MOVW. */
8032 inst.instruction |= (inst.operands[i].reg << 8);
8033 inst.instruction |= (imm & 0xf000) << 4;
8034 inst.instruction |= (imm & 0x0800) << 15;
8035 inst.instruction |= (imm & 0x0700) << 4;
8036 inst.instruction |= (imm & 0x00ff);
8037 return TRUE;
8038 }
8039 }
8040 }
8041 else if (arm_p)
8042 {
8043 int value = encode_arm_immediate (v);
8044
8045 if (value != FAIL)
8046 {
8047 /* This can be done with a mov instruction. */
8048 inst.instruction &= LITERAL_MASK;
8049 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
8050 inst.instruction |= value & 0xfff;
8051 return TRUE;
8052 }
8053
8054 value = encode_arm_immediate (~ v);
8055 if (value != FAIL)
8056 {
8057 /* This can be done with a mvn instruction. */
8058 inst.instruction &= LITERAL_MASK;
8059 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
8060 inst.instruction |= value & 0xfff;
8061 return TRUE;
8062 }
8063 }
8064 else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
8065 {
8066 int op = 0;
8067 unsigned immbits = 0;
8068 unsigned immlo = inst.operands[1].imm;
8069 unsigned immhi = inst.operands[1].regisimm
8070 ? inst.operands[1].reg
8071 : inst.reloc.exp.X_unsigned
8072 ? 0
8073 : ((bfd_int64_t)((int) immlo)) >> 32;
8074 int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
8075 &op, 64, NT_invtype);
8076
8077 if (cmode == FAIL)
8078 {
8079 neon_invert_size (&immlo, &immhi, 64);
8080 op = !op;
8081 cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
8082 &op, 64, NT_invtype);
8083 }
8084
8085 if (cmode != FAIL)
8086 {
8087 inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
8088 | (1 << 23)
8089 | (cmode << 8)
8090 | (op << 5)
8091 | (1 << 4);
8092
8093 /* Fill other bits in vmov encoding for both thumb and arm. */
8094 if (thumb_mode)
8095 inst.instruction |= (0x7U << 29) | (0xF << 24);
8096 else
8097 inst.instruction |= (0xFU << 28) | (0x1 << 25);
8098 neon_write_immbits (immbits);
8099 return TRUE;
8100 }
8101 }
8102 }
8103
8104 if (t == CONST_VEC)
8105 {
8106 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8107 if (inst.operands[i].issingle
8108 && is_quarter_float (inst.operands[1].imm)
8109 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
8110 {
8111 inst.operands[1].imm =
8112 neon_qfloat_bits (v);
8113 do_vfp_nsyn_opcode ("fconsts");
8114 return TRUE;
8115 }
8116
8117 /* If our host does not support a 64-bit type then we cannot perform
8118 the following optimization. This mean that there will be a
8119 discrepancy between the output produced by an assembler built for
8120 a 32-bit-only host and the output produced from a 64-bit host, but
8121 this cannot be helped. */
8122 #if defined BFD_HOST_64_BIT
8123 else if (!inst.operands[1].issingle
8124 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
8125 {
8126 if (is_double_a_single (v)
8127 && is_quarter_float (double_to_single (v)))
8128 {
8129 inst.operands[1].imm =
8130 neon_qfloat_bits (double_to_single (v));
8131 do_vfp_nsyn_opcode ("fconstd");
8132 return TRUE;
8133 }
8134 }
8135 #endif
8136 }
8137 }
8138
8139 if (add_to_lit_pool ((!inst.operands[i].isvec
8140 || inst.operands[i].issingle) ? 4 : 8) == FAIL)
8141 return TRUE;
8142
8143 inst.operands[1].reg = REG_PC;
8144 inst.operands[1].isreg = 1;
8145 inst.operands[1].preind = 1;
8146 inst.reloc.pc_rel = 1;
8147 inst.reloc.type = (thumb_p
8148 ? BFD_RELOC_ARM_THUMB_OFFSET
8149 : (mode_3
8150 ? BFD_RELOC_ARM_HWLITERAL
8151 : BFD_RELOC_ARM_LITERAL));
8152 return FALSE;
8153 }
8154
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  Returns SUCCESS or FAIL; on
   failure inst.error carries the diagnostic.  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      /* An '=constant' pseudo operand: try to turn it into a VMOV or a
	 literal-pool load.  */
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  /* Base register Rn lives in bits 19:16.  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* In the unindexed form the 8-bit field holds an option value,
	 not an offset.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	    && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      /* Group relocations keep the type chosen by the parser; anything
	 else gets the generic coprocessor offset relocation.  */
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8231
8232 /* Functions for instruction encoding, sorted by sub-architecture.
8233 First some generics; their names are taken from the conventional
8234 bit positions for register arguments in ARM format instructions. */
8235
/* Encoder for instructions that take no operands; the opcode value
   from the opcode table is already complete.  */
static void
do_noargs (void)
{
}
8240
8241 static void
8242 do_rd (void)
8243 {
8244 inst.instruction |= inst.operands[0].reg << 12;
8245 }
8246
8247 static void
8248 do_rn (void)
8249 {
8250 inst.instruction |= inst.operands[0].reg << 16;
8251 }
8252
8253 static void
8254 do_rd_rm (void)
8255 {
8256 inst.instruction |= inst.operands[0].reg << 12;
8257 inst.instruction |= inst.operands[1].reg;
8258 }
8259
8260 static void
8261 do_rm_rn (void)
8262 {
8263 inst.instruction |= inst.operands[0].reg;
8264 inst.instruction |= inst.operands[1].reg << 16;
8265 }
8266
8267 static void
8268 do_rd_rn (void)
8269 {
8270 inst.instruction |= inst.operands[0].reg << 12;
8271 inst.instruction |= inst.operands[1].reg << 16;
8272 }
8273
8274 static void
8275 do_rn_rd (void)
8276 {
8277 inst.instruction |= inst.operands[0].reg << 16;
8278 inst.instruction |= inst.operands[1].reg << 12;
8279 }
8280
8281 static void
8282 do_tt (void)
8283 {
8284 inst.instruction |= inst.operands[0].reg << 8;
8285 inst.instruction |= inst.operands[1].reg << 16;
8286 }
8287
8288 static bfd_boolean
8289 check_obsolete (const arm_feature_set *feature, const char *msg)
8290 {
8291 if (ARM_CPU_IS_ANY (cpu_variant))
8292 {
8293 as_tsktsk ("%s", msg);
8294 return TRUE;
8295 }
8296 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8297 {
8298 as_bad ("%s", msg);
8299 return TRUE;
8300 }
8301
8302 return FALSE;
8303 }
8304
/* Encode Rd (15:12), Rm (3:0) and Rn (19:16), with the extra operand
   and architecture checks required for SWP/SWPB.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8328
8329 static void
8330 do_rd_rn_rm (void)
8331 {
8332 inst.instruction |= inst.operands[0].reg << 12;
8333 inst.instruction |= inst.operands[1].reg << 16;
8334 inst.instruction |= inst.operands[2].reg;
8335 }
8336
/* Encode Rm (3:0), Rd (15:12) and Rn (19:16).  Rn must not be PC, and
   any offset expression parsed with the address must be zero.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8349
/* Encode a single immediate operand into the low bits of the opcode.  */
static void
do_imm0 (void)
{
  inst.instruction |= inst.operands[0].imm;
}
8355
/* Encode Rd (15:12) followed by a coprocessor address operand
   (writeback and unindexed forms both allowed).  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8362
8363 /* ARM instructions, in alphabetical order by function name (except
8364 that wrapper functions appear immediately after the function they
8365 wrap). */
8366
/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */

static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  inst.reloc.exp.X_add_number -= 8;

  /* A defined Thumb function target needs bit 0 set so the computed
     address is interworking-correct.  */
  if (inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_number += 1;
}
8387
/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
   add rd, pc, #low(label-.-8)"
   add rd, rd, #high(label-.-8)"  */

static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* ADRL always expands to a two-instruction sequence.  */
  inst.size = INSN_SIZE * 2;
  inst.reloc.exp.X_add_number -= 8;

  /* A defined Thumb function target needs bit 0 set so the computed
     address is interworking-correct.  */
  if (inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_number += 1;
}
8411
/* Data-processing (arithmetic): Rd in 15:12, Rn in 19:16, shifter
   operand from operand 2.  In the two-operand form, Rn defaults to
   Rd.  Thumb1-only relocations are rejected here.  */
static void
do_arit (void)
{
  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
8424
8425 static void
8426 do_barrier (void)
8427 {
8428 if (inst.operands[0].present)
8429 inst.instruction |= inst.operands[0].imm;
8430 else
8431 inst.instruction |= 0xf;
8432 }
8433
/* BFC Rd, #lsb, #width.  */
static void
do_bfc (void)
{
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
8445
/* BFI Rd, Rm, #lsb, #width.  */
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
8465
/* SBFX/UBFX Rd, Rm, #lsb, #width.  The encoding stores lsb (bits 11:7)
   and width-1 (bits 20:16).  */
static void
do_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
}
8476
8477 /* ARM V5 breakpoint instruction (argument parse)
8478 BKPT <16 bit unsigned immediate>
8479 Instruction is not conditional.
8480 The bit pattern given in insns[] has the COND_ALWAYS condition,
8481 and it is an error if the caller tried to override that. */
8482
8483 static void
8484 do_bkpt (void)
8485 {
8486 /* Top 12 of 16 bits to bits 19:8. */
8487 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8488
8489 /* Bottom 4 of 16 bits to bits 3:0. */
8490 inst.instruction |= inst.operands[0].imm & 0xf;
8491 }
8492
/* Common helper for branch encodings: choose the relocation.  A
   '(plt)' or '(tlscall)' suffix parsed into operand 0 overrides
   DEFAULT_RELOC.  All branch relocations are PC-relative.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
8509
/* B{cond} <target>.  EABI v4+ objects use the JUMP relocation so the
   linker can handle interworking; older objects use the plain
   PC-relative branch relocation.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8520
/* BL{cond} <target>.  For EABI v4+, an unconditional BL gets the CALL
   relocation (the linker may convert it to BLX); a conditional BL
   gets the JUMP relocation instead.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8536
/* ARM V5 branch-link-exchange instruction (argument parse)
     BLX <target_addr>		ie BLX(1)
     BLX{<condition>} <Rm>	ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */

static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* Immediate form: overwrite the table opcode with BLX(1).  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8568
/* BX <Rm>.  */
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
      want_reloc = TRUE;

  /* Pre-v4 EABI objects never get the marker relocation.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8592
8593
8594 /* ARM v5TEJ. Jump to Jazelle code. */
8595
8596 static void
8597 do_bxj (void)
8598 {
8599 if (inst.operands[0].reg == REG_PC)
8600 as_tsktsk (_("use of r15 in bxj is not really useful"));
8601
8602 inst.instruction |= inst.operands[0].reg;
8603 }
8604
/* Co-processor data operation:
      CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
      CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}	*/
static void
do_cdp (void)
{
  /* Field layout: coproc 11:8, opc1 23:20, CRd 15:12, CRn 19:16,
     CRm 3:0, opc2 7:5.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 20;
  inst.instruction |= inst.operands[2].reg << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8618
8619 static void
8620 do_cmp (void)
8621 {
8622 inst.instruction |= inst.operands[0].reg << 16;
8623 encode_arm_shifter_operand (1);
8624 }
8625
/* Transfer between coprocessor and ARM registers.
   MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
   MRC2
   MCR{cond}
   MCR2

   No special properties.  */

/* One entry per coprocessor register whose access is deprecated or
   obsoleted.  Operands of MRC/MCR are matched against these fields in
   do_co_reg below.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;
  int opc1;
  unsigned crn;
  unsigned crm;
  int opc2;
  arm_feature_set deprecated;
  arm_feature_set obsoleted;
  const char *dep_msg;
  const char *obs_msg;
};

#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8674
/* Encode MRC/MCR (and the *2 forms): coproc 11:8, opc1 23:21,
   Rt 15:12, CRn 19:16, CRm 3:0, opc2 7:5.  Also warns on accesses to
   deprecated coprocessor registers (see table above).  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn if the register named by the operands is deprecated on the
     selected architecture.  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8724
/* Transfer between coprocessor register and pair of ARM registers.
   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
   MCRR2
   MRRC{cond}
   MRRC2

   Two XScale instructions are special cases of these:

     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0

   Result unpredictable if Rd or Rn is R15.  */

static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
       /* If Rd == Rn, error that the operation is
	  unpredictable (example MRRC p3,#1,r1,r1,c4).  */
       constraint (Rd == Rn, BAD_OVERLAP);
    }

  /* Field layout: coproc 11:8, opc 7:4, Rt 15:12, Rt2 19:16, CRm 3:0.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
8771
8772 static void
8773 do_cpsi (void)
8774 {
8775 inst.instruction |= inst.operands[0].imm << 6;
8776 if (inst.operands[1].present)
8777 {
8778 inst.instruction |= CPSI_MMOD;
8779 inst.instruction |= inst.operands[1].imm;
8780 }
8781 }
8782
/* DBG #<option>: hint instruction, 4-bit option in bits 3:0.  */
static void
do_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
8788
8789 static void
8790 do_div (void)
8791 {
8792 unsigned Rd, Rn, Rm;
8793
8794 Rd = inst.operands[0].reg;
8795 Rn = (inst.operands[1].present
8796 ? inst.operands[1].reg : Rd);
8797 Rm = inst.operands[2].reg;
8798
8799 constraint ((Rd == REG_PC), BAD_PC);
8800 constraint ((Rn == REG_PC), BAD_PC);
8801 constraint ((Rm == REG_PC), BAD_PC);
8802
8803 inst.instruction |= Rd << 16;
8804 inst.instruction |= Rn << 0;
8805 inst.instruction |= Rm << 8;
8806 }
8807
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emits nothing: the IT block state is only tracked for checking.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8824
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i;

  /* An empty list cannot name a single register.  Bail out before the
     general test: ffs (0) is 0, which would make the shift below
     1 << -1 — undefined behaviour.  */
  if (range == 0)
    return -1;

  /* ffs gives the 1-based position of the lowest set bit; RANGE names
     exactly one register iff it equals that single bit and the bit is
     within r0-r15.  */
  i = ffs (range) - 1;
  return (i > 15 || range != (1 << i)) ? -1 : i;
}
8833
/* Common encoder for LDM/STM and PUSH/POP: base register in 19:16,
   register list bitmask in 15:0.  Warns about UNPREDICTABLE writeback
   combinations, and switches to the single-register A2 encoding when
   a PUSH/POP mnemonic names exactly one register.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A '^' suffix on the register list selects the user-bank (type 2/3)
     form.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      /* Keep only the condition; rebuild as STR/LDR with Rt in 15:12.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8884
/* Plain LDM/STM mnemonics: defer to the common encoder without the
   PUSH/POP single-register optimization.  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
8890
/* ARMv5TE load-consecutive (argument parse)
   Mode is like LDRH.

     LDRccD R, mode
     STRccD R, mode.  */

static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second register defaults to Rt+1 when omitted.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8932
/* LDREX Rt, [Rn]: Rt in 15:12, Rn in 19:16.  Only a bare register
   address with zero offset is legal.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* No fixup is needed; the zero offset was verified above.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8964
/* LDREXD Rt, {Rt2,} [Rn]: Rt must be even and Rt2, when given, must
   be Rt+1.  Rt in 15:12, base register (operand 2) in 19:16.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8980
/* In both ARM and thumb state 'ldr pc, #imm'  with an immediate
   which is not a multiple of four is UNPREDICTABLE.  */
static void
check_ldr_r15_aligned (void)
{
  constraint (!(inst.operands[1].immisreg)
	      && (inst.operands[0].reg == REG_PC
		  && inst.operands[1].reg == REG_PC
		  && (inst.reloc.exp.X_add_number & 0x3)),
	      _("ldr to register 15 must be 4-byte aligned"));
}
8992
/* LDR/STR word and byte forms: Rt in 15:12.  An '=imm' pseudo operand
   may be rewritten as MOV/MVN/MOVW or a literal-pool load by
   move_or_literal_pool.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
9003
/* LDRT/STRT (user-mode, translated access): always post-indexed.  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
9022
/* Halfword and signed-byte load/store operations.  */

/* Rt in 15:12; PC destination is rejected.  An '=imm' pseudo operand
   may be converted to a mov or a literal-pool load (mode 3 form).  */
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
9035
/* LDRHT/LDRSBT/etc. (user-mode halfword/signed-byte forms): always
   post-indexed, mode-3 addressing.  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9054
9055 /* Co-processor register load/store.
9056 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9057 static void
9058 do_lstc (void)
9059 {
9060 inst.instruction |= inst.operands[0].reg << 8;
9061 inst.instruction |= inst.operands[1].reg << 12;
9062 encode_arm_cp_address (2, TRUE, TRUE, 0);
9063 }
9064
/* MLA/MLS: Rd in 19:16, Rm in 3:0, Rs in 11:8, Rn (accumulator) in
   15:12.  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9079
/* MOV Rd, <shifter operand>: Rd in 15:12.  Thumb1-only relocations
   are rejected.  */
static void
do_mov (void)
{
  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9089
/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.  */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 22 distinguishes MOVT from MOVW.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  /* If a :lower16:/:upper16: relocation is pending, the immediate is
     filled in at fixup time instead.  */
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      imm = inst.reloc.exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
9111
/* Handle the VFP forms of MRS.  Returns SUCCESS when the instruction was
   re-dispatched as a VFP opcode, FAIL to fall through to core MRS
   handling.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      /* Destination written as APSR_nzcv (isvec set): this is FMSTAT and
	 the source must be FPSCR (register number 1).  */
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* FMSTAT takes no operands; clear them before re-dispatching.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9130
9131 static int
9132 do_vfp_nsyn_msr (void)
9133 {
9134 if (inst.operands[0].isvec)
9135 do_vfp_nsyn_opcode ("fmxr");
9136 else
9137 return FAIL;
9138
9139 return SUCCESS;
9140 }
9141
/* VMRS Rt, <spec_reg>: system register number -> bits 16-19,
   Rt -> bits 12-15.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  /* SP is not a legal destination in Thumb state.  */
  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* MVFR2 is only valid at ARMv8-A.  */
  if (inst.operands[1].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* APSR_ sets isvec.  All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9170
/* VMSR <spec_reg>, Rt: system register number -> bits 16-19,
   Rt -> bits 12-15.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  /* Thumb bans SP/PC as the source; ARM only bans PC.  */
  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* MVFR2 is only valid for ARMv8-A.  */
  if (inst.operands[0].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9194
/* MRS Rd, {C|S|A}PSR or MRS Rd, <banked reg>.  */
static void
do_mrs (void)
{
  unsigned br;

  /* Let the VFP FMSTAT/FMRX forms take precedence.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* Banked-register form: the parsed value already carries the
	 encoding bits and is OR'd in verbatim.  */
      br = inst.operands[1].reg;
      /* NOTE(review): the masks differ in width (0xf0000 vs 0xf000), so
	 (br & 0xf0000) can never equal 0xf000 and the second clause is
	 always true; possibly 0xf000 was intended.  Verify against the
	 banked-register encodings before changing.  */
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9223
/* Two possible forms:
   "{C|S}PSR_<field>, Rm",
   "{C|S}PSR_f, #expression".  */

static void
do_msr (void)
{
  /* Let the VFP FMXR form take precedence.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* Operand 0 holds the pre-encoded PSR-and-field mask.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: the rotated-immediate encoding is resolved by a
	 fixup.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
9244
/* MUL{S} Rd, Rm {, Rs}.  Rs defaults to Rd when omitted.  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  /* With only two operands, reuse Rd as the multiplier Rs.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Before v6, Rd and Rm were required to differ.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
9260
/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.  */

static void
do_mull (void)
{
  /* RdLo -> bits 12-15, RdHi -> bits 16-19, Rm -> bits 0-3,
     Rs -> bits 8-11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
9285
/* NOP{cond} {#imm}: on v6K and later (or when a hint immediate is given)
   encode the architectural hint form; otherwise leave the legacy
   encoding untouched.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* Keep only the condition field.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
9299
/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
   Condition defaults to COND_ALWAYS.
   Error if Rd, Rn or Rm are R15.  */

static void
do_pkhbt (void)
{
  /* Rd -> bits 12-15, Rn -> bits 16-19, Rm -> bits 0-3.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  /* The LSL shift is optional.  */
  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9314
/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  */
      inst.instruction &= 0xfff00010;	/* Clear opcode/register fields.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;	/* Rn and Rm swap roles.  */
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      /* Normal PKHTB encoding with an explicit shift.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9337
/* ARMv5TE: Preload-Cache
   MP Extensions: Preload for write

   PLD(W) <addr_mode>

   Syntactically, like LDR with B=1, W=0, L=1.  */

static void
do_pld (void)
{
  /* Only a plain pre-indexed address without writeback is encodable.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9358
/* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* Same addressing restrictions as PLD (see do_pld above).  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI's encoding keeps the P bit clear.  */
  inst.instruction &= ~PRE_INDEX;
}
9374
/* PUSH/POP {reglist}: rewritten as LDM/STM with SP! as the base
   register, then encoded by the common LDM/STM path.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list to operand 1 and synthesise SP with
     writeback as operand 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9387
9388 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9389 word at the specified address and the following word
9390 respectively.
9391 Unconditionally executed.
9392 Error if Rn is R15. */
9393
9394 static void
9395 do_rfe (void)
9396 {
9397 inst.instruction |= inst.operands[0].reg << 16;
9398 if (inst.operands[0].writeback)
9399 inst.instruction |= WRITE_BACK;
9400 }
9401
/* ARM V6 ssat (argument parse).  */

static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* The saturate position is encoded biased by one (field holds
     imm - 1).  */
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9414
/* ARM V6 usat (argument parse).  */

static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Unlike SSAT, the saturate position is encoded unbiased.  */
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9427
/* ARM V6 ssat16 (argument parse).  */

static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* As with SSAT, the position field is biased by one.  */
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}

/* ARM V6 usat16 (argument parse); position encoded unbiased.  */
static void
do_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;
}
9445
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.

   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE.  */

static void
do_setend (void)
{
  /* SETEND is deprecated from ARMv8 on; warn when requested.  */
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* Nonzero imm selects BE; set bit 9 (the E bit) in that case.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9462
/* Shift instructions: <shift> Rd, {Rm,} #imm or <shift> Rd, {Rm,} Rs.
   Rm defaults to Rd when omitted.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate shift amount: resolved by a fixup.  */
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}
9483
/* SMC{cond} #imm: the immediate is encoded via a fixup.  */
static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}

/* HVC #imm (hypervisor call): immediate encoded via a fixup.  */
static void
do_hvc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_HVC;
  inst.reloc.pc_rel = 0;
}

/* SVC/SWI{cond} #imm: immediate encoded via a fixup.  */
static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
9504
/* SETPAN #imm (ARM encoding): the PAN bit value goes in bit 9.
   Requires the PAN extension.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}

/* SETPAN #imm (Thumb encoding): the PAN bit value goes in bit 3.  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9522
/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
   SMLAxy{cond} Rd,Rm,Rs,Rn
   SMLAWy{cond} Rd,Rm,Rs,Rn
   Error if any register is R15.  */

static void
do_smla (void)
{
  /* Rd -> bits 16-19, Rm -> bits 0-3, Rs -> bits 8-11, Rn -> bits 12-15.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}

/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   Error if any register is R15.
   Warning if Rdlo == Rdhi.  */

static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}

/* ARM V5E (El Segundo) signed-multiply (argument parse)
   SMULxy{cond} Rd,Rm,Rs
   Error if any register is R15.  */

static void
do_smul (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
}
9565
/* ARM V6 srs (argument parse).  The variable fields in the encoding are
   the same for both ARM and Thumb-2.  */

static void
do_srs (void)
{
  int reg;

  /* The base register is optional; when given it must be SP.  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;	/* The #mode immediate.  */
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
9587
/* ARM V6 strex (argument parse).
   STREX Rd, Rt, [Rn]: only a plain register address with zero offset is
   encodable, and Rd must not overlap Rt or Rn.  */

static void
do_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The zero offset needs no fixup.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9613
/* Thumb STREXB/STREXH: same addressing and overlap restrictions as
   STREX, then encoded via the common Rm/Rd/Rn helper.  */
static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9628
/* STREXD Rd, Rt, {Rt2,} [Rn]: Rt must be even, Rt2 (optional in the
   source) must be Rt + 1, and Rd must not overlap the pair or Rn.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
9650
/* ARM V8 STRL.  */
/* ARM encoding of STLEX-family: status register must not overlap the
   stored value or the base.  */
static void
do_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}

/* Thumb encoding of the same; only the register-field order differs.  */
static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9669
/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
   extends it to 32-bits, and adds the result to a value in another
   register.  You can specify a rotation by 0, 8, 16, or 24 bits
   before extracting the 16-bit value.
   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxtah (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  /* The rotation amount goes in bits 10-11.  */
  inst.instruction |= inst.operands[3].imm << 10;
}

/* ARM V6 SXTH.

   SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxth (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  /* The rotation amount goes in bits 10-11.  */
  inst.instruction |= inst.operands[2].imm << 10;
}
9700 \f
/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.  */

/* Single-precision unary op: Sd, Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Single-precision binary op: Sd, Sn, Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Single-precision compare against zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}

/* Single-to-double conversion: Dd, Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Double-to-single conversion: Sd, Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

/* Core register (bits 12-15) from a single-precision register.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

/* Two core registers from a pair of consecutive singles.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Single-precision register from a core register.  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

/* Pair of consecutive singles from two core registers.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Single-precision load/store: Sd plus a coprocessor address.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

/* Double-precision load/store: Dd plus a coprocessor address.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9786
9787
/* Common encoding for the single-precision multiple load/store family;
   LDSTM_TYPE selects the addressing mode.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Without writeback, only the IA form is valid.  */
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;	/* Transfer count.  */
}
9800
/* Common encoding for the double-precision multiple load/store family;
   LDSTM_TYPE selects the addressing mode.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Without writeback, only the IA forms are valid.  */
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* Each D register transfers two words.  */
  count = inst.operands[1].imm << 1;
  /* The X forms transfer one extra word.  */
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
9821
/* Thin wrappers selecting the addressing mode for the multiple
   load/store helpers above.  */

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

/* The X variants transfer an extra word (see vfp_dp_ldstm).  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9857
/* Double-precision register-field encoders; the suffix spells out which
   operand lands in which field (d = Dd, n = Dn, m = Dm).  */

static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
9900
/* VFPv3 instructions.  */
/* Floating-point constant load: the 8-bit encoded constant is split
   4:4 into bits 16-19 and bits 0-3.  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

/* Double-precision variant of the same.  */
static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9917
/* Encode the fraction-bits field for the VFPv3 fixed-point conversions.
   SRCSIZE is the width of the fixed-point operand (16 or 32); the field
   stores SRCSIZE minus the requested fraction bits, split into bit 5
   (low bit) and bits 0-3 (high bits).  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
9941
/* Fixed-point conversion wrappers: encode the destination register, then
   the fraction-bits field for the given source width.  */

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
9969 \f
/* FPA instructions.  Also in a logical order.  */

/* FPA compare: operand 0 -> bits 16-19, operand 1 -> bits 0-3.  */
static void
do_fpa_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
9978
/* FPA LFM/SFM: transfer 1-4 registers.  The register count is encoded in
   the CP_T_X/CP_T_Y bits (count 4 encodes as 0).  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();	/* The parser limits the count to 1-4.  */
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Synthesise the offset: each register is 12 bytes.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Descending-stack emulation uses post-indexing.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
10017 \f
/* iWMMXt instructions: strictly in alphabetical order.  */

/* TANDC/TORC: the destination must be written as r15.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}

/* TEXTRC: Rd -> bits 12-15, lane immediate -> bits 0-?.  */
static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}

/* TEXTRM: Rd -> bits 12-15, wRn -> bits 16-19, lane immediate in the
   low bits.  */
static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

/* TINSR: wRd -> bits 16-19, Rn -> bits 12-15, lane immediate in the
   low bits.  */
static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}

/* TMIA: accumulator -> bits 5-?, Rm -> bits 0-3, Rs -> bits 12-15.  */
static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
10056
/* WALIGNI: three wireless-MMX registers plus a 3-bit alignment
   immediate in bits 20-22.  */
static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}

/* WMERGE: like WALIGNI but the immediate starts at bit 21.  */
static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}

static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
10083
/* WLDRB/WLDRH/WSTRB/WSTRH: byte/halfword transfers use a scaled
   (shifted-by-2) coprocessor offset relocation.  */
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  /* Pick the ARM or Thumb-2 flavour of the scaled offset reloc.  */
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}

/* WLDRW/WSTRW: word transfers, both wRd and control-register forms.  */
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register form is unconditional (0xf condition).  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
10109
/* WLDRD/WSTRD: doubleword transfer.  iWMMXt2 additionally supports a
   register-offset addressing form, encoded by hand here.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Rewrite into the unconditional register-offset encoding.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10132
/* WSHUFH: the 8-bit shuffle immediate is split 4:4 into bits 20-23 and
   bits 0-3.  */
static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}

static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}
10150
10151 static void
10152 do_iwmmxt_wrwrwr_or_imm5 (void)
10153 {
10154 if (inst.operands[2].isreg)
10155 do_rd_rn_rm ();
10156 else {
10157 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
10158 _("immediate operand requires iWMMXt2"));
10159 do_rd_rn ();
10160 if (inst.operands[2].imm == 0)
10161 {
10162 switch ((inst.instruction >> 20) & 0xf)
10163 {
10164 case 4:
10165 case 5:
10166 case 6:
10167 case 7:
10168 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10169 inst.operands[2].imm = 16;
10170 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
10171 break;
10172 case 8:
10173 case 9:
10174 case 10:
10175 case 11:
10176 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10177 inst.operands[2].imm = 32;
10178 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
10179 break;
10180 case 12:
10181 case 13:
10182 case 14:
10183 case 15:
10184 {
10185 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10186 unsigned long wrn;
10187 wrn = (inst.instruction >> 16) & 0xf;
10188 inst.instruction &= 0xff0fff0f;
10189 inst.instruction |= wrn;
10190 /* Bail out here; the instruction is now assembled. */
10191 return;
10192 }
10193 }
10194 }
10195 /* Map 32 -> 0, etc. */
10196 inst.operands[2].imm &= 0x1f;
10197 inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
10198 }
10199 }
10200 \f
10201 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10202 operations first, then control, shift, and load/store. */
10203
10204 /* Insns like "foo X,Y,Z". */
10205
10206 static void
10207 do_mav_triple (void)
10208 {
10209 inst.instruction |= inst.operands[0].reg << 16;
10210 inst.instruction |= inst.operands[1].reg;
10211 inst.instruction |= inst.operands[2].reg << 12;
10212 }
10213
10214 /* Insns like "foo W,X,Y,Z".
10215 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10216
10217 static void
10218 do_mav_quad (void)
10219 {
10220 inst.instruction |= inst.operands[0].reg << 5;
10221 inst.instruction |= inst.operands[1].reg << 12;
10222 inst.instruction |= inst.operands[2].reg << 16;
10223 inst.instruction |= inst.operands[3].reg;
10224 }
10225
/* cfmvsc32<cond> DSPSC,MVDX[15:0].
   Only the source register (operand 1) is encoded, in bits 12-15;
   the DSPSC destination is implicit in the opcode.  */
static void
do_mav_dspsc (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
}
10232
10233 /* Maverick shift immediate instructions.
10234 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10235 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10236
10237 static void
10238 do_mav_shift (void)
10239 {
10240 int imm = inst.operands[2].imm;
10241
10242 inst.instruction |= inst.operands[0].reg << 12;
10243 inst.instruction |= inst.operands[1].reg << 16;
10244
10245 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10246 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10247 Bit 4 should be 0. */
10248 imm = (imm & 0xf) | ((imm & 0x70) << 1);
10249
10250 inst.instruction |= imm;
10251 }
10252 \f
10253 /* XScale instructions. Also sorted arithmetic before move. */
10254
10255 /* Xscale multiply-accumulate (argument parse)
10256 MIAcc acc0,Rm,Rs
10257 MIAPHcc acc0,Rm,Rs
10258 MIAxycc acc0,Rm,Rs. */
10259
10260 static void
10261 do_xsc_mia (void)
10262 {
10263 inst.instruction |= inst.operands[1].reg;
10264 inst.instruction |= inst.operands[2].reg << 12;
10265 }
10266
10267 /* Xscale move-accumulator-register (argument parse)
10268
10269 MARcc acc0,RdLo,RdHi. */
10270
10271 static void
10272 do_xsc_mar (void)
10273 {
10274 inst.instruction |= inst.operands[1].reg << 12;
10275 inst.instruction |= inst.operands[2].reg << 16;
10276 }
10277
10278 /* Xscale move-register-accumulator (argument parse)
10279
10280 MRAcc RdLo,RdHi,acc0. */
10281
10282 static void
10283 do_xsc_mra (void)
10284 {
10285 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10286 inst.instruction |= inst.operands[0].reg << 12;
10287 inst.instruction |= inst.operands[1].reg << 16;
10288 }
10289 \f
10290 /* Encoding functions relevant only to Thumb. */
10291
10292 /* inst.operands[i] is a shifted-register operand; encode
10293 it into inst.instruction in the format used by Thumb32. */
10294
10295 static void
10296 encode_thumb32_shifted_operand (int i)
10297 {
10298 unsigned int value = inst.reloc.exp.X_add_number;
10299 unsigned int shift = inst.operands[i].shift_kind;
10300
10301 constraint (inst.operands[i].immisreg,
10302 _("shift by register not allowed in thumb mode"));
10303 inst.instruction |= inst.operands[i].reg;
10304 if (shift == SHIFT_RRX)
10305 inst.instruction |= SHIFT_ROR << 4;
10306 else
10307 {
10308 constraint (inst.reloc.exp.X_op != O_constant,
10309 _("expression too complex"));
10310
10311 constraint (value > 32
10312 || (value == 32 && (shift == SHIFT_LSL
10313 || shift == SHIFT_ROR)),
10314 _("shift expression is too large"));
10315
10316 if (value == 0)
10317 shift = SHIFT_LSL;
10318 else if (value == 32)
10319 value = 0;
10320
10321 inst.instruction |= shift << 4;
10322 inst.instruction |= (value & 0x1c) << 10;
10323 inst.instruction |= (value & 0x03) << 6;
10324 }
10325 }
10326
10327
/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.  If it is a store insn,
   reject PC in Rn.  */

static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register always goes in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #imm}] — register offset form.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0..#3 is encodable.  */
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #imm] or [Rn, #imm]! — pre-indexed immediate form.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      /* A PC-relative store makes no sense.  */
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  /* LDRD/STRD use the P/W bits of the coprocessor-style layout.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #imm — post-indexed form; writeback is implied.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10406
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.
   Columns: mnemonic suffix, 16-bit opcode (hex), 32-bit opcode (hex).  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* Expand the table into an enum of T_MNEM_* opcode codes.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Expand the second column into the 16-bit opcode lookup array.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Expand the third column into the 32-bit opcode lookup array.
   THUMB_SETS_FLAGS tests the S bit (bit 20) of the 32-bit encoding.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10512
10513 /* Thumb instruction encoders, in alphabetical order. */
10514
10515 /* ADDW or SUBW. */
10516
10517 static void
10518 do_t_add_sub_w (void)
10519 {
10520 int Rd, Rn;
10521
10522 Rd = inst.operands[0].reg;
10523 Rn = inst.operands[1].reg;
10524
10525 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10526 is the SP-{plus,minus}-immediate form of the instruction. */
10527 if (Rn == REG_SP)
10528 constraint (Rd == REG_PC, BAD_PC);
10529 else
10530 reject_bad_reg (Rd);
10531
10532 inst.instruction |= (Rn << 16) | (Rd << 8);
10533 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10534 }
10535
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equaling any of THUMB_OPCODE_add, adds, sub, or subs.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */

  /* Writing PC acts as a branch, so it must be the last insn of an
     IT block.  */
  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* Inside an IT block the narrow forms do not set flags, outside
	 they do; pick the narrow encoding only when that matches the
	 requested flag behaviour.  */
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* Thumb-1 ALU_ABS group relocs are only valid on the
		     16-bit encoding; otherwise either fix up now or let
		     relaxation widen the insn later.  */
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    {
		      if (inst.size_req == 2)
			inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		      else
			inst.relax = opcode;
		    }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* Wide (32-bit) encoding needed.  */
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return form SUBS PC, LR, #imm8
		     may write PC.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register operand.  */
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Canonicalize so the non-destination source is
			 encoded in the Rm field.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: only the 16-bit forms exist.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10756
10757 static void
10758 do_t_adr (void)
10759 {
10760 unsigned Rd;
10761
10762 Rd = inst.operands[0].reg;
10763 reject_bad_reg (Rd);
10764
10765 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
10766 {
10767 /* Defer to section relaxation. */
10768 inst.relax = inst.instruction;
10769 inst.instruction = THUMB_OP16 (inst.instruction);
10770 inst.instruction |= Rd << 4;
10771 }
10772 else if (unified_syntax && inst.size_req != 2)
10773 {
10774 /* Generate a 32-bit opcode. */
10775 inst.instruction = THUMB_OP32 (inst.instruction);
10776 inst.instruction |= Rd << 8;
10777 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
10778 inst.reloc.pc_rel = 1;
10779 }
10780 else
10781 {
10782 /* Generate a 16-bit opcode. */
10783 inst.instruction = THUMB_OP16 (inst.instruction);
10784 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10785 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
10786 inst.reloc.pc_rel = 1;
10787 inst.instruction |= Rd << 4;
10788 }
10789
10790 if (inst.reloc.exp.X_op == O_symbol
10791 && inst.reloc.exp.X_add_symbol != NULL
10792 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
10793 && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
10794 inst.reloc.exp.X_add_number += 1;
10795 }
10796
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  /* The narrow form sets flags outside an IT block and does
	     not inside; only use it when that matches the mnemonic.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit form is two-operand: Rd must equal Rs.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
10885
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  /* The narrow form sets flags outside an IT block and does
	     not inside; only use it when that matches the mnemonic.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutative: the destination may match either source.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
10986
10987 static void
10988 do_t_bfc (void)
10989 {
10990 unsigned Rd;
10991 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10992 constraint (msb > 32, _("bit-field extends past end of register"));
10993 /* The instruction encoding stores the LSB and MSB,
10994 not the LSB and width. */
10995 Rd = inst.operands[0].reg;
10996 reject_bad_reg (Rd);
10997 inst.instruction |= Rd << 8;
10998 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10999 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
11000 inst.instruction |= msb - 1;
11001 }
11002
11003 static void
11004 do_t_bfi (void)
11005 {
11006 int Rd, Rn;
11007 unsigned int msb;
11008
11009 Rd = inst.operands[0].reg;
11010 reject_bad_reg (Rd);
11011
11012 /* #0 in second position is alternative syntax for bfc, which is
11013 the same instruction but with REG_PC in the Rm field. */
11014 if (!inst.operands[1].isreg)
11015 Rn = REG_PC;
11016 else
11017 {
11018 Rn = inst.operands[1].reg;
11019 reject_bad_reg (Rn);
11020 }
11021
11022 msb = inst.operands[2].imm + inst.operands[3].imm;
11023 constraint (msb > 32, _("bit-field extends past end of register"));
11024 /* The instruction encoding stores the LSB and MSB,
11025 not the LSB and width. */
11026 inst.instruction |= Rd << 8;
11027 inst.instruction |= Rn << 16;
11028 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11029 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11030 inst.instruction |= msb - 1;
11031 }
11032
11033 static void
11034 do_t_bfx (void)
11035 {
11036 unsigned Rd, Rn;
11037
11038 Rd = inst.operands[0].reg;
11039 Rn = inst.operands[1].reg;
11040
11041 reject_bad_reg (Rd);
11042 reject_bad_reg (Rn);
11043
11044 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
11045 _("bit-field extends past end of register"));
11046 inst.instruction |= Rd << 8;
11047 inst.instruction |= Rn << 16;
11048 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11049 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11050 inst.instruction |= inst.operands[3].imm - 1;
11051 }
11052
11053 /* ARM V5 Thumb BLX (argument parse)
11054 BLX <target_addr> which is BLX(1)
11055 BLX <Rm> which is BLX(2)
11056 Unfortunately, there are two different opcodes for this mnemonic.
11057 So, the insns[].value is not used, and the code here zaps values
11058 into inst.instruction.
11059
11060 ??? How to take advantage of the additional two bits of displacement
11061 available in Thumb32 mode? Need new relocation? */
11062
11063 static void
11064 do_t_blx (void)
11065 {
11066 set_it_insn_type_last ();
11067
11068 if (inst.operands[0].isreg)
11069 {
11070 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
11071 /* We have a register, so this is BLX(2). */
11072 inst.instruction |= inst.operands[0].reg << 3;
11073 }
11074 else
11075 {
11076 /* No register. This must be BLX(1). */
11077 inst.instruction = 0xf000e800;
11078 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
11079 }
11080 }
11081
11082 static void
11083 do_t_branch (void)
11084 {
11085 int opcode;
11086 int cond;
11087 bfd_reloc_code_real_type reloc;
11088
11089 cond = inst.cond;
11090 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
11091
11092 if (in_it_block ())
11093 {
11094 /* Conditional branches inside IT blocks are encoded as unconditional
11095 branches. */
11096 cond = COND_ALWAYS;
11097 }
11098 else
11099 cond = inst.cond;
11100
11101 if (cond != COND_ALWAYS)
11102 opcode = T_MNEM_bcond;
11103 else
11104 opcode = inst.instruction;
11105
11106 if (unified_syntax
11107 && (inst.size_req == 4
11108 || (inst.size_req != 2
11109 && (inst.operands[0].hasreloc
11110 || inst.reloc.exp.X_op == O_constant))))
11111 {
11112 inst.instruction = THUMB_OP32(opcode);
11113 if (cond == COND_ALWAYS)
11114 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
11115 else
11116 {
11117 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
11118 _("selected architecture does not support "
11119 "wide conditional branch instruction"));
11120
11121 gas_assert (cond != 0xF);
11122 inst.instruction |= cond << 22;
11123 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
11124 }
11125 }
11126 else
11127 {
11128 inst.instruction = THUMB_OP16(opcode);
11129 if (cond == COND_ALWAYS)
11130 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
11131 else
11132 {
11133 inst.instruction |= cond << 8;
11134 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
11135 }
11136 /* Allow section relaxation. */
11137 if (unified_syntax && inst.size_req != 2)
11138 inst.relax = opcode;
11139 }
11140 inst.reloc.type = reloc;
11141 inst.reloc.pc_rel = 1;
11142 }
11143
11144 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11145 between the two is the maximum immediate allowed - which is passed in
11146 RANGE. */
11147 static void
11148 do_t_bkpt_hlt1 (int range)
11149 {
11150 constraint (inst.cond != COND_ALWAYS,
11151 _("instruction is always unconditional"));
11152 if (inst.operands[0].present)
11153 {
11154 constraint (inst.operands[0].imm > range,
11155 _("immediate value out of range"));
11156 inst.instruction |= inst.operands[0].imm;
11157 }
11158
11159 set_it_insn_type (NEUTRAL_IT_INSN);
11160 }
11161
/* Thumb HLT: immediate limited to 0-63.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
11167
/* Thumb BKPT: immediate limited to 0-255.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11173
/* Thumb BL/BLX to an immediate target (BRANCH23-range).  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	 inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
11201
/* Thumb BX: encode Rm in bits 3-6.  */
static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
11211
11212 static void
11213 do_t_bxj (void)
11214 {
11215 int Rm;
11216
11217 set_it_insn_type_last ();
11218 Rm = inst.operands[0].reg;
11219 reject_bad_reg (Rm);
11220 inst.instruction |= Rm << 16;
11221 }
11222
11223 static void
11224 do_t_clz (void)
11225 {
11226 unsigned Rd;
11227 unsigned Rm;
11228
11229 Rd = inst.operands[0].reg;
11230 Rm = inst.operands[1].reg;
11231
11232 reject_bad_reg (Rd);
11233 reject_bad_reg (Rm);
11234
11235 inst.instruction |= Rd << 8;
11236 inst.instruction |= Rm << 16;
11237 inst.instruction |= Rm;
11238 }
11239
11240 static void
11241 do_t_cps (void)
11242 {
11243 set_it_insn_type (OUTSIDE_IT_INSN);
11244 inst.instruction |= inst.operands[0].imm;
11245 }
11246
/* Thumb CPSIE/CPSID (argument parse): change processor state, with
   interrupt-mask flags and an optional mode number.  Chooses between
   the 16-bit encoding and the 32-bit encoding (only available with
   the non-M v6 extension, and required when a mode is given or .w was
   requested).  */
static void
do_t_cpsi (void)
{
  /* Not allowed inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Recover the imod (enable/disable) field from the 16-bit
	 template before rebuilding the 32-bit opcode around it.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	/* Bit 8 marks "mode change" alongside the mode number.  */
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11274
11275 /* THUMB CPY instruction (argument parse). */
11276
11277 static void
11278 do_t_cpy (void)
11279 {
11280 if (inst.size_req == 4)
11281 {
11282 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11283 inst.instruction |= inst.operands[0].reg << 8;
11284 inst.instruction |= inst.operands[1].reg;
11285 }
11286 else
11287 {
11288 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11289 inst.instruction |= (inst.operands[0].reg & 0x7);
11290 inst.instruction |= inst.operands[1].reg << 3;
11291 }
11292 }
11293
11294 static void
11295 do_t_cbz (void)
11296 {
11297 set_it_insn_type (OUTSIDE_IT_INSN);
11298 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11299 inst.instruction |= inst.operands[0].reg;
11300 inst.reloc.pc_rel = 1;
11301 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11302 }
11303
11304 static void
11305 do_t_dbg (void)
11306 {
11307 inst.instruction |= inst.operands[0].imm;
11308 }
11309
11310 static void
11311 do_t_div (void)
11312 {
11313 unsigned Rd, Rn, Rm;
11314
11315 Rd = inst.operands[0].reg;
11316 Rn = (inst.operands[1].present
11317 ? inst.operands[1].reg : Rd);
11318 Rm = inst.operands[2].reg;
11319
11320 reject_bad_reg (Rd);
11321 reject_bad_reg (Rn);
11322 reject_bad_reg (Rm);
11323
11324 inst.instruction |= Rd << 8;
11325 inst.instruction |= Rn << 16;
11326 inst.instruction |= Rm;
11327 }
11328
11329 static void
11330 do_t_hint (void)
11331 {
11332 if (unified_syntax && inst.size_req == 4)
11333 inst.instruction = THUMB_OP32 (inst.instruction);
11334 else
11335 inst.instruction = THUMB_OP16 (inst.instruction);
11336 }
11337
/* Thumb IT instruction (argument parse): record the IT-block state
   and encode the condition/mask.  The mask parsed from the mnemonic
   assumes the "then" sense; when the first condition is a negative
   one the mask bits must be inverted to match the hardware
   convention.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  /* Seed the tracked IT state; bit 4 marks the block as open.  */
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the lowest set bit in the mask gives the
	 block length; every bit above it selects then/else and must
	 be flipped.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
11380
/* Helper function used for both push/pop and ldm/stm.  Encodes a
   Thumb-2 multiple load/store with base register BASE, register-list
   bitmask MASK and writeback flag WRITEBACK, diagnosing the
   UNPREDICTABLE combinations.  A single-register transfer is rewritten
   as the equivalent LDR/STR.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the template distinguishes load from store.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch; must be last in an IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  /* Exactly one bit set means a single register transfer.  */
  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Place the sole register number in the Rt field (bits 12-15).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11444
/* Thumb LDM/STM (argument parse).  In unified syntax, prefer a 16-bit
   encoding (LDMIA/STMIA, PUSH/POP, or a single-register LDR/STR) when
   the operands permit, falling back to the 32-bit form otherwise.  In
   divided syntax only 16-bit LDMIA/STMIA are accepted.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* The 16-bit encoding requires: stmia with writeback, or
		 ldmia whose writeback matches "base not in list".  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
							     : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0].reg == REG_SP)
	    {
	      if (inst.operands[0].writeback)
		{
		  /* SP with writeback maps onto PUSH/POP.  */
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* Single register off SP: SP-relative LDR/STR.  */
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11572
/* Thumb LDREX (argument parse): exclusive load.  */
static void
do_t_ldrex (void)
{
  /* Only a pre-indexed immediate address — no writeback, post-index,
     register offset, shift or negative offset — is acceptable.  */
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,
	      BAD_ADDR_MODE);

  /* The base register may not be PC.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The immediate offset is applied later via this relocation.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
11588
/* Thumb LDREXD (argument parse): exclusive load of a register pair.  */
static void
do_t_ldrexd (void)
{
  /* If the second transfer register was omitted it defaults to
     Rt + 1, which makes r14 invalid as Rt (r15 cannot be Rt2).  */
  if (!inst.operands[1].present)
    {
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed as first register "
		    "when second register is omitted"));
      inst.operands[1].reg = inst.operands[0].reg + 1;
    }
  /* Using the same register for both halves is UNPREDICTABLE.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
}
11606
/* Thumb single-register load/store, LDR/STR and byte/halfword/signed
   variants (argument parse).  Selects between the many 16-bit
   encodings (low-register immediate or register offset, SP- and
   PC-relative forms) and the 32-bit encoding, handling literal-pool
   loads and instruction relaxation along the way.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* Loading PC is a branch; it must terminate any IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Immediate/symbol operand: try to turn it into a MOV or a
	     literal-pool load.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* Pick the dedicated PC- or SP-relative opcode.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* No explicit size: let relaxation widen if needed.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Divided (pre-UAL) syntax: only the classic 16-bit forms.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* SP/PC bases only support word-sized transfers.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert an immediate-offset opcode into its register-offset
     counterpart.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11793
/* Thumb LDRD/STRD (argument parse): doubleword load/store.  */
static void
do_t_ldstd (void)
{
  /* An omitted second register defaults to Rt + 1, which rules out
     r14 (r15 invalid) and r12 (r13 invalid) for Rt.  */
  if (!inst.operands[1].present)
    {
      inst.operands[1].reg = inst.operands[0].reg + 1;
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed here"));
      constraint (inst.operands[0].reg == REG_R12,
		  _("r12 not allowed here"));
    }

  /* Writeback into a transfer register is UNPREDICTABLE.  */
  if (inst.operands[2].writeback
      && (inst.operands[0].reg == inst.operands[2].reg
	  || inst.operands[1].reg == inst.operands[2].reg))
    as_warn (_("base register written back, and overlaps "
	       "one of transfer registers"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
}
11816
11817 static void
11818 do_t_ldstt (void)
11819 {
11820 inst.instruction |= inst.operands[0].reg << 12;
11821 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
11822 }
11823
11824 static void
11825 do_t_mla (void)
11826 {
11827 unsigned Rd, Rn, Rm, Ra;
11828
11829 Rd = inst.operands[0].reg;
11830 Rn = inst.operands[1].reg;
11831 Rm = inst.operands[2].reg;
11832 Ra = inst.operands[3].reg;
11833
11834 reject_bad_reg (Rd);
11835 reject_bad_reg (Rn);
11836 reject_bad_reg (Rm);
11837 reject_bad_reg (Ra);
11838
11839 inst.instruction |= Rd << 8;
11840 inst.instruction |= Rn << 16;
11841 inst.instruction |= Rm;
11842 inst.instruction |= Ra << 12;
11843 }
11844
11845 static void
11846 do_t_mlal (void)
11847 {
11848 unsigned RdLo, RdHi, Rn, Rm;
11849
11850 RdLo = inst.operands[0].reg;
11851 RdHi = inst.operands[1].reg;
11852 Rn = inst.operands[2].reg;
11853 Rm = inst.operands[3].reg;
11854
11855 reject_bad_reg (RdLo);
11856 reject_bad_reg (RdHi);
11857 reject_bad_reg (Rn);
11858 reject_bad_reg (Rm);
11859
11860 inst.instruction |= RdLo << 12;
11861 inst.instruction |= RdHi << 8;
11862 inst.instruction |= Rn << 16;
11863 inst.instruction |= Rm;
11864 }
11865
/* Thumb MOV/MOVS/CMP (argument parse).  Chooses among the narrow and
   wide register forms, the immediate forms, and the shift-instruction
   encodings used for register-shifted MOVs, enforcing the
   architecture's SP/PC restrictions and deprecation warnings.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* MOV into PC is a branch; it must close any IT block.  */
  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      /* Destination field position differs between MOV (bit 8) and
	 CMP (bit 16) in the wide encodings.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				   "deprecated when r%u is the destination "
				   "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      /* Thumb-1 group relocations carry their own fixup; for
		 anything else set the immediate reloc or queue the
		 opcode for relaxation.  */
	      if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  if (inst.size_req == 2)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  /* The narrow shift encodings are two-operand (Rdn).  */
	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Divided (pre-UAL) syntax: 16-bit encodings only.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
12163
/* Thumb MOVW/MOVT (argument parse): move a 16-bit immediate into the
   low or high halfword of a register.  */
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 23 of the template distinguishes MOVT from MOVW.  */
  top = (inst.instruction & 0x00800000) != 0;
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      /* :lower16: only pairs with MOVW.  */
      constraint (top, _(":lower16: not allowed in this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      /* :upper16: only pairs with MOVT.  */
      constraint (!top, _(":upper16: not allowed in this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* Plain constant: scatter the 16-bit value over the split
	 imm4:i:imm3:imm8 fields of the T32 encoding.  */
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
12196
/* Thumb MVN/MVNS/TST/CMN and related (argument parse).  Picks the
   narrow encoding where the registers and IT context allow it,
   otherwise the wide encoding.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN allow SP (deprecated) but not PC as the first operand;
     everything else rejects both.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN puts Rd in bit 8; the flag-setters put Rn in bit 16.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12276
/* Thumb MRS (argument parse): read a status or banked register into
   Rd.  Delegates VFP-register forms to the VFP handler first.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked-register source: the parsed value packs the SYSm
	 fields plus a validity marker in bit 9.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
						   "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12324
/* Thumb MSR (argument parse): write Rn into a status or banked
   register.  Delegates VFP-register forms to the VFP handler first.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* Thumb MSR takes a register source only.  */
  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      /* With the DSP extension both _s and _f masks are allowed;
	 without it only _f.  */
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12371
/* Encode Thumb MUL/MULS, choosing between the 16-bit and 32-bit forms.
   With two operands, Rd doubles as the first source.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* "mul Rd, Rm" is shorthand for "mul Rd, Rd, Rm".  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* The 16-bit form needs low registers and Rd overlapping a
	 source; flag-setting behaviour must also match the IT state
	 (16-bit MUL inside an IT block does not set flags).  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      /* The 16-bit encoding has only two register fields, so Rd must
	 coincide with one source; encode the other at bit 3.  */
      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12434
12435 static void
12436 do_t_mull (void)
12437 {
12438 unsigned RdLo, RdHi, Rn, Rm;
12439
12440 RdLo = inst.operands[0].reg;
12441 RdHi = inst.operands[1].reg;
12442 Rn = inst.operands[2].reg;
12443 Rm = inst.operands[3].reg;
12444
12445 reject_bad_reg (RdLo);
12446 reject_bad_reg (RdHi);
12447 reject_bad_reg (Rn);
12448 reject_bad_reg (Rm);
12449
12450 inst.instruction |= RdLo << 12;
12451 inst.instruction |= RdHi << 8;
12452 inst.instruction |= Rn << 16;
12453 inst.instruction |= Rm;
12454
12455 if (RdLo == RdHi)
12456 as_tsktsk (_("rdhi and rdlo must be different"));
12457 }
12458
/* Encode Thumb NOP and the NOP-hint forms.  */
static void
do_t_nop (void)
{
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      /* Hints above 15 only exist in the 32-bit encoding.  */
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction. */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    /* Fall back to "mov r8, r8", the traditional Thumb-1 NOP.  */
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
12491
/* Encode Thumb NEG/NEGS, choosing the 16-bit form when the registers
   are low and the flag-setting behaviour matches the IT state.  */
static void
do_t_neg (void)
{
  if (unified_syntax)
    {
      bfd_boolean narrow;

      /* 16-bit NEG sets flags outside an IT block, and does not set
	 them inside one; pick whichever matches the mnemonic.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      /* Pre-unified syntax: only the 16-bit low-register form exists.  */
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
12532
/* Encode Thumb-2 ORN (OR NOT), register or immediate form.  With two
   operands, Rd doubles as the first source.  */
static void
do_t_orn (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      /* Immediate form: rewrite the opcode bits and emit a T32
	 modified-immediate fixup for the operand.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
12566
12567 static void
12568 do_t_pkhbt (void)
12569 {
12570 unsigned Rd, Rn, Rm;
12571
12572 Rd = inst.operands[0].reg;
12573 Rn = inst.operands[1].reg;
12574 Rm = inst.operands[2].reg;
12575
12576 reject_bad_reg (Rd);
12577 reject_bad_reg (Rn);
12578 reject_bad_reg (Rm);
12579
12580 inst.instruction |= Rd << 8;
12581 inst.instruction |= Rn << 16;
12582 inst.instruction |= Rm;
12583 if (inst.operands[3].present)
12584 {
12585 unsigned int val = inst.reloc.exp.X_add_number;
12586 constraint (inst.reloc.exp.X_op != O_constant,
12587 _("expression too complex"));
12588 inst.instruction |= (val & 0x1c) << 10;
12589 inst.instruction |= (val & 0x03) << 6;
12590 }
12591 }
12592
/* Encode Thumb-2 PKHTB by rewriting it as PKHBT.  */
static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      unsigned Rtmp;

      /* Without a shift, PKHTB is PKHBT with the tb bit cleared...  */
      inst.instruction &= ~0x00000020;

      /* PR 10168.  Swap the Rm and Rn registers.  */
      Rtmp = inst.operands[1].reg;
      inst.operands[1].reg = inst.operands[2].reg;
      inst.operands[2].reg = Rtmp;
    }
  /* ...then share the PKHBT encoder.  */
  do_t_pkhbt ();
}
12609
/* Encode Thumb-2 PLD (preload) addressing: validate a register index,
   then defer to the generic T32 address-mode encoder.  */
static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12618
/* Encode Thumb PUSH/POP, preferring the 16-bit forms: plain low-register
   list, then low registers plus LR (push) or PC (pop), then the 32-bit
   LDM/STM form.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    /* Only low registers: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus exactly LR (push) or PC (pop): 16-bit form
	 with the extra-register bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else becomes a 32-bit LDM/STM with SP writeback.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12651
/* Encode Thumb-2 RBIT.  */
static void
do_t_rbit (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  /* The source register is deliberately written into both the Rn
     (bit 16) and Rm (bit 0) fields of the encoding.  */
  inst.instruction |= Rm << 16;
  inst.instruction |= Rm;
}
12667
/* Encode Thumb REV and friends, preferring the 16-bit low-register
   form.  */
static void
do_t_rev (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      /* As with RBIT, the source goes into both register fields.  */
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
12696
12697 static void
12698 do_t_rrx (void)
12699 {
12700 unsigned Rd, Rm;
12701
12702 Rd = inst.operands[0].reg;
12703 Rm = inst.operands[1].reg;
12704
12705 reject_bad_reg (Rd);
12706 reject_bad_reg (Rm);
12707
12708 inst.instruction |= Rd << 8;
12709 inst.instruction |= Rm;
12710 }
12711
/* Encode Thumb RSB.  "rsbs Rd, Rs, #0" can shrink to the 16-bit NEGS
   encoding; everything else is the 32-bit form.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit; the 16-bit NEGS always
	 sets flags, so its use must match the IT state.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Only an immediate of exactly zero can become NEG.  */
      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* 32-bit immediate form: rewrite opcode bits and emit a T32
	     modified-immediate fixup.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12766
/* Encode Thumb SETEND.  Operand 0 selects big-endian when non-zero.  */
static void
do_t_setend (void)
{
  /* SETEND is deprecated from ARMv8 onwards.  */
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  set_it_insn_type (OUTSIDE_IT_INSN);
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
12778
/* Encode Thumb shift instructions (ASR/LSL/LSR/ROR, immediate or
   register shift count), choosing between the 16-bit and 32-bit
   encodings.  The 32-bit immediate-shift form is really MOV/MOVS with
   a shifted operand.  */
static void
do_t_shift (void)
{
  /* Two-operand form: Rd doubles as the first source.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* 16-bit shifts set flags outside an IT block and not inside
	 one; the choice must match the mnemonic.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit ROR-by-immediate.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* 16-bit register-shift requires Rd == Rn and a low shift reg.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Immediate shift: emit as MOV/MOVS with a shifted
		 register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: only the 16-bit low-register encodings.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12926
12927 static void
12928 do_t_simd (void)
12929 {
12930 unsigned Rd, Rn, Rm;
12931
12932 Rd = inst.operands[0].reg;
12933 Rn = inst.operands[1].reg;
12934 Rm = inst.operands[2].reg;
12935
12936 reject_bad_reg (Rd);
12937 reject_bad_reg (Rn);
12938 reject_bad_reg (Rm);
12939
12940 inst.instruction |= Rd << 8;
12941 inst.instruction |= Rn << 16;
12942 inst.instruction |= Rm;
12943 }
12944
12945 static void
12946 do_t_simd2 (void)
12947 {
12948 unsigned Rd, Rn, Rm;
12949
12950 Rd = inst.operands[0].reg;
12951 Rm = inst.operands[1].reg;
12952 Rn = inst.operands[2].reg;
12953
12954 reject_bad_reg (Rd);
12955 reject_bad_reg (Rn);
12956 reject_bad_reg (Rm);
12957
12958 inst.instruction |= Rd << 8;
12959 inst.instruction |= Rn << 16;
12960 inst.instruction |= Rm;
12961 }
12962
/* Encode Thumb-2 SMC (secure monitor call) with a 16-bit immediate.  */
static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  /* The immediate is encoded inline; no fixup is needed.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_it_insn_type_last ();
}
12978
12979 static void
12980 do_t_hvc (void)
12981 {
12982 unsigned int value = inst.reloc.exp.X_add_number;
12983
12984 inst.reloc.type = BFD_RELOC_UNUSED;
12985 inst.instruction |= (value & 0x0fff);
12986 inst.instruction |= (value & 0xf000) << 4;
12987 }
12988
/* Shared encoder for Thumb-2 SSAT/USAT.  BIAS is subtracted from the
   assembly-level saturation position before encoding (1 for SSAT,
   0 for USAT).  An optional shift on the source register goes into
   the sh/imm3/imm2 fields.  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      /* Shift amount is encoded inline; discard the pending fixup.  */
      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Split the amount across the imm3 and imm2 fields.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
13026
/* Encode Thumb-2 SSAT: the saturation position is stored off-by-one,
   hence bias 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
13032
13033 static void
13034 do_t_ssat16 (void)
13035 {
13036 unsigned Rd, Rn;
13037
13038 Rd = inst.operands[0].reg;
13039 Rn = inst.operands[2].reg;
13040
13041 reject_bad_reg (Rd);
13042 reject_bad_reg (Rn);
13043
13044 inst.instruction |= Rd << 8;
13045 inst.instruction |= inst.operands[1].imm - 1;
13046 inst.instruction |= Rn << 16;
13047 }
13048
/* Encode Thumb-2 STREX: store-exclusive with a plain immediate-offset
   address only.  */
static void
do_t_strex (void)
{
  /* Only a simple [Rn, #imm] address is allowed.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset is resolved later via an unsigned 8-bit T32 fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
13065
/* Encode Thumb-2 STREXD (store-exclusive doubleword).  */
static void
do_t_strexd (void)
{
  /* With three operands, the second data register defaults to Rt+1.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  /* The status register must not overlap any other operand.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
13082
13083 static void
13084 do_t_sxtah (void)
13085 {
13086 unsigned Rd, Rn, Rm;
13087
13088 Rd = inst.operands[0].reg;
13089 Rn = inst.operands[1].reg;
13090 Rm = inst.operands[2].reg;
13091
13092 reject_bad_reg (Rd);
13093 reject_bad_reg (Rn);
13094 reject_bad_reg (Rm);
13095
13096 inst.instruction |= Rd << 8;
13097 inst.instruction |= Rn << 16;
13098 inst.instruction |= Rm;
13099 inst.instruction |= inst.operands[3].imm << 4;
13100 }
13101
/* Encode Thumb SXTH-style extend, preferring the 16-bit low-register
   form when no rotation is requested.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  /* inst.instruction <= 0xffff means we still hold the 16-bit opcode
     template, i.e. a 16-bit encoding exists for this mnemonic.  */
  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Rotation goes into bits 4-5.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
13137
/* Encode Thumb SWI/SVC: the comment number is resolved by a fixup.  */
static void
do_t_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
13143
/* Encode Thumb-2 TBB/TBH (table branch byte/halfword).  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  /* Bit 4 of the template distinguishes TBH (halfword) from TBB.  */
  half = (inst.instruction & 0x10) != 0;
  /* Table branches must be the last instruction in an IT block.  */
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* From v8 on, SP is permitted as the table base.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* TBH carries an implicit LSL #1 on the index; TBB takes no shift.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13166
/* Encode Thumb UDF (permanently undefined), 16- or 32-bit depending on
   the immediate's magnitude and any explicit size suffix.  */
static void
do_t_udf (void)
{
  /* The immediate defaults to zero.  */
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      /* Split the 16-bit immediate across imm4 (bits 16-19) and imm12.  */
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  set_it_insn_type (NEUTRAL_IT_INSN);
}
13189
13190
/* Encode Thumb-2 USAT: unlike SSAT, the saturation position is stored
   as written, hence bias 0.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13196
13197 static void
13198 do_t_usat16 (void)
13199 {
13200 unsigned Rd, Rn;
13201
13202 Rd = inst.operands[0].reg;
13203 Rn = inst.operands[2].reg;
13204
13205 reject_bad_reg (Rd);
13206 reject_bad_reg (Rn);
13207
13208 inst.instruction |= Rd << 8;
13209 inst.instruction |= inst.operands[1].imm;
13210 inst.instruction |= Rn << 16;
13211 }
13212
13213 /* Neon instruction encoder helpers. */
13214
13215 /* Encodings for the different types for various Neon opcodes. */
13216
13217 /* An "invalid" code for the following tables. */
#define N_INV -1u

/* One row of NEON_ENC_TAB: the alternative base encodings for an
   overloaded Neon mnemonic.  A field holds N_INV when that variant
   does not exist for the opcode.  */
struct neon_tab_entry
{
  unsigned integer;		/* Integer (or first) variant.  */
  unsigned float_or_poly;	/* Float or polynomial variant.  */
  unsigned scalar_or_imm;	/* Scalar or immediate variant.  */
};
13226
13227 /* Map overloaded Neon opcodes to their respective encodings. */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,	0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,	0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,	0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)

/* One N_MNEM_<opc> enumerator per table row above.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encodings themselves, indexed by the N_MNEM_ values.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
13318
13319 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
/* The index argument is the N_MNEM_ value held in the low 28 bits of
   inst.instruction; the top nibble is preserved where noted.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction (a mnemonic index) with the chosen variant's
   base encoding, and flag the instruction as Neon so suffix checking
   below accepts it.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)

/* Reject a type suffix (e.g. ".i32") on an instruction that never went
   through NEON_ENCODE.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
13354
/* Define shapes for instruction operands.  The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     H - half-precision (16-bit) view of a VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
   - enumerations of the form NS_DDR to be used as arguments to
     neon_select_shape.
   - a table classifying shapes into single, double, quad, mixed.
   - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(4, (D, D, S, I), DOUBLE),		\
  X(4, (Q, Q, S, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
/* Half float shape supported so far.  */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED)
13427
/* Expanders used with the X-macro above.  Each expansion of NEON_SHAPE_DEF
   below redefines X (and S2/S3/S4 where needed) to produce a different view
   of the same shape list; the order of entries is therefore identical across
   the enum and both tables, so an enum neon_shape value indexes them all.  */

#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* One enumerator per shape, e.g. NS_DDD, NS_HHH; NS_NULL terminates
   neon_select_shape argument lists.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4

/* Broad classification of a shape: half/single/double/quad-registers-only,
   or a mixture.  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Class of each shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X

/* One enumerator per operand-shape mnemonic character (H, F, D, Q, I, S,
   R, L).  Order must match neon_shape_el_size below.  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above, in bits, indexed by enum neon_shape_el.
   Immediates and register lists have no width and are recorded as 0.  */
static unsigned neon_shape_el_size[] =
{
  16,
  32,
  64,
  128,
  0,
  32,
  32,
  0
};

/* Per-shape operand information: the operand count and the element kind of
   each operand slot.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Shape descriptions, indexed by enum neon_shape; drives
   neon_select_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13509
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit.  If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.

   Note that the modifier bits (N_DBL .. N_SIZ) deliberately reuse the values
   of the low type bits: they are only interpreted when N_EQK is set, in which
   case the plain type bits are not used.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

/* All modifier bits valid alongside N_EQK.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Convenience unions of the type bits above.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13570
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn until one matches all parsed
     operands.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists are not checked here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      /* A shape only matches if there are no extra parsed operands beyond
	 those the shape describes.  */
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  /* first_shape == NS_NULL means the caller passed an empty list; don't
     report an error in that case.  */
  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13713
13714 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13715 means the Q bit should be set). */
13716
13717 static int
13718 neon_quad (enum neon_shape shape)
13719 {
13720 return neon_shape_class[shape] == SC_QUAD;
13721 }
13722
13723 static void
13724 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13725 unsigned *g_size)
13726 {
13727 /* Allow modification to be made to types which are constrained to be
13728 based on the key element, based on bits set alongside N_EQK. */
13729 if ((typebits & N_EQK) != 0)
13730 {
13731 if ((typebits & N_HLF) != 0)
13732 *g_size /= 2;
13733 else if ((typebits & N_DBL) != 0)
13734 *g_size *= 2;
13735 if ((typebits & N_SGN) != 0)
13736 *g_type = NT_signed;
13737 else if ((typebits & N_UNS) != 0)
13738 *g_type = NT_unsigned;
13739 else if ((typebits & N_INT) != 0)
13740 *g_type = NT_integer;
13741 else if ((typebits & N_FLT) != 0)
13742 *g_type = NT_float;
13743 else if ((typebits & N_SIZ) != 0)
13744 *g_type = NT_untyped;
13745 }
13746 }
13747
13748 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13749 operand type, i.e. the single type specified in a Neon instruction when it
13750 is the only one given. */
13751
13752 static struct neon_type_el
13753 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13754 {
13755 struct neon_type_el dest = *key;
13756
13757 gas_assert ((thisarg & N_EQK) != 0);
13758
13759 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13760
13761 return dest;
13762 }
13763
13764 /* Convert Neon type and size into compact bitmask representation. */
13765
13766 static enum neon_type_mask
13767 type_chk_of_el_type (enum neon_el_type type, unsigned size)
13768 {
13769 switch (type)
13770 {
13771 case NT_untyped:
13772 switch (size)
13773 {
13774 case 8: return N_8;
13775 case 16: return N_16;
13776 case 32: return N_32;
13777 case 64: return N_64;
13778 default: ;
13779 }
13780 break;
13781
13782 case NT_integer:
13783 switch (size)
13784 {
13785 case 8: return N_I8;
13786 case 16: return N_I16;
13787 case 32: return N_I32;
13788 case 64: return N_I64;
13789 default: ;
13790 }
13791 break;
13792
13793 case NT_float:
13794 switch (size)
13795 {
13796 case 16: return N_F16;
13797 case 32: return N_F32;
13798 case 64: return N_F64;
13799 default: ;
13800 }
13801 break;
13802
13803 case NT_poly:
13804 switch (size)
13805 {
13806 case 8: return N_P8;
13807 case 16: return N_P16;
13808 case 64: return N_P64;
13809 default: ;
13810 }
13811 break;
13812
13813 case NT_signed:
13814 switch (size)
13815 {
13816 case 8: return N_S8;
13817 case 16: return N_S16;
13818 case 32: return N_S32;
13819 case 64: return N_S64;
13820 default: ;
13821 }
13822 break;
13823
13824 case NT_unsigned:
13825 switch (size)
13826 {
13827 case 8: return N_U8;
13828 case 16: return N_U16;
13829 case 32: return N_U32;
13830 case 64: return N_U64;
13831 default: ;
13832 }
13833 break;
13834
13835 default: ;
13836 }
13837
13838 return N_UTYP;
13839 }
13840
13841 /* Convert compact Neon bitmask type representation to a type and size. Only
13842 handles the case where a single bit is set in the mask. */
13843
13844 static int
13845 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13846 enum neon_type_mask mask)
13847 {
13848 if ((mask & N_EQK) != 0)
13849 return FAIL;
13850
13851 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13852 *size = 8;
13853 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13854 *size = 16;
13855 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13856 *size = 32;
13857 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13858 *size = 64;
13859 else
13860 return FAIL;
13861
13862 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13863 *type = NT_signed;
13864 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13865 *type = NT_unsigned;
13866 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13867 *type = NT_integer;
13868 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13869 *type = NT_untyped;
13870 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13871 *type = NT_poly;
13872 else if ((mask & (N_F_ALL)) != 0)
13873 *type = NT_float;
13874 else
13875 return FAIL;
13876
13877 return SUCCESS;
13878 }
13879
13880 /* Modify a bitmask of allowed types. This is only needed for type
13881 relaxation. */
13882
13883 static unsigned
13884 modify_types_allowed (unsigned allowed, unsigned mods)
13885 {
13886 unsigned size;
13887 enum neon_el_type type;
13888 unsigned destmask;
13889 int i;
13890
13891 destmask = 0;
13892
13893 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13894 {
13895 if (el_type_of_type_chk (&type, &size,
13896 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13897 {
13898 neon_modify_type_size (mods, &type, &size);
13899 destmask |= type_chk_of_el_type (type, size);
13900 }
13901 }
13902
13903 return destmask;
13904 }
13905
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Returned on any failure; callers test for NT_invtype.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* Mnemonic-suffix types and per-operand types are mutually exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 records the key operand's type/size and allowed-set; pass 1
     validates every operand against the key.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  /* On pass 1, N_EQK operands are checked against the key's allowed
	     set, transformed by the modifier bits.  */
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
14115
14116 /* Neon-style VFP instruction forwarding. */
14117
14118 /* Thumb VFP instructions have 0xE in the condition field. */
14119
14120 static void
14121 do_vfp_cond_or_thumb (void)
14122 {
14123 inst.is_neon = 1;
14124
14125 if (thumb_mode)
14126 inst.instruction |= 0xe0000000;
14127 else
14128 inst.instruction |= inst.cond << 28;
14129 }
14130
14131 /* Look up and encode a simple mnemonic, for use as a helper function for the
14132 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14133 etc. It is assumed that operand parsing has already been done, and that the
14134 operands are in the form expected by the given opcode (this isn't necessarily
14135 the same as the form in which they were parsed, hence some massaging must
14136 take place before this function is called).
14137 Checks current arch version against that in the looked-up opcode. */
14138
14139 static void
14140 do_vfp_nsyn_opcode (const char *opname)
14141 {
14142 const struct asm_opcode *opcode;
14143
14144 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
14145
14146 if (!opcode)
14147 abort ();
14148
14149 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
14150 thumb_mode ? *opcode->tvariant : *opcode->avariant),
14151 _(BAD_FPU));
14152
14153 inst.is_neon = 1;
14154
14155 if (thumb_mode)
14156 {
14157 inst.instruction = opcode->tvalue;
14158 opcode->tencode ();
14159 }
14160 else
14161 {
14162 inst.instruction = (inst.cond << 28) | opcode->avalue;
14163 opcode->aencode ();
14164 }
14165 }
14166
14167 static void
14168 do_vfp_nsyn_add_sub (enum neon_shape rs)
14169 {
14170 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14171
14172 if (rs == NS_FFF || rs == NS_HHH)
14173 {
14174 if (is_add)
14175 do_vfp_nsyn_opcode ("fadds");
14176 else
14177 do_vfp_nsyn_opcode ("fsubs");
14178
14179 /* ARMv8.2 fp16 instruction. */
14180 if (rs == NS_HHH)
14181 do_scalar_fp16_v82_encode ();
14182 }
14183 else
14184 {
14185 if (is_add)
14186 do_vfp_nsyn_opcode ("faddd");
14187 else
14188 do_vfp_nsyn_opcode ("fsubd");
14189 }
14190 }
14191
14192 /* Check operand types to see if this is a VFP instruction, and if so call
14193 PFN (). */
14194
14195 static int
14196 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14197 {
14198 enum neon_shape rs;
14199 struct neon_type_el et;
14200
14201 switch (args)
14202 {
14203 case 2:
14204 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14205 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14206 break;
14207
14208 case 3:
14209 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14210 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14211 N_F_ALL | N_KEY | N_VFP);
14212 break;
14213
14214 default:
14215 abort ();
14216 }
14217
14218 if (et.type != NT_invtype)
14219 {
14220 pfn (rs);
14221 return SUCCESS;
14222 }
14223
14224 inst.error = NULL;
14225 return FAIL;
14226 }
14227
14228 static void
14229 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14230 {
14231 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14232
14233 if (rs == NS_FFF || rs == NS_HHH)
14234 {
14235 if (is_mla)
14236 do_vfp_nsyn_opcode ("fmacs");
14237 else
14238 do_vfp_nsyn_opcode ("fnmacs");
14239
14240 /* ARMv8.2 fp16 instruction. */
14241 if (rs == NS_HHH)
14242 do_scalar_fp16_v82_encode ();
14243 }
14244 else
14245 {
14246 if (is_mla)
14247 do_vfp_nsyn_opcode ("fmacd");
14248 else
14249 do_vfp_nsyn_opcode ("fnmacd");
14250 }
14251 }
14252
14253 static void
14254 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14255 {
14256 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14257
14258 if (rs == NS_FFF || rs == NS_HHH)
14259 {
14260 if (is_fma)
14261 do_vfp_nsyn_opcode ("ffmas");
14262 else
14263 do_vfp_nsyn_opcode ("ffnmas");
14264
14265 /* ARMv8.2 fp16 instruction. */
14266 if (rs == NS_HHH)
14267 do_scalar_fp16_v82_encode ();
14268 }
14269 else
14270 {
14271 if (is_fma)
14272 do_vfp_nsyn_opcode ("ffmad");
14273 else
14274 do_vfp_nsyn_opcode ("ffnmad");
14275 }
14276 }
14277
14278 static void
14279 do_vfp_nsyn_mul (enum neon_shape rs)
14280 {
14281 if (rs == NS_FFF || rs == NS_HHH)
14282 {
14283 do_vfp_nsyn_opcode ("fmuls");
14284
14285 /* ARMv8.2 fp16 instruction. */
14286 if (rs == NS_HHH)
14287 do_scalar_fp16_v82_encode ();
14288 }
14289 else
14290 do_vfp_nsyn_opcode ("fmuld");
14291 }
14292
14293 static void
14294 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14295 {
14296 int is_neg = (inst.instruction & 0x80) != 0;
14297 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14298
14299 if (rs == NS_FF || rs == NS_HH)
14300 {
14301 if (is_neg)
14302 do_vfp_nsyn_opcode ("fnegs");
14303 else
14304 do_vfp_nsyn_opcode ("fabss");
14305
14306 /* ARMv8.2 fp16 instruction. */
14307 if (rs == NS_HH)
14308 do_scalar_fp16_v82_encode ();
14309 }
14310 else
14311 {
14312 if (is_neg)
14313 do_vfp_nsyn_opcode ("fnegd");
14314 else
14315 do_vfp_nsyn_opcode ("fabsd");
14316 }
14317 }
14318
14319 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14320 insns belong to Neon, and are handled elsewhere. */
14321
14322 static void
14323 do_vfp_nsyn_ldm_stm (int is_dbmode)
14324 {
14325 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14326 if (is_ldm)
14327 {
14328 if (is_dbmode)
14329 do_vfp_nsyn_opcode ("fldmdbs");
14330 else
14331 do_vfp_nsyn_opcode ("fldmias");
14332 }
14333 else
14334 {
14335 if (is_dbmode)
14336 do_vfp_nsyn_opcode ("fstmdbs");
14337 else
14338 do_vfp_nsyn_opcode ("fstmias");
14339 }
14340 }
14341
14342 static void
14343 do_vfp_nsyn_sqrt (void)
14344 {
14345 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14346 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14347
14348 if (rs == NS_FF || rs == NS_HH)
14349 {
14350 do_vfp_nsyn_opcode ("fsqrts");
14351
14352 /* ARMv8.2 fp16 instruction. */
14353 if (rs == NS_HH)
14354 do_scalar_fp16_v82_encode ();
14355 }
14356 else
14357 do_vfp_nsyn_opcode ("fsqrtd");
14358 }
14359
14360 static void
14361 do_vfp_nsyn_div (void)
14362 {
14363 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14364 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14365 N_F_ALL | N_KEY | N_VFP);
14366
14367 if (rs == NS_FFF || rs == NS_HHH)
14368 {
14369 do_vfp_nsyn_opcode ("fdivs");
14370
14371 /* ARMv8.2 fp16 instruction. */
14372 if (rs == NS_HHH)
14373 do_scalar_fp16_v82_encode ();
14374 }
14375 else
14376 do_vfp_nsyn_opcode ("fdivd");
14377 }
14378
14379 static void
14380 do_vfp_nsyn_nmul (void)
14381 {
14382 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14383 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14384 N_F_ALL | N_KEY | N_VFP);
14385
14386 if (rs == NS_FFF || rs == NS_HHH)
14387 {
14388 NEON_ENCODE (SINGLE, inst);
14389 do_vfp_sp_dyadic ();
14390
14391 /* ARMv8.2 fp16 instruction. */
14392 if (rs == NS_HHH)
14393 do_scalar_fp16_v82_encode ();
14394 }
14395 else
14396 {
14397 NEON_ENCODE (DOUBLE, inst);
14398 do_vfp_dp_rd_rn_rm ();
14399 }
14400 do_vfp_cond_or_thumb ();
14401
14402 }
14403
/* Encode VCMP/VCMPE given in Neon syntax.  Two forms: register-register
   compare, and compare against an immediate (which must be zero; the
   mnemonic is rewritten to its "z" variant).  */

static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      /* Register-register compare.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare against zero: rewrite vcmp/vcmpe to vcmpz/vcmpez.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
14458
14459 static void
14460 nsyn_insert_sp (void)
14461 {
14462 inst.operands[1] = inst.operands[0];
14463 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14464 inst.operands[0].reg = REG_SP;
14465 inst.operands[0].isreg = 1;
14466 inst.operands[0].writeback = 1;
14467 inst.operands[0].present = 1;
14468 }
14469
14470 static void
14471 do_vfp_nsyn_push (void)
14472 {
14473 nsyn_insert_sp ();
14474
14475 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14476 _("register list must contain at least 1 and at most 16 "
14477 "registers"));
14478
14479 if (inst.operands[1].issingle)
14480 do_vfp_nsyn_opcode ("fstmdbs");
14481 else
14482 do_vfp_nsyn_opcode ("fstmdbd");
14483 }
14484
14485 static void
14486 do_vfp_nsyn_pop (void)
14487 {
14488 nsyn_insert_sp ();
14489
14490 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14491 _("register list must contain at least 1 and at most 16 "
14492 "registers"));
14493
14494 if (inst.operands[1].issingle)
14495 do_vfp_nsyn_opcode ("fldmias");
14496 else
14497 do_vfp_nsyn_opcode ("fldmiad");
14498 }
14499
14500 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14501 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14502
14503 static void
14504 neon_dp_fixup (struct arm_it* insn)
14505 {
14506 unsigned int i = insn->instruction;
14507 insn->is_neon = 1;
14508
14509 if (thumb_mode)
14510 {
14511 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14512 if (i & (1 << 24))
14513 i |= 1 << 28;
14514
14515 i &= ~(1 << 24);
14516
14517 i |= 0xef000000;
14518 }
14519 else
14520 i |= 0xf2000000;
14521
14522 insn->instruction = i;
14523 }
14524
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs gives the 1-based index of the least significant set bit, so for
     these power-of-two sizes this is log2(x) - 3.  */
  return ffs (x) - 4;
}

/* Low four bits of a register number (the Vd/Vn/Vm field).  */
#define LOW4(R) ((R) & 0xf)
/* Bit 4 of a register number (the D/N/M extension bit).  */
#define HI1(R) (((R) >> 4) & 1)
14536
14537 /* Encode insns with bit pattern:
14538
14539 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14540 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14541
14542 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14543 different meaning for some instruction. */
14544
14545 static void
14546 neon_three_same (int isquad, int ubit, int size)
14547 {
14548 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14549 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14550 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14551 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14552 inst.instruction |= LOW4 (inst.operands[2].reg);
14553 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14554 inst.instruction |= (isquad != 0) << 6;
14555 inst.instruction |= (ubit != 0) << 24;
14556 if (size != -1)
14557 inst.instruction |= neon_logbits (size) << 20;
14558
14559 neon_dp_fixup (&inst);
14560 }
14561
14562 /* Encode instructions of the form:
14563
14564 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14565 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14566
14567 Don't write size if SIZE == -1. */
14568
14569 static void
14570 neon_two_same (int qbit, int ubit, int size)
14571 {
14572 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14573 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14574 inst.instruction |= LOW4 (inst.operands[1].reg);
14575 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14576 inst.instruction |= (qbit != 0) << 6;
14577 inst.instruction |= (ubit != 0) << 24;
14578
14579 if (size != -1)
14580 inst.instruction |= neon_logbits (size) << 18;
14581
14582 neon_dp_fixup (&inst);
14583 }
14584
14585 /* Neon instruction encoders, in approximate order of appearance. */
14586
14587 static void
14588 do_neon_dyadic_i_su (void)
14589 {
14590 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14591 struct neon_type_el et = neon_check_type (3, rs,
14592 N_EQK, N_EQK, N_SU_32 | N_KEY);
14593 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14594 }
14595
14596 static void
14597 do_neon_dyadic_i64_su (void)
14598 {
14599 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14600 struct neon_type_el et = neon_check_type (3, rs,
14601 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14602 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14603 }
14604
14605 static void
14606 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
14607 unsigned immbits)
14608 {
14609 unsigned size = et.size >> 3;
14610 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14611 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14612 inst.instruction |= LOW4 (inst.operands[1].reg);
14613 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14614 inst.instruction |= (isquad != 0) << 6;
14615 inst.instruction |= immbits << 16;
14616 inst.instruction |= (size >> 3) << 7;
14617 inst.instruction |= (size & 0x7) << 19;
14618 if (write_ubit)
14619 inst.instruction |= (uval != 0) << 24;
14620
14621 neon_dp_fixup (&inst);
14622 }
14623
14624 static void
14625 do_neon_shl_imm (void)
14626 {
14627 if (!inst.operands[2].isreg)
14628 {
14629 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14630 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
14631 int imm = inst.operands[2].imm;
14632
14633 constraint (imm < 0 || (unsigned)imm >= et.size,
14634 _("immediate out of range for shift"));
14635 NEON_ENCODE (IMMED, inst);
14636 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14637 }
14638 else
14639 {
14640 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14641 struct neon_type_el et = neon_check_type (3, rs,
14642 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14643 unsigned int tmp;
14644
14645 /* VSHL/VQSHL 3-register variants have syntax such as:
14646 vshl.xx Dd, Dm, Dn
14647 whereas other 3-register operations encoded by neon_three_same have
14648 syntax like:
14649 vadd.xx Dd, Dn, Dm
14650 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14651 here. */
14652 tmp = inst.operands[2].reg;
14653 inst.operands[2].reg = inst.operands[1].reg;
14654 inst.operands[1].reg = tmp;
14655 NEON_ENCODE (INTEGER, inst);
14656 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14657 }
14658 }
14659
14660 static void
14661 do_neon_qshl_imm (void)
14662 {
14663 if (!inst.operands[2].isreg)
14664 {
14665 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14666 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
14667 int imm = inst.operands[2].imm;
14668
14669 constraint (imm < 0 || (unsigned)imm >= et.size,
14670 _("immediate out of range for shift"));
14671 NEON_ENCODE (IMMED, inst);
14672 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
14673 }
14674 else
14675 {
14676 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14677 struct neon_type_el et = neon_check_type (3, rs,
14678 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14679 unsigned int tmp;
14680
14681 /* See note in do_neon_shl_imm. */
14682 tmp = inst.operands[2].reg;
14683 inst.operands[2].reg = inst.operands[1].reg;
14684 inst.operands[1].reg = tmp;
14685 NEON_ENCODE (INTEGER, inst);
14686 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14687 }
14688 }
14689
14690 static void
14691 do_neon_rshl (void)
14692 {
14693 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14694 struct neon_type_el et = neon_check_type (3, rs,
14695 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14696 unsigned int tmp;
14697
14698 tmp = inst.operands[2].reg;
14699 inst.operands[2].reg = inst.operands[1].reg;
14700 inst.operands[1].reg = tmp;
14701 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14702 }
14703
14704 static int
14705 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
14706 {
14707 /* Handle .I8 pseudo-instructions. */
14708 if (size == 8)
14709 {
14710 /* Unfortunately, this will make everything apart from zero out-of-range.
14711 FIXME is this the intended semantics? There doesn't seem much point in
14712 accepting .I8 if so. */
14713 immediate |= immediate << 8;
14714 size = 16;
14715 }
14716
14717 if (size >= 32)
14718 {
14719 if (immediate == (immediate & 0x000000ff))
14720 {
14721 *immbits = immediate;
14722 return 0x1;
14723 }
14724 else if (immediate == (immediate & 0x0000ff00))
14725 {
14726 *immbits = immediate >> 8;
14727 return 0x3;
14728 }
14729 else if (immediate == (immediate & 0x00ff0000))
14730 {
14731 *immbits = immediate >> 16;
14732 return 0x5;
14733 }
14734 else if (immediate == (immediate & 0xff000000))
14735 {
14736 *immbits = immediate >> 24;
14737 return 0x7;
14738 }
14739 if ((immediate & 0xffff) != (immediate >> 16))
14740 goto bad_immediate;
14741 immediate &= 0xffff;
14742 }
14743
14744 if (immediate == (immediate & 0x000000ff))
14745 {
14746 *immbits = immediate;
14747 return 0x9;
14748 }
14749 else if (immediate == (immediate & 0x0000ff00))
14750 {
14751 *immbits = immediate >> 8;
14752 return 0xb;
14753 }
14754
14755 bad_immediate:
14756 first_error (_("immediate value out of range"));
14757 return FAIL;
14758 }
14759
/* Encode the Neon bitwise-logic instructions, in either three-register
   form or with an immediate operand (where VAND/VORN are handled as
   pseudo-instructions for VBIC/VORR with an inverted immediate).  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form; element types are not checked for bitwise
	 operations.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form: either "Vd, #imm" or "Vd, Vn, #imm" with
	 Vd == Vn required.  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      /* The mnemonic is stashed in the low bits of inst.instruction at
	 this point; mask off the condition field to recover it.  */
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
14845
14846 static void
14847 do_neon_bitfield (void)
14848 {
14849 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14850 neon_check_type (3, rs, N_IGNORE_TYPE);
14851 neon_three_same (neon_quad (rs), 0, -1);
14852 }
14853
14854 static void
14855 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14856 unsigned destbits)
14857 {
14858 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14859 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14860 types | N_KEY);
14861 if (et.type == NT_float)
14862 {
14863 NEON_ENCODE (FLOAT, inst);
14864 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
14865 }
14866 else
14867 {
14868 NEON_ENCODE (INTEGER, inst);
14869 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
14870 }
14871 }
14872
14873 static void
14874 do_neon_dyadic_if_su (void)
14875 {
14876 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14877 }
14878
14879 static void
14880 do_neon_dyadic_if_su_d (void)
14881 {
14882 /* This version only allow D registers, but that constraint is enforced during
14883 operand parsing so we don't need to do anything extra here. */
14884 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14885 }
14886
14887 static void
14888 do_neon_dyadic_if_i_d (void)
14889 {
14890 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14891 affected if we specify unsigned args. */
14892 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14893 }
14894
/* Flag bits for vfp_or_neon_is_neon (), selecting which checks and
   fix-ups that function performs.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	 /* Reject conditions in ARM mode; apply uncond value.  */
  NEON_CHECK_ARCH = 2,	 /* Require base Neon (fpu_neon_ext_v1).  */
  NEON_CHECK_ARCH8 = 4	 /* Require ARMv8 Neon (fpu_neon_ext_armv8).  */
};
14901
14902 /* Call this function if an instruction which may have belonged to the VFP or
14903 Neon instruction sets, but turned out to be a Neon instruction (due to the
14904 operand types involved, etc.). We have to check and/or fix-up a couple of
14905 things:
14906
14907 - Make sure the user hasn't attempted to make a Neon instruction
14908 conditional.
14909 - Alter the value in the condition code field if necessary.
14910 - Make sure that the arch supports Neon instructions.
14911
14912 Which of these operations take place depends on bits from enum
14913 vfp_or_neon_is_neon_bits.
14914
14915 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14916 current instruction's condition is COND_ALWAYS, the condition field is
14917 changed to inst.uncond_value. This is necessary because instructions shared
14918 between VFP and Neon may be conditional for the VFP variants only, and the
14919 unconditional Neon version must have, e.g., 0xF in the condition field. */
14920
14921 static int
14922 vfp_or_neon_is_neon (unsigned check)
14923 {
14924 /* Conditions are always legal in Thumb mode (IT blocks). */
14925 if (!thumb_mode && (check & NEON_CHECK_CC))
14926 {
14927 if (inst.cond != COND_ALWAYS)
14928 {
14929 first_error (_(BAD_COND));
14930 return FAIL;
14931 }
14932 if (inst.uncond_value != -1)
14933 inst.instruction |= inst.uncond_value << 28;
14934 }
14935
14936 if ((check & NEON_CHECK_ARCH)
14937 && !mark_feature_used (&fpu_neon_ext_v1))
14938 {
14939 first_error (_(BAD_FPU));
14940 return FAIL;
14941 }
14942
14943 if ((check & NEON_CHECK_ARCH8)
14944 && !mark_feature_used (&fpu_neon_ext_armv8))
14945 {
14946 first_error (_(BAD_FPU));
14947 return FAIL;
14948 }
14949
14950 return SUCCESS;
14951 }
14952
14953 static void
14954 do_neon_addsub_if_i (void)
14955 {
14956 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
14957 return;
14958
14959 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14960 return;
14961
14962 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14963 affected if we specify unsigned args. */
14964 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
14965 }
14966
14967 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14968 result to be:
14969 V<op> A,B (A is operand 0, B is operand 2)
14970 to mean:
14971 V<op> A,B,A
14972 not:
14973 V<op> A,B,B
14974 so handle that case specially. */
14975
14976 static void
14977 neon_exchange_operands (void)
14978 {
14979 if (inst.operands[1].present)
14980 {
14981 void *scratch = xmalloc (sizeof (inst.operands[0]));
14982
14983 /* Swap operands[1] and operands[2]. */
14984 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14985 inst.operands[1] = inst.operands[2];
14986 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14987 free (scratch);
14988 }
14989 else
14990 {
14991 inst.operands[1] = inst.operands[2];
14992 inst.operands[2] = inst.operands[0];
14993 }
14994 }
14995
14996 static void
14997 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
14998 {
14999 if (inst.operands[2].isreg)
15000 {
15001 if (invert)
15002 neon_exchange_operands ();
15003 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
15004 }
15005 else
15006 {
15007 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15008 struct neon_type_el et = neon_check_type (2, rs,
15009 N_EQK | N_SIZ, immtypes | N_KEY);
15010
15011 NEON_ENCODE (IMMED, inst);
15012 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15013 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15014 inst.instruction |= LOW4 (inst.operands[1].reg);
15015 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15016 inst.instruction |= neon_quad (rs) << 6;
15017 inst.instruction |= (et.type == NT_float) << 10;
15018 inst.instruction |= neon_logbits (et.size) << 18;
15019
15020 neon_dp_fixup (&inst);
15021 }
15022 }
15023
15024 static void
15025 do_neon_cmp (void)
15026 {
15027 neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
15028 }
15029
15030 static void
15031 do_neon_cmp_inv (void)
15032 {
15033 neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
15034 }
15035
15036 static void
15037 do_neon_ceq (void)
15038 {
15039 neon_compare (N_IF_32, N_IF_32, FALSE);
15040 }
15041
15042 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
15043 scalars, which are encoded in 5 bits, M : Rm.
15044 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
15045 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
15046 index in M.
15047
15048 Dot Product instructions are similar to multiply instructions except elsize
15049 should always be 32.
15050
15051 This function translates SCALAR, which is GAS's internal encoding of indexed
15052 scalar register, to raw encoding. There is also register and index range
15053 check based on ELSIZE. */
15054
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Any other size, or an out-of-range register/index.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
15080
15081 /* Encode multiply / multiply-accumulate scalar instructions. */
15082
15083 static void
15084 neon_mul_mac (struct neon_type_el et, int ubit)
15085 {
15086 unsigned scalar;
15087
15088 /* Give a more helpful error message if we have an invalid type. */
15089 if (et.type == NT_invtype)
15090 return;
15091
15092 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
15093 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15094 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15095 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15096 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15097 inst.instruction |= LOW4 (scalar);
15098 inst.instruction |= HI1 (scalar) << 5;
15099 inst.instruction |= (et.type == NT_float) << 8;
15100 inst.instruction |= neon_logbits (et.size) << 20;
15101 inst.instruction |= (ubit != 0) << 24;
15102
15103 neon_dp_fixup (&inst);
15104 }
15105
15106 static void
15107 do_neon_mac_maybe_scalar (void)
15108 {
15109 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
15110 return;
15111
15112 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15113 return;
15114
15115 if (inst.operands[2].isscalar)
15116 {
15117 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15118 struct neon_type_el et = neon_check_type (3, rs,
15119 N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
15120 NEON_ENCODE (SCALAR, inst);
15121 neon_mul_mac (et, neon_quad (rs));
15122 }
15123 else
15124 {
15125 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15126 affected if we specify unsigned args. */
15127 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
15128 }
15129 }
15130
15131 static void
15132 do_neon_fmac (void)
15133 {
15134 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
15135 return;
15136
15137 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15138 return;
15139
15140 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
15141 }
15142
15143 static void
15144 do_neon_tst (void)
15145 {
15146 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15147 struct neon_type_el et = neon_check_type (3, rs,
15148 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
15149 neon_three_same (neon_quad (rs), 0, et.size);
15150 }
15151
15152 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15153 same types as the MAC equivalents. The polynomial type for this instruction
15154 is encoded the same as the integer type. */
15155
15156 static void
15157 do_neon_mul (void)
15158 {
15159 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
15160 return;
15161
15162 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15163 return;
15164
15165 if (inst.operands[2].isscalar)
15166 do_neon_mac_maybe_scalar ();
15167 else
15168 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
15169 }
15170
15171 static void
15172 do_neon_qdmulh (void)
15173 {
15174 if (inst.operands[2].isscalar)
15175 {
15176 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15177 struct neon_type_el et = neon_check_type (3, rs,
15178 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15179 NEON_ENCODE (SCALAR, inst);
15180 neon_mul_mac (et, neon_quad (rs));
15181 }
15182 else
15183 {
15184 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15185 struct neon_type_el et = neon_check_type (3, rs,
15186 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15187 NEON_ENCODE (INTEGER, inst);
15188 /* The U bit (rounding) comes from bit mask. */
15189 neon_three_same (neon_quad (rs), 0, et.size);
15190 }
15191 }
15192
15193 static void
15194 do_neon_qrdmlah (void)
15195 {
15196 /* Check we're on the correct architecture. */
15197 if (!mark_feature_used (&fpu_neon_ext_armv8))
15198 inst.error =
15199 _("instruction form not available on this architecture.");
15200 else if (!mark_feature_used (&fpu_neon_ext_v8_1))
15201 {
15202 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
15203 record_feature_use (&fpu_neon_ext_v8_1);
15204 }
15205
15206 if (inst.operands[2].isscalar)
15207 {
15208 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15209 struct neon_type_el et = neon_check_type (3, rs,
15210 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15211 NEON_ENCODE (SCALAR, inst);
15212 neon_mul_mac (et, neon_quad (rs));
15213 }
15214 else
15215 {
15216 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15217 struct neon_type_el et = neon_check_type (3, rs,
15218 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15219 NEON_ENCODE (INTEGER, inst);
15220 /* The U bit (rounding) comes from bit mask. */
15221 neon_three_same (neon_quad (rs), 0, et.size);
15222 }
15223 }
15224
15225 static void
15226 do_neon_fcmp_absolute (void)
15227 {
15228 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15229 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15230 N_F_16_32 | N_KEY);
15231 /* Size field comes from bit mask. */
15232 neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
15233 }
15234
static void
do_neon_fcmp_absolute_inv (void)
{
  /* Swap the source operands, then encode as the non-inverted compare.  */
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15241
15242 static void
15243 do_neon_step (void)
15244 {
15245 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15246 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15247 N_F_16_32 | N_KEY);
15248 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
15249 }
15250
15251 static void
15252 do_neon_abs_neg (void)
15253 {
15254 enum neon_shape rs;
15255 struct neon_type_el et;
15256
15257 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
15258 return;
15259
15260 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15261 return;
15262
15263 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15264 et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);
15265
15266 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15267 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15268 inst.instruction |= LOW4 (inst.operands[1].reg);
15269 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15270 inst.instruction |= neon_quad (rs) << 6;
15271 inst.instruction |= (et.type == NT_float) << 10;
15272 inst.instruction |= neon_logbits (et.size) << 18;
15273
15274 neon_dp_fixup (&inst);
15275 }
15276
15277 static void
15278 do_neon_sli (void)
15279 {
15280 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15281 struct neon_type_el et = neon_check_type (2, rs,
15282 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15283 int imm = inst.operands[2].imm;
15284 constraint (imm < 0 || (unsigned)imm >= et.size,
15285 _("immediate out of range for insert"));
15286 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15287 }
15288
15289 static void
15290 do_neon_sri (void)
15291 {
15292 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15293 struct neon_type_el et = neon_check_type (2, rs,
15294 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15295 int imm = inst.operands[2].imm;
15296 constraint (imm < 1 || (unsigned)imm > et.size,
15297 _("immediate out of range for insert"));
15298 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
15299 }
15300
15301 static void
15302 do_neon_qshlu_imm (void)
15303 {
15304 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15305 struct neon_type_el et = neon_check_type (2, rs,
15306 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
15307 int imm = inst.operands[2].imm;
15308 constraint (imm < 0 || (unsigned)imm >= et.size,
15309 _("immediate out of range for shift"));
15310 /* Only encodes the 'U present' variant of the instruction.
15311 In this case, signed types have OP (bit 8) set to 0.
15312 Unsigned types have OP set to 1. */
15313 inst.instruction |= (et.type == NT_unsigned) << 8;
15314 /* The rest of the bits are the same as other immediate shifts. */
15315 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15316 }
15317
15318 static void
15319 do_neon_qmovn (void)
15320 {
15321 struct neon_type_el et = neon_check_type (2, NS_DQ,
15322 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15323 /* Saturating move where operands can be signed or unsigned, and the
15324 destination has the same signedness. */
15325 NEON_ENCODE (INTEGER, inst);
15326 if (et.type == NT_unsigned)
15327 inst.instruction |= 0xc0;
15328 else
15329 inst.instruction |= 0x80;
15330 neon_two_same (0, 1, et.size / 2);
15331 }
15332
15333 static void
15334 do_neon_qmovun (void)
15335 {
15336 struct neon_type_el et = neon_check_type (2, NS_DQ,
15337 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15338 /* Saturating move with unsigned results. Operands must be signed. */
15339 NEON_ENCODE (INTEGER, inst);
15340 neon_two_same (0, 1, et.size / 2);
15341 }
15342
15343 static void
15344 do_neon_rshift_sat_narrow (void)
15345 {
15346 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15347 or unsigned. If operands are unsigned, results must also be unsigned. */
15348 struct neon_type_el et = neon_check_type (2, NS_DQI,
15349 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15350 int imm = inst.operands[2].imm;
15351 /* This gets the bounds check, size encoding and immediate bits calculation
15352 right. */
15353 et.size /= 2;
15354
15355 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15356 VQMOVN.I<size> <Dd>, <Qm>. */
15357 if (imm == 0)
15358 {
15359 inst.operands[2].present = 0;
15360 inst.instruction = N_MNEM_vqmovn;
15361 do_neon_qmovn ();
15362 return;
15363 }
15364
15365 constraint (imm < 1 || (unsigned)imm > et.size,
15366 _("immediate out of range"));
15367 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
15368 }
15369
15370 static void
15371 do_neon_rshift_sat_narrow_u (void)
15372 {
15373 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15374 or unsigned. If operands are unsigned, results must also be unsigned. */
15375 struct neon_type_el et = neon_check_type (2, NS_DQI,
15376 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15377 int imm = inst.operands[2].imm;
15378 /* This gets the bounds check, size encoding and immediate bits calculation
15379 right. */
15380 et.size /= 2;
15381
15382 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15383 VQMOVUN.I<size> <Dd>, <Qm>. */
15384 if (imm == 0)
15385 {
15386 inst.operands[2].present = 0;
15387 inst.instruction = N_MNEM_vqmovun;
15388 do_neon_qmovun ();
15389 return;
15390 }
15391
15392 constraint (imm < 1 || (unsigned)imm > et.size,
15393 _("immediate out of range"));
15394 /* FIXME: The manual is kind of unclear about what value U should have in
15395 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15396 must be 1. */
15397 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
15398 }
15399
15400 static void
15401 do_neon_movn (void)
15402 {
15403 struct neon_type_el et = neon_check_type (2, NS_DQ,
15404 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15405 NEON_ENCODE (INTEGER, inst);
15406 neon_two_same (0, 1, et.size / 2);
15407 }
15408
15409 static void
15410 do_neon_rshift_narrow (void)
15411 {
15412 struct neon_type_el et = neon_check_type (2, NS_DQI,
15413 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15414 int imm = inst.operands[2].imm;
15415 /* This gets the bounds check, size encoding and immediate bits calculation
15416 right. */
15417 et.size /= 2;
15418
15419 /* If immediate is zero then we are a pseudo-instruction for
15420 VMOVN.I<size> <Dd>, <Qm> */
15421 if (imm == 0)
15422 {
15423 inst.operands[2].present = 0;
15424 inst.instruction = N_MNEM_vmovn;
15425 do_neon_movn ();
15426 return;
15427 }
15428
15429 constraint (imm < 1 || (unsigned)imm > et.size,
15430 _("immediate out of range for narrowing operation"));
15431 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
15432 }
15433
15434 static void
15435 do_neon_shll (void)
15436 {
15437 /* FIXME: Type checking when lengthening. */
15438 struct neon_type_el et = neon_check_type (2, NS_QDI,
15439 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
15440 unsigned imm = inst.operands[2].imm;
15441
15442 if (imm == et.size)
15443 {
15444 /* Maximum shift variant. */
15445 NEON_ENCODE (INTEGER, inst);
15446 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15447 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15448 inst.instruction |= LOW4 (inst.operands[1].reg);
15449 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15450 inst.instruction |= neon_logbits (et.size) << 18;
15451
15452 neon_dp_fixup (&inst);
15453 }
15454 else
15455 {
15456 /* A more-specific type check for non-max versions. */
15457 et = neon_check_type (2, NS_QDI,
15458 N_EQK | N_DBL, N_SU_32 | N_KEY);
15459 NEON_ENCODE (IMMED, inst);
15460 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
15461 }
15462 }
15463
15464 /* Check the various types for the VCVT instruction, and return which version
15465 the current instruction is. */
15466
/* X-macro table of conversion flavours.  Each CVT_VAR row gives: the
   flavour name, destination type bits, source type bits, a register-class
   flag, then the VFP opcode names for the bitshifted, plain and
   round-to-zero forms (NULL where no such VFP form exists).  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */ \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
15502
/* Expand each CVT_VAR row into an enumerator neon_cvt_flavour_<name>.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* Flavours from here on (the table's "VFP instructions" rows onward)
     are VFP-only.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15515
/* Determine which conversion flavour (see CVT_FLAVOUR_VAR) matches the
   current instruction's operand types for shape RS, trying each table row
   in turn; returns neon_cvt_flavour_invalid if none matches.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
  /* Each expansion performs one type check; a failed check records an
     error in inst.error, which is cleared again on the first match.  */
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
15541
/* Rounding-mode / variant suffix carried by the VCVT family of
   mnemonics.  Modes a/n/p/m map to the rm field of the FP v8 directed
   rounding encodings (see do_vfp_nsyn_cvt_fpv8); z selects
   round-towards-zero (plain VCVT on float->int), x selects VCVTR
   (round per FPSCR).  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
15552
/* Neon-syntax VFP conversions.  */

/* Encode a VFP conversion written in Neon syntax by dispatching to the
   traditional VFP mnemonic recorded in the CVT_FLAVOUR_VAR tables.
   RS is the operand shape; FLAVOUR selects the source/destination type
   pair.  Shapes carrying an immediate use the fixed-point ("bitshift")
   encodings (BSN column); the rest use the plain encodings (CN column).
   Flavours with a NULL table entry emit nothing here.  */
static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The fixed-point forms are two-operand: after checking that
	     source and destination are the same register, shift the
	     fbits immediate down into operand slot 1.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
15606
/* Encode the round-towards-zero variant of a Neon-syntax VFP conversion
   by dispatching to the "...z" mnemonic from the ZN column of
   CVT_FLAVOUR_VAR.  Flavours without a round-to-zero spelling have a
   NULL entry and emit nothing.  */
static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15623
15624 static void
15625 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
15626 enum neon_cvt_mode mode)
15627 {
15628 int sz, op;
15629 int rm;
15630
15631 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15632 D register operands. */
15633 if (flavour == neon_cvt_flavour_s32_f64
15634 || flavour == neon_cvt_flavour_u32_f64)
15635 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
15636 _(BAD_FPU));
15637
15638 if (flavour == neon_cvt_flavour_s32_f16
15639 || flavour == neon_cvt_flavour_u32_f16)
15640 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
15641 _(BAD_FP16));
15642
15643 set_it_insn_type (OUTSIDE_IT_INSN);
15644
15645 switch (flavour)
15646 {
15647 case neon_cvt_flavour_s32_f64:
15648 sz = 1;
15649 op = 1;
15650 break;
15651 case neon_cvt_flavour_s32_f32:
15652 sz = 0;
15653 op = 1;
15654 break;
15655 case neon_cvt_flavour_s32_f16:
15656 sz = 0;
15657 op = 1;
15658 break;
15659 case neon_cvt_flavour_u32_f64:
15660 sz = 1;
15661 op = 0;
15662 break;
15663 case neon_cvt_flavour_u32_f32:
15664 sz = 0;
15665 op = 0;
15666 break;
15667 case neon_cvt_flavour_u32_f16:
15668 sz = 0;
15669 op = 0;
15670 break;
15671 default:
15672 first_error (_("invalid instruction shape"));
15673 return;
15674 }
15675
15676 switch (mode)
15677 {
15678 case neon_cvt_mode_a: rm = 0; break;
15679 case neon_cvt_mode_n: rm = 1; break;
15680 case neon_cvt_mode_p: rm = 2; break;
15681 case neon_cvt_mode_m: rm = 3; break;
15682 default: first_error (_("invalid rounding mode")); return;
15683 }
15684
15685 NEON_ENCODE (FPV8, inst);
15686 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
15687 encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
15688 inst.instruction |= sz << 8;
15689
15690 /* ARMv8.2 fp16 VCVT instruction. */
15691 if (flavour == neon_cvt_flavour_s32_f16
15692 ||flavour == neon_cvt_flavour_u32_f16)
15693 do_scalar_fp16_v82_encode ();
15694 inst.instruction |= op << 7;
15695 inst.instruction |= rm << 16;
15696 inst.instruction |= 0xf0000000;
15697 inst.is_neon = TRUE;
15698 }
15699
/* Common worker for the whole VCVT family.  Derives the conversion
   flavour from the operand shape and parsed types, then dispatches to
   the VFP encoders or emits an Advanced SIMD encoding directly.  MODE
   carries the rounding-mode / variant suffix.  */
static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  /* The failed type checks in get_neon_cvt_flavour leave inst.error
     set, so just bail out here.  */
  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      /* Neon fixed-point conversion (an fbits immediate is present).  */
      {
	unsigned immbits;
	/* Per-flavour opcode bits, indexed directly by FLAVOUR.
	   NOTE(review): this assumes the first eight CVT_FLAVOUR_VAR
	   entries are exactly the Neon fixed-point flavours -- confirm
	   against the macro definition if the table is ever changed.  */
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	/* Flavours before s16_f16 in the enum are the 32-bit-element
	   conversions; the immediate field holds size - fbits.  */
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* NOTE(review): redundant -- bit 21 was already set just
	       above; harmless because OR is idempotent.  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* Directed-rounding Neon form (VCVTA/N/P/M) -- ARMv8 only,
	     unconditional.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  /* Bit 7 selects the unsigned variant.  */
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  /* Plain integer<->float Neon conversion.  Also reached from the
	     fixed-point case above when the fbits immediate is #0.  */
	  {
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      /* 0x...600 narrows (f32 -> f16), 0x...700 widens (f16 -> f32).  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
15891
/* VCVTR: convert using the rounding mode held in FPSCR.  */
static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}

/* VCVT: the default conversion; float->integer forms round towards
   zero (see the PR11109 handling in do_neon_cvt_1).  */
static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}

/* VCVTA: directed rounding, mode 'a'.  */
static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}

/* VCVTN: directed rounding, mode 'n'.  */
static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}

/* VCVTP: directed rounding, mode 'p'.  */
static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}

/* VCVTM: directed rounding, mode 'm'.  */
static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15927
15928 static void
15929 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
15930 {
15931 if (is_double)
15932 mark_feature_used (&fpu_vfp_ext_armv8);
15933
15934 encode_arm_vfp_reg (inst.operands[0].reg,
15935 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
15936 encode_arm_vfp_reg (inst.operands[1].reg,
15937 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
15938 inst.instruction |= to ? 0x10000 : 0;
15939 inst.instruction |= t ? 0x80 : 0;
15940 inst.instruction |= is_double ? 0x100 : 0;
15941 do_vfp_cond_or_thumb ();
15942 }
15943
/* Worker for VCVTB/VCVTT (T selects the top-half variant).  Determines
   the direction and precision of the half-precision conversion by
   trying each legal type pairing in turn, then defers to
   do_neon_cvttb_2.  Each failed neon_check_type attempt leaves
   inst.error set, hence the explicit clears on success.  */
static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* f32 -> f16.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* f16 -> f32.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* f64 -> f16.  */
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* f16 -> f64.  */
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    return;
}
15985
/* VCVTB: half-precision conversion using the bottom f16 half
   (t bit clear in the encoding).  */
static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}


/* VCVTT: half-precision conversion using the top f16 half
   (t bit, 0x80, set in the encoding).  */
static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
15998
/* Encode the immediate forms of VMOV/VMVN (cases 2/3 of do_neon_mov).
   Searches for a cmode/op encoding of the parsed immediate, flipping
   between VMOV and VMVN (with the immediate bit-inverted) when the
   value is only representable by the other mnemonic.  */
static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  /* regisimm: the parser stored a second immediate word in the reg
     field (NOTE(review): presumably the high half of a 64-bit
     immediate -- confirm against the operand parser).  */
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the op bit: it may have been flipped above.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
16050
16051 static void
16052 do_neon_mvn (void)
16053 {
16054 if (inst.operands[1].isreg)
16055 {
16056 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16057
16058 NEON_ENCODE (INTEGER, inst);
16059 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16060 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16061 inst.instruction |= LOW4 (inst.operands[1].reg);
16062 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16063 inst.instruction |= neon_quad (rs) << 6;
16064 }
16065 else
16066 {
16067 NEON_ENCODE (IMMED, inst);
16068 neon_move_immediate ();
16069 }
16070
16071 neon_dp_fixup (&inst);
16072 }
16073
16074 /* Encode instructions of form:
16075
16076 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16077 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16078
16079 static void
16080 neon_mixed_length (struct neon_type_el et, unsigned size)
16081 {
16082 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16083 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16084 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16085 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16086 inst.instruction |= LOW4 (inst.operands[2].reg);
16087 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16088 inst.instruction |= (et.type == NT_unsigned) << 24;
16089 inst.instruction |= neon_logbits (size) << 20;
16090
16091 neon_dp_fixup (&inst);
16092 }
16093
/* Lengthening three-register operations: Qd, Dn, Dm with the
   destination elements double the width of the 8/16/32-bit sources.  */
static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16102
/* VABAL: lengthening absolute-difference-and-accumulate; integer
   element types only (note the N_INT on the destination).  */
static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16110
16111 static void
16112 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
16113 {
16114 if (inst.operands[2].isscalar)
16115 {
16116 struct neon_type_el et = neon_check_type (3, NS_QDS,
16117 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
16118 NEON_ENCODE (SCALAR, inst);
16119 neon_mul_mac (et, et.type == NT_unsigned);
16120 }
16121 else
16122 {
16123 struct neon_type_el et = neon_check_type (3, NS_QDD,
16124 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
16125 NEON_ENCODE (INTEGER, inst);
16126 neon_mixed_length (et, et.size);
16127 }
16128 }
16129
/* Long multiply-accumulate (VMLAL-style) which may take a scalar third
   operand; accepts signed and unsigned 16/32-bit element types.  */
static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
16135
/* Wide three-register operations: Qd, Qn, Dm where the first two
   operands are double the width of the 8/16/32-bit Dm elements.  */
static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16143
/* Narrowing three-register operations: the result elements are half
   the width of the 16/32/64-bit key type (hence et.size / 2 below).  */
static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
16154
/* Saturating doubling long multiplies (VQD... family): signed 16/32-bit
   elements only, optionally with a scalar third operand.  */
static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
16160
/* VMULL: long multiply, including the polynomial (p8/p64) forms.
   A scalar third operand is dispatched to the scalar MLA-long path.  */
static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Pretend the size is 32 so the size field encodes as 0b10.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
16192
/* VEXT: extract a byte-aligned window from a register pair.  The
   immediate is given in elements and converted to a byte index here.  */
static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  /* The byte index must lie inside the source: 16 bytes for a Q
     register, 8 for a D register.  */
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
16214
/* VREV16/VREV32/VREV64: reverse the elements within fixed-size regions
   of a vector.  Which VREV this is was decided by the opcode table and
   is read back out of bits 8:7 of the instruction.  */
static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
16231
/* VDUP: replicate either a scalar lane (Dd/Qd = Dm[x]) or an ARM core
   register (Dd/Qd = Rm) into every lane of the destination.  */
static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar-lane source form.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Lane index, pre-shifted above the size bits of the imm4 field.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* The b/e bits spread across the word select the element size.  */
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
16282
16283 /* VMOV has particularly many variations. It can be one of:
16284 0. VMOV<c><q> <Qd>, <Qm>
16285 1. VMOV<c><q> <Dd>, <Dm>
16286 (Register operations, which are VORR with Rm = Rn.)
16287 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16288 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16289 (Immediate loads.)
16290 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16291 (ARM register to scalar.)
16292 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16293 (Two ARM registers to vector.)
16294 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16295 (Scalar to ARM register.)
16296 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16297 (Vector to two ARM registers.)
16298 8. VMOV.F32 <Sd>, <Sm>
16299 9. VMOV.F64 <Dd>, <Dm>
16300 (VFP register moves.)
16301 10. VMOV.F32 <Sd>, #imm
16302 11. VMOV.F64 <Dd>, #imm
16303 (VFP float immediate load.)
16304 12. VMOV <Rd>, <Sm>
16305 (VFP single to ARM reg.)
16306 13. VMOV <Sd>, <Rm>
16307 (ARM reg to VFP single.)
16308 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16309 (Two ARM regs to two VFP singles.)
16310 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16311 (Two VFP singles to two ARM regs.)
16312
16313 These cases can be disambiguated using neon_select_shape, except cases 1/9
16314 and 3/11 which depend on the operand type too.
16315
16316 All the encoded bits are hardcoded by this function.
16317
16318 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16319 Cases 5, 7 may be used with VFPv2 and above.
16320
16321 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16322 can specify a type where it doesn't make sense to, and is ignored). */
16323
/* Assemble VMOV in all its forms; see the large comment above this
   function for the fifteen numbered variants.  The operand shape
   disambiguates all cases except 1/9 and 3/11, which additionally
   depend on the parsed element type.  */
static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
					  NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
					  NS_HR, NS_RH, NS_HI, NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 9: VMOV.F64 assembles as the VFP fcpyd.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* VORR with Rn == Rm: the source register goes into both the
	   Rn and Rm fields.  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Base size bits before the lane index is folded in.  */
	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	/* Low two size/index bits go to bits 6:5, the rest to 22:21.  */
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Size bits plus the U bit for the unsigned narrow types.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      /* Also reached from NS_DI with ldconst = "fconstd".  */
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
16572
/* Encode V{R}SHR: vector shift right by immediate.  A shift amount of
   zero is assembled as a plain VMOV instead.  */

static void
do_neon_rshift_round_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      /* Drop the immediate operand so the VMOV encoder sees two operands.  */
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  /* Right shifts are encoded as (element size - shift amount).  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
16593
/* Encode the half-precision (NS_HH shape) scalar VMOV variant.  Requires
   ARMv8 VFP.  */

static void
do_neon_movhf (void)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
  constraint (rs != NS_HH, _("invalid suffix"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));

  do_vfp_sp_monadic ();

  /* Mark as a NEON-style (unconditional) encoding and force the 0xf
     top nibble.  */
  inst.is_neon = 1;
  inst.instruction |= 0xf0000000;
}
16608
16609 static void
16610 do_neon_movl (void)
16611 {
16612 struct neon_type_el et = neon_check_type (2, NS_QD,
16613 N_EQK | N_DBL, N_SU_32 | N_KEY);
16614 unsigned sizebits = et.size >> 3;
16615 inst.instruction |= sizebits << 19;
16616 neon_two_same (0, et.type == NT_unsigned, -1);
16617 }
16618
16619 static void
16620 do_neon_trn (void)
16621 {
16622 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16623 struct neon_type_el et = neon_check_type (2, rs,
16624 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16625 NEON_ENCODE (INTEGER, inst);
16626 neon_two_same (neon_quad (rs), 1, et.size);
16627 }
16628
/* Encode VZIP/VUZP.  The D-register 32-bit case degenerates to VTRN and
   is re-dispatched accordingly.  */

static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
					    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
16644
16645 static void
16646 do_neon_sat_abs_neg (void)
16647 {
16648 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16649 struct neon_type_el et = neon_check_type (2, rs,
16650 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16651 neon_two_same (neon_quad (rs), 1, et.size);
16652 }
16653
16654 static void
16655 do_neon_pair_long (void)
16656 {
16657 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16658 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
16659 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16660 inst.instruction |= (et.type == NT_unsigned) << 7;
16661 neon_two_same (neon_quad (rs), 1, et.size);
16662 }
16663
16664 static void
16665 do_neon_recip_est (void)
16666 {
16667 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16668 struct neon_type_el et = neon_check_type (2, rs,
16669 N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
16670 inst.instruction |= (et.type == NT_float) << 8;
16671 neon_two_same (neon_quad (rs), 1, et.size);
16672 }
16673
16674 static void
16675 do_neon_cls (void)
16676 {
16677 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16678 struct neon_type_el et = neon_check_type (2, rs,
16679 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16680 neon_two_same (neon_quad (rs), 1, et.size);
16681 }
16682
16683 static void
16684 do_neon_clz (void)
16685 {
16686 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16687 struct neon_type_el et = neon_check_type (2, rs,
16688 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16689 neon_two_same (neon_quad (rs), 1, et.size);
16690 }
16691
16692 static void
16693 do_neon_cnt (void)
16694 {
16695 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16696 struct neon_type_el et = neon_check_type (2, rs,
16697 N_EQK | N_INT, N_8 | N_KEY);
16698 neon_two_same (neon_quad (rs), 1, et.size);
16699 }
16700
16701 static void
16702 do_neon_swp (void)
16703 {
16704 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16705 neon_two_same (neon_quad (rs), 1, -1);
16706 }
16707
/* Encode VTBL/VTBX table lookup: Dd, a list of 1-4 D registers, Dm.
   The list length (minus one) goes in bits [9:8].  */

static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  /* Pack Dd, Dn (first list register), Dm, and the length field.  */
  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
16731
/* Encode VLDM/VSTM (and the decrement-before VLDMDB/VSTMDB forms) for a
   double-precision register list.  Single-precision lists are delegated
   to the VFP nsyn encoder.  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* The offset field counts words; each D register occupies two.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  /* Rn (base), W bit, and Dd (first register of the list).  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
16761
/* Encode VLDR/VSTR by delegating to the flds/fsts (single) or fldd/fstd
   (double) VFP encoders, with the ARMv8.2 fp16 variant layered on top.  */

static void
do_neon_ldr_str (void)
{
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
16798
16799 /* "interleave" version also handles non-interleaving register VLD1/VST1
16800 instructions. */
16801
16802 static void
16803 do_neon_ld_st_interleave (void)
16804 {
16805 struct neon_type_el et = neon_check_type (1, NS_NULL,
16806 N_8 | N_16 | N_32 | N_64);
16807 unsigned alignbits = 0;
16808 unsigned idx;
16809 /* The bits in this table go:
16810 0: register stride of one (0) or two (1)
16811 1,2: register list length, minus one (1, 2, 3, 4).
16812 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
16813 We use -1 for invalid entries. */
16814 const int typetable[] =
16815 {
16816 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
16817 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
16818 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
16819 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
16820 };
16821 int typebits;
16822
16823 if (et.type == NT_invtype)
16824 return;
16825
16826 if (inst.operands[1].immisalign)
16827 switch (inst.operands[1].imm >> 8)
16828 {
16829 case 64: alignbits = 1; break;
16830 case 128:
16831 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
16832 && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
16833 goto bad_alignment;
16834 alignbits = 2;
16835 break;
16836 case 256:
16837 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
16838 goto bad_alignment;
16839 alignbits = 3;
16840 break;
16841 default:
16842 bad_alignment:
16843 first_error (_("bad alignment"));
16844 return;
16845 }
16846
16847 inst.instruction |= alignbits << 4;
16848 inst.instruction |= neon_logbits (et.size) << 6;
16849
16850 /* Bits [4:6] of the immediate in a list specifier encode register stride
16851 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
16852 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
16853 up the right value for "type" in a table based on this value and the given
16854 list style, then stick it back. */
16855 idx = ((inst.operands[0].imm >> 4) & 7)
16856 | (((inst.instruction >> 8) & 3) << 3);
16857
16858 typebits = typetable[idx];
16859
16860 constraint (typebits == -1, _("bad list type for instruction"));
16861 constraint (((inst.instruction >> 8) & 3) && et.size == 64,
16862 _("bad element type for instruction"));
16863
16864 inst.instruction &= ~0xf00;
16865 inst.instruction |= typebits << 8;
16866 }
16867
/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
   otherwise.  The variable arguments are a list of pairs of legal (size, align)
   values, terminated with -1.  Returns SUCCESS or FAIL; on FAIL an error
   has already been recorded via first_error.  */

static int
neon_alignment_bit (int size, int align, int *do_alignment, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No :<align> qualifier given: nothing to validate, no bit to set.  */
  if (!inst.operands[1].immisalign)
    {
      *do_alignment = 0;
      return SUCCESS;
    }

  va_start (ap, do_alignment);

  /* Scan the (size, align) pairs until a match or the -1 terminator.  */
  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
	break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
	result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_alignment = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
16908
/* Encode single-lane VLD<n>/VST<n>.  <n> comes from bits [9:8] of the
   initial bitmask; the alignment encoding differs per <n>.  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  /* Alignment is stored shifted left by 8 in the operand imm field.  */
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Validate and encode the alignment bits; the legal (size, align)
     pairs and the resulting field values depend on <n>.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				       16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  /* Lane index and element-size fields.  */
  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
16993
/* Encode single n-element structure to all lanes VLD<n> instructions.  */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  /* <n> comes from bits [9:8] of the initial bitmask; list-length and
     alignment rules differ per <n>.  Register stride of 2 (where legal)
     is encoded in bit 5.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment get a special size
	   field value.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* The alignment flag itself lives in bit 4.  */
  inst.instruction |= do_alignment << 4;
}
17068
17069 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
17070 apart from bits [11:4]. */
17071
17072 static void
17073 do_neon_ldx_stx (void)
17074 {
17075 if (inst.operands[1].isreg)
17076 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
17077
17078 switch (NEON_LANE (inst.operands[0].imm))
17079 {
17080 case NEON_INTERLEAVE_LANES:
17081 NEON_ENCODE (INTERLV, inst);
17082 do_neon_ld_st_interleave ();
17083 break;
17084
17085 case NEON_ALL_LANES:
17086 NEON_ENCODE (DUP, inst);
17087 if (inst.instruction == N_INV)
17088 {
17089 first_error ("only loads support such operands");
17090 break;
17091 }
17092 do_neon_ld_dup ();
17093 break;
17094
17095 default:
17096 NEON_ENCODE (LANE, inst);
17097 do_neon_ld_st_lane ();
17098 }
17099
17100 /* L bit comes from bit mask. */
17101 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17102 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17103 inst.instruction |= inst.operands[1].reg << 16;
17104
17105 if (inst.operands[1].postind)
17106 {
17107 int postreg = inst.operands[1].imm & 0xf;
17108 constraint (!inst.operands[1].immisreg,
17109 _("post-index must be a register"));
17110 constraint (postreg == 0xd || postreg == 0xf,
17111 _("bad register for post-index"));
17112 inst.instruction |= postreg;
17113 }
17114 else
17115 {
17116 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
17117 constraint (inst.reloc.exp.X_op != O_constant
17118 || inst.reloc.exp.X_add_number != 0,
17119 BAD_ADDR_MODE);
17120
17121 if (inst.operands[1].writeback)
17122 {
17123 inst.instruction |= 0xd;
17124 }
17125 else
17126 inst.instruction |= 0xf;
17127 }
17128
17129 if (thumb_mode)
17130 inst.instruction |= 0xf9000000;
17131 else
17132 inst.instruction |= 0xf4000000;
17133 }
17134
/* FP v8.  */
/* Shared encoder for FPv8 scalar instructions: single/half shapes go
   through the SP dyadic encoder, doubles through the DP one; bit 8
   marks the double-precision form.  */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    do_vfp_dp_rd_rn_rm ();

  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  /* These encodings are unconditional (0xf top nibble).  */
  inst.instruction |= 0xf0000000;
}
17163
17164 static void
17165 do_vsel (void)
17166 {
17167 set_it_insn_type (OUTSIDE_IT_INSN);
17168
17169 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
17170 first_error (_("invalid instruction shape"));
17171 }
17172
/* Encode VMAXNM/VMINNM: first try the scalar VFP FPv8 form, then fall
   back to the NEON vector form.  Must be outside an IT block.  */

static void
do_vmaxnm (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
}
17186
/* Common encoder for the VRINT family.  MODE selects the rounding mode;
   scalar shapes take the VFP encoding, vector shapes the NEON one.  */

static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* First try the all-float VFP interpretation of the types.  */
  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The directed-rounding variants (a/n/p/m) are unconditional.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 marks the double-precision form.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* The rounding mode occupies bits [9:7] in the NEON encoding;
	 mode R has no NEON form.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
17278
/* Entry points for the VRINT family; each simply selects a rounding
   mode for do_vrint_1.  */

static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
17320
/* Validate a scalar operand for VCMLA and return its encoding: for
   16-bit elements the index goes in bit 4 (register must be < 16, index
   < 2); for 32-bit elements only index 0 is legal.  Reports an error
   and returns 0 otherwise.  */

static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (opnd);
  unsigned elno = NEON_SCALAR_INDEX (opnd);

  switch (elsize)
    {
    case 16:
      if (elno < 2 && regno < 16)
	return regno | (elno << 4);
      break;

    case 32:
      if (elno == 0)
	return regno;
      break;

    default:
      break;
    }

  first_error (_("scalar out of range"));
  return 0;
}
17335
/* Encode VCMLA (complex multiply-accumulate).  The rotation immediate
   must be 0, 90, 180 or 270 and is encoded divided by 90; the third
   operand may be a register or an indexed scalar.  */

static void
do_vcmla (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
  unsigned rot = inst.reloc.exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  /* Encoded rotation is the value in units of 90 degrees.  */
  rot /= 90;
  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form: hand-assembled rather than going through
	 neon_three_same.  */
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      /* Three-register form: reuse the three-same encoder, then patch
	 the top byte.  */
      enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
17376
/* Encode VCADD (complex add).  Only rotations of 90 or 270 are valid;
   the rotation is encoded as a single bit (bit 24).  */

static void
do_vcadd (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
  unsigned rot = inst.reloc.exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				   N_KEY | N_F16 | N_F32).size;
  neon_three_same (neon_quad (rs), 0, -1);
  inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
  inst.instruction |= 0xfc800800;
  inst.instruction |= (rot == 270) << 24;
  inst.instruction |= (size == 32) << 20;
}
17394
/* Dot Product instructions encoding support.  */

/* Encode VSDOT/VUDOT.  UNSIGNED_P selects the unsigned form (U bit).
   The third operand is either a D/Q register or an indexed scalar.  */

static void
do_neon_dotproduct (int unsigned_p)
{
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
17451
/* Dot Product instructions for signed integer.  */

static void
do_neon_dotproduct_s (void)
{
  return do_neon_dotproduct (0);
}

/* Dot Product instructions for unsigned integer.  */

static void
do_neon_dotproduct_u (void)
{
  return do_neon_dotproduct (1);
}
17467
/* Crypto v1 instructions.  */

/* Encode a two-operand crypto instruction (Q, Q shape).  ELTTYPE is the
   required element type; OP goes into bits [7:6], or is omitted when -1.
   Must be outside an IT block.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  /* Unconditional encodings; top byte differs between Thumb and ARM.  */
  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
17493
/* Encode a three-operand crypto instruction (Q, Q, Q shape with 32-bit
   untyped elements).  U and OP select the particular instruction; OP is
   folded into the size argument of neon_three_same.  Must be outside an
   IT block.  */

static void
do_crypto_3op_1 (int u, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
17508
/* Entry points for the AES and SHA crypto instructions.  Each selects
   the element type and op field for do_crypto_2op_1, or the u and op
   arguments for do_crypto_3op_1.  */

static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}

static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
17592
/* Common encoder for CRC32 instructions.  POLY selects the polynomial
   variant (1 for the 'C' forms), SZ the operand size field (0/1/2 for
   byte/halfword/word); field positions differ between the Thumb and
   ARM encodings.  Must be outside an IT block.  */

static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* r15 in any operand position is unpredictable; warn but encode.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
}
17610
/* CRC32 entry points: the first argument to do_crc32_1 selects the
   polynomial (0 = CRC32, 1 = CRC32C), the second the operand size
   (0 = byte, 1 = halfword, 2 = word).  */

static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
17646
/* Encode VJCVT (double to signed 32-bit conversion).  Requires ARMv8
   VFP.  */

static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
17656
17657 \f
17658 /* Overall per-instruction processing. */
17659
/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.  We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.  */

static void
fix_new_arm (fragS *	   frag,	/* Fragment the fix applies to.  */
	     int	   where,	/* Offset of the fixed-up field.  */
	     short int	   size,	/* Size in bytes of the field.  */
	     expressionS * exp,		/* Expression; may be rewritten.  */
	     int	   pc_rel,	/* Non-zero for pc-relative fixes.  */
	     int	   reloc)	/* BFD relocation code.  */
{
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  /* Rewrite the expression to refer to the new symbol with a
	     zero addend before falling through to the symbol case.  */
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex is wrapped in an expression symbol and
	 fixed up with a zero addend.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
17720
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the relocation expression into a symbol and offset for
     frag_var; complex expressions get an expression symbol.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  /* Emit the narrow (Thumb) form now; the relax machinery may grow the
     frag to the 4-byte form later.  */
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
17752
17753 /* Write a 32-bit thumb instruction to buf. */
17754 static void
17755 put_thumb32_insn (char * buf, unsigned long insn)
17756 {
17757 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
17758 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
17759 }
17760
/* Emit the instruction currently assembled in INST: report any pending
   error, hand relaxable instructions to output_relax_insn, otherwise
   write the bytes, attach any relocation, and emit debug info.  STR is
   the original source line, used only for diagnostics.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  /* Nothing to emit (e.g. a pseudo handled entirely in the encoder).  */
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb instructions are written halfword-swapped.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* An 8-byte ARM "instruction" is the same 4-byte word twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
17807
17808 static char *
17809 output_it_inst (int cond, int mask, char * to)
17810 {
17811 unsigned long instruction = 0xbf00;
17812
17813 mask &= 0xf;
17814 instruction |= mask;
17815 instruction |= cond << 4;
17816
17817 if (to == NULL)
17818 {
17819 to = frag_more (2);
17820 #ifdef OBJ_ELF
17821 dwarf2_emit_insn (2);
17822 #endif
17823 }
17824
17825 md_number_to_chars (to, instruction, 2);
17826
17827 return to;
17828 }
17829
/* Tag values used in struct asm_opcode's tag field.  They describe how
   (and where) a conditional affix may attach to a mnemonic; see
   opcode_lookup for how each tag is interpreted.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17863
17864 /* Subroutine of md_assemble, responsible for looking up the primary
17865 opcode from the mnemonic the user wrote. STR points to the
17866 beginning of the mnemonic.
17867
17868 This is not simply a hash table lookup, because of conditional
17869 variants. Most instructions have conditional variants, which are
17870 expressed with a _conditional affix_ to the mnemonic. If we were
17871 to encode each conditional variant as a literal string in the opcode
17872 table, it would have approximately 20,000 entries.
17873
17874 Most mnemonics take this affix as a suffix, and in unified syntax,
17875 'most' is upgraded to 'all'. However, in the divided syntax, some
17876 instructions take the affix as an infix, notably the s-variants of
17877 the arithmetic instructions. Of those instructions, all but six
17878 have the infix appear after the third character of the mnemonic.
17879
17880 Accordingly, the algorithm for looking up primary opcodes given
17881 an identifier is:
17882
17883 1. Look up the identifier in the opcode table.
17884 If we find a match, go to step U.
17885
17886 2. Look up the last two characters of the identifier in the
17887 conditions table. If we find a match, look up the first N-2
17888 characters of the identifier in the opcode table. If we
17889 find a match, go to step CE.
17890
17891 3. Look up the fourth and fifth characters of the identifier in
17892 the conditions table. If we find a match, extract those
17893 characters from the identifier, and look up the remaining
17894 characters in the opcode table. If we find a match, go
17895 to step CM.
17896
17897 4. Fail.
17898
17899 U. Examine the tag field of the opcode structure, in case this is
17900 one of the six instructions with its conditional infix in an
17901 unusual place. If it is, the tag tells us where to find the
17902 infix; look it up in the conditions table and set inst.cond
17903 accordingly. Otherwise, this is an unconditional instruction.
17904 Again set inst.cond accordingly. Return the opcode structure.
17905
17906 CE. Examine the tag field to make sure this is an instruction that
17907 should receive a conditional suffix. If it is not, fail.
17908 Otherwise, set inst.cond from the suffix we already looked up,
17909 and return the opcode structure.
17910
17911 CM. Examine the tag field to make sure this is an instruction that
17912 should receive a conditional infix after the third character.
17913 If it is not, fail. Otherwise, undo the edits to the current
17914 line of input and proceed as for case CE. */
17915
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      /* Odd-position infix: the tag encodes where the condition lives.  */
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than two
     characters.  (The suffix itself is two characters, so the whole
     identifier must be at least three.)  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily remove the two infix characters from the buffer, look
     up the contracted mnemonic, then restore the buffer exactly.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
18072
/* This function generates an initial IT instruction, leaving its block
   virtually open for the new instructions.  Eventually,
   the mask will be updated by now_it_add_mask () each time
   a new instruction needs to be included in the IT block.
   Finally, the block is closed with close_automatic_it_block ().
   The block closure can be requested either from md_assemble (),
   a tencode (), or due to a label hook.  */

static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  /* Initial mask for a one-instruction block; refined by
     now_it_add_mask () as further instructions join.  */
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  mapping_state (MAP_THUMB);
  /* Emit the IT instruction now and remember its location so it can be
     rewritten in place as the mask grows.  */
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
  now_it.warn_deprecated = FALSE;
  now_it.insn_cond = TRUE;
}
18093
/* Close an automatic IT block.
   See comments in new_automatic_it_block ().  */

static void
close_automatic_it_block (void)
{
  /* 0x10 is the mask value tested as "last slot" elsewhere
     (is_last = (now_it.mask == 0x10)).  */
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
18103
/* Update the mask of the current automatically-generated IT
   instruction.  See comments in new_automatic_it_block ().  */

static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit)	((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit)	(CLEAR_BIT (value, nbit) \
						 | ((bitvalue) << (nbit)))
  /* Low bit of COND distinguishes a condition from its inverse.  */
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  /* Record this instruction's then/else bit at position
     (5 - block_length), then re-mark the end of the mask one bit
     below it.  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       resulting_bit,
			       (5 - now_it.block_length));
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       1,
			       ((5 - now_it.block_length) - 1) );
  /* Rewrite the already-emitted IT instruction in place.  */
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
18127
18128 /* The IT blocks handling machinery is accessed through the these functions:
18129 it_fsm_pre_encode () from md_assemble ()
18130 set_it_insn_type () optional, from the tencode functions
18131 set_it_insn_type_last () ditto
18132 in_it_block () ditto
18133 it_fsm_post_encode () from md_assemble ()
18134 force_automatic_it_block_close () from label handling functions
18135
18136 Rationale:
18137 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
18138 initializing the IT insn type with a generic initial value depending
18139 on the inst.condition.
18140 2) During the tencode function, two things may happen:
18141 a) The tencode function overrides the IT insn type by
18142 calling either set_it_insn_type (type) or set_it_insn_type_last ().
18143 b) The tencode function queries the IT block state by
18144 calling in_it_block () (i.e. to determine narrow/not narrow mode).
18145
18146 Both set_it_insn_type and in_it_block run the internal FSM state
18147 handling function (handle_it_state), because: a) setting the IT insn
18148 type may incur in an invalid state (exiting the function),
18149 and b) querying the state requires the FSM to be updated.
18150 Specifically we want to avoid creating an IT block for conditional
18151 branches, so it_fsm_pre_encode is actually a guess and we can't
18152 determine whether an IT block is required until the tencode () routine
18153 has decided what type of instruction this actually it.
18154 Because of this, if set_it_insn_type and in_it_block have to be used,
18155 set_it_insn_type has to be called first.
18156
18157 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
18158 determines the insn IT type depending on the inst.cond code.
18159 When a tencode () routine encodes an instruction that can be
18160 either outside an IT block, or, in the case of being inside, has to be
18161 the last one, set_it_insn_type_last () will determine the proper
18162 IT instruction type based on the inst.cond code. Otherwise,
18163 set_it_insn_type can be called for overriding that logic or
18164 for covering other cases.
18165
18166 Calling handle_it_state () may not transition the IT block state to
18167 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
18168 still queried. Instead, if the FSM determines that the state should
18169 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
18170 after the tencode () function: that's what it_fsm_post_encode () does.
18171
18172 Since in_it_block () calls the state handling function to get an
18173 updated state, an error may occur (due to invalid insns combination).
18174 In that case, inst.error is set.
18175 Therefore, inst.error has to be checked after the execution of
18176 the tencode () routine.
18177
18178 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
18179 any pending state change (if any) that didn't take place in
18180 handle_it_state () as explained above. */
18181
18182 static void
18183 it_fsm_pre_encode (void)
18184 {
18185 if (inst.cond != COND_ALWAYS)
18186 inst.it_insn_type = INSIDE_IT_INSN;
18187 else
18188 inst.it_insn_type = OUTSIDE_IT_INSN;
18189
18190 now_it.state_handled = 0;
18191 }
18192
/* IT state FSM handling function.  Advances now_it according to the
   current state and the type of the instruction being assembled
   (inst.it_insn_type).  Returns SUCCESS, or FAIL with inst.error set.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      /* ARM mode: conditional execution needs no IT block, but
		 warn if the user asked for implicit IT handling that
		 excludes ARM.  */
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* A user-written IT instruction opens a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	/* Consume one slot of the user-written IT mask.  */
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
18356
/* A 16-bit Thumb opcode pattern/mask pair with a human-readable class
   description, used for IT-block deprecation diagnostics.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Required bits after masking.  */
  unsigned long mask;		/* Which opcode bits to compare.  */
  const char* description;	/* Class name used in the warning text.  */
};
18363
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  An entry matches when (insn & mask) == pattern; the list is
   scanned in order by it_fsm_post_encode.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode.  'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  /* Sentinel: a zero mask terminates the scan.  */
  { 0, 0, NULL }
};
18378
/* Run after the encoder: commit any pending IT FSM state change, emit
   the ARMv8 IT-block deprecation warnings, and leave the block once its
   last slot has been used.  */
static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_it.state_handled)
    handle_it_state ();

  /* Warn (once per block) about IT-block contents deprecated in
     ARMv8.  */
  if (now_it.insn_cond
      && !now_it.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    {
      if (inst.instruction >= 0x10000)
	{
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		       "deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
      else
	{
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
			       "of the following class are deprecated in ARMv8: "
			       "%s"), p->description);
		  now_it.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      if (now_it.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		       "instruction are deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
    }

  /* 0x10 marks the last slot of the mask; fall back outside the
     block.  */
  is_last = (now_it.mask == 0x10);
  if (is_last)
    {
      now_it.state = OUTSIDE_IT_BLOCK;
      now_it.mask = 0;
    }
}
18432
18433 static void
18434 force_automatic_it_block_close (void)
18435 {
18436 if (now_it.state == AUTOMATIC_IT_BLOCK)
18437 {
18438 close_automatic_it_block ();
18439 now_it.state = OUTSIDE_IT_BLOCK;
18440 now_it.mask = 0;
18441 }
18442 }
18443
18444 static int
18445 in_it_block (void)
18446 {
18447 if (!now_it.state_handled)
18448 handle_it_state ();
18449
18450 return now_it.state != OUTSIDE_IT_BLOCK;
18451 }
18452
18453 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18454 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18455 here, hence the "known" in the function name. */
18456
18457 static bfd_boolean
18458 known_t32_only_insn (const struct asm_opcode *opcode)
18459 {
18460 /* Original Thumb-1 wide instruction. */
18461 if (opcode->tencode == do_t_blx
18462 || opcode->tencode == do_t_branch23
18463 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
18464 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
18465 return TRUE;
18466
18467 /* Wide-only instruction added to ARMv8-M Baseline. */
18468 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
18469 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
18470 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
18471 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
18472 return TRUE;
18473
18474 return FALSE;
18475 }
18476
/* Whether wide instruction variant can be used if available for a valid OPCODE
   in ARCH.  */

static bfd_boolean
t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
{
  /* Wide-only instructions are trivially OK.  */
  if (known_t32_only_insn (opcode))
    return TRUE;

  /* Instruction with narrow and wide encoding added to ARMv8-M.  Availability
     of variant T3 of B.W is checked in do_t_branch.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
      && opcode->tencode == do_t_branch)
    return TRUE;

  /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
      && opcode->tencode == do_t_mov_cmp
      /* Make sure CMP instruction is not affected.  */
      && opcode->aencode == do_mov)
    return TRUE;

  /* Wide instruction variants of all instructions with narrow *and* wide
     variants become available with ARMv6t2.  Other opcodes are either
     narrow-only or wide-only and are thus available if OPCODE is valid.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
    return TRUE;

  /* OPCODE with narrow only instruction variant or wide variant not
     available.  */
  return FALSE;
}
18509
/* Assemble the source line STR: look up the mnemonic, validate it
   against the selected CPU and mode (Thumb or ARM), parse the operands,
   run the per-opcode encoder through the IT FSM, and emit the result
   via output_inst.  Diagnostics are issued with as_bad/as_tsktsk.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* Derive the instruction size from the encoding: values above
	     0xffff are 32-bit Thumb instructions.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
18702
18703 static void
18704 check_it_blocks_finished (void)
18705 {
18706 #ifdef OBJ_ELF
18707 asection *sect;
18708
18709 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
18710 if (seg_info (sect)->tc_segment_info_data.current_it.state
18711 == MANUAL_IT_BLOCK)
18712 {
18713 as_warn (_("section '%s' finished with an open IT block."),
18714 sect->name);
18715 }
18716 #else
18717 if (now_it.state == MANUAL_IT_BLOCK)
18718 as_warn (_("file finished with an open IT block."));
18719 #endif
18720 }
18721
18722 /* Various frobbings of labels and their addresses. */
18723
/* Hook run at the start of each logical input line: forget the label
   recorded on the previous line so it cannot be wrongly associated
   with this one.  */

void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
18729
/* Hook run whenever a label SYM is defined.  Records the label, tags it
   as ARM or Thumb per the current assembly mode (and as interworking
   where supported), closes any automatically-generated IT block, marks
   eligible labels as Thumb functions, and finally hands the label to
   the DWARF2 line-number machinery.  */

void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.  Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:  .word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      /* One-shot flag set by the .thumb_func directive; consume it.  */
      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
18788
18789 bfd_boolean
18790 arm_data_in_code (void)
18791 {
18792 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
18793 {
18794 *input_line_pointer = '/';
18795 input_line_pointer += 5;
18796 *input_line_pointer = 0;
18797 return TRUE;
18798 }
18799
18800 return FALSE;
18801 }
18802
18803 char *
18804 arm_canonicalize_symbol_name (char * name)
18805 {
18806 int len;
18807
18808 if (thumb_mode && (len = strlen (name)) > 5
18809 && streq (name + len - 5, "/data"))
18810 *(name + len - 5) = 0;
18811
18812 return name;
18813 }
18814 \f
18815 /* Table of all register names defined by default. The user can
18816 define additional names with .req. Note that all register names
18817 should appear in both upper and lowercase variants. Some registers
18818 also have mixed-case names. */
18819
/* Helper macros for building reg_names below.  REGDEF makes a single
   entry; REGNUM pastes a number onto a prefix; REGNUM2 doubles the
   number (each Q register overlays a pair of D registers); REGSET and
   REGSETH expand to runs of 16 entries (0-15 and 16-31); REGSET2 is
   the doubled-number run; SPLRBANK emits the LR/SP/SPSR triple for one
   banked processor mode, in both lower and upper case.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  The encoding packs 512 (banked-register
     marker) with a mode-specific index in bits 16+.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.  */
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.  */
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.  */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,  0,MMXWC),  REGDEF(wCID,  0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,  1,MMXWC),  REGDEF(wCon,  1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
#undef REGDEF
#undef REGNUM
#undef REGSET
/* NOTE(review): REGNUM2, REGSETH, REGSET2 and SPLRBANK are left
   defined here; they do not appear to be reused below, but confirm
   before adding #undefs.  */
18964
18965 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18966 within psr_required_here. */
/* Every permutation of the flag letters f/s/x/c is listed explicitly
   so that a PSR suffix may name the fields in any order; the OR of the
   corresponding PSR_* bits is identical for each ordering.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.  */
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
19043
19044 /* Table of V7M psr names. */
/* Table of V7M psr names.  Both lower- and upper-case spellings are
   accepted for each register.  NOTE(review): the values appear to be
   the MSR/MRS SYSm special-register encodings, with the *_ns entries
   (0x80 bit set) being the non-secure aliases for the Security
   Extensions — confirm against the ARMv7-M/v8-M reference manuals
   before relying on this.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	   0x0 }, {"APSR",	   0x0 },
  {"iapsr",	   0x1 }, {"IAPSR",	   0x1 },
  {"eapsr",	   0x2 }, {"EAPSR",	   0x2 },
  {"psr",	   0x3 }, {"PSR",	   0x3 },
  {"xpsr",	   0x3 }, {"XPSR",	   0x3 }, {"xPSR",	  3 },
  {"ipsr",	   0x5 }, {"IPSR",	   0x5 },
  {"epsr",	   0x6 }, {"EPSR",	   0x6 },
  {"iepsr",	   0x7 }, {"IEPSR",	   0x7 },
  {"msp",	   0x8 }, {"MSP",	   0x8 },
  {"psp",	   0x9 }, {"PSP",	   0x9 },
  {"msplim",	   0xa }, {"MSPLIM",	   0xa },
  {"psplim",	   0xb }, {"PSPLIM",	   0xb },
  {"primask",	   0x10}, {"PRIMASK",	   0x10},
  {"basepri",	   0x11}, {"BASEPRI",	   0x11},
  {"basepri_max",  0x12}, {"BASEPRI_MAX",  0x12},
  {"faultmask",	   0x13}, {"FAULTMASK",	   0x13},
  {"control",	   0x14}, {"CONTROL",	   0x14},
  {"msp_ns",	   0x88}, {"MSP_NS",	   0x88},
  {"psp_ns",	   0x89}, {"PSP_NS",	   0x89},
  {"msplim_ns",	   0x8a}, {"MSPLIM_NS",	   0x8a},
  {"psplim_ns",	   0x8b}, {"PSPLIM_NS",	   0x8b},
  {"primask_ns",   0x90}, {"PRIMASK_NS",   0x90},
  {"basepri_ns",   0x91}, {"BASEPRI_NS",   0x91},
  {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
  {"control_ns",   0x94}, {"CONTROL_NS",   0x94},
  {"sp_ns",	   0x98}, {"SP_NS",	   0x98 }
};
19074
19075 /* Table of all shift-in-operand names. */
/* Table of all shift-in-operand names, upper and lower case.  Note
   that "asl" is accepted as a synonym mapping to the LSL encoding.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
19085
19086 /* Table of all explicit relocation names. */
#ifdef OBJ_ELF
/* Table of all explicit relocation names (ELF only).  Each relocation
   is listed in both lower- and upper-case spellings, mapping to the
   same BFD relocation code.  */
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
  { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
  { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
#endif
19110
19111 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
/* Table of all conditional affixes.  0xF is not defined as a condition
   code.  "hs" is a synonym for "cs" (0x2); "ul" and "lo" are synonyms
   for "cc" (0x3).  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
19130
/* UL_BARRIER emits both the lower- and upper-case spelling of one
   barrier option name, with its 4-bit option code and the minimum
   architecture feature that provides it.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

static struct asm_barrier_opt barrier_opt_names[] =
{
  /* Note the duplicate codes: "sh"/"un"/"unst" are older spellings of
     "ish"/"nsh"/"nshst" and share the same encodings.  The *ld options
     require ARMv8.  */
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
19156
19157 /* Table of ARM-format instructions. */
19158
/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.	Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The
   Thumb-2 opcode is the ARM opcode with 0xE (condition "always")
   prefixed as the top nibble.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Glue M1, M2 and M3 into one mnemonic string.  sizeof (#m2) == 1 is
   true exactly when M2 expands to nothing (only the NUL remains), in
   which case the odd-infix-unconditional tag is used; otherwise the
   infix position is derived from the length of M1.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand one mnemonic with every condition-code infix variant.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,			\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Placeholder so that entries with no encode function can pass a bare
   "0": do_##ae with ae == 0 expands to do_0, i.e. a null pointer.  */
#define do_0 0

19333
19334 static const struct asm_opcode insns[] =
19335 {
19336 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19337 #define THUMB_VARIANT & arm_ext_v4t
19338 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
19339 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
19340 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
19341 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
19342 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
19343 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
19344 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
19345 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
19346 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
19347 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
19348 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
19349 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
19350 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
19351 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
19352 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
19353 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
19354
19355 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19356 for setting PSR flag bits. They are obsolete in V6 and do not
19357 have Thumb equivalents. */
19358 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19359 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19360 CL("tstp", 110f000, 2, (RR, SH), cmp),
19361 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19362 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19363 CL("cmpp", 150f000, 2, (RR, SH), cmp),
19364 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19365 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19366 CL("cmnp", 170f000, 2, (RR, SH), cmp),
19367
19368 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
19369 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
19370 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
19371 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
19372
19373 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
19374 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19375 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
19376 OP_RRnpc),
19377 OP_ADDRGLDR),ldst, t_ldst),
19378 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19379
19380 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19381 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19382 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19383 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19384 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19385 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19386
19387 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
19388 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
19389
19390 /* Pseudo ops. */
19391 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
19392 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
19393 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
19394 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
19395
19396 /* Thumb-compatibility pseudo ops. */
19397 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
19398 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
19399 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
19400 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
19401 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
19402 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
19403 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
19404 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
19405 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
19406 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
19407 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
19408 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
19409
19410 /* These may simplify to neg. */
19411 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
19412 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
19413
19414 #undef THUMB_VARIANT
19415 #define THUMB_VARIANT & arm_ext_os
19416
19417 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
19418 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
19419
19420 #undef THUMB_VARIANT
19421 #define THUMB_VARIANT & arm_ext_v6
19422
19423 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
19424
19425 /* V1 instructions with no Thumb analogue prior to V6T2. */
19426 #undef THUMB_VARIANT
19427 #define THUMB_VARIANT & arm_ext_v6t2
19428
19429 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19430 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19431 CL("teqp", 130f000, 2, (RR, SH), cmp),
19432
19433 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19434 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19435 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
19436 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19437
19438 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19439 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19440
19441 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19442 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19443
19444 /* V1 instructions with no Thumb analogue at all. */
19445 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
19446 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
19447
19448 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
19449 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
19450 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
19451 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
19452 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
19453 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
19454 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
19455 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
19456
19457 #undef ARM_VARIANT
19458 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19459 #undef THUMB_VARIANT
19460 #define THUMB_VARIANT & arm_ext_v4t
19461
19462 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19463 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19464
19465 #undef THUMB_VARIANT
19466 #define THUMB_VARIANT & arm_ext_v6t2
19467
19468 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19469 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
19470
19471 /* Generic coprocessor instructions. */
19472 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19473 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19474 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19475 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19476 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19477 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19478 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
19479
19480 #undef ARM_VARIANT
19481 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19482
19483 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19484 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19485
19486 #undef ARM_VARIANT
19487 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19488 #undef THUMB_VARIANT
19489 #define THUMB_VARIANT & arm_ext_msr
19490
19491 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
19492 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
19493
19494 #undef ARM_VARIANT
19495 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19496 #undef THUMB_VARIANT
19497 #define THUMB_VARIANT & arm_ext_v6t2
19498
19499 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19500 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19501 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19502 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19503 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19504 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19505 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19506 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19507
19508 #undef ARM_VARIANT
19509 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19510 #undef THUMB_VARIANT
19511 #define THUMB_VARIANT & arm_ext_v4t
19512
19513 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19514 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19515 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19516 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19517 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19518 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19519
19520 #undef ARM_VARIANT
19521 #define ARM_VARIANT & arm_ext_v4t_5
19522
19523 /* ARM Architecture 4T. */
19524 /* Note: bx (and blx) are required on V5, even if the processor does
19525 not support Thumb. */
19526 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
19527
19528 #undef ARM_VARIANT
19529 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19530 #undef THUMB_VARIANT
19531 #define THUMB_VARIANT & arm_ext_v5t
19532
19533 /* Note: blx has 2 variants; the .value coded here is for
19534 BLX(2). Only this variant has conditional execution. */
19535 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
19536 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
19537
19538 #undef THUMB_VARIANT
19539 #define THUMB_VARIANT & arm_ext_v6t2
19540
19541 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
19542 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19543 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19544 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19545 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19546 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19547 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19548 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19549
19550 #undef ARM_VARIANT
19551 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19552 #undef THUMB_VARIANT
19553 #define THUMB_VARIANT & arm_ext_v5exp
19554
19555 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19556 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19557 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19558 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19559
19560 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19561 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19562
19563 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19564 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19565 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19566 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19567
19568 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19569 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19570 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19571 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19572
19573 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19574 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19575
19576 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19577 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19578 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19579 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19580
19581 #undef ARM_VARIANT
19582 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19583 #undef THUMB_VARIANT
19584 #define THUMB_VARIANT & arm_ext_v6t2
19585
19586 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
19587 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
19588 ldrd, t_ldstd),
19589 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
19590 ADDRGLDRS), ldrd, t_ldstd),
19591
19592 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19593 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19594
19595 #undef ARM_VARIANT
19596 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19597
19598 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
19599
19600 #undef ARM_VARIANT
19601 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19602 #undef THUMB_VARIANT
19603 #define THUMB_VARIANT & arm_ext_v6
19604
19605 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
19606 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
19607 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19608 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19609 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19610 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19611 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19612 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19613 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19614 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
19615
19616 #undef THUMB_VARIANT
19617 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19618
19619 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
19620 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19621 strex, t_strex),
19622 #undef THUMB_VARIANT
19623 #define THUMB_VARIANT & arm_ext_v6t2
19624
19625 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19626 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19627
19628 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
19629 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
19630
19631 /* ARM V6 not included in V7M. */
19632 #undef THUMB_VARIANT
19633 #define THUMB_VARIANT & arm_ext_v6_notm
19634 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19635 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19636 UF(rfeib, 9900a00, 1, (RRw), rfe),
19637 UF(rfeda, 8100a00, 1, (RRw), rfe),
19638 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19639 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19640 UF(rfefa, 8100a00, 1, (RRw), rfe),
19641 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19642 UF(rfeed, 9900a00, 1, (RRw), rfe),
19643 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19644 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19645 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19646 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
19647 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
19648 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
19649 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
19650 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19651 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19652 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
19653
19654 /* ARM V6 not included in V7M (e.g. integer SIMD). */
19655 #undef THUMB_VARIANT
19656 #define THUMB_VARIANT & arm_ext_v6_dsp
19657 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
19658 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
19659 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19660 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19661 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19662 /* Old name for QASX. */
19663 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19664 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19665 /* Old name for QSAX. */
19666 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19667 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19668 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19669 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19670 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19671 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19672 /* Old name for SASX. */
19673 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19674 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19675 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19676 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19677 /* Old name for SHASX. */
19678 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19679 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19680 /* Old name for SHSAX. */
19681 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19682 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19683 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19684 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19685 /* Old name for SSAX. */
19686 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19687 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19688 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19689 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19690 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19691 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19692 /* Old name for UASX. */
19693 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19694 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19695 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19696 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19697 /* Old name for UHASX. */
19698 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19699 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19700 /* Old name for UHSAX. */
19701 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19702 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19703 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19704 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19705 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19706 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19707 /* Old name for UQASX. */
19708 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19709 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19710 /* Old name for UQSAX. */
19711 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19712 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19713 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19714 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19715 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19716 /* Old name for USAX. */
19717 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19718 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19719 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19720 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19721 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19722 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19723 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19724 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19725 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19726 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19727 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19728 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19729 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19730 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19731 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19732 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19733 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19734 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19735 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19736 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19737 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19738 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19739 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19740 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19741 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19742 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19743 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19744 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19745 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19746 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
19747 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
19748 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19749 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19750 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
19751
19752 #undef ARM_VARIANT
19753 #define ARM_VARIANT & arm_ext_v6k
19754 #undef THUMB_VARIANT
19755 #define THUMB_VARIANT & arm_ext_v6k
19756
19757 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
19758 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
19759 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
19760 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
19761
19762 #undef THUMB_VARIANT
19763 #define THUMB_VARIANT & arm_ext_v6_notm
19764 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
19765 ldrexd, t_ldrexd),
19766 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
19767 RRnpcb), strexd, t_strexd),
19768
19769 #undef THUMB_VARIANT
19770 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19771 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
19772 rd_rn, rd_rn),
19773 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
19774 rd_rn, rd_rn),
19775 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19776 strex, t_strexbh),
19777 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19778 strex, t_strexbh),
19779 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
19780
19781 #undef ARM_VARIANT
19782 #define ARM_VARIANT & arm_ext_sec
19783 #undef THUMB_VARIANT
19784 #define THUMB_VARIANT & arm_ext_sec
19785
19786 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
19787
19788 #undef ARM_VARIANT
19789 #define ARM_VARIANT & arm_ext_virt
19790 #undef THUMB_VARIANT
19791 #define THUMB_VARIANT & arm_ext_virt
19792
19793 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
19794 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
19795
19796 #undef ARM_VARIANT
19797 #define ARM_VARIANT & arm_ext_pan
19798 #undef THUMB_VARIANT
19799 #define THUMB_VARIANT & arm_ext_pan
19800
19801 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
19802
19803 #undef ARM_VARIANT
19804 #define ARM_VARIANT & arm_ext_v6t2
19805 #undef THUMB_VARIANT
19806 #define THUMB_VARIANT & arm_ext_v6t2
19807
19808 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
19809 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
19810 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19811 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19812
19813 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19814 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
19815
19816 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19817 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19818 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19819 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19820
19821 #undef THUMB_VARIANT
19822 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19823 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
19824 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
19825
19826 /* Thumb-only instructions. */
19827 #undef ARM_VARIANT
19828 #define ARM_VARIANT NULL
19829 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
19830 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
19831
19832 /* ARM does not really have an IT instruction, so always allow it.
19833 The opcode is copied from Thumb in order to allow warnings in
19834 -mimplicit-it=[never | arm] modes. */
19835 #undef ARM_VARIANT
19836 #define ARM_VARIANT & arm_ext_v1
19837 #undef THUMB_VARIANT
19838 #define THUMB_VARIANT & arm_ext_v6t2
19839
19840 TUE("it", bf08, bf08, 1, (COND), it, t_it),
19841 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
19842 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
19843 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
19844 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
19845 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
19846 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
19847 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
19848 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
19849 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
19850 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
19851 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
19852 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
19853 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
19854 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
19855 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19856 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
19857 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
19858
19859 /* Thumb2 only instructions. */
19860 #undef ARM_VARIANT
19861 #define ARM_VARIANT NULL
19862
19863 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19864 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19865 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
19866 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
19867 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
19868 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
19869
19870 /* Hardware division instructions. */
19871 #undef ARM_VARIANT
19872 #define ARM_VARIANT & arm_ext_adiv
19873 #undef THUMB_VARIANT
19874 #define THUMB_VARIANT & arm_ext_div
19875
19876 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
19877 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
19878
19879 /* ARM V6M/V7 instructions. */
19880 #undef ARM_VARIANT
19881 #define ARM_VARIANT & arm_ext_barrier
19882 #undef THUMB_VARIANT
19883 #define THUMB_VARIANT & arm_ext_barrier
19884
19885 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
19886 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
19887 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
19888
19889 /* ARM V7 instructions. */
19890 #undef ARM_VARIANT
19891 #define ARM_VARIANT & arm_ext_v7
19892 #undef THUMB_VARIANT
19893 #define THUMB_VARIANT & arm_ext_v7
19894
19895 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
19896 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
19897
19898 #undef ARM_VARIANT
19899 #define ARM_VARIANT & arm_ext_mp
19900 #undef THUMB_VARIANT
19901 #define THUMB_VARIANT & arm_ext_mp
19902
19903 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
19904
19905 /* ARMv8 instructions. */
19906 #undef ARM_VARIANT
19907 #define ARM_VARIANT & arm_ext_v8
19908
19909 /* Instructions shared between armv8-a and armv8-m. */
19910 #undef THUMB_VARIANT
19911 #define THUMB_VARIANT & arm_ext_atomics
19912
19913 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19914 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19915 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19916 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19917 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19918 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19919 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19920 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
19921 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19922 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
19923 stlex, t_stlex),
19924 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
19925 stlex, t_stlex),
19926 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
19927 stlex, t_stlex),
19928 #undef THUMB_VARIANT
19929 #define THUMB_VARIANT & arm_ext_v8
19930
19931 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
19932 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
19933 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
19934 ldrexd, t_ldrexd),
19935 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
19936 strexd, t_strexd),
19937 /* ARMv8 T32 only. */
19938 #undef ARM_VARIANT
19939 #define ARM_VARIANT NULL
19940 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
19941 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
19942 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
19943
19944 /* FP for ARMv8. */
19945 #undef ARM_VARIANT
19946 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19947 #undef THUMB_VARIANT
19948 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19949
19950 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
19951 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
19952 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
19953 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
19954 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19955 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19956 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
19957 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
19958 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
19959 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
19960 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
19961 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
19962 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
19963 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
19964 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
19965 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
19966 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
19967
19968 /* Crypto v1 extensions. */
19969 #undef ARM_VARIANT
19970 #define ARM_VARIANT & fpu_crypto_ext_armv8
19971 #undef THUMB_VARIANT
19972 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19973
19974 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
19975 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
19976 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
19977 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
19978 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
19979 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
19980 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
19981 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
19982 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
19983 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
19984 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
19985 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
19986 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
19987 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
19988
19989 #undef ARM_VARIANT
19990 #define ARM_VARIANT & crc_ext_armv8
19991 #undef THUMB_VARIANT
19992 #define THUMB_VARIANT & crc_ext_armv8
19993 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19994 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19995 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19996 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19997 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19998 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19999
20000 /* ARMv8.2 RAS extension. */
20001 #undef ARM_VARIANT
20002 #define ARM_VARIANT & arm_ext_ras
20003 #undef THUMB_VARIANT
20004 #define THUMB_VARIANT & arm_ext_ras
20005 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
20006
20007 #undef ARM_VARIANT
20008 #define ARM_VARIANT & arm_ext_v8_3
20009 #undef THUMB_VARIANT
20010 #define THUMB_VARIANT & arm_ext_v8_3
20011 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
20012 NUF (vcmla, 0, 4, (RNDQ, RNDQ, RNDQ_RNSC, EXPi), vcmla),
20013 NUF (vcadd, 0, 4, (RNDQ, RNDQ, RNDQ, EXPi), vcadd),
20014
20015 #undef ARM_VARIANT
20016 #define ARM_VARIANT & fpu_neon_ext_dotprod
20017 #undef THUMB_VARIANT
20018 #define THUMB_VARIANT & fpu_neon_ext_dotprod
20019 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
20020 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
20021
20022 #undef ARM_VARIANT
20023 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
20024 #undef THUMB_VARIANT
20025 #define THUMB_VARIANT NULL
20026
20027 cCE("wfs", e200110, 1, (RR), rd),
20028 cCE("rfs", e300110, 1, (RR), rd),
20029 cCE("wfc", e400110, 1, (RR), rd),
20030 cCE("rfc", e500110, 1, (RR), rd),
20031
20032 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
20033 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
20034 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
20035 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
20036
20037 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
20038 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
20039 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
20040 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
20041
20042 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
20043 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
20044 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
20045 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
20046 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
20047 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
20048 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
20049 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
20050 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
20051 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
20052 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
20053 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
20054
20055 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
20056 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
20057 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
20058 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
20059 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
20060 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
20061 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
20062 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
20063 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
20064 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
20065 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
20066 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
20067
20068 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
20069 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
20070 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
20071 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
20072 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
20073 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
20074 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
20075 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
20076 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
20077 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
20078 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
20079 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
20080
20081 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
20082 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
20083 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
20084 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
20085 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
20086 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
20087 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
20088 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
20089 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
20090 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
20091 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
20092 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
20093
20094 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
20095 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
20096 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
20097 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
20098 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
20099 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
20100 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
20101 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
20102 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
20103 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
20104 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
20105 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
20106
20107 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
20108 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
20109 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
20110 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
20111 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
20112 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
20113 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
20114 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
20115 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
20116 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
20117 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
20118 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
20119
20120 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
20121 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
20122 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
20123 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
20124 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
20125 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
20126 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
20127 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
20128 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
20129 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
20130 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
20131 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
20132
20133 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
20134 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
20135 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
20136 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
20137 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
20138 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
20139 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
20140 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
20141 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
20142 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
20143 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
20144 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
20145
20146 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
20147 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
20148 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
20149 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
20150 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
20151 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
20152 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
20153 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
20154 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
20155 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
20156 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
20157 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
20158
20159 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
20160 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
20161 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
20162 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
20163 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
20164 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
20165 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
20166 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
20167 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
20168 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
20169 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
20170 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
20171
20172 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
20173 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
20174 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
20175 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
20176 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
20177 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
20178 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
20179 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
20180 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
20181 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
20182 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
20183 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
20184
20185 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
20186 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
20187 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
20188 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
20189 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
20190 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
20191 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
20192 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
20193 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
20194 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
20195 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
20196 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
20197
20198 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
20199 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
20200 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
20201 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
20202 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
20203 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
20204 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
20205 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
20206 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
20207 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
20208 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
20209 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
20210
20211 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
20212 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
20213 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
20214 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
20215 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
20216 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
20217 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
20218 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
20219 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
20220 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
20221 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
20222 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
20223
20224 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
20225 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
20226 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
20227 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
20228 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
20229 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
20230 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
20231 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
20232 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
20233 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
20234 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
20235 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
20236
20237 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
20238 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
20239 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
20240 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
20241 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
20242 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
20243 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
20244 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
20245 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
20246 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
20247 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
20248 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
20249
20250 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
20251 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
20252 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
20253 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
20254 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
20255 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20256 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20257 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20258 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
20259 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
20260 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
20261 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
20262
20263 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
20264 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
20265 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
20266 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
20267 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
20268 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20269 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20270 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20271 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
20272 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
20273 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
20274 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
20275
20276 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
20277 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
20278 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
20279 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
20280 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
20281 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20282 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20283 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20284 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
20285 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
20286 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
20287 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
20288
20289 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
20290 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
20291 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
20292 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
20293 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
20294 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20295 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20296 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20297 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
20298 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
20299 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
20300 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
20301
20302 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
20303 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
20304 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
20305 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
20306 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
20307 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20308 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20309 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20310 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
20311 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
20312 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
20313 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
20314
20315 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
20316 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
20317 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
20318 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
20319 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
20320 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20321 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20322 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20323 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
20324 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
20325 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
20326 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
20327
20328 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
20329 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
20330 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
20331 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
20332 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
20333 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20334 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20335 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20336 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
20337 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
20338 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
20339 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
20340
20341 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
20342 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
20343 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
20344 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
20345 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
20346 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20347 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20348 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20349 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
20350 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
20351 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
20352 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
20353
20354 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
20355 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
20356 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
20357 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
20358 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
20359 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20360 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20361 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20362 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
20363 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
20364 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
20365 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
20366
20367 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
20368 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
20369 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
20370 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
20371 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
20372 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20373 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20374 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20375 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
20376 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
20377 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
20378 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
20379
20380 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20381 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20382 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20383 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20384 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20385 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20386 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20387 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20388 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20389 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20390 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20391 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20392
20393 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20394 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20395 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20396 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20397 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20398 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20399 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20400 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20401 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20402 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20403 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20404 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20405
20406 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20407 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20408 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20409 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20410 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20411 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20412 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20413 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20414 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20415 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20416 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20417 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20418
20419 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
20420 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
20421 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
20422 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
20423
20424 cCL("flts", e000110, 2, (RF, RR), rn_rd),
20425 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
20426 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
20427 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
20428 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
20429 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
20430 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
20431 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
20432 cCL("flte", e080110, 2, (RF, RR), rn_rd),
20433 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
20434 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
20435 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
20436
20437 /* The implementation of the FIX instruction is broken on some
20438 assemblers, in that it accepts a precision specifier as well as a
20439 rounding specifier, despite the fact that this is meaningless.
20440 To be more compatible, we accept it as well, though of course it
20441 does not set any bits. */
20442 cCE("fix", e100110, 2, (RR, RF), rd_rm),
20443 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
20444 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
20445 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
20446 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
20447 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
20448 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
20449 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
20450 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
20451 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
20452 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
20453 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
20454 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
20455
20456 /* Instructions that were new with the real FPA, call them V2. */
20457 #undef ARM_VARIANT
20458 #define ARM_VARIANT & fpu_fpa_ext_v2
20459
20460 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20461 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20462 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20463 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20464 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20465 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20466
20467 #undef ARM_VARIANT
20468 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20469
20470 /* Moves and type conversions. */
20471 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
20472 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
20473 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
20474 cCE("fmstat", ef1fa10, 0, (), noargs),
20475 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
20476 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
20477 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
20478 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
20479 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
20480 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20481 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
20482 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20483 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
20484 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
20485
20486 /* Memory operations. */
20487 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20488 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20489 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20490 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20491 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20492 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20493 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20494 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20495 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20496 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20497 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20498 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20499 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20500 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20501 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20502 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20503 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20504 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20505
20506 /* Monadic operations. */
20507 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
20508 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
20509 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
20510
20511 /* Dyadic operations. */
20512 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20513 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20514 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20515 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20516 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20517 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20518 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20519 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20520 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20521
20522 /* Comparisons. */
20523 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
20524 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
20525 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
20526 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
20527
20528 /* Double precision load/store are still present on single precision
20529 implementations. */
20530 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20531 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20532 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20533 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20534 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20535 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20536 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20537 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20538 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20539 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20540
20541 #undef ARM_VARIANT
20542 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20543
20544 /* Moves and type conversions. */
20545 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20546 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20547 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20548 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
20549 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
20550 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
20551 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
20552 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20553 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
20554 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20555 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20556 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20557 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20558
20559 /* Monadic operations. */
20560 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20561 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20562 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20563
20564 /* Dyadic operations. */
20565 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20566 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20567 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20568 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20569 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20570 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20571 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20572 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20573 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20574
20575 /* Comparisons. */
20576 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20577 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
20578 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20579 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
20580
20581 #undef ARM_VARIANT
20582 #define ARM_VARIANT & fpu_vfp_ext_v2
20583
20584 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
20585 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
20586 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
20587 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
20588
20589 /* Instructions which may belong to either the Neon or VFP instruction sets.
20590 Individual encoder functions perform additional architecture checks. */
20591 #undef ARM_VARIANT
20592 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20593 #undef THUMB_VARIANT
20594 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20595
20596 /* These mnemonics are unique to VFP. */
20597 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
20598 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
20599 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20600 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20601 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20602 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20603 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20604 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
20605 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
20606 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
20607
20608 /* Mnemonics shared by Neon and VFP. */
20609 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
20610 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20611 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20612
20613 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20614 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20615
20616 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20617 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20618
20619 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20620 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20621 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20622 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20623 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20624 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20625 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20626 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20627
20628 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
20629 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
20630 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
20631 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
20632
20633
20634 /* NOTE: All VMOV encoding is special-cased! */
20635 NCE(vmov, 0, 1, (VMOV), neon_mov),
20636 NCE(vmovq, 0, 1, (VMOV), neon_mov),
20637
20638 #undef ARM_VARIANT
20639 #define ARM_VARIANT & arm_ext_fp16
20640 #undef THUMB_VARIANT
20641 #define THUMB_VARIANT & arm_ext_fp16
20642 /* New instructions added from v8.2, allowing the extraction and insertion of
20643 the upper 16 bits of a 32-bit vector register. */
20644 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
20645 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
20646
20647 #undef THUMB_VARIANT
20648 #define THUMB_VARIANT & fpu_neon_ext_v1
20649 #undef ARM_VARIANT
20650 #define ARM_VARIANT & fpu_neon_ext_v1
20651
20652 /* Data processing with three registers of the same length. */
20653 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20654 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
20655 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
20656 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20657 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20658 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20659 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20660 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20661 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20662 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20663 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20664 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20665 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20666 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20667 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20668 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20669 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20670 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20671 /* If not immediate, fall back to neon_dyadic_i64_su.
20672 shl_imm should accept I8 I16 I32 I64,
20673 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20674 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
20675 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
20676 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
20677 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
20678 /* Logic ops, types optional & ignored. */
20679 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20680 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20681 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20682 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20683 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20684 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20685 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20686 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20687 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
20688 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
20689 /* Bitfield ops, untyped. */
20690 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20691 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20692 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20693 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20694 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20695 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20696 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
20697 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20698 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20699 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20700 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20701 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20702 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20703 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20704 back to neon_dyadic_if_su. */
20705 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20706 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20707 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20708 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20709 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20710 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20711 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20712 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20713 /* Comparison. Type I8 I16 I32 F32. */
20714 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20715 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
20716 /* As above, D registers only. */
20717 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20718 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20719 /* Int and float variants, signedness unimportant. */
20720 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20721 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20722 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
20723 /* Add/sub take types I8 I16 I32 I64 F32. */
20724 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20725 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20726 /* vtst takes sizes 8, 16, 32. */
20727 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20728 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
20729 /* VMUL takes I8 I16 I32 F32 P8. */
20730 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
20731 /* VQD{R}MULH takes S16 S32. */
20732 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20733 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20734 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20735 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20736 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20737 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20738 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20739 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20740 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20741 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20742 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20743 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20744 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20745 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20746 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20747 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20748 /* ARM v8.1 extension. */
20749 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20750 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
20751 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20752 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
20753
20754 /* Two address, int/float. Types S8 S16 S32 F32. */
20755 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
20756 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
20757
20758 /* Data processing with two registers and a shift amount. */
20759 /* Right shifts, and variants with rounding.
20760 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20761 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20762 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20763 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20764 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20765 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20766 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20767 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20768 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20769 /* Shift and insert. Sizes accepted 8 16 32 64. */
20770 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
20771 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
20772 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
20773 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
20774 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20775 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
20776 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
20777 /* Right shift immediate, saturating & narrowing, with rounding variants.
20778 Types accepted S16 S32 S64 U16 U32 U64. */
20779 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20780 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20781 /* As above, unsigned. Types accepted S16 S32 S64. */
20782 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20783 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20784 /* Right shift narrowing. Types accepted I16 I32 I64. */
20785 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20786 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20787 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20788 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
20789 /* CVT with optional immediate for fixed-point variant. */
20790 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
20791
20792 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
20793 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
20794
20795 /* Data processing, three registers of different lengths. */
20796 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20797 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
20798 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
20799 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
20800 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
20801 /* If not scalar, fall back to neon_dyadic_long.
20802 Vector types as above, scalar types S16 S32 U16 U32. */
20803 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20804 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20805 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20806 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20807 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20808 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20809 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20810 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20811 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20812 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20813 /* Saturating doubling multiplies. Types S16 S32. */
20814 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20815 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20816 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20817 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20818 S16 S32 U16 U32. */
20819 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
20820
20821 /* Extract. Size 8. */
20822 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
20823 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
20824
20825 /* Two registers, miscellaneous. */
20826 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20827 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
20828 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
20829 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
20830 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
20831 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
20832 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
20833 /* Vector replicate. Sizes 8 16 32. */
20834 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
20835 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
20836 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20837 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
20838 /* VMOVN. Types I16 I32 I64. */
20839 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
20840 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20841 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
20842 /* VQMOVUN. Types S16 S32 S64. */
20843 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
20844 /* VZIP / VUZP. Sizes 8 16 32. */
20845 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
20846 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
20847 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
20848 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
20849 /* VQABS / VQNEG. Types S8 S16 S32. */
20850 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20851 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
20852 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20853 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
20854 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20855 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
20856 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
20857 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
20858 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
20859 /* Reciprocal estimates. Types U32 F16 F32. */
20860 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
20861 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
20862 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
20863 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
20864 /* VCLS. Types S8 S16 S32. */
20865 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
20866 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
20867 /* VCLZ. Types I8 I16 I32. */
20868 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
20869 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
20870 /* VCNT. Size 8. */
20871 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
20872 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
20873 /* Two address, untyped. */
20874 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
20875 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
20876 /* VTRN. Sizes 8 16 32. */
20877 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
20878 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
20879
20880 /* Table lookup. Size 8. */
20881 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20882 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20883
20884 #undef THUMB_VARIANT
20885 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20886 #undef ARM_VARIANT
20887 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20888
20889 /* Neon element/structure load/store. */
20890 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20891 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20892 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20893 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20894 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20895 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20896 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20897 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20898
20899 #undef THUMB_VARIANT
20900 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20901 #undef ARM_VARIANT
20902 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20903 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
20904 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20905 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20906 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20907 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20908 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20909 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20910 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20911 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20912
20913 #undef THUMB_VARIANT
20914 #define THUMB_VARIANT & fpu_vfp_ext_v3
20915 #undef ARM_VARIANT
20916 #define ARM_VARIANT & fpu_vfp_ext_v3
20917
20918 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
20919 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20920 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20921 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20922 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20923 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20924 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20925 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20926 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20927
20928 #undef ARM_VARIANT
20929 #define ARM_VARIANT & fpu_vfp_ext_fma
20930 #undef THUMB_VARIANT
20931 #define THUMB_VARIANT & fpu_vfp_ext_fma
20932 /* Mnemonics shared by Neon and VFP. These are included in the
20933 VFP FMA variant; NEON and VFP FMA always includes the NEON
20934 FMA instructions. */
20935 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20936 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20937 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20938 the v form should always be used. */
20939 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20940 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20941 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20942 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20943 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20944 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20945
20946 #undef THUMB_VARIANT
20947 #undef ARM_VARIANT
20948 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20949
20950 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20951 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20952 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20953 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20954 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20955 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20956 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
20957 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
20958
20959 #undef ARM_VARIANT
20960 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20961
20962 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
20963 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
20964 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
20965 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
20966 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
20967 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
20968 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
20969 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
20970 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
20971 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20972 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20973 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20974 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20975 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20976 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20977 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20978 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20979 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20980 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
20981 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
20982 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20983 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20984 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20985 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20986 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20987 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20988 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
20989 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
20990 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
20991 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
20992 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
20993 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
20994 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
20995 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
20996 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
20997 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
20998 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
20999 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21000 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21001 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21002 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21003 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21004 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21005 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21006 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21007 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21008 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
21009 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21010 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21011 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21012 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21013 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21014 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21015 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21016 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21017 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21018 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21019 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21020 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21021 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21022 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21023 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21024 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21025 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21026 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21027 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21028 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21029 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21030 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21031 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21032 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21033 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21034 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21035 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21036 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21037 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21038 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21039 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21040 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21041 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21042 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21043 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21044 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21045 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21046 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21047 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21048 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21049 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21050 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
21051 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21052 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21053 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21054 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21055 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21056 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21057 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21058 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21059 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21060 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21061 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21062 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21063 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21064 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21065 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21066 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21067 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21068 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21069 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21070 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21071 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21072 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
21073 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21074 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21075 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21076 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21077 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21078 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21079 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21080 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21081 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21082 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21083 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21084 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21085 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21086 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21087 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21088 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21089 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21090 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21091 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21092 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21093 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21094 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21095 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21096 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21097 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21098 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21099 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21100 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21101 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21102 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21103 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21104 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
21105 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
21106 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
21107 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
21108 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
21109 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
21110 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21111 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21112 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21113 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
21114 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
21115 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
21116 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
21117 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
21118 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
21119 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21120 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21121 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21122 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21123 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
21124
21125 #undef ARM_VARIANT
21126 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
21127
21128 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
21129 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
21130 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
21131 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
21132 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
21133 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
21134 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21135 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21136 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21137 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21138 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21139 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21140 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21141 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21142 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21143 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21144 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21145 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21146 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21147 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21148 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
21149 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21150 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21151 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21152 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21153 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21154 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21155 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21156 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21157 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21158 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21159 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21160 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21161 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21162 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21163 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21164 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21165 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21166 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21167 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21168 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21169 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21170 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21171 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21172 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21173 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21174 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21175 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21176 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21177 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21178 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21179 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21180 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21181 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21182 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21183 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21184 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21185
21186 #undef ARM_VARIANT
21187 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
21188
21189 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21190 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21191 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21192 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21193 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21194 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21195 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21196 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21197 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
21198 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
21199 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
21200 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
21201 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
21202 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
21203 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
21204 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
21205 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
21206 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
21207 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
21208 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
21209 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
21210 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
21211 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
21212 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
21213 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
21214 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
21215 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
21216 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
21217 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
21218 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
21219 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
21220 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
21221 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
21222 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
21223 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
21224 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
21225 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
21226 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
21227 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
21228 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
21229 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
21230 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
21231 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
21232 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
21233 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
21234 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
21235 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
21236 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
21237 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
21238 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
21239 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
21240 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
21241 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
21242 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
21243 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
21244 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
21245 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
21246 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
21247 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
21248 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
21249 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
21250 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
21251 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
21252 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
21253 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21254 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21255 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21256 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21257 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21258 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21259 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21260 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21261 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21262 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21263 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21264 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21265
21266 /* ARMv8-M instructions. */
21267 #undef ARM_VARIANT
21268 #define ARM_VARIANT NULL
21269 #undef THUMB_VARIANT
21270 #define THUMB_VARIANT & arm_ext_v8m
21271 TUE("sg", 0, e97fe97f, 0, (), 0, noargs),
21272 TUE("blxns", 0, 4784, 1, (RRnpc), 0, t_blx),
21273 TUE("bxns", 0, 4704, 1, (RRnpc), 0, t_bx),
21274 TUE("tt", 0, e840f000, 2, (RRnpc, RRnpc), 0, tt),
21275 TUE("ttt", 0, e840f040, 2, (RRnpc, RRnpc), 0, tt),
21276 TUE("tta", 0, e840f080, 2, (RRnpc, RRnpc), 0, tt),
21277 TUE("ttat", 0, e840f0c0, 2, (RRnpc, RRnpc), 0, tt),
21278
21279 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21280 instructions behave as nop if no VFP is present. */
21281 #undef THUMB_VARIANT
21282 #define THUMB_VARIANT & arm_ext_v8m_main
21283 TUEc("vlldm", 0, ec300a00, 1, (RRnpc), rn),
21284 TUEc("vlstm", 0, ec200a00, 1, (RRnpc), rn),
21285 };
21286 #undef ARM_VARIANT
21287 #undef THUMB_VARIANT
21288 #undef TCE
21289 #undef TUE
21290 #undef TUF
21291 #undef TCC
21292 #undef cCE
21293 #undef cCL
21294 #undef C3E
21295 #undef CE
21296 #undef CM
21297 #undef UE
21298 #undef UF
21299 #undef UT
21300 #undef NUF
21301 #undef nUF
21302 #undef NCE
21303 #undef nCE
21304 #undef OPS0
21305 #undef OPS1
21306 #undef OPS2
21307 #undef OPS3
21308 #undef OPS4
21309 #undef OPS5
21310 #undef OPS6
21311 #undef do_0
21312 \f
21313 /* MD interface: bits in the object file. */
21314
21315 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
21316 for use in the a.out file, and stores them in the array pointed to by buf.
21317 This knows about the endian-ness of the target machine and does
21318 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
21319 2 (short) and 4 (long) Floating numbers are put out as a series of
21320 LITTLENUMS (shorts, here at least). */
21321
21322 void
21323 md_number_to_chars (char * buf, valueT val, int n)
21324 {
21325 if (target_big_endian)
21326 number_to_chars_bigendian (buf, val, n);
21327 else
21328 number_to_chars_littleendian (buf, val, n);
21329 }
21330
21331 static valueT
21332 md_chars_to_number (char * buf, int n)
21333 {
21334 valueT result = 0;
21335 unsigned char * where = (unsigned char *) buf;
21336
21337 if (target_big_endian)
21338 {
21339 while (n--)
21340 {
21341 result <<= 8;
21342 result |= (*where++ & 255);
21343 }
21344 }
21345 else
21346 {
21347 while (n--)
21348 {
21349 result <<= 8;
21350 result |= (where[n] & 255);
21351 }
21352 }
21353
21354 return result;
21355 }
21356
21357 /* MD interface: Sections. */
21358
/* Calculate the maximum variable size (i.e., excluding fr_fix)
   that an rs_machine_dependent frag may reach.  */

unsigned int
arm_frag_max_var (fragS *fragp)
{
  /* We only use rs_machine_dependent for variable-size Thumb instructions,
     which are either THUMB_SIZE (2) or INSN_SIZE (4).

     Note that we generate relaxable instructions even for cases that don't
     really need it, like an immediate that's a trivial constant.  So we're
     overestimating the instruction size for some of those cases.  Rather
     than putting more intelligence here, it would probably be better to
     avoid generating a relaxation frag in the first place when it can be
     determined up front that a short instruction will suffice.  */

  gas_assert (fragp->fr_type == rs_machine_dependent);
  /* Worst case: the wide (32-bit) Thumb encoding.  */
  return INSN_SIZE;
}
21378
/* Estimate the size of a frag before relaxing.  Assume everything fits in
   2 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp,
			       segT segtype ATTRIBUTE_UNUSED)
{
  /* Start optimistic: arm_relax_frag will grow the frag to 4 bytes if
     the narrow Thumb encoding turns out not to fit.  */
  fragp->fr_var = 2;
  return 2;
}
21389
/* Convert a machine dependent frag.  Called once relaxation has settled:
   fr_var tells us whether the narrow (2) or wide (4) Thumb encoding was
   chosen; rewrite the instruction bytes accordingly and emit the matching
   fixup for the immediate/branch offset.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The relaxable instruction lives at the end of the fixed part.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* The 16-bit encoding that was originally emitted; its register fields
     are transplanted into the 32-bit encoding below.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* SP- and PC-relative forms (opcode nibble 4 or 9) keep Rt in
	     bits 8-10 of the 16-bit encoding; the register-offset forms
	     carry Rt in bits 0-2 and Rn in bits 3-5.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the PC-relative load pseudo is PC-relative here.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Narrow adr is relative to Align(PC, 4); fold the -4 bias in.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs carry Rd in the low destination field; cmp/cmn put
	     the register in the Rn field 8 bits higher.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Move the condition field into the 32-bit encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 distinguishes the flag-setting (add/sub) variant.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
21563
21564 /* Return the size of a relaxable immediate operand instruction.
21565 SHIFT and SIZE specify the form of the allowable immediate. */
21566 static int
21567 relax_immediate (fragS *fragp, int size, int shift)
21568 {
21569 offsetT offset;
21570 offsetT mask;
21571 offsetT low;
21572
21573 /* ??? Should be able to do better than this. */
21574 if (fragp->fr_symbol)
21575 return 4;
21576
21577 low = (1 << shift) - 1;
21578 mask = (1 << (shift + size)) - (1 << shift);
21579 offset = fragp->fr_offset;
21580 /* Force misaligned offsets to 32-bit variant. */
21581 if (offset & low)
21582 return 4;
21583 if (offset & ~mask)
21584 return 4;
21585 return 2;
21586 }
21587
/* Get the address of a symbol during relaxation.  STRETCH is the amount
   the current pass has moved frags so far; it is used to estimate the
   address of symbols whose frag has not yet been reached this pass.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the stretch towards zero to the alignment
		 boundary the intervening align frag will absorb.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the stretch if the symbol's frag lies ahead of us.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
21637
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
   load.  The narrow form requires a 4-byte-aligned target within
   [0, 1020] bytes of Align(PC, 4).  */
static int
relax_adr (fragS *fragp, asection *sec, long stretch)
{
  addressT addr;
  offsetT val;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (fragp->fr_symbol == NULL
      || !S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix;
  /* The base is the instruction's PC rounded down to a word boundary.  */
  addr = (addr + 4) & ~3;
  /* Force misaligned targets to 32-bit variant.  */
  if (val & 3)
    return 4;
  val -= addr;
  if (val < 0 || val > 1020)
    return 4;
  return 2;
}
21664
21665 /* Return the size of a relaxable add/sub immediate instruction. */
21666 static int
21667 relax_addsub (fragS *fragp, asection *sec)
21668 {
21669 char *buf;
21670 int op;
21671
21672 buf = fragp->fr_literal + fragp->fr_fix;
21673 op = bfd_get_16(sec->owner, buf);
21674 if ((op & 0xf) == ((op >> 4) & 0xf))
21675 return relax_immediate (fragp, 8, 0);
21676 else
21677 return relax_immediate (fragp, 3, 0);
21678 }
21679
/* Return TRUE iff the definition of symbol S could be pre-empted
   (overridden) at link or load time.  Pre-emptible branch targets
   cannot use the relaxed short branch forms.  */
static bfd_boolean
symbol_preemptible (symbolS *s)
{
  /* Weak symbols can always be pre-empted.  */
  if (S_IS_WEAK (s))
    return TRUE;

  /* Non-global symbols cannot be pre-empted.  */
  if (! S_IS_EXTERNAL (s))
    return FALSE;

#ifdef OBJ_ELF
  /* In ELF, a global symbol can be marked protected, or private.  In that
     case it can't be pre-empted (other definitions in the same link unit
     would violate the ODR).  */
  if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
    return FALSE;
#endif

  /* Other global symbols might be pre-empted.  */
  return TRUE;
}
21704
/* Return the size of a relaxable branch instruction.  BITS is the
   size of the offset field in the narrow instruction.  */

static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

#ifdef OBJ_ELF
  /* A branch to a function in ARM state will require interworking.  */
  if (S_IS_DEFINED (fragp->fr_symbol)
      && ARM_IS_FUNC (fragp->fr_symbol))
    return 4;
#endif

  /* A pre-emptible target may be redirected by the linker; keep the
     wide form so the reach is sufficient.  */
  if (symbol_preemptible (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  /* Branch offsets are relative to PC, i.e. instruction address + 4.  */
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2 */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;
  return 2;
}
21741
21742
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  Dispatches on the
   relaxable mnemonic recorded in fr_subtype.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit immediate, scaled by 4.  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      /* 5-bit immediate, scaled by 4.  */
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* 5-bit immediate, scaled by 2.  */
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* 5-bit immediate, unscaled.  */
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
21821
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED,
		  valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
	 this, BFD will align it for us, but it will not write out the
	 final bytes of the section.  This may be a bug in BFD, but it is
	 easier to fix it here since that is how the other a.out targets
	 work.  */
      int align;

      align = bfd_get_section_alignment (stdoutput, segment);
      size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
    }
#endif

  /* ELF and COFF need no rounding here.  */
  return size;
}
21845
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: zero bytes up to the first instruction
   boundary, then nop instructions matching the frag's ARM/Thumb mode
   and the selected architecture.  */

void
arm_handle_align (fragS * fragP)
{
  /* Nop encodings, indexed by [architecture variant][endianness].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb padding.  Thumb-2 capable targets can mix narrow and wide
	 nops; older ones only have the 2-byte form.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      /* ARM padding: prefer the architectural nop when v6k is available.  */
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  if (bytes & (noop_size - 1))
    {
      /* Zero-fill up to an instruction boundary, marking the filler as
	 data for the mapping symbols.  */
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
21965
21966 /* Called from md_do_align. Used to create an alignment
21967 frag in a code section. */
21968
21969 void
21970 arm_frag_align_code (int n, int max)
21971 {
21972 char * p;
21973
21974 /* We assume that there will never be a requirement
21975 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21976 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
21977 {
21978 char err_msg[128];
21979
21980 sprintf (err_msg,
21981 _("alignments greater than %d bytes not supported in .text sections."),
21982 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
21983 as_fatal ("%s", err_msg);
21984 }
21985
21986 p = frag_var (rs_align_code,
21987 MAX_MEM_FOR_RS_ALIGN_CODE,
21988 1,
21989 (relax_substateT) max,
21990 (symbolS *) NULL,
21991 (offsetT) n,
21992 (char *) NULL);
21993 *p = 0;
21994 }
21995
21996 /* Perform target specific initialisation of a frag.
21997 Note - despite the name this initialisation is not done when the frag
21998 is created, but only when its type is assigned. A frag can be created
21999 and used a long time before its type is set, so beware of assuming that
22000 this initialisation is performed first. */
22001
22002 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
22009
22010 #else /* OBJ_ELF is defined. */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  bfd_boolean frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
    return;

  /* Strip the MODE_RECORDED marker to recover the recorded mode bit.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
22044
/* Hook called when the assembler changes section.  (The comment in
   older revisions mentioned mapping symbols; the only work done here
   is fixing up unwind index table links.)  */

void
arm_elf_change_section (void)
{
  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;
}
22055
22056 int
22057 arm_elf_section_type (const char * str, size_t len)
22058 {
22059 if (len == 5 && strncmp (str, "exidx", 5) == 0)
22060 return SHT_ARM_EXIDX;
22061
22062 return -1;
22063 }
22064 \f
22065 /* Code to deal with unwinding tables. */
22066
22067 static void add_unwind_adjustsp (offsetT);
22068
22069 /* Generate any deferred unwind frame offset. */
22070
22071 static void
22072 flush_pending_unwind (void)
22073 {
22074 offsetT offset;
22075
22076 offset = unwind.pending_offset;
22077 unwind.pending_offset = 0;
22078 if (offset != 0)
22079 add_unwind_adjustsp (offset);
22080 }
22081
/* Add an opcode to this list for this function.  Two-byte opcodes should
   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
   order.  */

static void
add_unwind_opcode (valueT op, int length)
{
  /* Add any deferred stack adjustment.  */
  if (unwind.pending_offset)
    flush_pending_unwind ();

  unwind.sp_restored = 0;

  /* Grow the opcode buffer in ARM_OPCODE_CHUNK_SIZE steps.  */
  if (unwind.opcode_count + length > unwind.opcode_alloc)
    {
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
      if (unwind.opcodes)
	unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
				     unwind.opcode_alloc);
      else
	unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
    }
  /* Append bytes least-significant first; since the list itself is in
     reverse order this emits op[0] before op[1] in the final table.  */
  while (length > 0)
    {
      length--;
      unwind.opcodes[unwind.opcode_count] = op & 0xff;
      op >>= 8;
      unwind.opcode_count++;
    }
}
22112
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes
   (positive = deallocate).  Chooses between the short (0x00..0x3f),
   two-opcode, and long uleb128 (0xb2) EHABI encodings.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  Bytes go in reverse so the table reads forward.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Stack grows: emit 0x7f (sp -= 0x100) repeats plus remainder.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
22174
/* Finish the list of unwind opcodes for this function.  */

static void
finish_unwind_opcodes (void)
{
  valueT op;

  if (unwind.fp_used)
    {
      /* Adjust sp as necessary.  */
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
      flush_pending_unwind ();

      /* After restoring sp from the frame pointer.  */
      op = 0x90 | unwind.fp_reg;
      add_unwind_opcode (op, 1);
    }
  else
    flush_pending_unwind ();
}
22195
22196
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry.  Switches to the unwind section that corresponds to TEXT_SEG,
   creating it (with matching linkonce/COMDAT attributes) if needed.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  /* Index tables use .ARM.exidx*, unwind data uses .ARM.extab*.  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, 0, flags, 0, group_name,
			  linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
22264
22265
22266 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
22267 personality routine data. Returns zero, or the index table value for
22268 an inline entry. */
22269
static valueT
create_unwind_entry (int have_data)
{
  /* Number of additional 32-bit data words (beyond the first) needed
     for the table entry.  */
  int size;
  addressT where;
  char *ptr;
  /* The current word of data. */
  valueT data;
  /* The number of bytes left in this word. */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section. */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* personality_index -2 is the .cantunwind marker; no unwind data
	 (and hence no handler data) is permitted.  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND. */
	}

      /* Use a default personality routine if none is specified. */
      if (unwind.personality_index == -1)
	{
	  /* Routine 0 can hold at most 3 inline opcode bytes; fall back
	     to routine 1 for longer sequences.  */
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry. */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table. */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes. */
	      while (n--)
		data = (data << 8) | 0xb0;

	      /* The caller stores this word directly in the index table;
		 no separate table entry is emitted.  */
	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word. */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this. */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count. */
      size = unwind.opcode_count + 1;
    }

  /* Round the opcode byte count up to whole 32-bit words; the word
     count field is only 8 bits wide.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* The table entry must be 4-byte aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry. */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used. */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation? */
      /* Custom personality routine. */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words. */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines. */
    case 0:
      /* Three opcodes bytes are packed into the first word. */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word. */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen. */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  unwind.opcodes was built last-opcode-first, so walking it
     backwards emits them in execution order.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word. */
  if (n < 4)
    {
      /* Pad with "finish" opcodes. */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data. */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
22434
22435
22436 /* Initialize the DWARF-2 unwind information for this procedure. */
22437
void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
22443 #endif /* OBJ_ELF */
22444
22445 /* Convert REGNAME to a DWARF-2 register number. */
22446
22447 int
22448 tc_arm_regname_to_dw2regnum (char *regname)
22449 {
22450 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
22451 if (reg != FAIL)
22452 return reg;
22453
22454 /* PR 16694: Allow VFP registers as well. */
22455 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
22456 if (reg != FAIL)
22457 return 64 + reg;
22458
22459 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
22460 if (reg != FAIL)
22461 return reg + 256;
22462
22463 return FAIL;
22464 }
22465
22466 #ifdef TE_PE
22467 void
22468 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
22469 {
22470 expressionS exp;
22471
22472 exp.X_op = O_secrel;
22473 exp.X_add_symbol = symbol;
22474 exp.X_add_number = 0;
22475 emit_expr (&exp, size);
22476 }
22477 #endif
22478
22479 /* MD interface: Symbol and relocation handling. */
22480
22481 /* Return the address within the segment that a PC-relative fixup is
22482 relative to. For ARM, PC-relative fixups applied to instructions
22483 are generally relative to the location of the fixup plus 8 bytes.
22484 Thumb branches are offset by 4, and Thumb loads relative to PC
22485 require special handling. */
22486
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* NOTE(review): for a local, non-forced-reloc function symbol on a
	 v5T+ target the unbiased address is restored here, presumably
	 because the BL may be converted to BLX and resolved by the
	 assembler itself rather than the linker — confirm against the
	 fixup-processing code.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
22608
22609 static bfd_boolean flag_warn_syms = TRUE;
22610
22611 bfd_boolean
22612 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
22613 {
22614 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22615 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22616 does mean that the resulting code might be very confusing to the reader.
22617 Also this warning can be triggered if the user omits an operand before
22618 an immediate address, eg:
22619
22620 LDR =foo
22621
22622 GAS treats this as an assignment of the value of the symbol foo to a
22623 symbol LDR, and so (without this code) it will not issue any kind of
22624 warning or error message.
22625
22626 Note - ARM instructions are case-insensitive but the strings in the hash
22627 table are all stored in lower case, so we must first ensure that name is
22628 lower case too. */
22629 if (flag_warn_syms && arm_ops_hsh)
22630 {
22631 char * nbuf = strdup (name);
22632 char * p;
22633
22634 for (p = nbuf; *p; p++)
22635 *p = TOLOWER (*p);
22636 if (hash_find (arm_ops_hsh, nbuf) != NULL)
22637 {
22638 static struct hash_control * already_warned = NULL;
22639
22640 if (already_warned == NULL)
22641 already_warned = hash_new ();
22642 /* Only warn about the symbol once. To keep the code
22643 simple we let hash_insert do the lookup for us. */
22644 if (hash_insert (already_warned, name, NULL) == NULL)
22645 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
22646 }
22647 else
22648 free (nbuf);
22649 }
22650
22651 return FALSE;
22652 }
22653
22654 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22655 Otherwise we have no need to default values of symbols. */
22656
22657 symbolS *
22658 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
22659 {
22660 #ifdef OBJ_ELF
22661 if (name[0] == '_' && name[1] == 'G'
22662 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
22663 {
22664 if (!GOT_symbol)
22665 {
22666 if (symbol_find (name))
22667 as_bad (_("GOT already in the symbol table"));
22668
22669 GOT_symbol = symbol_new (name, undefined_section,
22670 (valueT) 0, & zero_address_frag);
22671 }
22672
22673 return GOT_symbol;
22674 }
22675 #endif
22676
22677 return NULL;
22678 }
22679
22680 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22681 computed as two separate immediate values, added together. We
22682 already know that this value cannot be computed by just one ARM
22683 instruction. */
22684
static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* An ARM modified immediate is an 8-bit value rotated right by an even
     amount.  Try each even rotation until the low byte of the rotated
     value is non-zero; that byte becomes the low part.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	/* The remaining bits must fit in a second 8-bit immediate with its
	   own rotation; (i + K) << 7 places the adjusted rotation count / 2
	   into bits 8-11 of the instruction encoding.  */
	if (a & 0xff00)
	  {
	    /* Second byte is bits 8-15; anything above 16 bits cannot be
	       split into two immediates at this rotation.  */
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    /* Only bits 24-31 can remain at this point.  */
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	/* Low part: 8-bit value in bits 0-7, rotation / 2 in bits 8-11
	   (i << 7 == (i / 2) << 8).  */
	return (a & 0xff) | (i << 7);
      }

  /* VAL cannot be expressed as the sum of two modified immediates.  */
  return FAIL;
}
22718
22719 static int
22720 validate_offset_imm (unsigned int val, int hwse)
22721 {
22722 if ((hwse && val > 255) || val > 4095)
22723 return FAIL;
22724 return val;
22725 }
22726
22727 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22728 negative immediate constant by altering the instruction. A bit of
22729 a hack really.
22730 MOV <-> MVN
22731 AND <-> BIC
22732 ADC <-> SBC
22733 by inverting the second operand, and
22734 ADD <-> SUB
22735 CMP <-> CMN
22736 by negating the second operand. */
22737
static int
negate_data_op (unsigned long * instruction,
		unsigned long value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  /* Candidate encodings of -VALUE and ~VALUE as ARM modified immediates;
     either may be FAIL if not representable.  */
  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:		 /* ADD <-> SUB	 */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:		 /* CMP <-> CMN	 */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:		 /* MOV <-> MVN	 */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:		 /* AND <-> BIC	 */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:		 /* ADC <-> SBC	 */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.	 */
    default:
      return FAIL;
    }

  /* The chosen alternative immediate may itself be unencodable.  */
  if (value == (unsigned) FAIL)
    return FAIL;

  /* Splice the replacement opcode into the instruction, preserving the
     condition code, S bit and register operands.  */
  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
22815
22816 /* Like negate_data_op, but for Thumb-2. */
22817
static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  /* Candidate encodings of -VALUE and ~VALUE as Thumb-2 modified
     immediates; either may be FAIL if not representable.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* Rd == 15 means this AND is really TST, which cannot be
	 inverted.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* The chosen alternative immediate may itself be unencodable.  */
  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Splice the replacement opcode into the instruction, preserving all
     other fields.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
22891
22892 /* Read a 32-bit thumb instruction from buf. */
22893
22894 static unsigned long
22895 get_thumb32_insn (char * buf)
22896 {
22897 unsigned long insn;
22898 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
22899 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22900
22901 return insn;
22902 }
22903
22904 /* We usually want to set the low bit on the address of thumb function
22905 symbols. In particular .word foo - . should have the low bit set.
22906 Generic code tries to fold the difference of two symbols to
22907 a constant. Prevent this and force a relocation when the first symbols
22908 is a thumb function. */
22909
22910 bfd_boolean
22911 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
22912 {
22913 if (op == O_subtract
22914 && l->X_op == O_symbol
22915 && r->X_op == O_symbol
22916 && THUMB_IS_FUNC (l->X_add_symbol))
22917 {
22918 l->X_op = O_subtract;
22919 l->X_op_symbol = r->X_add_symbol;
22920 l->X_add_number -= r->X_add_number;
22921 return TRUE;
22922 }
22923
22924 /* Process as normal. */
22925 return FALSE;
22926 }
22927
22928 /* Encode Thumb2 unconditional branches and calls. The encoding
22929 for the 2 are identical for the immediate values. */
22930
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Decompose the 25-bit signed offset into the T32 branch fields:
     sign bit S, intermediate bits I1/I2, and the high/low immediates.  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  /* First halfword: S in bit 10, imm10 in bits 0-9.  */
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* Second halfword: J1 = ~(I1 ^ S) in bit 13, J2 = ~(I2 ^ S) in bit 11
     (the trailing XOR with T2I1I2MASK performs the inversion), imm11 in
     bits 0-10.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
22952
22953 void
22954 md_apply_fix (fixS * fixP,
22955 valueT * valP,
22956 segT seg)
22957 {
22958 offsetT value = * valP;
22959 offsetT newval;
22960 unsigned int newimm;
22961 unsigned long temp;
22962 int sign;
22963 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
22964
22965 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
22966
22967 /* Note whether this will delete the relocation. */
22968
22969 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
22970 fixP->fx_done = 1;
22971
22972 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22973 consistency with the behaviour on 32-bit hosts. Remember value
22974 for emit_reloc. */
22975 value &= 0xffffffff;
22976 value ^= 0x80000000;
22977 value -= 0x80000000;
22978
22979 *valP = value;
22980 fixP->fx_addnumber = value;
22981
22982 /* Same treatment for fixP->fx_offset. */
22983 fixP->fx_offset &= 0xffffffff;
22984 fixP->fx_offset ^= 0x80000000;
22985 fixP->fx_offset -= 0x80000000;
22986
22987 switch (fixP->fx_r_type)
22988 {
22989 case BFD_RELOC_NONE:
22990 /* This will need to go in the object file. */
22991 fixP->fx_done = 0;
22992 break;
22993
22994 case BFD_RELOC_ARM_IMMEDIATE:
22995 /* We claim that this fixup has been processed here,
22996 even if in fact we generate an error because we do
22997 not have a reloc for it, so tc_gen_reloc will reject it. */
22998 fixP->fx_done = 1;
22999
23000 if (fixP->fx_addsy)
23001 {
23002 const char *msg = 0;
23003
23004 if (! S_IS_DEFINED (fixP->fx_addsy))
23005 msg = _("undefined symbol %s used as an immediate value");
23006 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23007 msg = _("symbol %s is in a different section");
23008 else if (S_IS_WEAK (fixP->fx_addsy))
23009 msg = _("symbol %s is weak and may be overridden later");
23010
23011 if (msg)
23012 {
23013 as_bad_where (fixP->fx_file, fixP->fx_line,
23014 msg, S_GET_NAME (fixP->fx_addsy));
23015 break;
23016 }
23017 }
23018
23019 temp = md_chars_to_number (buf, INSN_SIZE);
23020
23021 /* If the offset is negative, we should use encoding A2 for ADR. */
23022 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
23023 newimm = negate_data_op (&temp, value);
23024 else
23025 {
23026 newimm = encode_arm_immediate (value);
23027
23028 /* If the instruction will fail, see if we can fix things up by
23029 changing the opcode. */
23030 if (newimm == (unsigned int) FAIL)
23031 newimm = negate_data_op (&temp, value);
23032 /* MOV accepts both ARM modified immediate (A1 encoding) and
23033 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
23034 When disassembling, MOV is preferred when there is no encoding
23035 overlap. */
23036 if (newimm == (unsigned int) FAIL
23037 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
23038 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
23039 && !((temp >> SBIT_SHIFT) & 0x1)
23040 && value >= 0 && value <= 0xffff)
23041 {
23042 /* Clear bits[23:20] to change encoding from A1 to A2. */
23043 temp &= 0xff0fffff;
23044 /* Encoding high 4bits imm. Code below will encode the remaining
23045 low 12bits. */
23046 temp |= (value & 0x0000f000) << 4;
23047 newimm = value & 0x00000fff;
23048 }
23049 }
23050
23051 if (newimm == (unsigned int) FAIL)
23052 {
23053 as_bad_where (fixP->fx_file, fixP->fx_line,
23054 _("invalid constant (%lx) after fixup"),
23055 (unsigned long) value);
23056 break;
23057 }
23058
23059 newimm |= (temp & 0xfffff000);
23060 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23061 break;
23062
23063 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
23064 {
23065 unsigned int highpart = 0;
23066 unsigned int newinsn = 0xe1a00000; /* nop. */
23067
23068 if (fixP->fx_addsy)
23069 {
23070 const char *msg = 0;
23071
23072 if (! S_IS_DEFINED (fixP->fx_addsy))
23073 msg = _("undefined symbol %s used as an immediate value");
23074 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23075 msg = _("symbol %s is in a different section");
23076 else if (S_IS_WEAK (fixP->fx_addsy))
23077 msg = _("symbol %s is weak and may be overridden later");
23078
23079 if (msg)
23080 {
23081 as_bad_where (fixP->fx_file, fixP->fx_line,
23082 msg, S_GET_NAME (fixP->fx_addsy));
23083 break;
23084 }
23085 }
23086
23087 newimm = encode_arm_immediate (value);
23088 temp = md_chars_to_number (buf, INSN_SIZE);
23089
23090 /* If the instruction will fail, see if we can fix things up by
23091 changing the opcode. */
23092 if (newimm == (unsigned int) FAIL
23093 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
23094 {
23095 /* No ? OK - try using two ADD instructions to generate
23096 the value. */
23097 newimm = validate_immediate_twopart (value, & highpart);
23098
23099 /* Yes - then make sure that the second instruction is
23100 also an add. */
23101 if (newimm != (unsigned int) FAIL)
23102 newinsn = temp;
23103 /* Still No ? Try using a negated value. */
23104 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
23105 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
23106 /* Otherwise - give up. */
23107 else
23108 {
23109 as_bad_where (fixP->fx_file, fixP->fx_line,
23110 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
23111 (long) value);
23112 break;
23113 }
23114
23115 /* Replace the first operand in the 2nd instruction (which
23116 is the PC) with the destination register. We have
23117 already added in the PC in the first instruction and we
23118 do not want to do it again. */
23119 newinsn &= ~ 0xf0000;
23120 newinsn |= ((newinsn & 0x0f000) << 4);
23121 }
23122
23123 newimm |= (temp & 0xfffff000);
23124 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23125
23126 highpart |= (newinsn & 0xfffff000);
23127 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
23128 }
23129 break;
23130
23131 case BFD_RELOC_ARM_OFFSET_IMM:
23132 if (!fixP->fx_done && seg->use_rela_p)
23133 value = 0;
23134 /* Fall through. */
23135
23136 case BFD_RELOC_ARM_LITERAL:
23137 sign = value > 0;
23138
23139 if (value < 0)
23140 value = - value;
23141
23142 if (validate_offset_imm (value, 0) == FAIL)
23143 {
23144 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
23145 as_bad_where (fixP->fx_file, fixP->fx_line,
23146 _("invalid literal constant: pool needs to be closer"));
23147 else
23148 as_bad_where (fixP->fx_file, fixP->fx_line,
23149 _("bad immediate value for offset (%ld)"),
23150 (long) value);
23151 break;
23152 }
23153
23154 newval = md_chars_to_number (buf, INSN_SIZE);
23155 if (value == 0)
23156 newval &= 0xfffff000;
23157 else
23158 {
23159 newval &= 0xff7ff000;
23160 newval |= value | (sign ? INDEX_UP : 0);
23161 }
23162 md_number_to_chars (buf, newval, INSN_SIZE);
23163 break;
23164
23165 case BFD_RELOC_ARM_OFFSET_IMM8:
23166 case BFD_RELOC_ARM_HWLITERAL:
23167 sign = value > 0;
23168
23169 if (value < 0)
23170 value = - value;
23171
23172 if (validate_offset_imm (value, 1) == FAIL)
23173 {
23174 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
23175 as_bad_where (fixP->fx_file, fixP->fx_line,
23176 _("invalid literal constant: pool needs to be closer"));
23177 else
23178 as_bad_where (fixP->fx_file, fixP->fx_line,
23179 _("bad immediate value for 8-bit offset (%ld)"),
23180 (long) value);
23181 break;
23182 }
23183
23184 newval = md_chars_to_number (buf, INSN_SIZE);
23185 if (value == 0)
23186 newval &= 0xfffff0f0;
23187 else
23188 {
23189 newval &= 0xff7ff0f0;
23190 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
23191 }
23192 md_number_to_chars (buf, newval, INSN_SIZE);
23193 break;
23194
23195 case BFD_RELOC_ARM_T32_OFFSET_U8:
23196 if (value < 0 || value > 1020 || value % 4 != 0)
23197 as_bad_where (fixP->fx_file, fixP->fx_line,
23198 _("bad immediate value for offset (%ld)"), (long) value);
23199 value /= 4;
23200
23201 newval = md_chars_to_number (buf+2, THUMB_SIZE);
23202 newval |= value;
23203 md_number_to_chars (buf+2, newval, THUMB_SIZE);
23204 break;
23205
23206 case BFD_RELOC_ARM_T32_OFFSET_IMM:
23207 /* This is a complicated relocation used for all varieties of Thumb32
23208 load/store instruction with immediate offset:
23209
23210 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
23211 *4, optional writeback(W)
23212 (doubleword load/store)
23213
23214 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
23215 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
23216 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
23217 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
23218 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
23219
23220 Uppercase letters indicate bits that are already encoded at
23221 this point. Lowercase letters are our problem. For the
23222 second block of instructions, the secondary opcode nybble
23223 (bits 8..11) is present, and bit 23 is zero, even if this is
23224 a PC-relative operation. */
23225 newval = md_chars_to_number (buf, THUMB_SIZE);
23226 newval <<= 16;
23227 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
23228
23229 if ((newval & 0xf0000000) == 0xe0000000)
23230 {
23231 /* Doubleword load/store: 8-bit offset, scaled by 4. */
23232 if (value >= 0)
23233 newval |= (1 << 23);
23234 else
23235 value = -value;
23236 if (value % 4 != 0)
23237 {
23238 as_bad_where (fixP->fx_file, fixP->fx_line,
23239 _("offset not a multiple of 4"));
23240 break;
23241 }
23242 value /= 4;
23243 if (value > 0xff)
23244 {
23245 as_bad_where (fixP->fx_file, fixP->fx_line,
23246 _("offset out of range"));
23247 break;
23248 }
23249 newval &= ~0xff;
23250 }
23251 else if ((newval & 0x000f0000) == 0x000f0000)
23252 {
23253 /* PC-relative, 12-bit offset. */
23254 if (value >= 0)
23255 newval |= (1 << 23);
23256 else
23257 value = -value;
23258 if (value > 0xfff)
23259 {
23260 as_bad_where (fixP->fx_file, fixP->fx_line,
23261 _("offset out of range"));
23262 break;
23263 }
23264 newval &= ~0xfff;
23265 }
23266 else if ((newval & 0x00000100) == 0x00000100)
23267 {
23268 /* Writeback: 8-bit, +/- offset. */
23269 if (value >= 0)
23270 newval |= (1 << 9);
23271 else
23272 value = -value;
23273 if (value > 0xff)
23274 {
23275 as_bad_where (fixP->fx_file, fixP->fx_line,
23276 _("offset out of range"));
23277 break;
23278 }
23279 newval &= ~0xff;
23280 }
23281 else if ((newval & 0x00000f00) == 0x00000e00)
23282 {
23283 /* T-instruction: positive 8-bit offset. */
23284 if (value < 0 || value > 0xff)
23285 {
23286 as_bad_where (fixP->fx_file, fixP->fx_line,
23287 _("offset out of range"));
23288 break;
23289 }
23290 newval &= ~0xff;
23291 newval |= value;
23292 }
23293 else
23294 {
23295 /* Positive 12-bit or negative 8-bit offset. */
23296 int limit;
23297 if (value >= 0)
23298 {
23299 newval |= (1 << 23);
23300 limit = 0xfff;
23301 }
23302 else
23303 {
23304 value = -value;
23305 limit = 0xff;
23306 }
23307 if (value > limit)
23308 {
23309 as_bad_where (fixP->fx_file, fixP->fx_line,
23310 _("offset out of range"));
23311 break;
23312 }
23313 newval &= ~limit;
23314 }
23315
23316 newval |= value;
23317 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
23318 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
23319 break;
23320
23321 case BFD_RELOC_ARM_SHIFT_IMM:
23322 newval = md_chars_to_number (buf, INSN_SIZE);
23323 if (((unsigned long) value) > 32
23324 || (value == 32
23325 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
23326 {
23327 as_bad_where (fixP->fx_file, fixP->fx_line,
23328 _("shift expression is too large"));
23329 break;
23330 }
23331
23332 if (value == 0)
23333 /* Shifts of zero must be done as lsl. */
23334 newval &= ~0x60;
23335 else if (value == 32)
23336 value = 0;
23337 newval &= 0xfffff07f;
23338 newval |= (value & 0x1f) << 7;
23339 md_number_to_chars (buf, newval, INSN_SIZE);
23340 break;
23341
23342 case BFD_RELOC_ARM_T32_IMMEDIATE:
23343 case BFD_RELOC_ARM_T32_ADD_IMM:
23344 case BFD_RELOC_ARM_T32_IMM12:
23345 case BFD_RELOC_ARM_T32_ADD_PC12:
23346 /* We claim that this fixup has been processed here,
23347 even if in fact we generate an error because we do
23348 not have a reloc for it, so tc_gen_reloc will reject it. */
23349 fixP->fx_done = 1;
23350
23351 if (fixP->fx_addsy
23352 && ! S_IS_DEFINED (fixP->fx_addsy))
23353 {
23354 as_bad_where (fixP->fx_file, fixP->fx_line,
23355 _("undefined symbol %s used as an immediate value"),
23356 S_GET_NAME (fixP->fx_addsy));
23357 break;
23358 }
23359
23360 newval = md_chars_to_number (buf, THUMB_SIZE);
23361 newval <<= 16;
23362 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
23363
23364 newimm = FAIL;
23365 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23366 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
23367 Thumb2 modified immediate encoding (T2). */
23368 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
23369 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23370 {
23371 newimm = encode_thumb32_immediate (value);
23372 if (newimm == (unsigned int) FAIL)
23373 newimm = thumb32_negate_data_op (&newval, value);
23374 }
23375 if (newimm == (unsigned int) FAIL)
23376 {
23377 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
23378 {
23379 /* Turn add/sum into addw/subw. */
23380 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23381 newval = (newval & 0xfeffffff) | 0x02000000;
23382 /* No flat 12-bit imm encoding for addsw/subsw. */
23383 if ((newval & 0x00100000) == 0)
23384 {
23385 /* 12 bit immediate for addw/subw. */
23386 if (value < 0)
23387 {
23388 value = -value;
23389 newval ^= 0x00a00000;
23390 }
23391 if (value > 0xfff)
23392 newimm = (unsigned int) FAIL;
23393 else
23394 newimm = value;
23395 }
23396 }
23397 else
23398 {
23399 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
23400 UINT16 (T3 encoding), MOVW only accepts UINT16. When
23401 disassembling, MOV is preferred when there is no encoding
23402 overlap.
23403 NOTE: MOV is using ORR opcode under Thumb 2 mode. */
23404 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
23405 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
23406 && !((newval >> T2_SBIT_SHIFT) & 0x1)
23407 && value >= 0 && value <=0xffff)
23408 {
23409 /* Toggle bit[25] to change encoding from T2 to T3. */
23410 newval ^= 1 << 25;
23411 /* Clear bits[19:16]. */
23412 newval &= 0xfff0ffff;
23413 /* Encoding high 4bits imm. Code below will encode the
23414 remaining low 12bits. */
23415 newval |= (value & 0x0000f000) << 4;
23416 newimm = value & 0x00000fff;
23417 }
23418 }
23419 }
23420
23421 if (newimm == (unsigned int)FAIL)
23422 {
23423 as_bad_where (fixP->fx_file, fixP->fx_line,
23424 _("invalid constant (%lx) after fixup"),
23425 (unsigned long) value);
23426 break;
23427 }
23428
23429 newval |= (newimm & 0x800) << 15;
23430 newval |= (newimm & 0x700) << 4;
23431 newval |= (newimm & 0x0ff);
23432
23433 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
23434 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
23435 break;
23436
23437 case BFD_RELOC_ARM_SMC:
23438 if (((unsigned long) value) > 0xffff)
23439 as_bad_where (fixP->fx_file, fixP->fx_line,
23440 _("invalid smc expression"));
23441 newval = md_chars_to_number (buf, INSN_SIZE);
23442 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23443 md_number_to_chars (buf, newval, INSN_SIZE);
23444 break;
23445
23446 case BFD_RELOC_ARM_HVC:
23447 if (((unsigned long) value) > 0xffff)
23448 as_bad_where (fixP->fx_file, fixP->fx_line,
23449 _("invalid hvc expression"));
23450 newval = md_chars_to_number (buf, INSN_SIZE);
23451 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23452 md_number_to_chars (buf, newval, INSN_SIZE);
23453 break;
23454
23455 case BFD_RELOC_ARM_SWI:
23456 if (fixP->tc_fix_data != 0)
23457 {
23458 if (((unsigned long) value) > 0xff)
23459 as_bad_where (fixP->fx_file, fixP->fx_line,
23460 _("invalid swi expression"));
23461 newval = md_chars_to_number (buf, THUMB_SIZE);
23462 newval |= value;
23463 md_number_to_chars (buf, newval, THUMB_SIZE);
23464 }
23465 else
23466 {
23467 if (((unsigned long) value) > 0x00ffffff)
23468 as_bad_where (fixP->fx_file, fixP->fx_line,
23469 _("invalid swi expression"));
23470 newval = md_chars_to_number (buf, INSN_SIZE);
23471 newval |= value;
23472 md_number_to_chars (buf, newval, INSN_SIZE);
23473 }
23474 break;
23475
23476 case BFD_RELOC_ARM_MULTI:
23477 if (((unsigned long) value) > 0xffff)
23478 as_bad_where (fixP->fx_file, fixP->fx_line,
23479 _("invalid expression in load/store multiple"));
23480 newval = value | md_chars_to_number (buf, INSN_SIZE);
23481 md_number_to_chars (buf, newval, INSN_SIZE);
23482 break;
23483
23484 #ifdef OBJ_ELF
23485 case BFD_RELOC_ARM_PCREL_CALL:
23486
23487 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23488 && fixP->fx_addsy
23489 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23490 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23491 && THUMB_IS_FUNC (fixP->fx_addsy))
23492 /* Flip the bl to blx. This is a simple flip
23493 bit here because we generate PCREL_CALL for
23494 unconditional bls. */
23495 {
23496 newval = md_chars_to_number (buf, INSN_SIZE);
23497 newval = newval | 0x10000000;
23498 md_number_to_chars (buf, newval, INSN_SIZE);
23499 temp = 1;
23500 fixP->fx_done = 1;
23501 }
23502 else
23503 temp = 3;
23504 goto arm_branch_common;
23505
23506 case BFD_RELOC_ARM_PCREL_JUMP:
23507 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23508 && fixP->fx_addsy
23509 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23510 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23511 && THUMB_IS_FUNC (fixP->fx_addsy))
23512 {
23513 /* This would map to a bl<cond>, b<cond>,
23514 b<always> to a Thumb function. We
23515 need to force a relocation for this particular
23516 case. */
23517 newval = md_chars_to_number (buf, INSN_SIZE);
23518 fixP->fx_done = 0;
23519 }
23520 /* Fall through. */
23521
23522 case BFD_RELOC_ARM_PLT32:
23523 #endif
23524 case BFD_RELOC_ARM_PCREL_BRANCH:
23525 temp = 3;
23526 goto arm_branch_common;
23527
23528 case BFD_RELOC_ARM_PCREL_BLX:
23529
23530 temp = 1;
23531 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23532 && fixP->fx_addsy
23533 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23534 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23535 && ARM_IS_FUNC (fixP->fx_addsy))
23536 {
23537 /* Flip the blx to a bl and warn. */
23538 const char *name = S_GET_NAME (fixP->fx_addsy);
23539 newval = 0xeb000000;
23540 as_warn_where (fixP->fx_file, fixP->fx_line,
23541 _("blx to '%s' an ARM ISA state function changed to bl"),
23542 name);
23543 md_number_to_chars (buf, newval, INSN_SIZE);
23544 temp = 3;
23545 fixP->fx_done = 1;
23546 }
23547
23548 #ifdef OBJ_ELF
23549 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23550 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
23551 #endif
23552
23553 arm_branch_common:
23554 /* We are going to store value (shifted right by two) in the
23555 instruction, in a 24 bit, signed field. Bits 26 through 32 either
23556 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
23557 also be clear. */
23558 if (value & temp)
23559 as_bad_where (fixP->fx_file, fixP->fx_line,
23560 _("misaligned branch destination"));
23561 if ((value & (offsetT)0xfe000000) != (offsetT)0
23562 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
23563 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23564
23565 if (fixP->fx_done || !seg->use_rela_p)
23566 {
23567 newval = md_chars_to_number (buf, INSN_SIZE);
23568 newval |= (value >> 2) & 0x00ffffff;
23569 /* Set the H bit on BLX instructions. */
23570 if (temp == 1)
23571 {
23572 if (value & 2)
23573 newval |= 0x01000000;
23574 else
23575 newval &= ~0x01000000;
23576 }
23577 md_number_to_chars (buf, newval, INSN_SIZE);
23578 }
23579 break;
23580
23581 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
23582 /* CBZ can only branch forward. */
23583
23584 /* Attempts to use CBZ to branch to the next instruction
23585 (which, strictly speaking, are prohibited) will be turned into
23586 no-ops.
23587
23588 FIXME: It may be better to remove the instruction completely and
23589 perform relaxation. */
23590 if (value == -2)
23591 {
23592 newval = md_chars_to_number (buf, THUMB_SIZE);
23593 newval = 0xbf00; /* NOP encoding T1 */
23594 md_number_to_chars (buf, newval, THUMB_SIZE);
23595 }
23596 else
23597 {
23598 if (value & ~0x7e)
23599 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23600
23601 if (fixP->fx_done || !seg->use_rela_p)
23602 {
23603 newval = md_chars_to_number (buf, THUMB_SIZE);
23604 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
23605 md_number_to_chars (buf, newval, THUMB_SIZE);
23606 }
23607 }
23608 break;
23609
23610 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
23611 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
23612 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23613
23614 if (fixP->fx_done || !seg->use_rela_p)
23615 {
23616 newval = md_chars_to_number (buf, THUMB_SIZE);
23617 newval |= (value & 0x1ff) >> 1;
23618 md_number_to_chars (buf, newval, THUMB_SIZE);
23619 }
23620 break;
23621
23622 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
23623 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
23624 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23625
23626 if (fixP->fx_done || !seg->use_rela_p)
23627 {
23628 newval = md_chars_to_number (buf, THUMB_SIZE);
23629 newval |= (value & 0xfff) >> 1;
23630 md_number_to_chars (buf, newval, THUMB_SIZE);
23631 }
23632 break;
23633
23634 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23635 if (fixP->fx_addsy
23636 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23637 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23638 && ARM_IS_FUNC (fixP->fx_addsy)
23639 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23640 {
23641 /* Force a relocation for a branch 20 bits wide. */
23642 fixP->fx_done = 0;
23643 }
23644 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
23645 as_bad_where (fixP->fx_file, fixP->fx_line,
23646 _("conditional branch out of range"));
23647
23648 if (fixP->fx_done || !seg->use_rela_p)
23649 {
23650 offsetT newval2;
23651 addressT S, J1, J2, lo, hi;
23652
23653 S = (value & 0x00100000) >> 20;
23654 J2 = (value & 0x00080000) >> 19;
23655 J1 = (value & 0x00040000) >> 18;
23656 hi = (value & 0x0003f000) >> 12;
23657 lo = (value & 0x00000ffe) >> 1;
23658
23659 newval = md_chars_to_number (buf, THUMB_SIZE);
23660 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23661 newval |= (S << 10) | hi;
23662 newval2 |= (J1 << 13) | (J2 << 11) | lo;
23663 md_number_to_chars (buf, newval, THUMB_SIZE);
23664 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
23665 }
23666 break;
23667
23668 case BFD_RELOC_THUMB_PCREL_BLX:
23669 /* If there is a blx from a thumb state function to
23670 another thumb function flip this to a bl and warn
23671 about it. */
23672
23673 if (fixP->fx_addsy
23674 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23675 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23676 && THUMB_IS_FUNC (fixP->fx_addsy))
23677 {
23678 const char *name = S_GET_NAME (fixP->fx_addsy);
23679 as_warn_where (fixP->fx_file, fixP->fx_line,
23680 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23681 name);
23682 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23683 newval = newval | 0x1000;
23684 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23685 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23686 fixP->fx_done = 1;
23687 }
23688
23689
23690 goto thumb_bl_common;
23691
23692 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23693 /* A bl from Thumb state ISA to an internal ARM state function
23694 is converted to a blx. */
23695 if (fixP->fx_addsy
23696 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23697 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23698 && ARM_IS_FUNC (fixP->fx_addsy)
23699 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23700 {
23701 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23702 newval = newval & ~0x1000;
23703 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23704 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
23705 fixP->fx_done = 1;
23706 }
23707
23708 thumb_bl_common:
23709
23710 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23711 /* For a BLX instruction, make sure that the relocation is rounded up
23712 to a word boundary. This follows the semantics of the instruction
23713 which specifies that bit 1 of the target address will come from bit
23714 1 of the base address. */
23715 value = (value + 3) & ~ 3;
23716
23717 #ifdef OBJ_ELF
23718 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
23719 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23720 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23721 #endif
23722
23723 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
23724 {
23725 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
23726 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23727 else if ((value & ~0x1ffffff)
23728 && ((value & ~0x1ffffff) != ~0x1ffffff))
23729 as_bad_where (fixP->fx_file, fixP->fx_line,
23730 _("Thumb2 branch out of range"));
23731 }
23732
23733 if (fixP->fx_done || !seg->use_rela_p)
23734 encode_thumb2_b_bl_offset (buf, value);
23735
23736 break;
23737
23738 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23739 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
23740 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23741
23742 if (fixP->fx_done || !seg->use_rela_p)
23743 encode_thumb2_b_bl_offset (buf, value);
23744
23745 break;
23746
23747 case BFD_RELOC_8:
23748 if (fixP->fx_done || !seg->use_rela_p)
23749 *buf = value;
23750 break;
23751
23752 case BFD_RELOC_16:
23753 if (fixP->fx_done || !seg->use_rela_p)
23754 md_number_to_chars (buf, value, 2);
23755 break;
23756
23757 #ifdef OBJ_ELF
23758 case BFD_RELOC_ARM_TLS_CALL:
23759 case BFD_RELOC_ARM_THM_TLS_CALL:
23760 case BFD_RELOC_ARM_TLS_DESCSEQ:
23761 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
23762 case BFD_RELOC_ARM_TLS_GOTDESC:
23763 case BFD_RELOC_ARM_TLS_GD32:
23764 case BFD_RELOC_ARM_TLS_LE32:
23765 case BFD_RELOC_ARM_TLS_IE32:
23766 case BFD_RELOC_ARM_TLS_LDM32:
23767 case BFD_RELOC_ARM_TLS_LDO32:
23768 S_SET_THREAD_LOCAL (fixP->fx_addsy);
23769 break;
23770
23771 case BFD_RELOC_ARM_GOT32:
23772 case BFD_RELOC_ARM_GOTOFF:
23773 break;
23774
23775 case BFD_RELOC_ARM_GOT_PREL:
23776 if (fixP->fx_done || !seg->use_rela_p)
23777 md_number_to_chars (buf, value, 4);
23778 break;
23779
23780 case BFD_RELOC_ARM_TARGET2:
23781 /* TARGET2 is not partial-inplace, so we need to write the
23782 addend here for REL targets, because it won't be written out
23783 during reloc processing later. */
23784 if (fixP->fx_done || !seg->use_rela_p)
23785 md_number_to_chars (buf, fixP->fx_offset, 4);
23786 break;
23787 #endif
23788
23789 case BFD_RELOC_RVA:
23790 case BFD_RELOC_32:
23791 case BFD_RELOC_ARM_TARGET1:
23792 case BFD_RELOC_ARM_ROSEGREL32:
23793 case BFD_RELOC_ARM_SBREL32:
23794 case BFD_RELOC_32_PCREL:
23795 #ifdef TE_PE
23796 case BFD_RELOC_32_SECREL:
23797 #endif
23798 if (fixP->fx_done || !seg->use_rela_p)
23799 #ifdef TE_WINCE
23800 /* For WinCE we only do this for pcrel fixups. */
23801 if (fixP->fx_done || fixP->fx_pcrel)
23802 #endif
23803 md_number_to_chars (buf, value, 4);
23804 break;
23805
23806 #ifdef OBJ_ELF
23807 case BFD_RELOC_ARM_PREL31:
23808 if (fixP->fx_done || !seg->use_rela_p)
23809 {
23810 newval = md_chars_to_number (buf, 4) & 0x80000000;
23811 if ((value ^ (value >> 1)) & 0x40000000)
23812 {
23813 as_bad_where (fixP->fx_file, fixP->fx_line,
23814 _("rel31 relocation overflow"));
23815 }
23816 newval |= value & 0x7fffffff;
23817 md_number_to_chars (buf, newval, 4);
23818 }
23819 break;
23820 #endif
23821
23822 case BFD_RELOC_ARM_CP_OFF_IMM:
23823 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
23824 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
23825 newval = md_chars_to_number (buf, INSN_SIZE);
23826 else
23827 newval = get_thumb32_insn (buf);
23828 if ((newval & 0x0f200f00) == 0x0d000900)
23829 {
23830 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
23831 has permitted values that are multiples of 2, in the range 0
23832 to 510. */
23833 if (value < -510 || value > 510 || (value & 1))
23834 as_bad_where (fixP->fx_file, fixP->fx_line,
23835 _("co-processor offset out of range"));
23836 }
23837 else if (value < -1023 || value > 1023 || (value & 3))
23838 as_bad_where (fixP->fx_file, fixP->fx_line,
23839 _("co-processor offset out of range"));
23840 cp_off_common:
23841 sign = value > 0;
23842 if (value < 0)
23843 value = -value;
23844 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23845 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23846 newval = md_chars_to_number (buf, INSN_SIZE);
23847 else
23848 newval = get_thumb32_insn (buf);
23849 if (value == 0)
23850 newval &= 0xffffff00;
23851 else
23852 {
23853 newval &= 0xff7fff00;
23854 if ((newval & 0x0f200f00) == 0x0d000900)
23855 {
23856 /* This is a fp16 vstr/vldr.
23857
23858 It requires the immediate offset in the instruction is shifted
23859 left by 1 to be a half-word offset.
23860
23861 Here, left shift by 1 first, and later right shift by 2
23862 should get the right offset. */
23863 value <<= 1;
23864 }
23865 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
23866 }
23867 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23868 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23869 md_number_to_chars (buf, newval, INSN_SIZE);
23870 else
23871 put_thumb32_insn (buf, newval);
23872 break;
23873
23874 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
23875 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
23876 if (value < -255 || value > 255)
23877 as_bad_where (fixP->fx_file, fixP->fx_line,
23878 _("co-processor offset out of range"));
23879 value *= 4;
23880 goto cp_off_common;
23881
23882 case BFD_RELOC_ARM_THUMB_OFFSET:
23883 newval = md_chars_to_number (buf, THUMB_SIZE);
23884 /* Exactly what ranges, and where the offset is inserted depends
23885 on the type of instruction, we can establish this from the
23886 top 4 bits. */
23887 switch (newval >> 12)
23888 {
23889 case 4: /* PC load. */
23890 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23891 forced to zero for these loads; md_pcrel_from has already
23892 compensated for this. */
23893 if (value & 3)
23894 as_bad_where (fixP->fx_file, fixP->fx_line,
23895 _("invalid offset, target not word aligned (0x%08lX)"),
23896 (((unsigned long) fixP->fx_frag->fr_address
23897 + (unsigned long) fixP->fx_where) & ~3)
23898 + (unsigned long) value);
23899
23900 if (value & ~0x3fc)
23901 as_bad_where (fixP->fx_file, fixP->fx_line,
23902 _("invalid offset, value too big (0x%08lX)"),
23903 (long) value);
23904
23905 newval |= value >> 2;
23906 break;
23907
23908 case 9: /* SP load/store. */
23909 if (value & ~0x3fc)
23910 as_bad_where (fixP->fx_file, fixP->fx_line,
23911 _("invalid offset, value too big (0x%08lX)"),
23912 (long) value);
23913 newval |= value >> 2;
23914 break;
23915
23916 case 6: /* Word load/store. */
23917 if (value & ~0x7c)
23918 as_bad_where (fixP->fx_file, fixP->fx_line,
23919 _("invalid offset, value too big (0x%08lX)"),
23920 (long) value);
23921 newval |= value << 4; /* 6 - 2. */
23922 break;
23923
23924 case 7: /* Byte load/store. */
23925 if (value & ~0x1f)
23926 as_bad_where (fixP->fx_file, fixP->fx_line,
23927 _("invalid offset, value too big (0x%08lX)"),
23928 (long) value);
23929 newval |= value << 6;
23930 break;
23931
23932 case 8: /* Halfword load/store. */
23933 if (value & ~0x3e)
23934 as_bad_where (fixP->fx_file, fixP->fx_line,
23935 _("invalid offset, value too big (0x%08lX)"),
23936 (long) value);
23937 newval |= value << 5; /* 6 - 1. */
23938 break;
23939
23940 default:
23941 as_bad_where (fixP->fx_file, fixP->fx_line,
23942 "Unable to process relocation for thumb opcode: %lx",
23943 (unsigned long) newval);
23944 break;
23945 }
23946 md_number_to_chars (buf, newval, THUMB_SIZE);
23947 break;
23948
23949 case BFD_RELOC_ARM_THUMB_ADD:
23950 /* This is a complicated relocation, since we use it for all of
23951 the following immediate relocations:
23952
23953 3bit ADD/SUB
23954 8bit ADD/SUB
23955 9bit ADD/SUB SP word-aligned
23956 10bit ADD PC/SP word-aligned
23957
23958 The type of instruction being processed is encoded in the
23959 instruction field:
23960
23961 0x8000 SUB
23962 0x00F0 Rd
23963 0x000F Rs
23964 */
23965 newval = md_chars_to_number (buf, THUMB_SIZE);
23966 {
23967 int rd = (newval >> 4) & 0xf;
23968 int rs = newval & 0xf;
23969 int subtract = !!(newval & 0x8000);
23970
23971 /* Check for HI regs, only very restricted cases allowed:
23972 Adjusting SP, and using PC or SP to get an address. */
23973 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
23974 || (rs > 7 && rs != REG_SP && rs != REG_PC))
23975 as_bad_where (fixP->fx_file, fixP->fx_line,
23976 _("invalid Hi register with immediate"));
23977
23978 /* If value is negative, choose the opposite instruction. */
23979 if (value < 0)
23980 {
23981 value = -value;
23982 subtract = !subtract;
23983 if (value < 0)
23984 as_bad_where (fixP->fx_file, fixP->fx_line,
23985 _("immediate value out of range"));
23986 }
23987
23988 if (rd == REG_SP)
23989 {
23990 if (value & ~0x1fc)
23991 as_bad_where (fixP->fx_file, fixP->fx_line,
23992 _("invalid immediate for stack address calculation"));
23993 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
23994 newval |= value >> 2;
23995 }
23996 else if (rs == REG_PC || rs == REG_SP)
23997 {
23998 /* PR gas/18541. If the addition is for a defined symbol
23999 within range of an ADR instruction then accept it. */
24000 if (subtract
24001 && value == 4
24002 && fixP->fx_addsy != NULL)
24003 {
24004 subtract = 0;
24005
24006 if (! S_IS_DEFINED (fixP->fx_addsy)
24007 || S_GET_SEGMENT (fixP->fx_addsy) != seg
24008 || S_IS_WEAK (fixP->fx_addsy))
24009 {
24010 as_bad_where (fixP->fx_file, fixP->fx_line,
24011 _("address calculation needs a strongly defined nearby symbol"));
24012 }
24013 else
24014 {
24015 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
24016
24017 /* Round up to the next 4-byte boundary. */
24018 if (v & 3)
24019 v = (v + 3) & ~ 3;
24020 else
24021 v += 4;
24022 v = S_GET_VALUE (fixP->fx_addsy) - v;
24023
24024 if (v & ~0x3fc)
24025 {
24026 as_bad_where (fixP->fx_file, fixP->fx_line,
24027 _("symbol too far away"));
24028 }
24029 else
24030 {
24031 fixP->fx_done = 1;
24032 value = v;
24033 }
24034 }
24035 }
24036
24037 if (subtract || value & ~0x3fc)
24038 as_bad_where (fixP->fx_file, fixP->fx_line,
24039 _("invalid immediate for address calculation (value = 0x%08lX)"),
24040 (unsigned long) (subtract ? - value : value));
24041 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
24042 newval |= rd << 8;
24043 newval |= value >> 2;
24044 }
24045 else if (rs == rd)
24046 {
24047 if (value & ~0xff)
24048 as_bad_where (fixP->fx_file, fixP->fx_line,
24049 _("immediate value out of range"));
24050 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
24051 newval |= (rd << 8) | value;
24052 }
24053 else
24054 {
24055 if (value & ~0x7)
24056 as_bad_where (fixP->fx_file, fixP->fx_line,
24057 _("immediate value out of range"));
24058 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
24059 newval |= rd | (rs << 3) | (value << 6);
24060 }
24061 }
24062 md_number_to_chars (buf, newval, THUMB_SIZE);
24063 break;
24064
24065 case BFD_RELOC_ARM_THUMB_IMM:
24066 newval = md_chars_to_number (buf, THUMB_SIZE);
24067 if (value < 0 || value > 255)
24068 as_bad_where (fixP->fx_file, fixP->fx_line,
24069 _("invalid immediate: %ld is out of range"),
24070 (long) value);
24071 newval |= value;
24072 md_number_to_chars (buf, newval, THUMB_SIZE);
24073 break;
24074
24075 case BFD_RELOC_ARM_THUMB_SHIFT:
24076 /* 5bit shift value (0..32). LSL cannot take 32. */
24077 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
24078 temp = newval & 0xf800;
24079 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
24080 as_bad_where (fixP->fx_file, fixP->fx_line,
24081 _("invalid shift value: %ld"), (long) value);
24082 /* Shifts of zero must be encoded as LSL. */
24083 if (value == 0)
24084 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
24085 /* Shifts of 32 are encoded as zero. */
24086 else if (value == 32)
24087 value = 0;
24088 newval |= value << 6;
24089 md_number_to_chars (buf, newval, THUMB_SIZE);
24090 break;
24091
24092 case BFD_RELOC_VTABLE_INHERIT:
24093 case BFD_RELOC_VTABLE_ENTRY:
24094 fixP->fx_done = 0;
24095 return;
24096
24097 case BFD_RELOC_ARM_MOVW:
24098 case BFD_RELOC_ARM_MOVT:
24099 case BFD_RELOC_ARM_THUMB_MOVW:
24100 case BFD_RELOC_ARM_THUMB_MOVT:
24101 if (fixP->fx_done || !seg->use_rela_p)
24102 {
24103 /* REL format relocations are limited to a 16-bit addend. */
24104 if (!fixP->fx_done)
24105 {
24106 if (value < -0x8000 || value > 0x7fff)
24107 as_bad_where (fixP->fx_file, fixP->fx_line,
24108 _("offset out of range"));
24109 }
24110 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24111 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24112 {
24113 value >>= 16;
24114 }
24115
24116 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24117 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24118 {
24119 newval = get_thumb32_insn (buf);
24120 newval &= 0xfbf08f00;
24121 newval |= (value & 0xf000) << 4;
24122 newval |= (value & 0x0800) << 15;
24123 newval |= (value & 0x0700) << 4;
24124 newval |= (value & 0x00ff);
24125 put_thumb32_insn (buf, newval);
24126 }
24127 else
24128 {
24129 newval = md_chars_to_number (buf, 4);
24130 newval &= 0xfff0f000;
24131 newval |= value & 0x0fff;
24132 newval |= (value & 0xf000) << 4;
24133 md_number_to_chars (buf, newval, 4);
24134 }
24135 }
24136 return;
24137
24138 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
24139 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
24140 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
24141 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
24142 gas_assert (!fixP->fx_done);
24143 {
24144 bfd_vma insn;
24145 bfd_boolean is_mov;
24146 bfd_vma encoded_addend = value;
24147
24148 /* Check that addend can be encoded in instruction. */
24149 if (!seg->use_rela_p && (value < 0 || value > 255))
24150 as_bad_where (fixP->fx_file, fixP->fx_line,
24151 _("the offset 0x%08lX is not representable"),
24152 (unsigned long) encoded_addend);
24153
24154 /* Extract the instruction. */
24155 insn = md_chars_to_number (buf, THUMB_SIZE);
24156 is_mov = (insn & 0xf800) == 0x2000;
24157
24158 /* Encode insn. */
24159 if (is_mov)
24160 {
24161 if (!seg->use_rela_p)
24162 insn |= encoded_addend;
24163 }
24164 else
24165 {
24166 int rd, rs;
24167
24168 /* Extract the instruction. */
24169 /* Encoding is the following
24170 0x8000 SUB
24171 0x00F0 Rd
24172 0x000F Rs
24173 */
24174 /* The following conditions must be true :
24175 - ADD
24176 - Rd == Rs
24177 - Rd <= 7
24178 */
24179 rd = (insn >> 4) & 0xf;
24180 rs = insn & 0xf;
24181 if ((insn & 0x8000) || (rd != rs) || rd > 7)
24182 as_bad_where (fixP->fx_file, fixP->fx_line,
24183 _("Unable to process relocation for thumb opcode: %lx"),
24184 (unsigned long) insn);
24185
24186 /* Encode as ADD immediate8 thumb 1 code. */
24187 insn = 0x3000 | (rd << 8);
24188
24189 /* Place the encoded addend into the first 8 bits of the
24190 instruction. */
24191 if (!seg->use_rela_p)
24192 insn |= encoded_addend;
24193 }
24194
24195 /* Update the instruction. */
24196 md_number_to_chars (buf, insn, THUMB_SIZE);
24197 }
24198 break;
24199
24200 case BFD_RELOC_ARM_ALU_PC_G0_NC:
24201 case BFD_RELOC_ARM_ALU_PC_G0:
24202 case BFD_RELOC_ARM_ALU_PC_G1_NC:
24203 case BFD_RELOC_ARM_ALU_PC_G1:
24204 case BFD_RELOC_ARM_ALU_PC_G2:
24205 case BFD_RELOC_ARM_ALU_SB_G0_NC:
24206 case BFD_RELOC_ARM_ALU_SB_G0:
24207 case BFD_RELOC_ARM_ALU_SB_G1_NC:
24208 case BFD_RELOC_ARM_ALU_SB_G1:
24209 case BFD_RELOC_ARM_ALU_SB_G2:
24210 gas_assert (!fixP->fx_done);
24211 if (!seg->use_rela_p)
24212 {
24213 bfd_vma insn;
24214 bfd_vma encoded_addend;
24215 bfd_vma addend_abs = abs (value);
24216
24217 /* Check that the absolute value of the addend can be
24218 expressed as an 8-bit constant plus a rotation. */
24219 encoded_addend = encode_arm_immediate (addend_abs);
24220 if (encoded_addend == (unsigned int) FAIL)
24221 as_bad_where (fixP->fx_file, fixP->fx_line,
24222 _("the offset 0x%08lX is not representable"),
24223 (unsigned long) addend_abs);
24224
24225 /* Extract the instruction. */
24226 insn = md_chars_to_number (buf, INSN_SIZE);
24227
24228 /* If the addend is positive, use an ADD instruction.
24229 Otherwise use a SUB. Take care not to destroy the S bit. */
24230 insn &= 0xff1fffff;
24231 if (value < 0)
24232 insn |= 1 << 22;
24233 else
24234 insn |= 1 << 23;
24235
24236 /* Place the encoded addend into the first 12 bits of the
24237 instruction. */
24238 insn &= 0xfffff000;
24239 insn |= encoded_addend;
24240
24241 /* Update the instruction. */
24242 md_number_to_chars (buf, insn, INSN_SIZE);
24243 }
24244 break;
24245
24246 case BFD_RELOC_ARM_LDR_PC_G0:
24247 case BFD_RELOC_ARM_LDR_PC_G1:
24248 case BFD_RELOC_ARM_LDR_PC_G2:
24249 case BFD_RELOC_ARM_LDR_SB_G0:
24250 case BFD_RELOC_ARM_LDR_SB_G1:
24251 case BFD_RELOC_ARM_LDR_SB_G2:
24252 gas_assert (!fixP->fx_done);
24253 if (!seg->use_rela_p)
24254 {
24255 bfd_vma insn;
24256 bfd_vma addend_abs = abs (value);
24257
24258 /* Check that the absolute value of the addend can be
24259 encoded in 12 bits. */
24260 if (addend_abs >= 0x1000)
24261 as_bad_where (fixP->fx_file, fixP->fx_line,
24262 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
24263 (unsigned long) addend_abs);
24264
24265 /* Extract the instruction. */
24266 insn = md_chars_to_number (buf, INSN_SIZE);
24267
24268 /* If the addend is negative, clear bit 23 of the instruction.
24269 Otherwise set it. */
24270 if (value < 0)
24271 insn &= ~(1 << 23);
24272 else
24273 insn |= 1 << 23;
24274
24275 /* Place the absolute value of the addend into the first 12 bits
24276 of the instruction. */
24277 insn &= 0xfffff000;
24278 insn |= addend_abs;
24279
24280 /* Update the instruction. */
24281 md_number_to_chars (buf, insn, INSN_SIZE);
24282 }
24283 break;
24284
24285 case BFD_RELOC_ARM_LDRS_PC_G0:
24286 case BFD_RELOC_ARM_LDRS_PC_G1:
24287 case BFD_RELOC_ARM_LDRS_PC_G2:
24288 case BFD_RELOC_ARM_LDRS_SB_G0:
24289 case BFD_RELOC_ARM_LDRS_SB_G1:
24290 case BFD_RELOC_ARM_LDRS_SB_G2:
24291 gas_assert (!fixP->fx_done);
24292 if (!seg->use_rela_p)
24293 {
24294 bfd_vma insn;
24295 bfd_vma addend_abs = abs (value);
24296
24297 /* Check that the absolute value of the addend can be
24298 encoded in 8 bits. */
24299 if (addend_abs >= 0x100)
24300 as_bad_where (fixP->fx_file, fixP->fx_line,
24301 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
24302 (unsigned long) addend_abs);
24303
24304 /* Extract the instruction. */
24305 insn = md_chars_to_number (buf, INSN_SIZE);
24306
24307 /* If the addend is negative, clear bit 23 of the instruction.
24308 Otherwise set it. */
24309 if (value < 0)
24310 insn &= ~(1 << 23);
24311 else
24312 insn |= 1 << 23;
24313
24314 /* Place the first four bits of the absolute value of the addend
24315 into the first 4 bits of the instruction, and the remaining
24316 four into bits 8 .. 11. */
24317 insn &= 0xfffff0f0;
24318 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
24319
24320 /* Update the instruction. */
24321 md_number_to_chars (buf, insn, INSN_SIZE);
24322 }
24323 break;
24324
24325 case BFD_RELOC_ARM_LDC_PC_G0:
24326 case BFD_RELOC_ARM_LDC_PC_G1:
24327 case BFD_RELOC_ARM_LDC_PC_G2:
24328 case BFD_RELOC_ARM_LDC_SB_G0:
24329 case BFD_RELOC_ARM_LDC_SB_G1:
24330 case BFD_RELOC_ARM_LDC_SB_G2:
24331 gas_assert (!fixP->fx_done);
24332 if (!seg->use_rela_p)
24333 {
24334 bfd_vma insn;
24335 bfd_vma addend_abs = abs (value);
24336
24337 /* Check that the absolute value of the addend is a multiple of
24338 four and, when divided by four, fits in 8 bits. */
24339 if (addend_abs & 0x3)
24340 as_bad_where (fixP->fx_file, fixP->fx_line,
24341 _("bad offset 0x%08lX (must be word-aligned)"),
24342 (unsigned long) addend_abs);
24343
24344 if ((addend_abs >> 2) > 0xff)
24345 as_bad_where (fixP->fx_file, fixP->fx_line,
24346 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24347 (unsigned long) addend_abs);
24348
24349 /* Extract the instruction. */
24350 insn = md_chars_to_number (buf, INSN_SIZE);
24351
24352 /* If the addend is negative, clear bit 23 of the instruction.
24353 Otherwise set it. */
24354 if (value < 0)
24355 insn &= ~(1 << 23);
24356 else
24357 insn |= 1 << 23;
24358
24359 /* Place the addend (divided by four) into the first eight
24360 bits of the instruction. */
24361 insn &= 0xfffffff0;
24362 insn |= addend_abs >> 2;
24363
24364 /* Update the instruction. */
24365 md_number_to_chars (buf, insn, INSN_SIZE);
24366 }
24367 break;
24368
24369 case BFD_RELOC_ARM_V4BX:
24370 /* This will need to go in the object file. */
24371 fixP->fx_done = 0;
24372 break;
24373
24374 case BFD_RELOC_UNUSED:
24375 default:
24376 as_bad_where (fixP->fx_file, fixP->fx_line,
24377 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
24378 }
24379 }
24380
24381 /* Translate internal representation of relocation info to BFD target
24382 format. */
24383
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = XNEW (arelent);

  /* BFD represents the symbol via a pointer-to-pointer so that symbol
     resolution can be redirected later without touching the reloc.  */
  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For PC-relative fixups the addend handling depends on whether the
     section uses RELA (explicit addend) or REL (addend in the field).  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type onto a BFD relocation code, converting
     absolute relocations to their PC-relative forms where needed and
     rejecting fixups that should have been resolved internally.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These pass straight through to BFD unchanged.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* From EABI v4 on, BLX fixups are emitted as plain BRANCH23 and
	 the linker decides whether a mode change is needed.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      /* RELA targets can carry this reloc to the linker; REL targets
	 must resolve it here, so reaching this point is an error.  */
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Any remaining internal fixup type cannot be expressed as an
	   external relocation; report it by name for diagnosis.  */
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to _GLOBAL_OFFSET_TABLE_ becomes a GOTPC
     relocation with the place's address as the addend.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
24650
24651 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24652
24653 void
24654 cons_fix_new_arm (fragS * frag,
24655 int where,
24656 int size,
24657 expressionS * exp,
24658 bfd_reloc_code_real_type reloc)
24659 {
24660 int pcrel = 0;
24661
24662 /* Pick a reloc.
24663 FIXME: @@ Should look at CPU word size. */
24664 switch (size)
24665 {
24666 case 1:
24667 reloc = BFD_RELOC_8;
24668 break;
24669 case 2:
24670 reloc = BFD_RELOC_16;
24671 break;
24672 case 4:
24673 default:
24674 reloc = BFD_RELOC_32;
24675 break;
24676 case 8:
24677 reloc = BFD_RELOC_64;
24678 break;
24679 }
24680
24681 #ifdef TE_PE
24682 if (exp->X_op == O_secrel)
24683 {
24684 exp->X_op = O_symbol;
24685 reloc = BFD_RELOC_32_SECREL;
24686 }
24687 #endif
24688
24689 fix_new_exp (frag, where, size, exp, pcrel, reloc);
24690 }
24691
24692 #if defined (OBJ_COFF)
24693 void
24694 arm_validate_fix (fixS * fixP)
24695 {
24696 /* If the destination of the branch is a defined symbol which does not have
24697 the THUMB_FUNC attribute, then we must be calling a function which has
24698 the (interfacearm) attribute. We look for the Thumb entry point to that
24699 function and change the branch to refer to that function instead. */
24700 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
24701 && fixP->fx_addsy != NULL
24702 && S_IS_DEFINED (fixP->fx_addsy)
24703 && ! THUMB_IS_FUNC (fixP->fx_addsy))
24704 {
24705 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
24706 }
24707 }
24708 #endif
24709
24710
24711 int
24712 arm_force_relocation (struct fix * fixp)
24713 {
24714 #if defined (OBJ_COFF) && defined (TE_PE)
24715 if (fixp->fx_r_type == BFD_RELOC_RVA)
24716 return 1;
24717 #endif
24718
24719 /* In case we have a call or a branch to a function in ARM ISA mode from
24720 a thumb function or vice-versa force the relocation. These relocations
24721 are cleared off for some cores that might have blx and simple transformations
24722 are possible. */
24723
24724 #ifdef OBJ_ELF
24725 switch (fixp->fx_r_type)
24726 {
24727 case BFD_RELOC_ARM_PCREL_JUMP:
24728 case BFD_RELOC_ARM_PCREL_CALL:
24729 case BFD_RELOC_THUMB_PCREL_BLX:
24730 if (THUMB_IS_FUNC (fixp->fx_addsy))
24731 return 1;
24732 break;
24733
24734 case BFD_RELOC_ARM_PCREL_BLX:
24735 case BFD_RELOC_THUMB_PCREL_BRANCH25:
24736 case BFD_RELOC_THUMB_PCREL_BRANCH20:
24737 case BFD_RELOC_THUMB_PCREL_BRANCH23:
24738 if (ARM_IS_FUNC (fixp->fx_addsy))
24739 return 1;
24740 break;
24741
24742 default:
24743 break;
24744 }
24745 #endif
24746
24747 /* Resolve these relocations even if the symbol is extern or weak.
24748 Technically this is probably wrong due to symbol preemption.
24749 In practice these relocations do not have enough range to be useful
24750 at dynamic link time, and some code (e.g. in the Linux kernel)
24751 expects these references to be resolved. */
24752 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
24753 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
24754 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
24755 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
24756 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24757 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
24758 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
24759 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
24760 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
24761 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
24762 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
24763 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
24764 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
24765 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
24766 return 0;
24767
24768 /* Always leave these relocations for the linker. */
24769 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24770 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24771 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24772 return 1;
24773
24774 /* Always generate relocations against function symbols. */
24775 if (fixp->fx_r_type == BFD_RELOC_32
24776 && fixp->fx_addsy
24777 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
24778 return 1;
24779
24780 return generic_force_reloc (fixp);
24781 }
24782
24783 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24784 /* Relocations against function names must be left unadjusted,
24785 so that the linker can use this information to generate interworking
24786 stubs. The MIPS version of this function
24787 also prevents relocations that are mips-16 specific, but I do not
24788 know why it does this.
24789
24790 FIXME:
24791 There is one other problem that ought to be addressed here, but
24792 which currently is not: Taking the address of a label (rather
24793 than a function) and then later jumping to that address. Such
24794 addresses also ought to have their bottom bit set (assuming that
24795 they reside in Thumb code), but at the moment they will not. */
24796
24797 bfd_boolean
24798 arm_fix_adjustable (fixS * fixP)
24799 {
24800 if (fixP->fx_addsy == NULL)
24801 return 1;
24802
24803 /* Preserve relocations against symbols with function type. */
24804 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
24805 return FALSE;
24806
24807 if (THUMB_IS_FUNC (fixP->fx_addsy)
24808 && fixP->fx_subsy == NULL)
24809 return FALSE;
24810
24811 /* We need the symbol name for the VTABLE entries. */
24812 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
24813 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
24814 return FALSE;
24815
24816 /* Don't allow symbols to be discarded on GOT related relocs. */
24817 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
24818 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
24819 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
24820 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
24821 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
24822 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
24823 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
24824 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
24825 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
24826 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
24827 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
24828 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
24829 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
24830 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
24831 return FALSE;
24832
24833 /* Similarly for group relocations. */
24834 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24835 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24836 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24837 return FALSE;
24838
24839 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
24840 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
24841 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24842 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
24843 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
24844 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24845 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
24846 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
24847 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
24848 return FALSE;
24849
24850 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24851 offsets, so keep these symbols. */
24852 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24853 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
24854 return FALSE;
24855
24856 return TRUE;
24857 }
24858 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24859
24860 #ifdef OBJ_ELF
24861 const char *
24862 elf32_arm_target_format (void)
24863 {
24864 #ifdef TE_SYMBIAN
24865 return (target_big_endian
24866 ? "elf32-bigarm-symbian"
24867 : "elf32-littlearm-symbian");
24868 #elif defined (TE_VXWORKS)
24869 return (target_big_endian
24870 ? "elf32-bigarm-vxworks"
24871 : "elf32-littlearm-vxworks");
24872 #elif defined (TE_NACL)
24873 return (target_big_endian
24874 ? "elf32-bigarm-nacl"
24875 : "elf32-littlearm-nacl");
24876 #else
24877 if (target_big_endian)
24878 return "elf32-bigarm";
24879 else
24880 return "elf32-littlearm";
24881 #endif
24882 }
24883
void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  /* No ARM-specific symbol frobbing is needed for ELF; delegate
     directly to the generic ELF hook.  */
  elf_frob_symbol (symp, puntp);
}
24890 #endif
24891
24892 /* MD interface: Finalization. */
24893
24894 void
24895 arm_cleanup (void)
24896 {
24897 literal_pool * pool;
24898
24899 /* Ensure that all the IT blocks are properly closed. */
24900 check_it_blocks_finished ();
24901
24902 for (pool = list_of_pools; pool; pool = pool->next)
24903 {
24904 /* Put it at the end of the relevant section. */
24905 subseg_set (pool->section, pool->sub_section);
24906 #ifdef OBJ_ELF
24907 arm_elf_change_section ();
24908 #endif
24909 s_ltorg (0);
24910 }
24911 }
24912
24913 #ifdef OBJ_ELF
24914 /* Remove any excess mapping symbols generated for alignment frags in
24915 SEC. We may have created a mapping symbol before a zero byte
24916 alignment; remove it if there's a mapping symbol after the
24917 alignment. */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to scan if the section has no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag; each frag records the last mapping symbol placed
     within it (tc_frag_data.last_map).  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the following frag(s);
	 decide whether a later mapping symbol makes it redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
24978 #endif
24979
24980 /* Adjust the symbol table. This marks Thumb symbols as distinct from
24981 ARM ones. */
24982
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  /* COFF: Thumb-ness is encoded in the symbol's storage class.  */
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking support is flagged in the native COFF aux data.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  /* ELF: Thumb-ness is carried in the symbol's st_info / branch type.  */
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a/$t/$d etc.) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
25061
25062 /* MD interface: Initialization. */
25063
25064 static void
25065 set_constant_flonums (void)
25066 {
25067 int i;
25068
25069 for (i = 0; i < NUM_FLOAT_VALS; i++)
25070 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
25071 abort ();
25072 }
25073
25074 /* Auto-select Thumb mode if it's the only available instruction set for the
25075 given architecture. */
25076
static void
autoselect_thumb_from_cpu_variant (void)
{
  /* A variant lacking the base ARM (32-bit) instruction set (arm_ext_v1)
     is Thumb-only (e.g. ARMv7-M), so start assembling in Thumb state.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
25083
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Build the hash tables the parser relies on: opcodes, condition
     codes, shift names, PSR names, registers, relocation names and
     barrier options.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = march_cpu_opt;
      dyn_mcpu_ext_opt = dyn_march_ext_opt;
      /* Avoid double free in arm_md_end.  */
      dyn_march_ext_opt = NULL;
    }

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  /* Last-resort FPU selection when nothing above produced one.  */
  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

  /* Record the selected CPU (plus any dynamic extensions) in
     selected_cpu, falling back to the configured or "any" default.  */
#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
  else if (dyn_mcpu_ext_opt)
    ARM_MERGE_FEATURE_SETS (selected_cpu, *mcpu_cpu_opt, *dyn_mcpu_ext_opt);
  else
    selected_cpu = *mcpu_cpu_opt;
#else
  if (mcpu_cpu_opt && dyn_mcpu_ext_opt)
    ARM_MERGE_FEATURE_SETS (selected_cpu, *mcpu_cpu_opt, *dyn_mcpu_ext_opt);
  else if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  /* cpu_variant is the working feature set: CPU + FPU + extensions.  */
  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
  if (dyn_mcpu_ext_opt)
    ARM_MERGE_FEATURE_SETS (cpu_variant, cpu_variant, *dyn_mcpu_ext_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

  /* Compute and store the object-file private flags (ABI variant,
     interworking, float model, PIC).  */
#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	    flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Pick the most specific BFD machine
     value the feature set supports, from newest to oldest.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
25321
25322 /* Command line processing. */
25323
25324 /* md_parse_option
25325 Invocation line includes a switch not recognized by the base assembler.
25326 See if it's a processor-specific option.
25327
25328 This routine is somewhat complicated by the need for backwards
25329 compatibility (since older releases of gcc can't be changed).
25330 The new options try to make the interface as compatible as
25331 possible with GCC.
25332
25333 New options (supported) are:
25334
25335 -mcpu=<cpu name> Assemble for selected processor
25336 -march=<architecture name> Assemble for selected architecture
25337 -mfpu=<fpu architecture> Assemble for selected FPU.
25338 -EB/-mbig-endian Big-endian
25339 -EL/-mlittle-endian Little-endian
25340 -k Generate PIC code
25341 -mthumb Start in Thumb mode
25342 -mthumb-interwork Code supports ARM/Thumb interworking
25343
25344 -m[no-]warn-deprecated Warn about deprecated features
25345 -m[no-]warn-syms Warn when symbols match instructions
25346
25347 For now we will also provide support for:
25348
25349 -mapcs-32 32-bit Program counter
25350 -mapcs-26 26-bit Program counter
   -mapcs-float		     Floats passed in FP registers
25352 -mapcs-reentrant Reentrant code
25353 -matpcs
   (sometimes these will probably be replaced with -mapcs=<list of options>
25355 and -matpcs=<list of options>)
25356
   The remaining options are only supported for backwards compatibility.
25358 Cpu variants, the arm part is optional:
25359 -m[arm]1 Currently not supported.
25360 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
25361 -m[arm]3 Arm 3 processor
25362 -m[arm]6[xx], Arm 6 processors
25363 -m[arm]7[xx][t][[d]m] Arm 7 processors
25364 -m[arm]8[10] Arm 8 processors
25365 -m[arm]9[20][tdmi] Arm 9 processors
25366 -mstrongarm[110[0]] StrongARM processors
25367 -mxscale XScale processors
25368 -m[arm]v[2345[t[e]]] Arm architectures
25369 -mall All (except the ARM1)
25370 FP variants:
25371 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
25372 -mfpe-old (No float load/store multiples)
25373 -mvfpxd VFP Single precision
25374 -mvfp All VFP
25375 -mno-fpu Disable all floating point instructions
25376
25377 The following CPU names are recognized:
25378 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
25379 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
25380 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
25381 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
25382 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
25383 arm10t arm10e, arm1020t, arm1020e, arm10200e,
25384 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
25385
25386 */
25387
/* Short options: -m<arg> (machine selection) and -k (PIC).  */
const char * md_shortopts = "m:k";

/* Only define the endianness options that make sense for this
   configuration: both for bi-endian targets, otherwise just the
   non-default one.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)

/* Long options handled by md_parse_option.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);

/* Table entry describing a simple flag-setting option.  */
struct arm_option_table
{
  const char * option;		/* Option name to match.  */
  const char * help;		/* Help information.  */
  int * var;			/* Variable to change.	*/
  int	value;			/* What to change it to.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
25424
25425 struct arm_option_table arm_opts[] =
25426 {
25427 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
25428 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
25429 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25430 &support_interwork, 1, NULL},
25431 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
25432 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
25433 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
25434 1, NULL},
25435 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
25436 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
25437 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
25438 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
25439 NULL},
25440
25441 /* These are recognized by the assembler, but have no affect on code. */
25442 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
25443 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
25444
25445 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
25446 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25447 &warn_on_deprecated, 0, NULL},
25448 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
25449 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
25450 {NULL, NULL, NULL, 0, NULL}
25451 };
25452
25453 struct arm_legacy_option_table
25454 {
25455 const char * option; /* Option name to match. */
25456 const arm_feature_set ** var; /* Variable to change. */
25457 const arm_feature_set value; /* What to change it to. */
25458 const char * deprecated; /* If non-null, print this message. */
25459 };
25460
25461 const struct arm_legacy_option_table arm_legacy_opts[] =
25462 {
25463 /* DON'T add any new processors to this list -- we want the whole list
25464 to go away... Add them to the processors table instead. */
25465 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
25466 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
25467 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
25468 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
25469 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
25470 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
25471 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
25472 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
25473 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
25474 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
25475 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
25476 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
25477 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
25478 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
25479 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
25480 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
25481 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
25482 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
25483 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
25484 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
25485 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
25486 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
25487 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
25488 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
25489 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
25490 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
25491 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
25492 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
25493 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
25494 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
25495 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
25496 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
25497 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
25498 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
25499 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
25500 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
25501 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
25502 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
25503 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
25504 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
25505 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
25506 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
25507 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
25508 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
25509 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
25510 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
25511 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
25512 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
25513 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
25514 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
25515 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
25516 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
25517 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
25518 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
25519 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
25520 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
25521 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
25522 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
25523 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
25524 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
25525 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
25526 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
25527 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
25528 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
25529 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
25530 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
25531 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
25532 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
25533 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
25534 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
25535 N_("use -mcpu=strongarm110")},
25536 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
25537 N_("use -mcpu=strongarm1100")},
25538 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
25539 N_("use -mcpu=strongarm1110")},
25540 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
25541 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
25542 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
25543
25544 /* Architecture variants -- don't add any more to this list either. */
25545 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
25546 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
25547 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
25548 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
25549 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
25550 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
25551 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
25552 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
25553 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
25554 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
25555 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
25556 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
25557 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
25558 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
25559 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
25560 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
25561 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
25562 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
25563
25564 /* Floating point variants -- don't add any more to this list either. */
25565 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
25566 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
25567 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
25568 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
25569 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
25570
25571 {NULL, NULL, ARM_ARCH_NONE, NULL}
25572 };
25573
25574 struct arm_cpu_option_table
25575 {
25576 const char * name;
25577 size_t name_len;
25578 const arm_feature_set value;
25579 const arm_feature_set ext;
25580 /* For some CPUs we assume an FPU unless the user explicitly sets
25581 -mfpu=... */
25582 const arm_feature_set default_fpu;
25583 /* The canonical name of the CPU, or NULL to use NAME converted to upper
25584 case. */
25585 const char * canonical_name;
25586 };
25587
25588 /* This list should, at a minimum, contain all the cpu names
25589 recognized by GCC. */
25590 #define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
25591
25592 static const struct arm_cpu_option_table arm_cpus[] =
25593 {
25594 ARM_CPU_OPT ("all", NULL, ARM_ANY,
25595 ARM_ARCH_NONE,
25596 FPU_ARCH_FPA),
25597 ARM_CPU_OPT ("arm1", NULL, ARM_ARCH_V1,
25598 ARM_ARCH_NONE,
25599 FPU_ARCH_FPA),
25600 ARM_CPU_OPT ("arm2", NULL, ARM_ARCH_V2,
25601 ARM_ARCH_NONE,
25602 FPU_ARCH_FPA),
25603 ARM_CPU_OPT ("arm250", NULL, ARM_ARCH_V2S,
25604 ARM_ARCH_NONE,
25605 FPU_ARCH_FPA),
25606 ARM_CPU_OPT ("arm3", NULL, ARM_ARCH_V2S,
25607 ARM_ARCH_NONE,
25608 FPU_ARCH_FPA),
25609 ARM_CPU_OPT ("arm6", NULL, ARM_ARCH_V3,
25610 ARM_ARCH_NONE,
25611 FPU_ARCH_FPA),
25612 ARM_CPU_OPT ("arm60", NULL, ARM_ARCH_V3,
25613 ARM_ARCH_NONE,
25614 FPU_ARCH_FPA),
25615 ARM_CPU_OPT ("arm600", NULL, ARM_ARCH_V3,
25616 ARM_ARCH_NONE,
25617 FPU_ARCH_FPA),
25618 ARM_CPU_OPT ("arm610", NULL, ARM_ARCH_V3,
25619 ARM_ARCH_NONE,
25620 FPU_ARCH_FPA),
25621 ARM_CPU_OPT ("arm620", NULL, ARM_ARCH_V3,
25622 ARM_ARCH_NONE,
25623 FPU_ARCH_FPA),
25624 ARM_CPU_OPT ("arm7", NULL, ARM_ARCH_V3,
25625 ARM_ARCH_NONE,
25626 FPU_ARCH_FPA),
25627 ARM_CPU_OPT ("arm7m", NULL, ARM_ARCH_V3M,
25628 ARM_ARCH_NONE,
25629 FPU_ARCH_FPA),
25630 ARM_CPU_OPT ("arm7d", NULL, ARM_ARCH_V3,
25631 ARM_ARCH_NONE,
25632 FPU_ARCH_FPA),
25633 ARM_CPU_OPT ("arm7dm", NULL, ARM_ARCH_V3M,
25634 ARM_ARCH_NONE,
25635 FPU_ARCH_FPA),
25636 ARM_CPU_OPT ("arm7di", NULL, ARM_ARCH_V3,
25637 ARM_ARCH_NONE,
25638 FPU_ARCH_FPA),
25639 ARM_CPU_OPT ("arm7dmi", NULL, ARM_ARCH_V3M,
25640 ARM_ARCH_NONE,
25641 FPU_ARCH_FPA),
25642 ARM_CPU_OPT ("arm70", NULL, ARM_ARCH_V3,
25643 ARM_ARCH_NONE,
25644 FPU_ARCH_FPA),
25645 ARM_CPU_OPT ("arm700", NULL, ARM_ARCH_V3,
25646 ARM_ARCH_NONE,
25647 FPU_ARCH_FPA),
25648 ARM_CPU_OPT ("arm700i", NULL, ARM_ARCH_V3,
25649 ARM_ARCH_NONE,
25650 FPU_ARCH_FPA),
25651 ARM_CPU_OPT ("arm710", NULL, ARM_ARCH_V3,
25652 ARM_ARCH_NONE,
25653 FPU_ARCH_FPA),
25654 ARM_CPU_OPT ("arm710t", NULL, ARM_ARCH_V4T,
25655 ARM_ARCH_NONE,
25656 FPU_ARCH_FPA),
25657 ARM_CPU_OPT ("arm720", NULL, ARM_ARCH_V3,
25658 ARM_ARCH_NONE,
25659 FPU_ARCH_FPA),
25660 ARM_CPU_OPT ("arm720t", NULL, ARM_ARCH_V4T,
25661 ARM_ARCH_NONE,
25662 FPU_ARCH_FPA),
25663 ARM_CPU_OPT ("arm740t", NULL, ARM_ARCH_V4T,
25664 ARM_ARCH_NONE,
25665 FPU_ARCH_FPA),
25666 ARM_CPU_OPT ("arm710c", NULL, ARM_ARCH_V3,
25667 ARM_ARCH_NONE,
25668 FPU_ARCH_FPA),
25669 ARM_CPU_OPT ("arm7100", NULL, ARM_ARCH_V3,
25670 ARM_ARCH_NONE,
25671 FPU_ARCH_FPA),
25672 ARM_CPU_OPT ("arm7500", NULL, ARM_ARCH_V3,
25673 ARM_ARCH_NONE,
25674 FPU_ARCH_FPA),
25675 ARM_CPU_OPT ("arm7500fe", NULL, ARM_ARCH_V3,
25676 ARM_ARCH_NONE,
25677 FPU_ARCH_FPA),
25678 ARM_CPU_OPT ("arm7t", NULL, ARM_ARCH_V4T,
25679 ARM_ARCH_NONE,
25680 FPU_ARCH_FPA),
25681 ARM_CPU_OPT ("arm7tdmi", NULL, ARM_ARCH_V4T,
25682 ARM_ARCH_NONE,
25683 FPU_ARCH_FPA),
25684 ARM_CPU_OPT ("arm7tdmi-s", NULL, ARM_ARCH_V4T,
25685 ARM_ARCH_NONE,
25686 FPU_ARCH_FPA),
25687 ARM_CPU_OPT ("arm8", NULL, ARM_ARCH_V4,
25688 ARM_ARCH_NONE,
25689 FPU_ARCH_FPA),
25690 ARM_CPU_OPT ("arm810", NULL, ARM_ARCH_V4,
25691 ARM_ARCH_NONE,
25692 FPU_ARCH_FPA),
25693 ARM_CPU_OPT ("strongarm", NULL, ARM_ARCH_V4,
25694 ARM_ARCH_NONE,
25695 FPU_ARCH_FPA),
25696 ARM_CPU_OPT ("strongarm1", NULL, ARM_ARCH_V4,
25697 ARM_ARCH_NONE,
25698 FPU_ARCH_FPA),
25699 ARM_CPU_OPT ("strongarm110", NULL, ARM_ARCH_V4,
25700 ARM_ARCH_NONE,
25701 FPU_ARCH_FPA),
25702 ARM_CPU_OPT ("strongarm1100", NULL, ARM_ARCH_V4,
25703 ARM_ARCH_NONE,
25704 FPU_ARCH_FPA),
25705 ARM_CPU_OPT ("strongarm1110", NULL, ARM_ARCH_V4,
25706 ARM_ARCH_NONE,
25707 FPU_ARCH_FPA),
25708 ARM_CPU_OPT ("arm9", NULL, ARM_ARCH_V4T,
25709 ARM_ARCH_NONE,
25710 FPU_ARCH_FPA),
25711 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T,
25712 ARM_ARCH_NONE,
25713 FPU_ARCH_FPA),
25714 ARM_CPU_OPT ("arm920t", NULL, ARM_ARCH_V4T,
25715 ARM_ARCH_NONE,
25716 FPU_ARCH_FPA),
25717 ARM_CPU_OPT ("arm922t", NULL, ARM_ARCH_V4T,
25718 ARM_ARCH_NONE,
25719 FPU_ARCH_FPA),
25720 ARM_CPU_OPT ("arm940t", NULL, ARM_ARCH_V4T,
25721 ARM_ARCH_NONE,
25722 FPU_ARCH_FPA),
25723 ARM_CPU_OPT ("arm9tdmi", NULL, ARM_ARCH_V4T,
25724 ARM_ARCH_NONE,
25725 FPU_ARCH_FPA),
25726 ARM_CPU_OPT ("fa526", NULL, ARM_ARCH_V4,
25727 ARM_ARCH_NONE,
25728 FPU_ARCH_FPA),
25729 ARM_CPU_OPT ("fa626", NULL, ARM_ARCH_V4,
25730 ARM_ARCH_NONE,
25731 FPU_ARCH_FPA),
25732
25733 /* For V5 or later processors we default to using VFP; but the user
25734 should really set the FPU type explicitly. */
25735 ARM_CPU_OPT ("arm9e-r0", NULL, ARM_ARCH_V5TExP,
25736 ARM_ARCH_NONE,
25737 FPU_ARCH_VFP_V2),
25738 ARM_CPU_OPT ("arm9e", NULL, ARM_ARCH_V5TE,
25739 ARM_ARCH_NONE,
25740 FPU_ARCH_VFP_V2),
25741 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ,
25742 ARM_ARCH_NONE,
25743 FPU_ARCH_VFP_V2),
25744 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ,
25745 ARM_ARCH_NONE,
25746 FPU_ARCH_VFP_V2),
25747 ARM_CPU_OPT ("arm926ej-s", NULL, ARM_ARCH_V5TEJ,
25748 ARM_ARCH_NONE,
25749 FPU_ARCH_VFP_V2),
25750 ARM_CPU_OPT ("arm946e-r0", NULL, ARM_ARCH_V5TExP,
25751 ARM_ARCH_NONE,
25752 FPU_ARCH_VFP_V2),
25753 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE,
25754 ARM_ARCH_NONE,
25755 FPU_ARCH_VFP_V2),
25756 ARM_CPU_OPT ("arm946e-s", NULL, ARM_ARCH_V5TE,
25757 ARM_ARCH_NONE,
25758 FPU_ARCH_VFP_V2),
25759 ARM_CPU_OPT ("arm966e-r0", NULL, ARM_ARCH_V5TExP,
25760 ARM_ARCH_NONE,
25761 FPU_ARCH_VFP_V2),
25762 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE,
25763 ARM_ARCH_NONE,
25764 FPU_ARCH_VFP_V2),
25765 ARM_CPU_OPT ("arm966e-s", NULL, ARM_ARCH_V5TE,
25766 ARM_ARCH_NONE,
25767 FPU_ARCH_VFP_V2),
25768 ARM_CPU_OPT ("arm968e-s", NULL, ARM_ARCH_V5TE,
25769 ARM_ARCH_NONE,
25770 FPU_ARCH_VFP_V2),
25771 ARM_CPU_OPT ("arm10t", NULL, ARM_ARCH_V5T,
25772 ARM_ARCH_NONE,
25773 FPU_ARCH_VFP_V1),
25774 ARM_CPU_OPT ("arm10tdmi", NULL, ARM_ARCH_V5T,
25775 ARM_ARCH_NONE,
25776 FPU_ARCH_VFP_V1),
25777 ARM_CPU_OPT ("arm10e", NULL, ARM_ARCH_V5TE,
25778 ARM_ARCH_NONE,
25779 FPU_ARCH_VFP_V2),
25780 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE,
25781 ARM_ARCH_NONE,
25782 FPU_ARCH_VFP_V2),
25783 ARM_CPU_OPT ("arm1020t", NULL, ARM_ARCH_V5T,
25784 ARM_ARCH_NONE,
25785 FPU_ARCH_VFP_V1),
25786 ARM_CPU_OPT ("arm1020e", NULL, ARM_ARCH_V5TE,
25787 ARM_ARCH_NONE,
25788 FPU_ARCH_VFP_V2),
25789 ARM_CPU_OPT ("arm1022e", NULL, ARM_ARCH_V5TE,
25790 ARM_ARCH_NONE,
25791 FPU_ARCH_VFP_V2),
25792 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ,
25793 ARM_ARCH_NONE,
25794 FPU_ARCH_VFP_V2),
25795 ARM_CPU_OPT ("arm1026ej-s", NULL, ARM_ARCH_V5TEJ,
25796 ARM_ARCH_NONE,
25797 FPU_ARCH_VFP_V2),
25798 ARM_CPU_OPT ("fa606te", NULL, ARM_ARCH_V5TE,
25799 ARM_ARCH_NONE,
25800 FPU_ARCH_VFP_V2),
25801 ARM_CPU_OPT ("fa616te", NULL, ARM_ARCH_V5TE,
25802 ARM_ARCH_NONE,
25803 FPU_ARCH_VFP_V2),
25804 ARM_CPU_OPT ("fa626te", NULL, ARM_ARCH_V5TE,
25805 ARM_ARCH_NONE,
25806 FPU_ARCH_VFP_V2),
25807 ARM_CPU_OPT ("fmp626", NULL, ARM_ARCH_V5TE,
25808 ARM_ARCH_NONE,
25809 FPU_ARCH_VFP_V2),
25810 ARM_CPU_OPT ("fa726te", NULL, ARM_ARCH_V5TE,
25811 ARM_ARCH_NONE,
25812 FPU_ARCH_VFP_V2),
25813 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6,
25814 ARM_ARCH_NONE,
25815 FPU_NONE),
25816 ARM_CPU_OPT ("arm1136j-s", NULL, ARM_ARCH_V6,
25817 ARM_ARCH_NONE,
25818 FPU_NONE),
25819 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6,
25820 ARM_ARCH_NONE,
25821 FPU_ARCH_VFP_V2),
25822 ARM_CPU_OPT ("arm1136jf-s", NULL, ARM_ARCH_V6,
25823 ARM_ARCH_NONE,
25824 FPU_ARCH_VFP_V2),
25825 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K,
25826 ARM_ARCH_NONE,
25827 FPU_ARCH_VFP_V2),
25828 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K,
25829 ARM_ARCH_NONE,
25830 FPU_NONE),
25831 ARM_CPU_OPT ("arm1156t2-s", NULL, ARM_ARCH_V6T2,
25832 ARM_ARCH_NONE,
25833 FPU_NONE),
25834 ARM_CPU_OPT ("arm1156t2f-s", NULL, ARM_ARCH_V6T2,
25835 ARM_ARCH_NONE,
25836 FPU_ARCH_VFP_V2),
25837 ARM_CPU_OPT ("arm1176jz-s", NULL, ARM_ARCH_V6KZ,
25838 ARM_ARCH_NONE,
25839 FPU_NONE),
25840 ARM_CPU_OPT ("arm1176jzf-s", NULL, ARM_ARCH_V6KZ,
25841 ARM_ARCH_NONE,
25842 FPU_ARCH_VFP_V2),
25843 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A,
25844 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
25845 FPU_NONE),
25846 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE,
25847 ARM_ARCH_NONE,
25848 FPU_ARCH_NEON_VFP_V4),
25849 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A,
25850 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
25851 ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
25852 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A,
25853 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
25854 ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
25855 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE,
25856 ARM_ARCH_NONE,
25857 FPU_ARCH_NEON_VFP_V4),
25858 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE,
25859 ARM_ARCH_NONE,
25860 FPU_ARCH_NEON_VFP_V4),
25861 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE,
25862 ARM_ARCH_NONE,
25863 FPU_ARCH_NEON_VFP_V4),
25864 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A,
25865 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
25866 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
25867 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A,
25868 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
25869 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
25870 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A,
25871 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
25872 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
25873 ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A,
25874 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
25875 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
25876 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A,
25877 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
25878 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
25879 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A,
25880 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
25881 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
25882 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A,
25883 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
25884 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
25885 ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A,
25886 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
25887 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
25888 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R,
25889 ARM_ARCH_NONE,
25890 FPU_NONE),
25891 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R,
25892 ARM_ARCH_NONE,
25893 FPU_ARCH_VFP_V3D16),
25894 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R,
25895 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
25896 FPU_NONE),
25897 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R,
25898 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
25899 FPU_ARCH_VFP_V3D16),
25900 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R,
25901 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
25902 FPU_ARCH_VFP_V3D16),
25903 ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R,
25904 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
25905 FPU_ARCH_NEON_VFP_ARMV8),
25906 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN,
25907 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
25908 FPU_NONE),
25909 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE,
25910 ARM_ARCH_NONE,
25911 FPU_NONE),
25912 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM,
25913 ARM_ARCH_NONE,
25914 FPU_NONE),
25915 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM,
25916 ARM_ARCH_NONE,
25917 FPU_NONE),
25918 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M,
25919 ARM_ARCH_NONE,
25920 FPU_NONE),
25921 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM,
25922 ARM_ARCH_NONE,
25923 FPU_NONE),
25924 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM,
25925 ARM_ARCH_NONE,
25926 FPU_NONE),
25927 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM,
25928 ARM_ARCH_NONE,
25929 FPU_NONE),
25930 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A,
25931 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
25932 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
25933
25934 /* ??? XSCALE is really an architecture. */
25935 ARM_CPU_OPT ("xscale", NULL, ARM_ARCH_XSCALE,
25936 ARM_ARCH_NONE,
25937 FPU_ARCH_VFP_V2),
25938
25939 /* ??? iwmmxt is not a processor. */
25940 ARM_CPU_OPT ("iwmmxt", NULL, ARM_ARCH_IWMMXT,
25941 ARM_ARCH_NONE,
25942 FPU_ARCH_VFP_V2),
25943 ARM_CPU_OPT ("iwmmxt2", NULL, ARM_ARCH_IWMMXT2,
25944 ARM_ARCH_NONE,
25945 FPU_ARCH_VFP_V2),
25946 ARM_CPU_OPT ("i80200", NULL, ARM_ARCH_XSCALE,
25947 ARM_ARCH_NONE,
25948 FPU_ARCH_VFP_V2),
25949
25950 /* Maverick. */
25951 ARM_CPU_OPT ("ep9312", "ARM920T",
25952 ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
25953 ARM_ARCH_NONE, FPU_ARCH_MAVERICK),
25954
25955 /* Marvell processors. */
25956 ARM_CPU_OPT ("marvell-pj4", NULL, ARM_ARCH_V7A,
25957 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
25958 FPU_ARCH_VFP_V3D16),
25959 ARM_CPU_OPT ("marvell-whitney", NULL, ARM_ARCH_V7A,
25960 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
25961 FPU_ARCH_NEON_VFP_V4),
25962
25963 /* APM X-Gene family. */
25964 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A,
25965 ARM_ARCH_NONE,
25966 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
25967 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A,
25968 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
25969 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
25970
25971 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
25972 };
25973 #undef ARM_CPU_OPT
25974
25975 struct arm_arch_option_table
25976 {
25977 const char * name;
25978 size_t name_len;
25979 const arm_feature_set value;
25980 const arm_feature_set default_fpu;
25981 };
25982
25983 /* This list should, at a minimum, contain all the architecture names
25984 recognized by GCC. */
25985 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
25986
25987 static const struct arm_arch_option_table arm_archs[] =
25988 {
25989 ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
25990 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
25991 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
25992 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
25993 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
25994 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
25995 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
25996 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
25997 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
25998 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
25999 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
26000 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
26001 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
26002 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
26003 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP),
26004 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP),
26005 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP),
26006 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP),
26007 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP),
26008 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP),
26009 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP),
26010 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
26011 kept to preserve existing behaviour. */
26012 ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP),
26013 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP),
26014 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP),
26015 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP),
26016 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP),
26017 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
26018 kept to preserve existing behaviour. */
26019 ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
26020 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
26021 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
26022 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
26023 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP),
26024 /* The official spelling of the ARMv7 profile variants is the dashed form.
26025 Accept the non-dashed form for compatibility with old toolchains. */
26026 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP),
26027 ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP),
26028 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP),
26029 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
26030 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP),
26031 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP),
26032 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
26033 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP),
26034 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
26035 ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
26036 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP),
26037 ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP),
26038 ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP),
26039 ARM_ARCH_OPT ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP),
26040 ARM_ARCH_OPT ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP),
26041 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
26042 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
26043 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
26044 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
26045 };
26046 #undef ARM_ARCH_OPT
26047
26048 /* ISA extensions in the co-processor and main instruction set space. */
26049
26050 struct arm_option_extension_value_table
26051 {
26052 const char * name;
26053 size_t name_len;
26054 const arm_feature_set merge_value;
26055 const arm_feature_set clear_value;
26056 /* List of architectures for which an extension is available. ARM_ARCH_NONE
26057 indicates that an extension is available for all architectures while
26058 ARM_ANY marks an empty entry. */
26059 const arm_feature_set allowed_archs[2];
26060 };
26061
26062 /* The following table must be in alphabetical order with a NULL last entry. */
26063
26064 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
26065 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
26066
26067 static const struct arm_option_extension_value_table arm_extensions[] =
26068 {
26069 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26070 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
26071 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
26072 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
26073 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
26074 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
26075 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
26076 ARM_ARCH_V8_2A),
26077 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
26078 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
26079 ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
26080 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
26081 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
26082 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
26083 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
26084 ARM_ARCH_V8_2A),
26085 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
26086 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
26087 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
26088 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
26089 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
26090 Thumb divide instruction. Due to this having the same name as the
26091 previous entry, this will be ignored when doing command-line parsing and
26092 only considered by build attribute selection code. */
26093 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
26094 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
26095 ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
26096 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
26097 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
26098 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
26099 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
26100 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
26101 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
26102 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
26103 ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
26104 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
26105 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
26106 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
26107 ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
26108 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
26109 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
26110 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
26111 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
26112 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
26113 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
26114 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
26115 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1,
26116 ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
26117 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
26118 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
26119 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
26120 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
26121 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
26122 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
26123 ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
26124 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
26125 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
26126 | ARM_EXT_DIV),
26127 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
26128 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
26129 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
26130 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
26131 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
26132 };
26133 #undef ARM_EXT_OPT
26134
/* ISA floating-point and Advanced SIMD extensions.  */

/* Maps a -mfpu= option name to the FPU/SIMD feature set it selects.  */
struct arm_option_fpu_value_table
{
  const char * name;		/* FPU name as written on the command line.  */
  const arm_feature_set value;	/* Feature bits enabled by this FPU.  */
};
26141
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Entries marked "Undocumented" are accepted for
   backward compatibility only.  Lookup is by exact match (see
   arm_parse_fpu), so order is not significant.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}
};
26192
/* Generic name -> integer mapping, used for the simple option tables
   below (float ABI names, EABI versions).  */
struct arm_option_value_table
{
  const char *name;	/* Option name as written on the command line.  */
  long value;		/* Value stored when the name matches.  */
};
26198
/* Recognized values for the -mfloat-abi= option.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
26206
26207 #ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
/* Recognized values for the -meabi= option.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
26216 #endif
26217
/* Table entry for multi-character options such as -mcpu=.  Matching is
   done by prefix in md_parse_option, and FUNC is handed the text that
   follows the matched prefix.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
26225
/* Parse a list of "+extension" / "+noextension" suffixes in STR against
   the base CPU/architecture feature set *OPT_SET.  The accumulated
   extension feature bits are stored in **EXT_SET_P, which is allocated
   here if the caller passed in a NULL set.  Returns TRUE on success,
   FALSE (after issuing a diagnostic with as_bad) on any parse error.  */

static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
		     arm_feature_set **ext_set_p)
{
  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  /* Lazily allocate the result feature set for the caller.  */
  if (!*ext_set_p)
    {
      *ext_set_p = XNEW (arm_feature_set);
      **ext_set_p = arm_arch_none;
    }

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      /* Each extension name must be introduced by a '+'.  */
      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN is the length of this extension name alone; EXT points at the
	 remainder of the list (or is NULL for the last name).  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      /* A "no" prefix asks for removal; switching into removal mode
	 restarts the table scan from the top (removals have their own
	 alphabetical sequence).  */
      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* Scan over the options table trying to find an exact match.  The
	 scan resumes from the last match, which enforces alphabetical
	 ordering of the user's list.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (**ext_set_p, **ext_set_p,
				      opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (**ext_set_p, **ext_set_p, opt->clear_value);

	    /* Allowing Thumb division instructions for ARMv7 in autodetection
	       rely on this break so that duplicate extensions (extensions
	       with the same name as a previous extension in the list) are not
	       considered for command-line parsing.  */
	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return TRUE;
}
26364
/* Parse the argument of the -mcpu= command-line option.  STR is a CPU
   name, optionally followed by "+extension" suffixes.  On success set
   mcpu_cpu_opt, dyn_mcpu_ext_opt, mcpu_fpu_opt and selected_cpu_name and
   return TRUE; otherwise issue a diagnostic and return FALSE.  */

static bfd_boolean
arm_parse_cpu (const char *str)
{
  const struct arm_cpu_option_table *opt;
  const char *ext = strchr (str, '+');
  size_t len;

  /* LEN covers the CPU name only, excluding any extension suffix.  */
  if (ext != NULL)
    len = ext - str;
  else
    len = strlen (str);

  if (len == 0)
    {
      as_bad (_("missing cpu name `%s'"), str);
      return FALSE;
    }

  for (opt = arm_cpus; opt->name != NULL; opt++)
    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	/* Record the CPU's implied extension feature bits; extension
	   suffixes below are merged on top of them.  */
	if (!dyn_mcpu_ext_opt)
	  dyn_mcpu_ext_opt = XNEW (arm_feature_set);
	*dyn_mcpu_ext_opt = opt->ext;
	mcpu_fpu_opt = &opt->default_fpu;
	if (opt->canonical_name)
	  {
	    gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
	    strcpy (selected_cpu_name, opt->canonical_name);
	  }
	else
	  {
	    size_t i;

	    /* No canonical name: report the user's spelling upper-cased,
	       truncated to the buffer size.  */
	    if (len >= sizeof selected_cpu_name)
	      len = (sizeof selected_cpu_name) - 1;

	    for (i = 0; i < len; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }

	if (ext != NULL)
	  return arm_parse_extension (ext, mcpu_cpu_opt, &dyn_mcpu_ext_opt);

	return TRUE;
      }

  as_bad (_("unknown cpu `%s'"), str);
  return FALSE;
}
26417
/* Parse the argument of the -march= command-line option.  STR is an
   architecture name, optionally followed by "+extension" suffixes.  On
   success set march_cpu_opt, march_fpu_opt (and dyn_march_ext_opt via
   arm_parse_extension) plus selected_cpu_name, and return TRUE;
   otherwise issue a diagnostic and return FALSE.  */

static bfd_boolean
arm_parse_arch (const char *str)
{
  const struct arm_arch_option_table *opt;
  const char *ext = strchr (str, '+');
  size_t len;

  /* LEN covers the architecture name only, excluding any extensions.  */
  if (ext != NULL)
    len = ext - str;
  else
    len = strlen (str);

  if (len == 0)
    {
      as_bad (_("missing architecture name `%s'"), str);
      return FALSE;
    }

  for (opt = arm_archs; opt->name != NULL; opt++)
    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
      {
	march_cpu_opt = &opt->value;
	march_fpu_opt = &opt->default_fpu;
	strcpy (selected_cpu_name, opt->name);

	if (ext != NULL)
	  return arm_parse_extension (ext, march_cpu_opt, &dyn_march_ext_opt);

	return TRUE;
      }

  as_bad (_("unknown architecture `%s'\n"), str);
  return FALSE;
}
26452
26453 static bfd_boolean
26454 arm_parse_fpu (const char * str)
26455 {
26456 const struct arm_option_fpu_value_table * opt;
26457
26458 for (opt = arm_fpus; opt->name != NULL; opt++)
26459 if (streq (opt->name, str))
26460 {
26461 mfpu_opt = &opt->value;
26462 return TRUE;
26463 }
26464
26465 as_bad (_("unknown floating point format `%s'\n"), str);
26466 return FALSE;
26467 }
26468
26469 static bfd_boolean
26470 arm_parse_float_abi (const char * str)
26471 {
26472 const struct arm_option_value_table * opt;
26473
26474 for (opt = arm_float_abis; opt->name != NULL; opt++)
26475 if (streq (opt->name, str))
26476 {
26477 mfloat_abi_opt = opt->value;
26478 return TRUE;
26479 }
26480
26481 as_bad (_("unknown floating point abi `%s'\n"), str);
26482 return FALSE;
26483 }
26484
26485 #ifdef OBJ_ELF
26486 static bfd_boolean
26487 arm_parse_eabi (const char * str)
26488 {
26489 const struct arm_option_value_table *opt;
26490
26491 for (opt = arm_eabis; opt->name != NULL; opt++)
26492 if (streq (opt->name, str))
26493 {
26494 meabi_flags = opt->value;
26495 return TRUE;
26496 }
26497 as_bad (_("unknown EABI `%s'\n"), str);
26498 return FALSE;
26499 }
26500 #endif
26501
26502 static bfd_boolean
26503 arm_parse_it_mode (const char * str)
26504 {
26505 bfd_boolean ret = TRUE;
26506
26507 if (streq ("arm", str))
26508 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
26509 else if (streq ("thumb", str))
26510 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
26511 else if (streq ("always", str))
26512 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
26513 else if (streq ("never", str))
26514 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
26515 else
26516 {
26517 as_bad (_("unknown implicit IT mode `%s', should be "\
26518 "arm, thumb, always, or never."), str);
26519 ret = FALSE;
26520 }
26521
26522 return ret;
26523 }
26524
26525 static bfd_boolean
26526 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
26527 {
26528 codecomposer_syntax = TRUE;
26529 arm_comment_chars[0] = ';';
26530 arm_line_separator_chars[0] = 0;
26531 return TRUE;
26532 }
26533
/* Multi-character command-line options and their sub-option parsers;
   scanned by md_parse_option and printed by md_show_usage.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
26554
26555 int
26556 md_parse_option (int c, const char * arg)
26557 {
26558 struct arm_option_table *opt;
26559 const struct arm_legacy_option_table *fopt;
26560 struct arm_long_option_table *lopt;
26561
26562 switch (c)
26563 {
26564 #ifdef OPTION_EB
26565 case OPTION_EB:
26566 target_big_endian = 1;
26567 break;
26568 #endif
26569
26570 #ifdef OPTION_EL
26571 case OPTION_EL:
26572 target_big_endian = 0;
26573 break;
26574 #endif
26575
26576 case OPTION_FIX_V4BX:
26577 fix_v4bx = TRUE;
26578 break;
26579
26580 case 'a':
26581 /* Listing option. Just ignore these, we don't support additional
26582 ones. */
26583 return 0;
26584
26585 default:
26586 for (opt = arm_opts; opt->option != NULL; opt++)
26587 {
26588 if (c == opt->option[0]
26589 && ((arg == NULL && opt->option[1] == 0)
26590 || streq (arg, opt->option + 1)))
26591 {
26592 /* If the option is deprecated, tell the user. */
26593 if (warn_on_deprecated && opt->deprecated != NULL)
26594 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
26595 arg ? arg : "", _(opt->deprecated));
26596
26597 if (opt->var != NULL)
26598 *opt->var = opt->value;
26599
26600 return 1;
26601 }
26602 }
26603
26604 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
26605 {
26606 if (c == fopt->option[0]
26607 && ((arg == NULL && fopt->option[1] == 0)
26608 || streq (arg, fopt->option + 1)))
26609 {
26610 /* If the option is deprecated, tell the user. */
26611 if (warn_on_deprecated && fopt->deprecated != NULL)
26612 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
26613 arg ? arg : "", _(fopt->deprecated));
26614
26615 if (fopt->var != NULL)
26616 *fopt->var = &fopt->value;
26617
26618 return 1;
26619 }
26620 }
26621
26622 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
26623 {
26624 /* These options are expected to have an argument. */
26625 if (c == lopt->option[0]
26626 && arg != NULL
26627 && strncmp (arg, lopt->option + 1,
26628 strlen (lopt->option + 1)) == 0)
26629 {
26630 /* If the option is deprecated, tell the user. */
26631 if (warn_on_deprecated && lopt->deprecated != NULL)
26632 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
26633 _(lopt->deprecated));
26634
26635 /* Call the sup-option parser. */
26636 return lopt->func (arg + strlen (lopt->option) - 1);
26637 }
26638 }
26639
26640 return 0;
26641 }
26642
26643 return 1;
26644 }
26645
/* GAS target hook: print the ARM-specific command-line options (short
   table, long table, endianness selection and --fix-v4bx) to FP.  Called
   when the assembler's help text is requested.  */

void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  /* Short options with help text.  */
  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  /* Long (prefix-matched) options with help text.  */
  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));
}
26675
26676 #ifdef OBJ_ELF
26677
/* Associates an EABI Tag_CPU_arch build attribute value with the feature
   set of the corresponding architecture.  */
typedef struct
{
  int val;		/* Tag_CPU_arch value; -1 terminates the table.  */
  arm_feature_set flags;	/* Feature set of the architecture.  */
} cpu_arch_ver_table;
26683
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   chronologically for architectures, with an exception for ARMv6-M and
   ARMv6S-M due to legacy reasons.  No new architecture should have a
   special case.  This allows for build attribute selection results to be
   stable when new architectures are added.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    /* Pre-v4 architectures all map to Tag_CPU_arch value 0.  */
    {0, ARM_ARCH_V1},
    {0, ARM_ARCH_V2},
    {0, ARM_ARCH_V2S},
    {0, ARM_ARCH_V3},
    {0, ARM_ARCH_V3M},
    {1, ARM_ARCH_V4xM},
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4TxM},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5xM},
    {3, ARM_ARCH_V5},
    {3, ARM_ARCH_V5TxM},
    {3, ARM_ARCH_V5T},
    {4, ARM_ARCH_V5TExP},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {7, ARM_ARCH_V6Z},
    {7, ARM_ARCH_V6KZ},
    {9, ARM_ARCH_V6K},
    {8, ARM_ARCH_V6T2},
    {8, ARM_ARCH_V6KT2},
    {8, ARM_ARCH_V6ZT2},
    {8, ARM_ARCH_V6KZT2},

    /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
       always selected build attributes to match those of ARMv6-M
       (resp. ARMv6S-M).  However, due to these architectures being a strict
       subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
       would be selected when fully respecting chronology of architectures.
       It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
       move them before ARMv7 architectures.  */
    {11, ARM_ARCH_V6M},
    {12, ARM_ARCH_V6SM},

    {10, ARM_ARCH_V7},
    {10, ARM_ARCH_V7A},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {10, ARM_ARCH_V7VE},
    {13, ARM_ARCH_V7EM},
    {14, ARM_ARCH_V8A},
    {14, ARM_ARCH_V8_1A},
    {14, ARM_ARCH_V8_2A},
    {14, ARM_ARCH_V8_3A},
    {16, ARM_ARCH_V8M_BASE},
    {17, ARM_ARCH_V8M_MAIN},
    {15, ARM_ARCH_V8R},
    {-1, ARM_ARCH_NONE}
};
26741
26742 /* Set an attribute if it has not already been set by the user. */
26743
26744 static void
26745 aeabi_set_attribute_int (int tag, int value)
26746 {
26747 if (tag < 1
26748 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26749 || !attributes_set_explicitly[tag])
26750 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
26751 }
26752
26753 static void
26754 aeabi_set_attribute_string (int tag, const char *value)
26755 {
26756 if (tag < 1
26757 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26758 || !attributes_set_explicitly[tag])
26759 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
26760 }
26761
/* Return whether features in the *NEEDED feature set are available via
   extensions for the architecture whose feature set is *ARCH_FSET.  */

static bfd_boolean
have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
			    const arm_feature_set *needed)
{
  int i, nb_allowed_archs;
  arm_feature_set ext_fset;
  const struct arm_option_extension_value_table *opt;

  /* Accumulate in EXT_FSET every feature that some extension applicable
     to *ARCH_FSET can provide.  */
  ext_fset = arm_arch_none;
  for (opt = arm_extensions; opt->name != NULL; opt++)
    {
      /* Extension does not provide any feature we need.  */
      if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
	continue;

      nb_allowed_archs =
	sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
      for (i = 0; i < nb_allowed_archs; i++)
	{
	  /* Empty entry.  NOTE(review): this break assumes empty entries
	     only pad the tail of allowed_archs — confirm against the
	     ARM_EXT_OPT/ARM_EXT_OPT2 table macros.  */
	  if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
	    break;

	  /* Extension is available, add it.  */
	  if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
	    ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
	}
    }

  /* Can we enable all features in *needed?  */
  return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
}
26797
/* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
   a given architecture feature set *ARCH_EXT_FSET including extension feature
   set *EXT_FSET.  Selection logic used depends on EXACT_MATCH:
   - if true, check for an exact match of the architecture modulo extensions;
   - otherwise, select build attribute value of the first superset
   architecture released so that results remain stable when new architectures
   are added.
   For -march/-mcpu=all the build attribute value of the most featureful
   architecture is returned.  Tag_CPU_arch_profile result is returned in
   PROFILE.  Returns the Tag_CPU_arch value, or -1 when no architecture
   covers the feature set.  */

static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* ARCH_FSET is the architecture's own features, with extension bits
     removed.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      /* Compare core features only, ignoring FPU bits.  */
      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  if (p_ver_ret == NULL)
    return -1;

found:
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
26909
/* Set the public EABI object attributes (the .ARM.attributes section):
   Tag_CPU_name, Tag_CPU_arch and its profile, FPU/SIMD tags, divide and
   virtualization tags.  User-set tags are left alone (see
   aeabi_set_attribute_*).  */

static void
aeabi_set_public_attributes (void)
{
  char profile;
  int arch = -1;
  int virt_sec = 0;
  int fp16_optional = 0;
  int skip_exact_match = 0;
  arm_feature_set flags, flags_arch, flags_ext;

  /* Autodetection mode, choose the architecture based on the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* Code run during relaxation relies on selected_cpu being set.  */
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    flags = selected_cpu;
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);

  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags_arch, *object_arch, fpu_any);
      flags_ext = arm_arch_none;
    }
  else
    {
      ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
      flags_ext = dyn_mcpu_ext_opt ? *dyn_mcpu_ext_opt : arm_arch_none;
      skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);
    }

  /* When this function is run again after relaxation has happened there is no
     way to determine whether an architecture or CPU was specified by the user:
     - selected_cpu is set above for relaxation to work;
     - march_cpu_opt is not set if only -mcpu or .cpu is used;
     - mcpu_cpu_opt is set to arm_arch_any for autodetection.
     Therefore, if not in -march=all case we first try an exact match and fall
     back to autodetection.  */
  if (!skip_exact_match)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
  if (arch == -1)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
  if (arch == -1)
    as_bad (_("no architecture contains all the instructions used\n"));

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* Architecture names ("armvX...") are reported upper-cased without
	 the "armv" prefix.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (dyn_mcpu_ext_opt && ARM_CPU_HAS_FEATURE (*dyn_mcpu_ext_opt, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || ARM_FEATURE_ZERO (flags_arch))
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || ARM_FEATURE_ZERO (flags_arch))
    {
      int thumb_isa_use;

      /* 3: v8-M only; 2: Thumb-2; 1: original 16-bit Thumb.  */
      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8M_MAIN);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
27110
27111 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
27112 finished and free extension feature bits which will not be used anymore. */
27113
27114 void
27115 arm_md_post_relax (void)
27116 {
27117 aeabi_set_public_attributes ();
27118 XDELETE (dyn_mcpu_ext_opt);
27119 dyn_mcpu_ext_opt = NULL;
27120 XDELETE (dyn_march_ext_opt);
27121 dyn_march_ext_opt = NULL;
27122 }
27123
27124 /* Add the default contents for the .ARM.attributes section. */
27125
27126 void
27127 arm_md_end (void)
27128 {
27129 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
27130 return;
27131
27132 aeabi_set_public_attributes ();
27133 }
27134 #endif /* OBJ_ELF */
27135
27136 /* Parse a .cpu directive. */
27137
/* Handler for the .cpu directive: read the CPU name from the input line,
   look it up in arm_cpus and switch the assembler's selected CPU, its
   extension set and the active cpu_variant accordingly.  An unknown name
   is diagnosed and the rest of the line ignored.  */

static void
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_cpu_option_table *opt;
  char *name;
  char saved_char;

  /* Temporarily NUL-terminate the name in the input buffer.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	if (!dyn_mcpu_ext_opt)
	  dyn_mcpu_ext_opt = XNEW (arm_feature_set);
	*dyn_mcpu_ext_opt = opt->ext;
	ARM_MERGE_FEATURE_SETS (selected_cpu, *mcpu_cpu_opt, *dyn_mcpu_ext_opt);
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;
	    /* No canonical name: report the table spelling upper-cased.  */
	    for (i = 0; opt->name[i]; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);

	    selected_cpu_name[i] = 0;
	  }
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	if (dyn_mcpu_ext_opt)
	  ARM_MERGE_FEATURE_SETS (cpu_variant, cpu_variant, *dyn_mcpu_ext_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
27181
27182 /* Parse a .arch directive. */
27183
27184 static void
27185 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
27186 {
27187 const struct arm_arch_option_table *opt;
27188 char saved_char;
27189 char *name;
27190
27191 name = input_line_pointer;
27192 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27193 input_line_pointer++;
27194 saved_char = *input_line_pointer;
27195 *input_line_pointer = 0;
27196
27197 /* Skip the first "all" entry. */
27198 for (opt = arm_archs + 1; opt->name != NULL; opt++)
27199 if (streq (opt->name, name))
27200 {
27201 mcpu_cpu_opt = &opt->value;
27202 XDELETE (dyn_mcpu_ext_opt);
27203 dyn_mcpu_ext_opt = NULL;
27204 selected_cpu = *mcpu_cpu_opt;
27205 strcpy (selected_cpu_name, opt->name);
27206 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, *mfpu_opt);
27207 *input_line_pointer = saved_char;
27208 demand_empty_rest_of_line ();
27209 return;
27210 }
27211
27212 as_bad (_("unknown architecture `%s'\n"), name);
27213 *input_line_pointer = saved_char;
27214 ignore_rest_of_line ();
27215 }
27216
27217 /* Parse a .object_arch directive. */
27218
27219 static void
27220 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
27221 {
27222 const struct arm_arch_option_table *opt;
27223 char saved_char;
27224 char *name;
27225
27226 name = input_line_pointer;
27227 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27228 input_line_pointer++;
27229 saved_char = *input_line_pointer;
27230 *input_line_pointer = 0;
27231
27232 /* Skip the first "all" entry. */
27233 for (opt = arm_archs + 1; opt->name != NULL; opt++)
27234 if (streq (opt->name, name))
27235 {
27236 object_arch = &opt->value;
27237 *input_line_pointer = saved_char;
27238 demand_empty_rest_of_line ();
27239 return;
27240 }
27241
27242 as_bad (_("unknown architecture `%s'\n"), name);
27243 *input_line_pointer = saved_char;
27244 ignore_rest_of_line ();
27245 }
27246
27247 /* Parse a .arch_extension directive. */
27248
27249 static void
27250 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
27251 {
27252 const struct arm_option_extension_value_table *opt;
27253 const arm_feature_set arm_any = ARM_ANY;
27254 char saved_char;
27255 char *name;
27256 int adding_value = 1;
27257
27258 name = input_line_pointer;
27259 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27260 input_line_pointer++;
27261 saved_char = *input_line_pointer;
27262 *input_line_pointer = 0;
27263
27264 if (strlen (name) >= 2
27265 && strncmp (name, "no", 2) == 0)
27266 {
27267 adding_value = 0;
27268 name += 2;
27269 }
27270
27271 for (opt = arm_extensions; opt->name != NULL; opt++)
27272 if (streq (opt->name, name))
27273 {
27274 int i, nb_allowed_archs =
27275 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
27276 for (i = 0; i < nb_allowed_archs; i++)
27277 {
27278 /* Empty entry. */
27279 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
27280 continue;
27281 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *mcpu_cpu_opt))
27282 break;
27283 }
27284
27285 if (i == nb_allowed_archs)
27286 {
27287 as_bad (_("architectural extension `%s' is not allowed for the "
27288 "current base architecture"), name);
27289 break;
27290 }
27291
27292 if (!dyn_mcpu_ext_opt)
27293 {
27294 dyn_mcpu_ext_opt = XNEW (arm_feature_set);
27295 *dyn_mcpu_ext_opt = arm_arch_none;
27296 }
27297 if (adding_value)
27298 ARM_MERGE_FEATURE_SETS (*dyn_mcpu_ext_opt, *dyn_mcpu_ext_opt,
27299 opt->merge_value);
27300 else
27301 ARM_CLEAR_FEATURE (*dyn_mcpu_ext_opt, *dyn_mcpu_ext_opt,
27302 opt->clear_value);
27303
27304 ARM_MERGE_FEATURE_SETS (selected_cpu, *mcpu_cpu_opt, *dyn_mcpu_ext_opt);
27305 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, *mfpu_opt);
27306 *input_line_pointer = saved_char;
27307 demand_empty_rest_of_line ();
27308 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
27309 on this return so that duplicate extensions (extensions with the
27310 same name as a previous extension in the list) are not considered
27311 for command-line parsing. */
27312 return;
27313 }
27314
27315 if (opt->name == NULL)
27316 as_bad (_("unknown architecture extension `%s'\n"), name);
27317
27318 *input_line_pointer = saved_char;
27319 ignore_rest_of_line ();
27320 }
27321
27322 /* Parse a .fpu directive. */
27323
27324 static void
27325 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
27326 {
27327 const struct arm_option_fpu_value_table *opt;
27328 char saved_char;
27329 char *name;
27330
27331 name = input_line_pointer;
27332 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27333 input_line_pointer++;
27334 saved_char = *input_line_pointer;
27335 *input_line_pointer = 0;
27336
27337 for (opt = arm_fpus; opt->name != NULL; opt++)
27338 if (streq (opt->name, name))
27339 {
27340 mfpu_opt = &opt->value;
27341 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
27342 if (dyn_mcpu_ext_opt)
27343 ARM_MERGE_FEATURE_SETS (cpu_variant, cpu_variant, *dyn_mcpu_ext_opt);
27344 *input_line_pointer = saved_char;
27345 demand_empty_rest_of_line ();
27346 return;
27347 }
27348
27349 as_bad (_("unknown floating point format `%s'\n"), name);
27350 *input_line_pointer = saved_char;
27351 ignore_rest_of_line ();
27352 }
27353
/* Copy symbol information.  Propagate the ARM-specific per-symbol flag
   word (accessed via the ARM_GET_FLAG macro) from SRC to DEST; no other
   symbol state is touched here.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
27361
#ifdef OBJ_ELF
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known.  The lookup is an exact,
   case-sensitive string match against the table below; a NULL NAME is
   treated as unknown.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  /* Mapping from the textual name of each recognized build attribute
     tag to its numeric Tag_* value.  */
  static const struct
  {
    const char * name;
    const int tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
	 also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_name),
      T (Tag_CPU_arch),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_FP_arch),
      T (Tag_VFP_arch),
      T (Tag_WMMX_arch),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align_needed),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align_preserved),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_FP_HP_extension),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_MPextension_use),
      T (Tag_DIV_use),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_T2EE_use),
      T (Tag_Virtualization_use),
      T (Tag_DSP_extension),
      /* We deliberately do not include Tag_MPextension_use_legacy.  */
#undef T
    };
  unsigned int i;

  if (name == NULL)
    return -1;

  /* Linear scan; the table is small and this is not a hot path.  */
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
27437
27438 /* Apply sym value for relocations only in the case that they are for
27439 local symbols in the same segment as the fixup and you have the
27440 respective architectural feature for blx and simple switches. */
27441
27442 int
27443 arm_apply_sym_value (struct fix * fixP, segT this_seg)
27444 {
27445 if (fixP->fx_addsy
27446 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
27447 /* PR 17444: If the local symbol is in a different section then a reloc
27448 will always be generated for it, so applying the symbol value now
27449 will result in a double offset being stored in the relocation. */
27450 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
27451 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
27452 {
27453 switch (fixP->fx_r_type)
27454 {
27455 case BFD_RELOC_ARM_PCREL_BLX:
27456 case BFD_RELOC_THUMB_PCREL_BRANCH23:
27457 if (ARM_IS_FUNC (fixP->fx_addsy))
27458 return 1;
27459 break;
27460
27461 case BFD_RELOC_ARM_PCREL_CALL:
27462 case BFD_RELOC_THUMB_PCREL_BLX:
27463 if (THUMB_IS_FUNC (fixP->fx_addsy))
27464 return 1;
27465 break;
27466
27467 default:
27468 break;
27469 }
27470
27471 }
27472 return 0;
27473 }
27474 #endif /* OBJ_ELF */