/* Excerpt of gas/config/tc-arm.c from the GNU binutils-gdb repository
   (snapshot taken via the git.ipfire.org gitweb mirror).  */
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2017 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
#ifdef OBJ_ELF
/* Must be at least the size of the largest unwind opcode (currently two).  */
#define ARM_OPCODE_CHUNK_SIZE 8

/* This structure holds the unwinding state while the ARM EHABI unwind
   directives (.fnstart/.fnend and friends) for the current function are
   being processed.  */

static struct
{
  /* Symbol marking the start of the function being unwound.  */
  symbolS * proc_start;
  /* Symbol for this function's entry in the unwind table.  */
  symbolS * table_entry;
  /* Personality routine symbol, when one is named explicitly.  */
  symbolS * personality_routine;
  /* Index of a predefined personality routine; the sentinel for "none"
     is established by the directive handlers (not visible in this
     chunk).  */
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;

#endif /* OBJ_ELF */
79
/* Results from operand parsing worker functions.  */

typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  /* Parsing failed in a way that rules out re-trying any alternative
     interpretation of the operand (no backtracking).  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
88
/* The floating point ABI variants that can be requested (see
   mfloat_abi_opt below).  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
95
/* Types of processor to assemble for.  */
#ifndef CPU_DEFAULT
/* The code that was here used to select a default CPU depending on compiler
   pre-defines which were only present when doing native builds, thus
   changing gas' default behaviour depending upon the build host.

   If you have a target that requires a default CPU option then you
   should define CPU_DEFAULT here.  */
#endif

/* Default FPU architecture to assume when no -mfpu option is given,
   chosen per target environment.  */
#ifndef FPU_DEFAULT
# ifdef TE_LINUX
#  define FPU_DEFAULT FPU_ARCH_FPA
# elif defined (TE_NetBSD)
#  ifdef OBJ_ELF
#   define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
#  else
    /* Legacy a.out format.  */
#   define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
#  endif
# elif defined (TE_VXWORKS)
#  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
# else
   /* For backwards compatibility, default to FPA.  */
#  define FPU_DEFAULT FPU_ARCH_FPA
# endif
#endif /* ifndef FPU_DEFAULT */

/* True when strings A and B compare equal.  */
#define streq(a, b) (strcmp (a, b) == 0)
125
/* The feature bits of the CPU being assembled for, and the feature sets
   actually used by ARM and Thumb code respectively.  */
static arm_feature_set cpu_variant;
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26 = FALSE;
static int atpcs = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float = FALSE;
static int pic_code = FALSE;
static int fix_v4bx = FALSE;
/* Warn on using deprecated features.  */
static int warn_on_deprecated = TRUE;

/* Understand CodeComposer Studio assembly syntax.  */
bfd_boolean codecomposer_syntax = FALSE;

/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */

/* CPU and FPU feature sets selected by legacy (pre -mcpu/-march) options.  */
static const arm_feature_set * legacy_cpu = NULL;
static const arm_feature_set * legacy_fpu = NULL;

/* CPU/FPU/arch selections from -mcpu=, -march= and -mfpu=; the dyn_*
   pointers hold dynamically allocated extension sets parsed from
   "+ext" suffixes.  */
static const arm_feature_set * mcpu_cpu_opt = NULL;
static arm_feature_set * dyn_mcpu_ext_opt = NULL;
static const arm_feature_set * mcpu_fpu_opt = NULL;
static const arm_feature_set * march_cpu_opt = NULL;
static arm_feature_set * dyn_march_ext_opt = NULL;
static const arm_feature_set * march_fpu_opt = NULL;
static const arm_feature_set * mfpu_opt = NULL;
static const arm_feature_set * object_arch = NULL;
/* Constants for known architecture features.  */

/* FPU architecture feature sets.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
#ifdef OBJ_ELF
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
#endif
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;

#ifdef CPU_DEFAULT
static const arm_feature_set cpu_default = CPU_DEFAULT;
#endif

/* Core architecture feature sets, one per architecture extension bit,
   used to gate individual instructions and directives.  */
static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
static const arm_feature_set arm_ext_v4t_5 =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
static const arm_feature_set arm_ext_v6_notm =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
static const arm_feature_set arm_ext_v6_dsp =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
static const arm_feature_set arm_ext_barrier =
  ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
static const arm_feature_set arm_ext_msr =
  ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
#ifdef OBJ_ELF
static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
#endif
static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
static const arm_feature_set arm_ext_m =
  ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
		    ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
static const arm_feature_set arm_ext_v8m_main =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
/* Instructions in ARMv8-M only found in M profile architectures.  */
static const arm_feature_set arm_ext_v8m_m_only =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
static const arm_feature_set arm_ext_v6t2_v8m =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
/* Instructions shared between ARMv8-A and ARMv8-M.  */
static const arm_feature_set arm_ext_atomics =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
#ifdef OBJ_ELF
/* DSP instructions Tag_DSP_extension refers to.  */
static const arm_feature_set arm_ext_dsp =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
#endif
static const arm_feature_set arm_ext_ras =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
/* FP16 instructions.  */
static const arm_feature_set arm_ext_fp16 =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
static const arm_feature_set arm_ext_fp16_fml =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
static const arm_feature_set arm_ext_v8_2 =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
static const arm_feature_set arm_ext_v8_3 =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);

static const arm_feature_set arm_arch_any = ARM_ANY;
#ifdef OBJ_ELF
static const arm_feature_set fpu_any = FPU_ANY;
#endif
static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;

/* Coprocessor and FPU extension feature sets.  */
static const arm_feature_set arm_cext_iwmmxt2 =
  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
static const arm_feature_set fpu_fpa_ext_v1 =
  ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 =
  ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
static const arm_feature_set fpu_vfp_ext_v3 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_ext_d32 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
static const arm_feature_set fpu_neon_ext_v1 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
#ifdef OBJ_ELF
static const arm_feature_set fpu_vfp_fp16 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
static const arm_feature_set fpu_neon_ext_fma =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
#endif
static const arm_feature_set fpu_vfp_ext_fma =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
static const arm_feature_set fpu_vfp_ext_armv8xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
static const arm_feature_set fpu_neon_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
static const arm_feature_set fpu_crypto_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
static const arm_feature_set crc_ext_armv8 =
  ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
static const arm_feature_set fpu_neon_ext_v8_1 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
static const arm_feature_set fpu_neon_ext_dotprod =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
303
/* Float ABI chosen by -mfloat-abi; presumably holds an enum
   arm_float_abi value, with -1 meaning "not given" — confirm against
   the option parser.  */
static int mfloat_abi_opt = -1;
/* Record user cpu selection for object attributes.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[20];

/* Scratch floating point value shared with the generic FP parsing code.  */
extern FLONUM_TYPE generic_floating_point_number;
311
312 /* Return if no cpu was selected on command-line. */
313 static bfd_boolean
314 no_cpu_selected (void)
315 {
316 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
317 }
318
#ifdef OBJ_ELF
/* EABI flag word (EF_ARM_* values) — presumably merged into the ELF
   header e_flags by the ELF backend; confirm against elf32-arm.  */
# ifdef EABI_DEFAULT
static int meabi_flags = EABI_DEFAULT;
# else
static int meabi_flags = EF_ARM_EABI_UNKNOWN;
# endif

/* Nonzero for each build attribute that was set explicitly (rather than
   deduced) — TODO confirm against the attribute directive handlers.  */
static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];

/* Return TRUE when assembling for EABI version 4 or later.  */
bfd_boolean
arm_is_eabi (void)
{
  return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
}
#endif
334
#ifdef OBJ_ELF
/* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
symbolS * GOT_symbol;
#endif

/* 0: assemble for ARM,
   1: assemble for Thumb,
   2: assemble for Thumb even though target CPU does not support thumb
      instructions.  */
static int thumb_mode = 0;
/* A value distinct from the possible values for thumb_mode that we
   can use to record whether thumb_mode has been copied into the
   tc_frag_data field of a frag.  */
#define MODE_RECORDED (1 << 4)

/* Specifies the intrinsic IT insn behavior mode.  The ARM and THUMB
   bits can be combined; ALWAYS is their union.  */
enum implicit_it_mode
{
  IMPLICIT_IT_MODE_NEVER  = 0x00,
  IMPLICIT_IT_MODE_ARM    = 0x01,
  IMPLICIT_IT_MODE_THUMB  = 0x02,
  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
};
/* Default: implicit IT handling for ARM-mode conditional mnemonics only.  */
static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
359
/* If unified_syntax is true, we are processing the new unified
   ARM/Thumb syntax.  Important differences from the old ARM mode:

     - Immediate operands do not require a # prefix.
     - Conditional affixes always appear at the end of the
       instruction.  (For backward compatibility, those instructions
       that formerly had them in the middle, continue to accept them
       there.)
     - The IT instruction may appear, and if it does is validated
       against subsequent conditional affixes.  It does not generate
       machine code.

   Important differences from the old Thumb mode:

     - Immediate operands do not require a # prefix.
     - Most of the V6T2 instructions are only available in unified mode.
     - The .N and .W suffixes are recognized and honored (it is an error
       if they cannot be honored).
     - All instructions set the flags if and only if they have an 's' affix.
     - Conditional affixes may be used.  They are validated against
       preceding IT instructions.  Unlike ARM mode, you cannot use a
       conditional affix except in the scope of an IT instruction.  */

static bfd_boolean unified_syntax = FALSE;

/* An immediate operand can start with #, and ld*, st*, pld operands
   can contain [ and ].  We need to tell APP not to elide whitespace
   before a [, which can appear as the first operand for pld.
   Likewise, a { can appear as the first operand for push, pop, vld*, etc.  */
const char arm_symbol_chars[] = "#[]{}";
390
/* Classification of the element type in a Neon type suffix
   (e.g. ".s32", ".f16").  */
enum neon_el_type
{
  NT_invtype,
  NT_untyped,
  NT_integer,
  NT_float,
  NT_poly,
  NT_signed,
  NT_unsigned
};

/* One parsed type element: its class and its size (presumably the
   element width in bits — confirm against the type parsers).  */
struct neon_type_el
{
  enum neon_el_type type;
  unsigned size;
};

/* Maximum number of type elements one mnemonic suffix can carry.  */
#define NEON_MAX_TYPE_ELS 4

/* A fully parsed Neon type suffix: ELEMS entries of EL are valid.  */
struct neon_type
{
  struct neon_type_el el[NEON_MAX_TYPE_ELS];
  unsigned elems;
};
415
/* Position/validity of an instruction with respect to an IT block,
   as tracked by the IT state machine (handle_it_state).  */
enum it_instruction_type
{
  OUTSIDE_IT_INSN,
  INSIDE_IT_INSN,
  INSIDE_IT_LAST_INSN,
  IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
			     if inside, should be the last one.  */
  NEUTRAL_IT_INSN,        /* This could be either inside or outside,
			     i.e. BKPT and NOP.  */
  IT_INSN                 /* The IT insn has been parsed.  */
};

/* The maximum number of operands we need.  */
#define ARM_IT_MAX_OPERANDS 6
430
/* All state for the instruction currently being assembled: mnemonic
   encoding, condition, relocation, and parsed operands.  */
struct arm_it
{
  /* Diagnostic to report, or NULL if assembly succeeded so far.  */
  const char *  error;
  /* The binary encoding being built up.  */
  unsigned long instruction;
  /* Size in bytes of the encoding, and any size explicitly requested
     via a .n/.w suffix.  */
  int           size;
  int           size_req;
  int           cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int           uncond_value;
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int           is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS              exp;
    int                      pc_rel;
  } reloc;

  enum it_instruction_type it_insn_type;

  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present    : 1;  /* Operand present.  */
    unsigned isreg      : 1;  /* Operand was a register.  */
    unsigned immisreg   : 1;  /* .imm field is a second register.  */
    unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc   : 1;  /* Operand has relocation suffix.  */
    unsigned writeback  : 1;  /* Operand has trailing !  */
    unsigned preind     : 1;  /* Preindexed address.  */
    unsigned postind    : 1;  /* Postindexed address.  */
    unsigned negative   : 1;  /* Index register was negated.  */
    unsigned shifted    : 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[ARM_IT_MAX_OPERANDS];
};

/* The instruction currently being assembled.  */
static struct arm_it inst;
486
#define NUM_FLOAT_VALS 8

/* Floating point constants recognised directly by name; NULL-terminated.
   Presumably the set encodable as FPA immediates — confirm against the
   FPA instruction handlers.  */
const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* Number of littlenums required to hold an extended precision number.  */
#define MAX_LITTLENUMS 6

/* Littlenum representations of the strings in fp_const; filled in
   elsewhere (not visible in this chunk).  */
LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];

/* Generic failure/success results used throughout the parsers.  */
#define FAIL	(-1)
#define SUCCESS (0)

/* Floating point precision suffix codes (.s/.d/.e/.p).  */
#define SUFF_S 1
#define SUFF_D 2
#define SUFF_E 3
#define SUFF_P 4

/* Bits toggled in coprocessor (FPA) encodings.  */
#define CP_T_X	 0x00008000
#define CP_T_Y	 0x00400000

#define CONDS_BIT	 0x00100000
#define LOAD_BIT	 0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001
514
/* Mapping of a condition-code name to its encoding value.  */
struct asm_cond
{
  const char *	 template_name;
  unsigned long  value;
};

/* Encoding of the AL (always) condition.  */
#define COND_ALWAYS 0xE

/* Mapping of a PSR (or PSR field) name to its encoding.  */
struct asm_psr
{
  const char *   template_name;
  unsigned long  field;
};

/* A barrier option name, its encoding, and the architecture feature set
   that provides it.  */
struct asm_barrier_opt
{
  const char *	  template_name;
  unsigned long   value;
  const arm_feature_set arch;
};

/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT   (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c	(1 << 16)
#define PSR_x	(1 << 17)
#define PSR_s	(1 << 18)
#define PSR_f	(1 << 19)
544
/* Mapping of a relocation-operator name (e.g. in "(got)") to the BFD
   relocation it selects.  */
struct reloc_entry
{
  const char *              name;
  bfd_reloc_code_real_type  reloc;
};

/* Field positions a VFP register number can occupy in an encoding.  */
enum vfp_reg_pos
{
  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
};

/* Addressing variants of the VFP load/store-multiple instructions.  */
enum vfp_ldstm_type
{
  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
};

/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

/* Extra information attached to a register alias created with .dn/.qn:
   an optional element type and/or scalar index.  */
struct neon_typed_alias
{
  /* Mask of NTA_HASTYPE / NTA_HASINDEX saying which fields are valid.  */
  unsigned char        defined;
  unsigned char        index;
  struct neon_type_el  eltype;
};
572
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  Keep this in sync with
   reg_expected_msgs below, which is indexed by these values.  */
enum arm_reg_type
{
  REG_TYPE_RN,		/* Core (integer) register.  */
  REG_TYPE_CP,		/* Coprocessor number (p0..).  */
  REG_TYPE_CN,		/* Coprocessor register (c0..).  */
  REG_TYPE_FN,		/* FPA register.  */
  REG_TYPE_VFS,		/* VFP single-precision register.  */
  REG_TYPE_VFD,		/* VFP/Neon double-precision register.  */
  REG_TYPE_NQ,		/* Neon quad-precision register.  */
  REG_TYPE_VFSD,	/* VFP single or double precision.  */
  REG_TYPE_NDQ,		/* Neon double or quad precision.  */
  REG_TYPE_NSD,		/* Neon single or double precision.  */
  REG_TYPE_NSDQ,	/* VFP single, double or Neon quad precision.  */
  REG_TYPE_VFC,		/* VFP system register.  */
  REG_TYPE_MVF,		/* Maverick register classes.  */
  REG_TYPE_MVD,
  REG_TYPE_MVFX,
  REG_TYPE_MVDX,
  REG_TYPE_MVAX,
  REG_TYPE_DSPSC,
  REG_TYPE_MMXWR,	/* iWMMXt register classes.  */
  REG_TYPE_MMXWC,
  REG_TYPE_MMXWCG,
  REG_TYPE_XSCALE,	/* XScale accumulator register.  */
  REG_TYPE_RNB
};
601
/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn).  Otherwise NEON should be NULL.  */
struct reg_entry
{
  const char *               name;
  /* Register number within its class.  */
  unsigned int               number;
  /* An enum arm_reg_type value, narrowed to a char.  */
  unsigned char              type;
  /* Nonzero for built-in names (as opposed to .req/.dn/.qn aliases) —
     TODO confirm against the alias-creation code.  */
  unsigned char              builtin;
  struct neon_typed_alias *  neon;
};
614
615 /* Diagnostics used when we don't get a register of the expected type. */
616 const char * const reg_expected_msgs[] =
617 {
618 N_("ARM register expected"),
619 N_("bad or missing co-processor number"),
620 N_("co-processor register expected"),
621 N_("FPA register expected"),
622 N_("VFP single precision register expected"),
623 N_("VFP/Neon double precision register expected"),
624 N_("Neon quad precision register expected"),
625 N_("VFP single or double precision register expected"),
626 N_("Neon double or quad precision register expected"),
627 N_("Neon single or double precision register expected"),
628 N_("VFP single, double or Neon quad precision register expected"),
629 N_("VFP system register expected"),
630 N_("Maverick MVF register expected"),
631 N_("Maverick MVD register expected"),
632 N_("Maverick MVFX register expected"),
633 N_("Maverick MVDX register expected"),
634 N_("Maverick MVAX register expected"),
635 N_("Maverick DSPSC register expected"),
636 N_("iWMMXt data register expected"),
637 N_("iWMMXt control register expected"),
638 N_("iWMMXt scalar register expected"),
639 N_("XScale accumulator register expected"),
640 };
641
/* Some well known registers that we refer to directly elsewhere.  */
#define REG_R12	12
#define REG_SP	13
#define REG_LR	14
#define REG_PC	15

/* ARM instructions take 4bytes in the object file, Thumb instructions
   take 2:  */
#define INSN_SIZE	4
651
/* One entry of the opcode table: a mnemonic together with its ARM and
   Thumb encodings, the architectures that provide each, and the
   per-encoding fixup functions.  */
struct asm_opcode
{
  /* Basic string to match.  */
  const char * template_name;

  /* Parameters to instruction.  */
  unsigned int operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue : 28;

  /* Thumb-format instruction code.  */
  unsigned int tvalue;

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set * avariant;
  const arm_feature_set * tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);
};
679
/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE	0x02000000
#define OFFSET_REG	0x02000000
#define HWOFFSET_IMM	0x00400000
#define SHIFT_BY_REG	0x00000010
#define PRE_INDEX	0x01000000
#define INDEX_UP	0x00800000
#define WRITE_BACK	0x00200000
#define LDM_TYPE_2_OR_3	0x00400000
#define CPSI_MMOD	0x00020000

#define LITERAL_MASK	0xf000f000
#define OPCODE_MASK	0xfe1fffff
#define V4_STR_BIT	0x00000020
#define VLDR_VMOV_SAME	0x0040f000

#define T2_SUBS_PC_LR	0xf3de8f00

/* Field positions in ARM data-processing encodings.  */
#define DATA_OP_SHIFT	21
#define SBIT_SHIFT	20

/* And the Thumb-2 equivalents.  */
#define T2_OPCODE_MASK	0xfe1fffff
#define T2_DATA_OP_SHIFT 21
#define T2_SBIT_SHIFT	 20

#define A_COND_MASK         0xf0000000
#define A_PUSH_POP_OP_MASK  0x0fff0000

/* Opcodes for pushing/poping registers to/from the stack.  */
#define A1_OPCODE_PUSH    0x092d0000
#define A2_OPCODE_PUSH    0x052d0004
#define A2_OPCODE_POP     0x049d0004

/* Codes to distinguish the arithmetic instructions.  */
#define OPCODE_AND	0
#define OPCODE_EOR	1
#define OPCODE_SUB	2
#define OPCODE_RSB	3
#define OPCODE_ADD	4
#define OPCODE_ADC	5
#define OPCODE_SBC	6
#define OPCODE_RSC	7
#define OPCODE_TST	8
#define OPCODE_TEQ	9
#define OPCODE_CMP	10
#define OPCODE_CMN	11
#define OPCODE_ORR	12
#define OPCODE_MOV	13
#define OPCODE_BIC	14
#define OPCODE_MVN	15

/* Thumb-2 data-processing opcode values.  */
#define T2_OPCODE_AND	0
#define T2_OPCODE_BIC	1
#define T2_OPCODE_ORR	2
#define T2_OPCODE_ORN	3
#define T2_OPCODE_EOR	4
#define T2_OPCODE_ADD	8
#define T2_OPCODE_ADC	10
#define T2_OPCODE_SBC	11
#define T2_OPCODE_SUB	13
#define T2_OPCODE_RSB	14

/* 16-bit Thumb instruction opcode values.  */
#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3	0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R	0x4100
#define T_OPCODE_LSL_R	0x4080
#define T_OPCODE_LSR_R  0x40c0
#define T_OPCODE_ROR_R  0x41c0
#define T_OPCODE_ASR_I	0x1000
#define T_OPCODE_LSL_I	0x0000
#define T_OPCODE_LSR_I	0x0800

#define T_OPCODE_MOV_I8	0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH	0xb400
#define T_OPCODE_POP	0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE	2	/* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000
799
/* Common diagnostic messages.  These macros expand to expressions (they
   are assigned to inst.error and the like), so none of them may end in
   a semicolon.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* Fixed: this macro used to carry a stray trailing ';' in its
   expansion, which would break any expression use (ternaries, function
   arguments); statement uses only worked by accident.  */
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY	_("relocation valid in thumb1 code only")
823
/* Hash tables mapping mnemonics, condition names, shift names, PSR
   names, register names, relocation operators and barrier options to
   their descriptors.  */
static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static struct hash_control * arm_shift_hsh;
static struct hash_control * arm_psr_hsh;
static struct hash_control * arm_v7m_psr_hsh;
static struct hash_control * arm_reg_hsh;
static struct hash_control * arm_reloc_hsh;
static struct hash_control * arm_barrier_opt_hsh;

/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
	      <insn>  */

symbolS *  last_label_seen;
static int label_is_thumb_function_name = FALSE;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  expressionS	         literals [MAX_LITERAL_POOL_SIZE];
  unsigned int	         next_free_entry;
  unsigned int	         id;
  symbolS *	         symbol;
  segT		         section;
  subsegT	         sub_section;
#ifdef OBJ_ELF
  /* Source locations of the literals, for debug line info.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  struct literal_pool *  next;
  /* Alignment (presumably log2) required when the pool is emitted —
     TODO confirm against the pool-emission code.  */
  unsigned int		 alignment;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;

/* State of .asmfunc/.endasmfunc processing (CodeComposer syntax).  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,
  WAITING_ASMFUNC_NAME,
  WAITING_ENDASMFUNC
} asmfunc_states;

static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
875
/* The current IT-block state.  For ELF it lives in the per-segment
   info (presumably so state is kept per section across section
   switches — confirm); otherwise a single global is used.  */
#ifdef OBJ_ELF
#  define now_it seg_info (now_seg)->tc_segment_info_data.current_it
#else
static struct current_it now_it;
#endif
881
882 static inline int
883 now_it_compatible (int cond)
884 {
885 return (cond & ~1) == (now_it.cc & ~1);
886 }
887
888 static inline int
889 conditional_insn (void)
890 {
891 return inst.cond != COND_ALWAYS;
892 }
893
/* Forward declarations for the IT-state machinery defined later in
   this file.  */
static int in_it_block (void);

static int handle_it_state (void);

static void force_automatic_it_block_close (void);

static void it_fsm_post_encode (void);
901
/* Record the IT-related type of the instruction being assembled and run
   the IT state machine; on failure the enclosing void function returns
   (handle_it_state presumably sets inst.error — confirm).  */
#define set_it_insn_type(type)			\
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return;					\
    }						\
  while (0)

/* As set_it_insn_type, for use in functions returning a value.  */
#define set_it_insn_type_nonvoid(type, failret) \
  do						\
    {                                           \
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return failret;				\
    }						\
  while(0)

/* Mark the current instruction as (potentially) the last one of an IT
   block, depending on whether it is conditional.  */
#define set_it_insn_type_last()				\
  do							\
    {							\
      if (inst.cond == COND_ALWAYS)			\
	set_it_insn_type (IF_INSIDE_IT_LAST_INSN);	\
      else						\
	set_it_insn_type (INSIDE_IT_LAST_INSN);		\
    }							\
  while (0)
929
/* Pure syntax.  */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or    0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* Note: deliberately skips at most ONE space -- operand text is
   presumably whitespace-collapsed by this point; confirm against the
   tokenizer.  */
#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
964
965 static inline int
966 skip_past_char (char ** str, char c)
967 {
968 /* PR gas/14987: Allow for whitespace before the expected character. */
969 skip_whitespace (*str);
970
971 if (**str == c)
972 {
973 (*str)++;
974 return SUCCESS;
975 }
976 else
977 return FAIL;
978 }
979
/* Consume an optional-space-prefixed comma; SUCCESS or FAIL.  */
#define skip_past_comma(str) skip_past_char (str, ',')

/* Arithmetic expressions (possibly involving symbols).  */

/* Return TRUE if anything in the expression is a bignum.  */
985
986 static bfd_boolean
987 walk_no_bignums (symbolS * sp)
988 {
989 if (symbol_get_value_expression (sp)->X_op == O_big)
990 return TRUE;
991
992 if (symbol_get_value_expression (sp)->X_add_symbol)
993 {
994 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
995 || (symbol_get_value_expression (sp)->X_op_symbol
996 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
997 }
998
999 return FALSE;
1000 }
1001
/* Nonzero while expression () is running on behalf of
   my_get_expression; md_operand uses it to flag bad operands.  */
static bfd_boolean in_my_get_expression = FALSE;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0		/* No '#'/'$' prefix accepted.  */
#define GE_IMM_PREFIX 1		/* A '#' or '$' prefix is required.  */
#define GE_OPT_PREFIX 2		/* The prefix is optional.  */
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
1011
/* Parse an arithmetic expression from *STR into EP, applying the
   immediate-prefix policy PREFIX_MODE (one of the GE_* values above).
   On success, advance *STR past the expression and return SUCCESS; on
   failure set inst.error (if not already set) and return nonzero.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic parser reads from input_line_pointer, so temporarily
     redirect it to our string, restoring it on every exit path.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = TRUE;
  seg = expression (ep);
  in_my_get_expression = FALSE;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      /* NOTE: error paths here return 1 (not FAIL); callers only test
	 for nonzero.  */
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1098
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represent in the normal way on the ARM.
   In big endian mode, things are as expected.  However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */

const char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  /* Map the constant's type letter to its size in littlenums.  */
  switch (type)
    {
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    case 'x':
    case 'X':
      prec = 5;
      break;

    /* 'p'/'P' (packed) gets the same 5-littlenum treatment as extended
       precision.  */
    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
    {
      /* Straightforward: most significant littlenum first.  */
      for (i = 0; i < prec; i++)
	{
	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	  litP += sizeof (LITTLENUM_TYPE);
	}
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	/* Pure little-endian FP: emit littlenums in reverse order.  */
	for (i = prec - 1; i >= 0; i--)
	  {
	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += sizeof (LITTLENUM_TYPE);
	  }
      else
	/* For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2.  */
	for (i = 0; i < prec; i += 2)
	  {
	    md_number_to_chars (litP, (valueT) words[i + 1],
				sizeof (LITTLENUM_TYPE));
	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
				(valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += 2 * sizeof (LITTLENUM_TYPE);
	  }
    }

  return NULL;
}
1188
1189 /* We handle all bad expressions here, so that we can report the faulty
1190 instruction in the error message. */
1191
1192 void
1193 md_operand (expressionS * exp)
1194 {
1195 if (in_my_get_expression)
1196 exp->X_op = O_illegal;
1197 }
1198
1199 /* Immediate values. */
1200
1201 #ifdef OBJ_ELF
1202 /* Generic immediate-value read function for use in directives.
1203 Accepts anything that 'expression' can fold to a constant.
1204 *val receives the number. */
1205
1206 static int
1207 immediate_for_directive (int *val)
1208 {
1209 expressionS exp;
1210 exp.X_op = O_illegal;
1211
1212 if (is_immediate_prefix (*input_line_pointer))
1213 {
1214 input_line_pointer++;
1215 expression (&exp);
1216 }
1217
1218 if (exp.X_op != O_constant)
1219 {
1220 as_bad (_("expected #constant"));
1221 ignore_rest_of_line ();
1222 return FAIL;
1223 }
1224 *val = exp.X_add_number;
1225 return SUCCESS;
1226 }
1227 #endif
1228
1229 /* Register parsing. */
1230
1231 /* Generic register parser. CCP points to what should be the
1232 beginning of a register name. If it is indeed a valid register
1233 name, advance CCP over it and return the reg_entry structure;
1234 otherwise return NULL. Does not issue diagnostics. */
1235
static struct reg_entry *
arm_reg_parse_multi (char **ccp)
{
  char *start = *ccp;
  char *p;
  struct reg_entry *reg;

  skip_whitespace (start);

#ifdef REGISTER_PREFIX
  /* A mandatory register prefix must be present.  */
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif
#ifdef OPTIONAL_REGISTER_PREFIX
  if (*start == OPTIONAL_REGISTER_PREFIX)
    start++;
#endif

  /* Register names look like identifiers: a name-beginner followed by
     alphanumerics and underscores.  */
  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the candidate name up in the register hash table.  */
  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  /* Only consume the input on a successful lookup.  */
  *ccp = p;
  return reg;
}
1271
/* Accept the limited alternative syntaxes valid for a few register
   classes (see the switch body).  START is the unconsumed input, REG
   the (possibly NULL) entry found by the generic parser.  Returns a
   register number, or FAIL if no alternative syntax applies.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1310
1311 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1312 return value is the register number or FAIL. */
1313
static int
arm_reg_parse (char **ccp, enum arm_reg_type type)
{
  char *start = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (ccp);
  int ret;

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
    return FAIL;

  if (reg && reg->type == type)
    return reg->number;

  /* Try the alternative syntaxes (coprocessor numbers etc.).  */
  if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
    return ret;

  /* On failure, leave *CCP where it started.  */
  *ccp = start;
  return FAIL;
}
1334
1335 /* Parse a Neon type specifier. *STR should point at the leading '.'
1336 character. Does no verification at this stage that the type fits the opcode
1337 properly. E.g.,
1338
1339 .i32.i32.s16
1340 .s32.f32
1341 .u16
1342
1343 Can all be legally parsed by this function.
1344
1345 Fills in neon_type struct pointer with parsed information, and updates STR
1346 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1347 type, FAIL if not. */
1348
1349 static int
1350 parse_neon_type (struct neon_type *type, char **str)
1351 {
1352 char *ptr = *str;
1353
1354 if (type)
1355 type->elems = 0;
1356
1357 while (type->elems < NEON_MAX_TYPE_ELS)
1358 {
1359 enum neon_el_type thistype = NT_untyped;
1360 unsigned thissize = -1u;
1361
1362 if (*ptr != '.')
1363 break;
1364
1365 ptr++;
1366
1367 /* Just a size without an explicit type. */
1368 if (ISDIGIT (*ptr))
1369 goto parsesize;
1370
1371 switch (TOLOWER (*ptr))
1372 {
1373 case 'i': thistype = NT_integer; break;
1374 case 'f': thistype = NT_float; break;
1375 case 'p': thistype = NT_poly; break;
1376 case 's': thistype = NT_signed; break;
1377 case 'u': thistype = NT_unsigned; break;
1378 case 'd':
1379 thistype = NT_float;
1380 thissize = 64;
1381 ptr++;
1382 goto done;
1383 default:
1384 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1385 return FAIL;
1386 }
1387
1388 ptr++;
1389
1390 /* .f is an abbreviation for .f32. */
1391 if (thistype == NT_float && !ISDIGIT (*ptr))
1392 thissize = 32;
1393 else
1394 {
1395 parsesize:
1396 thissize = strtoul (ptr, &ptr, 10);
1397
1398 if (thissize != 8 && thissize != 16 && thissize != 32
1399 && thissize != 64)
1400 {
1401 as_bad (_("bad size %d in type specifier"), thissize);
1402 return FAIL;
1403 }
1404 }
1405
1406 done:
1407 if (type)
1408 {
1409 type->el[type->elems].type = thistype;
1410 type->el[type->elems].size = thissize;
1411 type->elems++;
1412 }
1413 }
1414
1415 /* Empty/missing type is not a successful parse. */
1416 if (type->elems == 0)
1417 return FAIL;
1418
1419 *str = ptr;
1420
1421 return SUCCESS;
1422 }
1423
1424 /* Errors may be set multiple times during parsing or bit encoding
1425 (particularly in the Neon bits), but usually the earliest error which is set
1426 will be the most meaningful. Avoid overwriting it with later (cascading)
1427 errors by calling this function. */
1428
1429 static void
1430 first_error (const char *err)
1431 {
1432 if (!inst.error)
1433 inst.error = err;
1434 }
1435
1436 /* Parse a single type, e.g. ".s32", leading period included. */
1437 static int
1438 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1439 {
1440 char *str = *ccp;
1441 struct neon_type optype;
1442
1443 if (*str == '.')
1444 {
1445 if (parse_neon_type (&optype, &str) == SUCCESS)
1446 {
1447 if (optype.elems == 1)
1448 *vectype = optype.el[0];
1449 else
1450 {
1451 first_error (_("only one type should be specified for operand"));
1452 return FAIL;
1453 }
1454 }
1455 else
1456 {
1457 first_error (_("vector type expected"));
1458 return FAIL;
1459 }
1460 }
1461 else
1462 return FAIL;
1463
1464 *ccp = str;
1465
1466 return SUCCESS;
1467 }
1468
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES 15		/* "[]" suffix: operate on all lanes.  */
#define NEON_INTERLEAVE_LANES 14	/* List with no index: interleaving.  */
1474
1475 /* Parse either a register or a scalar, with an optional type. Return the
1476 register number, and optionally fill in the actual type of the register
1477 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1478 type/index information in *TYPEINFO. */
1479
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with "no type, no index".  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* Inherit any type/index information attached to the register by a
     typed alias.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix must not conflict with an alias-supplied
     type.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional "[index]" or "[]" scalar suffix.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      /* S registers may only be indexed when ARMv8.2 is available --
	 presumably for the half-precision scalar instructions; confirm.  */
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2)))
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* "[]" means all lanes; otherwise a constant lane number.  */
      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1587
1588 /* Like arm_reg_parse, but allow allow the following extra features:
1589 - If RTYPE is non-zero, return the (possibly restricted) type of the
1590 register (e.g. Neon double or quad reg when either has been requested).
1591 - If this is a Neon vector type with additional type information, fill
1592 in the struct pointed to by VECTYPE (if non-NULL).
1593 This function will fault on encountering a scalar. */
1594
1595 static int
1596 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1597 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1598 {
1599 struct neon_typed_alias atype;
1600 char *str = *ccp;
1601 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1602
1603 if (reg == FAIL)
1604 return FAIL;
1605
1606 /* Do not allow regname(... to parse as a register. */
1607 if (*str == '(')
1608 return FAIL;
1609
1610 /* Do not allow a scalar (reg+index) to parse as a register. */
1611 if ((atype.defined & NTA_HASINDEX) != 0)
1612 {
1613 first_error (_("register operand expected, but got scalar"));
1614 return FAIL;
1615 }
1616
1617 if (vectype)
1618 *vectype = atype.eltype;
1619
1620 *ccp = str;
1621
1622 return reg;
1623 }
1624
/* Accessors for the packed scalar encoding produced by parse_scalar:
   register number in the high bits, lane index in the low 4 bits.  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)

/* Parse a Neon scalar.  Most of the time when we're parsing a scalar, we don't
   have enough information to be able to do a good job bounds-checking.  So, we
   just do easy checks here, and do further checks later.  */

static int
parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
{
  int reg;
  char *str = *ccp;
  struct neon_typed_alias atype;
  enum arm_reg_type reg_type = REG_TYPE_VFD;

  /* An element size of 4 bytes selects single-precision (S) registers.  */
  if (elsize == 4)
    reg_type = REG_TYPE_VFS;

  reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);

  /* A register without an index is not a scalar.  */
  if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
    return FAIL;

  if (atype.index == NEON_ALL_LANES)
    {
      first_error (_("scalar must have an index"));
      return FAIL;
    }
  /* Bound the index by the number of ELSIZE-byte elements in 64 bits.  */
  else if (atype.index >= 64 / elsize)
    {
      first_error (_("scalar index out of range"));
      return FAIL;
    }

  if (type)
    *type = atype.eltype;

  *ccp = str;

  /* Pack register and lane; see NEON_SCALAR_REG/NEON_SCALAR_INDEX.  */
  return reg * 16 + atype.index;
}
1666
1667 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1668
1669 static long
1670 parse_reg_list (char ** strp)
1671 {
1672 char * str = * strp;
1673 long range = 0;
1674 int another_range;
1675
1676 /* We come back here if we get ranges concatenated by '+' or '|'. */
1677 do
1678 {
1679 skip_whitespace (str);
1680
1681 another_range = 0;
1682
1683 if (*str == '{')
1684 {
1685 int in_range = 0;
1686 int cur_reg = -1;
1687
1688 str++;
1689 do
1690 {
1691 int reg;
1692
1693 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1694 {
1695 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1696 return FAIL;
1697 }
1698
1699 if (in_range)
1700 {
1701 int i;
1702
1703 if (reg <= cur_reg)
1704 {
1705 first_error (_("bad range in register list"));
1706 return FAIL;
1707 }
1708
1709 for (i = cur_reg + 1; i < reg; i++)
1710 {
1711 if (range & (1 << i))
1712 as_tsktsk
1713 (_("Warning: duplicated register (r%d) in register list"),
1714 i);
1715 else
1716 range |= 1 << i;
1717 }
1718 in_range = 0;
1719 }
1720
1721 if (range & (1 << reg))
1722 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1723 reg);
1724 else if (reg <= cur_reg)
1725 as_tsktsk (_("Warning: register range not in ascending order"));
1726
1727 range |= 1 << reg;
1728 cur_reg = reg;
1729 }
1730 while (skip_past_comma (&str) != FAIL
1731 || (in_range = 1, *str++ == '-'));
1732 str--;
1733
1734 if (skip_past_char (&str, '}') == FAIL)
1735 {
1736 first_error (_("missing `}'"));
1737 return FAIL;
1738 }
1739 }
1740 else
1741 {
1742 expressionS exp;
1743
1744 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1745 return FAIL;
1746
1747 if (exp.X_op == O_constant)
1748 {
1749 if (exp.X_add_number
1750 != (exp.X_add_number & 0x0000ffff))
1751 {
1752 inst.error = _("invalid register mask");
1753 return FAIL;
1754 }
1755
1756 if ((range & exp.X_add_number) != 0)
1757 {
1758 int regno = range & exp.X_add_number;
1759
1760 regno &= -regno;
1761 regno = (1 << regno) - 1;
1762 as_tsktsk
1763 (_("Warning: duplicated register (r%d) in register list"),
1764 regno);
1765 }
1766
1767 range |= exp.X_add_number;
1768 }
1769 else
1770 {
1771 if (inst.reloc.type != 0)
1772 {
1773 inst.error = _("expression too complex");
1774 return FAIL;
1775 }
1776
1777 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1778 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1779 inst.reloc.pc_rel = 0;
1780 }
1781 }
1782
1783 if (*str == '|' || *str == '+')
1784 {
1785 str++;
1786 another_range = 1;
1787 }
1788 }
1789 while (another_range);
1790
1791 *strp = str;
1792 return range;
1793 }
1794
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision (S) VFP registers.  */
  REGLIST_VFP_D,	/* Double-precision (D) VFP registers.  */
  REGLIST_NEON_D	/* Neon D registers; Q syntax also accepted.  */
};
1803
1804 /* Parse a VFP register list. If the string is invalid return FAIL.
1805 Otherwise return the number of registers, and set PBASE to the first
1806 register. Parses registers of type ETYPE.
1807 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1808 - Q registers can be used to specify pairs of D registers
1809 - { } can be omitted from around a singleton register list
1810 FIXME: This is not implemented, as it would require backtracking in
1811 some cases, e.g.:
1812 vtbl.8 d3,d4,d5
1813 This could be done (the meaning isn't really ambiguous), but doesn't
1814 fit in well with the current parsing framework.
1815 - 32 D registers may be used (also true for VFPv3).
1816 FIXME: Types are ignored in these register lists, which is probably a
1817 bug. */
1818
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Select the register class and (for S registers) the limit.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* Start BASE_REG above any valid register so the first register
     parsed always becomes the base.  */
  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  /* A Q register occupies two consecutive D-register slots.  */
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* Qn-Qm covers up to the second D register of Qm.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Add each register in the range, rejecting overlaps.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): this skips what is assumed to be the closing '}'
     without checking for it -- a missing brace is not diagnosed here;
     confirm callers tolerate this.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1982
1983 /* True if two alias types are the same. */
1984
1985 static bfd_boolean
1986 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1987 {
1988 if (!a && !b)
1989 return TRUE;
1990
1991 if (!a || !b)
1992 return FALSE;
1993
1994 if (a->defined != b->defined)
1995 return FALSE;
1996
1997 if ((a->defined & NTA_HASTYPE) != 0
1998 && (a->eltype.type != b->eltype.type
1999 || a->eltype.size != b->eltype.size))
2000 return FALSE;
2001
2002 if ((a->defined & NTA_HASINDEX) != 0
2003 && (a->index != b->index))
2004 return FALSE;
2005
2006 return TRUE;
2007 }
2008
2009 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2010 The base register is put in *PBASE.
2011 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2012 the return value.
2013 The register stride (minus one) is put in bit 4 of the return value.
2014 Bits [6:5] encode the list length (minus one).
2015 The type of the list elements is put in *ELTYPE, if non-NULL. */
2016
/* Accessors for the encoded return value of parse_neon_el_struct_list:
   lane in bits [3:0], stride-minus-one in bit 4, length-minus-one in
   bits [6:5].  */
#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)

static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;
  int count = 0;
  int lane = -1;
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  /* Braces are optional around a singleton list (see loop condition).  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: establishes the base and reference type.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Subsequent registers must continue the arithmetic series.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count the D registers spanned by the range (a Q register
	     counts as two).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All indexed entries must name the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2177
2178 /* Parse an explicit relocation suffix on an expression. This is
2179 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2180 arm_reloc_hsh contains no entries, so this function can only
2181 succeed if there is no () after the word. Returns -1 on error,
2182 BFD_RELOC_UNUSED if there wasn't any suffix. */
2183
2184 static int
2185 parse_reloc (char **str)
2186 {
2187 struct reloc_entry *r;
2188 char *p, *q;
2189
2190 if (**str != '(')
2191 return BFD_RELOC_UNUSED;
2192
2193 p = *str + 1;
2194 q = p;
2195
2196 while (*q && *q != ')' && *q != ',')
2197 q++;
2198 if (*q != ')')
2199 return -1;
2200
2201 if ((r = (struct reloc_entry *)
2202 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2203 return -1;
2204
2205 *str = q + 1;
2206 return r->reloc;
2207 }
2208
2209 /* Directives: register aliases. */
2210
2211 static struct reg_entry *
2212 insert_reg_alias (char *str, unsigned number, int type)
2213 {
2214 struct reg_entry *new_reg;
2215 const char *name;
2216
2217 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2218 {
2219 if (new_reg->builtin)
2220 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2221
2222 /* Only warn about a redefinition if it's not defined as the
2223 same register. */
2224 else if (new_reg->number != number || new_reg->type != type)
2225 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2226
2227 return NULL;
2228 }
2229
2230 name = xstrdup (str);
2231 new_reg = XNEW (struct reg_entry);
2232
2233 new_reg->name = name;
2234 new_reg->number = number;
2235 new_reg->type = type;
2236 new_reg->builtin = FALSE;
2237 new_reg->neon = NULL;
2238
2239 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2240 abort ();
2241
2242 return new_reg;
2243 }
2244
2245 static void
2246 insert_neon_reg_alias (char *str, int number, int type,
2247 struct neon_typed_alias *atype)
2248 {
2249 struct reg_entry *reg = insert_reg_alias (str, number, type);
2250
2251 if (!reg)
2252 {
2253 first_error (_("attempt to redefine typed alias"));
2254 return;
2255 }
2256
2257 if (atype)
2258 {
2259 reg->neon = XNEW (struct neon_typed_alias);
2260 *reg->neon = *atype;
2261 }
2262 }
2263
2264 /* Look for the .req directive. This is of the form:
2265
2266 new_register_name .req existing_register_name
2267
2268 If we find one, or if it looks sufficiently like one that we want to
2269 handle any error here, return TRUE. Otherwise return FALSE. */
2270
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces. */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  /* Skip over " .req " to reach the existing register's name.  */
  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      /* Unknown target register: warn but report the directive as
	 handled so no further parsing is attempted.  */
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  /* Work on a NUL-terminated copy; the case conversions below modify
     it in place.  */
  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the case variant if it actually differs from the
	 name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2343
2344 /* Create a Neon typed/indexed register alias using directives, e.g.:
2345 X .dn d5.s32[1]
2346 Y .qn 6.s16
2347 Z .dn d7
2348 T .dn Z[0]
2349 These typed registers can be used instead of the types specified after the
2350 Neon mnemonic, so long as all operands given have types. Types can also be
2351 specified directly, e.g.:
2352 vadd d0.s32, d1.s32, d2.s32 */
2353
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  /* Start with no type and no index recorded.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* .dn defines a D-register alias, .qn a Q-register alias; anything
     else is not ours to handle.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      /* Fake up a register entry; Q registers are numbered in D-register
	 units, hence the doubling.  */
      basereg = &mybasereg;
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any type/index attached to the base register alias.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2492
2493 /* Should never be called, as .req goes between the alias and the
2494 register name, not at the beginning of the line. */
2495
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* A .req reaching the pseudo-op table means it appeared at the
     start of a line, which is always a syntax error.  */
  as_bad (_("invalid syntax for .req directive"));
}
2501
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  /* Like .req, .dn must follow the alias name; seeing it here is a
     syntax error.  */
  as_bad (_("invalid syntax for .dn directive"));
}
2507
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  /* Like .req, .qn must follow the alias name; seeing it here is a
     syntax error.  */
  as_bad (_("invalid syntax for .qn directive"));
}
2513
2514 /* The .unreq directive deletes an alias which was previously defined
2515 by .req. For example:
2516
2517 my_alias .req r11
2518 .unreq my_alias */
2519
2520 static void
2521 s_unreq (int a ATTRIBUTE_UNUSED)
2522 {
2523 char * name;
2524 char saved_char;
2525
2526 name = input_line_pointer;
2527
2528 while (*input_line_pointer != 0
2529 && *input_line_pointer != ' '
2530 && *input_line_pointer != '\n')
2531 ++input_line_pointer;
2532
2533 saved_char = *input_line_pointer;
2534 *input_line_pointer = 0;
2535
2536 if (!*name)
2537 as_bad (_("invalid syntax for .unreq directive"));
2538 else
2539 {
2540 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2541 name);
2542
2543 if (!reg)
2544 as_bad (_("unknown register alias '%s'"), name);
2545 else if (reg->builtin)
2546 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2547 name);
2548 else
2549 {
2550 char * p;
2551 char * nbuf;
2552
2553 hash_delete (arm_reg_hsh, name, FALSE);
2554 free ((char *) reg->name);
2555 if (reg->neon)
2556 free (reg->neon);
2557 free (reg);
2558
2559 /* Also locate the all upper case and all lower case versions.
2560 Do not complain if we cannot find one or the other as it
2561 was probably deleted above. */
2562
2563 nbuf = strdup (name);
2564 for (p = nbuf; *p; p++)
2565 *p = TOUPPER (*p);
2566 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2567 if (reg)
2568 {
2569 hash_delete (arm_reg_hsh, nbuf, FALSE);
2570 free ((char *) reg->name);
2571 if (reg->neon)
2572 free (reg->neon);
2573 free (reg);
2574 }
2575
2576 for (p = nbuf; *p; p++)
2577 *p = TOLOWER (*p);
2578 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2579 if (reg)
2580 {
2581 hash_delete (arm_reg_hsh, nbuf, FALSE);
2582 free ((char *) reg->name);
2583 if (reg->neon)
2584 free (reg->neon);
2585 free (reg);
2586 }
2587
2588 free (nbuf);
2589 }
2590 }
2591
2592 *input_line_pointer = saved_char;
2593 demand_empty_rest_of_line ();
2594 }
2595
2596 /* Directives: Instruction set selection. */
2597
2598 #ifdef OBJ_ELF
2599 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2600 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2601 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2602 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2603
2604 /* Create a new mapping symbol for the transition to STATE. */
2605
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the AAELF mapping symbol name: $d for data, $a for ARM code,
     $t for Thumb code.  All three are untyped (BSF_NO_FLAGS).  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark code-mapping symbols with the matching ARM/Thumb and
     interworking attributes; data symbols need none.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Same-offset duplicate within this frag: drop the older one.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2679
2680 /* We must sometimes convert a region marked as code to data during
2681 code alignment, if an odd number of bytes have to be padded. The
2682 code mapping symbol is pushed to an aligned address. */
2683
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* A symbol at offset 0 is also the frag's first mapping symbol;
	 clear that slot too before removing it.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding as data, then restore the code state after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2706
2707 static void mapping_state_2 (enum mstate state, int max_chars);
2708
2709 /* Set the mapping state to STATE. Only call this when about to
2710 emit some STATE bytes to the file. */
2711
2712 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  /* Current mapping state of the section being emitted into.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to me marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  /* Delegate the actual symbol emission; no bytes are pre-allocated.  */
  mapping_state_2 (state, 0);
}
2745
2746 /* Same as mapping_state, but MAX_CHARS bytes have already been
2747 allocated. Put the mapping symbol that far back. */
2748
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  /* Current mapping state of the section being emitted into.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only ordinary sections get mapping symbols.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* On the first code symbol in a section, anything already emitted
     (before the state was known) must be retroactively marked as
     data at the section start.  */
  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  /* Record the new state and place its symbol MAX_CHARS bytes back,
     at the start of the already-allocated region.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2775 #undef TRANSITION
2776 #else
2777 #define mapping_state(x) ((void)0)
2778 #define mapping_state_2(x, y) ((void)0)
2779 #endif
2780
2781 /* Find the real, Thumb encoded start of a Thumb function. */
2782
2783 #ifdef OBJ_COFF
static symbolS *
find_real_start (symbolS * symbolP)
{
  char * real_start;
  const char * name = S_GET_NAME (symbolP);
  symbolS * new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.	 */
#define STUB_NAME ".real_start_of"

  if (name == NULL)
    abort ();

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location. These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  /* Look up ".real_start_of<name>" in the symbol table.  */
  real_start = concat (STUB_NAME, name, NULL);
  new_target = symbol_find (real_start);
  free (real_start);

  /* Fall back to the original symbol if the stub label is absent.  */
  if (new_target == NULL)
    {
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}
2817 #endif
2818
2819 static void
2820 opcode_select (int width)
2821 {
2822 switch (width)
2823 {
2824 case 16:
2825 if (! thumb_mode)
2826 {
2827 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2828 as_bad (_("selected processor does not support THUMB opcodes"));
2829
2830 thumb_mode = 1;
2831 /* No need to force the alignment, since we will have been
2832 coming from ARM mode, which is word-aligned. */
2833 record_alignment (now_seg, 1);
2834 }
2835 break;
2836
2837 case 32:
2838 if (thumb_mode)
2839 {
2840 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2841 as_bad (_("selected processor does not support ARM opcodes"));
2842
2843 thumb_mode = 0;
2844
2845 if (!need_pass_2)
2846 frag_align (2, 0, 0);
2847
2848 record_alignment (now_seg, 1);
2849 }
2850 break;
2851
2852 default:
2853 as_bad (_("invalid instruction size selected (%d)"), width);
2854 }
2855 }
2856
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  /* .arm: switch to 32-bit ARM encoding.  */
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2863
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* .thumb: switch to 16-bit Thumb encoding.  */
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2870
2871 static void
2872 s_code (int unused ATTRIBUTE_UNUSED)
2873 {
2874 int temp;
2875
2876 temp = get_absolute_expression ();
2877 switch (temp)
2878 {
2879 case 16:
2880 case 32:
2881 opcode_select (temp);
2882 break;
2883
2884 default:
2885 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2886 }
2887 }
2888
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.	 */
  if (! thumb_mode)
    {
      /* 2 (rather than 1) marks "forced" Thumb mode: no feature check
	 was performed.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2905
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  /* .thumb_func implies .thumb for the code that follows.  */
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.	 */
  label_is_thumb_function_name = TRUE;
}
2915
2916 /* Perform a .set directive, but also mark the alias as
2917 being a thumb function. */
2918
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  /* The symbol name must be followed by a comma and an expression.  */
  if (*input_line_pointer != ',')
    {
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  /* Create the symbol if it does not exist yet.  */
  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Restore the delimiter that was overwritten above.  */
  * end_name = delim;

  /* .eqv (equiv != 0) forbids redefinition of an already-defined
     symbol.  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.	 */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3004
3005 /* Directives: Mode selection. */
3006
3007 /* .syntax [unified|divided] - choose the new unified syntax
3008 (same for Arm and Thumb encoding, modulo slight differences in what
3009 can be represented) or the old divergent syntax for each mode. */
3010 static void
3011 s_syntax (int unused ATTRIBUTE_UNUSED)
3012 {
3013 char *name, delim;
3014
3015 delim = get_symbol_name (& name);
3016
3017 if (!strcasecmp (name, "unified"))
3018 unified_syntax = TRUE;
3019 else if (!strcasecmp (name, "divided"))
3020 unified_syntax = FALSE;
3021 else
3022 {
3023 as_bad (_("unrecognized syntax mode \"%s\""), name);
3024 return;
3025 }
3026 (void) restore_line_pointer (delim);
3027 demand_empty_rest_of_line ();
3028 }
3029
3030 /* Directives: sectioning and alignment. */
3031
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.	*/
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3044
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* .even: align to a 2-byte boundary.  */
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3056
3057 /* Directives: CodeComposer Studio. */
3058
3059 /* .ref (for CodeComposer Studio syntax only). */
3060 static void
3061 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3062 {
3063 if (codecomposer_syntax)
3064 ignore_rest_of_line ();
3065 else
3066 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3067 }
3068
3069 /* If name is not NULL, then it is used for marking the beginning of a
3070 function, whereas if it is NULL then it means the function end. */
static void
asmfunc_debug (const char * name)
{
  /* Name passed at the last function start; NULL between functions.
     Calls must alternate start (name != NULL) / end (name == NULL).  */
  static const char * last_name = NULL;

  if (name != NULL)
    {
      /* Function start: no function may already be open.  */
      gas_assert (last_name == NULL);
      last_name = name;

      if (debug_type == DEBUG_STABS)
	 stabs_generate_asm_func (name, name);
    }
  else
    {
      /* Function end: a function must be open.  */
      gas_assert (last_name != NULL);

      if (debug_type == DEBUG_STABS)
	stabs_generate_asm_endfunc (last_name, last_name);

      last_name = NULL;
    }
}
3094
3095 static void
3096 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3097 {
3098 if (codecomposer_syntax)
3099 {
3100 switch (asmfunc_state)
3101 {
3102 case OUTSIDE_ASMFUNC:
3103 asmfunc_state = WAITING_ASMFUNC_NAME;
3104 break;
3105
3106 case WAITING_ASMFUNC_NAME:
3107 as_bad (_(".asmfunc repeated."));
3108 break;
3109
3110 case WAITING_ENDASMFUNC:
3111 as_bad (_(".asmfunc without function."));
3112 break;
3113 }
3114 demand_empty_rest_of_line ();
3115 }
3116 else
3117 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3118 }
3119
3120 static void
3121 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3122 {
3123 if (codecomposer_syntax)
3124 {
3125 switch (asmfunc_state)
3126 {
3127 case OUTSIDE_ASMFUNC:
3128 as_bad (_(".endasmfunc without a .asmfunc."));
3129 break;
3130
3131 case WAITING_ASMFUNC_NAME:
3132 as_bad (_(".endasmfunc without function."));
3133 break;
3134
3135 case WAITING_ENDASMFUNC:
3136 asmfunc_state = OUTSIDE_ASMFUNC;
3137 asmfunc_debug (NULL);
3138 break;
3139 }
3140 demand_empty_rest_of_line ();
3141 }
3142 else
3143 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3144 }
3145
static void
s_ccs_def (int name)
{
  /* .def (CodeComposer Studio syntax) is treated as .global.  */
  if (codecomposer_syntax)
    s_globl (name);
  else
    as_bad (_(".def pseudo-op only available with -mccs flag."));
}
3154
3155 /* Directives: Literal pools. */
3156
3157 static literal_pool *
3158 find_literal_pool (void)
3159 {
3160 literal_pool * pool;
3161
3162 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3163 {
3164 if (pool->section == now_seg
3165 && pool->sub_section == now_subseg)
3166 break;
3167 }
3168
3169 return pool;
3170 }
3171
3172 static literal_pool *
3173 find_or_make_literal_pool (void)
3174 {
3175 /* Next literal pool ID number. */
3176 static unsigned int latest_pool_num = 1;
3177 literal_pool * pool;
3178
3179 pool = find_literal_pool ();
3180
3181 if (pool == NULL)
3182 {
3183 /* Create a new pool. */
3184 pool = XNEW (literal_pool);
3185 if (! pool)
3186 return NULL;
3187
3188 pool->next_free_entry = 0;
3189 pool->section = now_seg;
3190 pool->sub_section = now_subseg;
3191 pool->next = list_of_pools;
3192 pool->symbol = NULL;
3193 pool->alignment = 2;
3194
3195 /* Add it to the list. */
3196 list_of_pools = pool;
3197 }
3198
3199 /* New pools, and emptied pools, will have a NULL symbol. */
3200 if (pool->symbol == NULL)
3201 {
3202 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3203 (valueT) 0, &zero_address_frag);
3204 pool->id = latest_pool_num ++;
3205 }
3206
3207 /* Done. */
3208 return pool;
3209 }
3210
3211 /* Add the literal in the global 'inst'
3212 structure to the relevant literal pool. */
3213
static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  /* An 8-byte literal is stored as two 4-byte halves; compute them in
     the byte order the target expects.  */
  if (nbytes == 8)
    {
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.reloc.exp.X_unsigned ? 0
	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match against an existing 4-byte constant entry...  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.reloc.exp.X_unsigned))
	    break;

	  /* ... or an existing symbolic entry.  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.reloc.exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.reloc.exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte literal matches a pair of consecutive 4-byte entries
	 at an 8-byte-aligned offset.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.reloc.exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.reloc.exp.X_unsigned))
	break;

      /* A padding slot inserted for 8-byte alignment may be reused by
	 a later 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.reloc.exp.X_op == O_constant
		|| inst.reloc.exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Pool currently ends on a 4-byte boundary: emit a
		 tagged padding slot first.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.reloc.exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 4-byte halves as consecutive entries and
	     raise the pool alignment to 8 bytes.  */
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Reuse the padding slot for this 4-byte literal.  */
      pool->literals[entry] = inst.reloc.exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's relocation to reference the pool symbol
     at the entry's byte offset.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = pool_size;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3375
3376 bfd_boolean
3377 tc_start_label_without_colon (void)
3378 {
3379 bfd_boolean ret = TRUE;
3380
3381 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3382 {
3383 const char *label = input_line_pointer;
3384
3385 while (!is_end_of_line[(int) label[-1]])
3386 --label;
3387
3388 if (*label == '.')
3389 {
3390 as_bad (_("Invalid label '%s'"), label);
3391 ret = FALSE;
3392 }
3393
3394 asmfunc_debug (label);
3395
3396 asmfunc_state = WAITING_ENDASMFUNC;
3397 }
3398
3399 return ret;
3400 }
3401
3402 /* Can't use symbol_new here, so have to create a symbol and then at
3403 a later date assign it a value. That's what these functions do. */
3404
/* Give the previously-created symbol SYMBOLP its name, segment, value
   and fragment, then append it to the global symbol chain.  The name is
   copied onto the notes obstack so the caller may reuse its buffer.  */

static void
symbol_locate (symbolS *       symbolP,
	       const char *    name,	/* It is copied, the caller can modify.  */
	       segT	       segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	       valu,	/* Symbol value.  */
	       fragS *	       frag)	/* Associated fragment.  */
{
  size_t name_length;
  char * preserved_copy_of_name;

  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  /* Let the object format and target hook into symbol creation.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3452
/* Implement the .ltorg directive: emit the current literal pool at this
   point in the output and mark the pool empty.  Does nothing if there is
   no pool or it has no entries.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* The pool is data, not code: switch the mapping state and emit a $d
     mapping symbol so disassemblers treat it correctly.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The \002 byte keeps this generated name out of the user namespace.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* The pool symbol was created earlier with no location; pin it here.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3504
3505 #ifdef OBJ_ELF
3506 /* Forward declarations for functions below, in the MD interface
3507 section. */
3508 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3509 static valueT create_unwind_entry (int);
3510 static void start_unwind_section (const segT, int);
3511 static void add_unwind_opcode (valueT, int);
3512 static void flush_pending_unwind (void);
3513
3514 /* Directives: Data. */
3515
/* Handle .word/.short style data directives (NBYTES is the item size),
   allowing ARM-specific relocation suffixes such as (got) or (plt) on
   symbolic operands.  Each comma-separated expression is emitted with
   either emit_expr or an explicit fixup.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix present; emit the plain symbolic expression.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Splice the reloc suffix out of the input buffer,
		     re-parse the full expression, then restore the
		     buffer from the saved copy.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the reloc at the most significant end of the
		     field when it is smaller than the item emitted.
		     NOTE(review): this offset math presumably assumes a
		     little-endian layout of the fixup within the field;
		     confirm against fix_new_exp handling.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3610
3611 /* Emit an expression containing a 32-bit thumb instruction.
3612 Implementation based on put_thumb32_insn. */
3613
3614 static void
3615 emit_thumb32_expr (expressionS * exp)
3616 {
3617 expressionS exp_high = *exp;
3618
3619 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3620 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3621 exp->X_add_number &= 0xffff;
3622 emit_expr (exp, (unsigned int) THUMB_SIZE);
3623 }
3624
3625 /* Guess the instruction size based on the opcode. */
3626
3627 static int
3628 thumb_insn_size (int opcode)
3629 {
3630 if ((unsigned int) opcode < 0xe800u)
3631 return 2;
3632 else if ((unsigned int) opcode >= 0xe8000000u)
3633 return 4;
3634 else
3635 return 0;
3636 }
3637
/* Emit one .inst operand EXP as an instruction of NBYTES bytes (0 means
   guess the size from the opcode in Thumb mode).  Returns TRUE when
   something was emitted.  Interacts with the IT-block state machine, so
   the call order relative to other instructions matters.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width suffix: infer it from the opcode value.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block FSM consistent with the raw opcode
		 being inserted.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* Little-endian Thumb-2 needs the halfword-swapped form.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3682
/* Implement the .inst/.inst.n/.inst.w directives (NBYTES is 0, 2 or 4).
   Like s_arm_elf_cons but do not use md_cons_align and
   set the mapping state to MAP_ARM/MAP_THUMB.  */

static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      /* ARM instructions are always 4 bytes; a width suffix makes no
	 sense here.  */
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  /* Emit each comma-separated opcode value.  */
  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3732
/* Parse a .rel31 directive: ".rel31 <highbit>, <expression>".  Emits a
   word whose top bit is <highbit> and whose low 31 bits get a PREL31
   (31-bit PC-relative) relocation against <expression>.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  /* Emit the high bit now; the fixup fills in the low 31 bits.  */
  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3772
3773 /* Directives: AEABI stack-unwind tables. */
3774
/* Parse an unwind_fnstart directive.  Simply records the current location
   and resets the per-function unwind state; .fnend later uses this to
   build the EHABI index table entry.  */

static void
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (unwind.proc_start)
    {
      as_bad (_("duplicate .fnstart directive"));
      return;
    }

  /* Mark the start of the function.  */
  unwind.proc_start = expr_build_dot ();

  /* Reset the rest of the unwind info.  */
  unwind.opcode_count = 0;
  unwind.table_entry = NULL;
  unwind.personality_routine = NULL;
  unwind.personality_index = -1;
  unwind.frame_size = 0;
  unwind.fp_offset = 0;
  unwind.fp_reg = REG_SP;
  unwind.fp_used = 0;
  unwind.sp_restored = 0;
}
3801
3802
3803 /* Parse a handlerdata directive. Creates the exception handling table entry
3804 for the function. */
3805
3806 static void
3807 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3808 {
3809 demand_empty_rest_of_line ();
3810 if (!unwind.proc_start)
3811 as_bad (MISSING_FNSTART);
3812
3813 if (unwind.table_entry)
3814 as_bad (_("duplicate .handlerdata directive"));
3815
3816 create_unwind_entry (1);
3817 }
3818
/* Parse an unwind_fnend directive.  Generates the index table entry:
   two words in the exception index section, the first a PREL31
   reference to the function start, the second either an inline unwind
   entry or a PREL31 reference to the table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size BFD_RELOC_NONE fixup records the reference without
	 emitting any bytes; it exists purely to pull in the routine.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3888
3889
3890 /* Parse an unwind_cantunwind directive. */
3891
3892 static void
3893 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3894 {
3895 demand_empty_rest_of_line ();
3896 if (!unwind.proc_start)
3897 as_bad (MISSING_FNSTART);
3898
3899 if (unwind.personality_routine || unwind.personality_index != -1)
3900 as_bad (_("personality routine specified for cantunwind frame"));
3901
3902 unwind.personality_index = -2;
3903 }
3904
3905
3906 /* Parse a personalityindex directive. */
3907
3908 static void
3909 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3910 {
3911 expressionS exp;
3912
3913 if (!unwind.proc_start)
3914 as_bad (MISSING_FNSTART);
3915
3916 if (unwind.personality_routine || unwind.personality_index != -1)
3917 as_bad (_("duplicate .personalityindex directive"));
3918
3919 expression (&exp);
3920
3921 if (exp.X_op != O_constant
3922 || exp.X_add_number < 0 || exp.X_add_number > 15)
3923 {
3924 as_bad (_("bad personality routine number"));
3925 ignore_rest_of_line ();
3926 return;
3927 }
3928
3929 unwind.personality_index = exp.X_add_number;
3930
3931 demand_empty_rest_of_line ();
3932 }
3933
3934
/* Parse a personality directive: records the named symbol as this
   function's personality routine.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* get_symbol_name terminates the name in place; restore the
     clobbered character after the symbol has been interned.  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  *p = c;
  demand_empty_rest_of_line ();
}
3956
3957
/* Parse a directive saving core registers.  Emits EHABI pop opcodes for
   the register list currently at input_line_pointer and accounts for the
   pushed bytes in unwind.frame_size.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.	 */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4033
4034
4035 /* Parse a directive saving FPA registers. */
4036
4037 static void
4038 s_arm_unwind_save_fpa (int reg)
4039 {
4040 expressionS exp;
4041 int num_regs;
4042 valueT op;
4043
4044 /* Get Number of registers to transfer. */
4045 if (skip_past_comma (&input_line_pointer) != FAIL)
4046 expression (&exp);
4047 else
4048 exp.X_op = O_illegal;
4049
4050 if (exp.X_op != O_constant)
4051 {
4052 as_bad (_("expected , <constant>"));
4053 ignore_rest_of_line ();
4054 return;
4055 }
4056
4057 num_regs = exp.X_add_number;
4058
4059 if (num_regs < 1 || num_regs > 4)
4060 {
4061 as_bad (_("number of registers must be in the range [1:4]"));
4062 ignore_rest_of_line ();
4063 return;
4064 }
4065
4066 demand_empty_rest_of_line ();
4067
4068 if (reg == 4)
4069 {
4070 /* Short form. */
4071 op = 0xb4 | (num_regs - 1);
4072 add_unwind_opcode (op, 1);
4073 }
4074 else
4075 {
4076 /* Long form. */
4077 op = 0xc800 | (reg << 4) | (num_regs - 1);
4078 add_unwind_opcode (op, 2);
4079 }
4080 unwind.frame_size += num_regs * 12;
4081 }
4082
4083
/* Parse a directive saving VFP registers for ARMv6 and above.  Splits a
   D-register list at d16 because the sub-d16 and d16+ ranges use
   different EHABI opcodes.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes on the stack.  */
  unwind.frame_size += count * 8;
}
4132
4133
4134 /* Parse a directive saving VFP registers for pre-ARMv6. */
4135
4136 static void
4137 s_arm_unwind_save_vfp (void)
4138 {
4139 int count;
4140 unsigned int reg;
4141 valueT op;
4142
4143 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4144 if (count == FAIL)
4145 {
4146 as_bad (_("expected register list"));
4147 ignore_rest_of_line ();
4148 return;
4149 }
4150
4151 demand_empty_rest_of_line ();
4152
4153 if (reg == 8)
4154 {
4155 /* Short form. */
4156 op = 0xb8 | (count - 1);
4157 add_unwind_opcode (op, 1);
4158 }
4159 else
4160 {
4161 /* Long form. */
4162 op = 0xb300 | (reg << 4) | (count - 1);
4163 add_unwind_opcode (op, 2);
4164 }
4165 unwind.frame_size += count * 8 + 4;
4166 }
4167
4168
4169 /* Parse a directive saving iWMMXt data registers. */
4170
4171 static void
4172 s_arm_unwind_save_mmxwr (void)
4173 {
4174 int reg;
4175 int hi_reg;
4176 int i;
4177 unsigned mask = 0;
4178 valueT op;
4179
4180 if (*input_line_pointer == '{')
4181 input_line_pointer++;
4182
4183 do
4184 {
4185 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4186
4187 if (reg == FAIL)
4188 {
4189 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4190 goto error;
4191 }
4192
4193 if (mask >> reg)
4194 as_tsktsk (_("register list not in ascending order"));
4195 mask |= 1 << reg;
4196
4197 if (*input_line_pointer == '-')
4198 {
4199 input_line_pointer++;
4200 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4201 if (hi_reg == FAIL)
4202 {
4203 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4204 goto error;
4205 }
4206 else if (reg >= hi_reg)
4207 {
4208 as_bad (_("bad register range"));
4209 goto error;
4210 }
4211 for (; reg < hi_reg; reg++)
4212 mask |= 1 << reg;
4213 }
4214 }
4215 while (skip_past_comma (&input_line_pointer) != FAIL);
4216
4217 skip_past_char (&input_line_pointer, '}');
4218
4219 demand_empty_rest_of_line ();
4220
4221 /* Generate any deferred opcodes because we're going to be looking at
4222 the list. */
4223 flush_pending_unwind ();
4224
4225 for (i = 0; i < 16; i++)
4226 {
4227 if (mask & (1 << i))
4228 unwind.frame_size += 8;
4229 }
4230
4231 /* Attempt to combine with a previous opcode. We do this because gcc
4232 likes to output separate unwind directives for a single block of
4233 registers. */
4234 if (unwind.opcode_count > 0)
4235 {
4236 i = unwind.opcodes[unwind.opcode_count - 1];
4237 if ((i & 0xf8) == 0xc0)
4238 {
4239 i &= 7;
4240 /* Only merge if the blocks are contiguous. */
4241 if (i < 6)
4242 {
4243 if ((mask & 0xfe00) == (1 << 9))
4244 {
4245 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4246 unwind.opcode_count--;
4247 }
4248 }
4249 else if (i == 6 && unwind.opcode_count >= 2)
4250 {
4251 i = unwind.opcodes[unwind.opcode_count - 2];
4252 reg = i >> 4;
4253 i &= 0xf;
4254
4255 op = 0xffff << (reg - 1);
4256 if (reg > 0
4257 && ((mask & op) == (1u << (reg - 1))))
4258 {
4259 op = (1 << (reg + i + 1)) - 1;
4260 op &= ~((1 << reg) - 1);
4261 mask |= op;
4262 unwind.opcode_count -= 2;
4263 }
4264 }
4265 }
4266 }
4267
4268 hi_reg = 15;
4269 /* We want to generate opcodes in the order the registers have been
4270 saved, ie. descending order. */
4271 for (reg = 15; reg >= -1; reg--)
4272 {
4273 /* Save registers in blocks. */
4274 if (reg < 0
4275 || !(mask & (1 << reg)))
4276 {
4277 /* We found an unsaved reg. Generate opcodes to save the
4278 preceding block. */
4279 if (reg != hi_reg)
4280 {
4281 if (reg == 9)
4282 {
4283 /* Short form. */
4284 op = 0xc0 | (hi_reg - 10);
4285 add_unwind_opcode (op, 1);
4286 }
4287 else
4288 {
4289 /* Long form. */
4290 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4291 add_unwind_opcode (op, 2);
4292 }
4293 }
4294 hi_reg = reg - 1;
4295 }
4296 }
4297
4298 return;
4299 error:
4300 ignore_rest_of_line ();
4301 }
4302
/* Parse a directive saving iWMMXt control registers (wcgr0-wcgr3).
   Builds a 4-bit mask (registers are biased by 8 in the parser's
   numbering) and emits a single 0xc7 opcode.  */

static void
s_arm_unwind_save_mmxwcg (void)
{
  int reg;
  int hi_reg;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  skip_whitespace (input_line_pointer);

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	  goto error;
	}

      /* wcgr registers are numbered from 8 internally; rebase to 0.  */
      reg -= 8;
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.	*/
  flush_pending_unwind ();

  /* Each saved control register occupies one word.  */
  for (reg = 0; reg < 16; reg++)
    {
      if (mask & (1 << reg))
	unwind.frame_size += 4;
    }
  op = 0xc700 | mask;
  add_unwind_opcode (op, 2);
  return;
error:
  ignore_rest_of_line ();
}
4370
4371
/* Parse an unwind_save directive.
   If the argument is non-zero, this is a .vsave directive.  Peeks at the
   first register in the operand to decide which register class's parser
   to dispatch to; the peek does not consume input.  */

static void
s_arm_unwind_save (int arch_v6)
{
  char *peek;
  struct reg_entry *reg;
  bfd_boolean had_brace = FALSE;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Figure out what sort of save we have.  */
  peek = input_line_pointer;

  if (*peek == '{')
    {
      had_brace = TRUE;
      peek++;
    }

  reg = arm_reg_parse_multi (&peek);

  if (!reg)
    {
      as_bad (_("register expected"));
      ignore_rest_of_line ();
      return;
    }

  switch (reg->type)
    {
    case REG_TYPE_FN:
      if (had_brace)
	{
	  as_bad (_("FPA .unwind_save does not take a register list"));
	  ignore_rest_of_line ();
	  return;
	}
      /* FPA parsing continues after the register name, so commit the
	 peeked position.  */
      input_line_pointer = peek;
      s_arm_unwind_save_fpa (reg->number);
      return;

    case REG_TYPE_RN:
      s_arm_unwind_save_core ();
      return;

    case REG_TYPE_VFD:
      if (arch_v6)
	s_arm_unwind_save_vfp_armv6 ();
      else
	s_arm_unwind_save_vfp ();
      return;

    case REG_TYPE_MMXWR:
      s_arm_unwind_save_mmxwr ();
      return;

    case REG_TYPE_MMXWCG:
      s_arm_unwind_save_mmxwcg ();
      return;

    default:
      as_bad (_(".unwind_save does not support this kind of register"));
      ignore_rest_of_line ();
    }
}
4440
4441
/* Parse an unwind_movsp directive: ".movsp <reg> [, #offset]".  Records
   that sp has been restored from <reg> and emits the corresponding
   0x90 | reg opcode.  */

static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.	 */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  /* The encoding cannot express sp or pc as the source register.  */
  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  /* .movsp after the frame pointer was already moved makes no sense.  */
  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value.  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later.	*/
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
4491
4492 /* Parse an unwind_pad directive. */
4493
4494 static void
4495 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4496 {
4497 int offset;
4498
4499 if (!unwind.proc_start)
4500 as_bad (MISSING_FNSTART);
4501
4502 if (immediate_for_directive (&offset) == FAIL)
4503 return;
4504
4505 if (offset & 3)
4506 {
4507 as_bad (_("stack increment must be multiple of 4"));
4508 ignore_rest_of_line ();
4509 return;
4510 }
4511
4512 /* Don't generate any opcodes, just record the details for later. */
4513 unwind.frame_size += offset;
4514 unwind.pending_offset += offset;
4515
4516 demand_empty_rest_of_line ();
4517 }
4518
4519 /* Parse an unwind_setfp directive. */
4520
4521 static void
4522 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4523 {
4524 int sp_reg;
4525 int fp_reg;
4526 int offset;
4527
4528 if (!unwind.proc_start)
4529 as_bad (MISSING_FNSTART);
4530
4531 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4532 if (skip_past_comma (&input_line_pointer) == FAIL)
4533 sp_reg = FAIL;
4534 else
4535 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4536
4537 if (fp_reg == FAIL || sp_reg == FAIL)
4538 {
4539 as_bad (_("expected <reg>, <reg>"));
4540 ignore_rest_of_line ();
4541 return;
4542 }
4543
4544 /* Optional constant. */
4545 if (skip_past_comma (&input_line_pointer) != FAIL)
4546 {
4547 if (immediate_for_directive (&offset) == FAIL)
4548 return;
4549 }
4550 else
4551 offset = 0;
4552
4553 demand_empty_rest_of_line ();
4554
4555 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4556 {
4557 as_bad (_("register must be either sp or set by a previous"
4558 "unwind_movsp directive"));
4559 return;
4560 }
4561
4562 /* Don't generate any opcodes, just record the information for later. */
4563 unwind.fp_reg = fp_reg;
4564 unwind.fp_used = 1;
4565 if (sp_reg == REG_SP)
4566 unwind.fp_offset = unwind.frame_size - offset;
4567 else
4568 unwind.fp_offset -= offset;
4569 }
4570
4571 /* Parse an unwind_raw directive. */
4572
4573 static void
4574 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4575 {
4576 expressionS exp;
4577 /* This is an arbitrary limit. */
4578 unsigned char op[16];
4579 int count;
4580
4581 if (!unwind.proc_start)
4582 as_bad (MISSING_FNSTART);
4583
4584 expression (&exp);
4585 if (exp.X_op == O_constant
4586 && skip_past_comma (&input_line_pointer) != FAIL)
4587 {
4588 unwind.frame_size += exp.X_add_number;
4589 expression (&exp);
4590 }
4591 else
4592 exp.X_op = O_illegal;
4593
4594 if (exp.X_op != O_constant)
4595 {
4596 as_bad (_("expected <offset>, <opcode>"));
4597 ignore_rest_of_line ();
4598 return;
4599 }
4600
4601 count = 0;
4602
4603 /* Parse the opcode. */
4604 for (;;)
4605 {
4606 if (count >= 16)
4607 {
4608 as_bad (_("unwind opcode too long"));
4609 ignore_rest_of_line ();
4610 }
4611 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4612 {
4613 as_bad (_("invalid unwind opcode"));
4614 ignore_rest_of_line ();
4615 return;
4616 }
4617 op[count++] = exp.X_add_number;
4618
4619 /* Parse the next byte. */
4620 if (skip_past_comma (&input_line_pointer) == FAIL)
4621 break;
4622
4623 expression (&exp);
4624 }
4625
4626 /* Add the opcode bytes in reverse order. */
4627 while (count--)
4628 add_unwind_opcode (op[count], 1);
4629
4630 demand_empty_rest_of_line ();
4631 }
4632
4633
4634 /* Parse a .eabi_attribute directive. */
4635
4636 static void
4637 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4638 {
4639 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4640
4641 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4642 attributes_set_explicitly[tag] = 1;
4643 }
4644
/* Emit a tls fix for the symbol given as operand to .tlsdescseq.
   Only a fixup (relocation) is attached at the current output
   position; no data bytes are emitted.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Locate the current position in the frag and attach the fix there,
     choosing the Thumb or ARM variant of the TLS_DESCSEQ reloc
     according to the current instruction mode.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4668 #endif /* OBJ_ELF */
4669
4670 static void s_arm_arch (int);
4671 static void s_arm_object_arch (int);
4672 static void s_arm_cpu (int);
4673 static void s_arm_fpu (int);
4674 static void s_arm_arch_extension (int);
4675
4676 #ifdef TE_PE
4677
4678 static void
4679 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4680 {
4681 expressionS exp;
4682
4683 do
4684 {
4685 expression (&exp);
4686 if (exp.X_op == O_symbol)
4687 exp.X_op = O_secrel;
4688
4689 emit_expr (&exp, 4);
4690 }
4691 while (*input_line_pointer++ == ',');
4692
4693 input_line_pointer--;
4694 demand_empty_rest_of_line ();
4695 }
4696 #endif /* TE_PE */
4697
4698 /* This table describes all the machine specific pseudo-ops the assembler
4699 has to support. The fields are:
4700 pseudo-op name without dot
4701 function to call to execute this pseudo-op
4702 Integer arg to pass to the function. */
4703
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.	 */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align_ptwo,  2 },
  /* Mode-switching and code-generation directives.  */
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,   0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  /* Target-selection directives.  */
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word",	        s_arm_elf_cons, 4 },
  { "long",	        s_arm_elf_cons, 4 },
  { "inst.n",           s_arm_elf_inst, 2 },
  { "inst.w",           s_arm_elf_inst, 4 },
  { "inst",             s_arm_elf_inst, 0 },
  { "rel31",	        s_arm_rel31,	0 },
  /* ARM EHABI unwind-table directives.  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute, 0 },
  { "tlsdescseq",	s_arm_tls_descseq,      0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4777 \f
4778 /* Parser functions used exclusively in instruction operands. */
4779
4780 /* Generic immediate-value read function for use in insn parsing.
4781 STR points to the beginning of the immediate (the leading #);
4782 VAL receives the value; if the value is outside [MIN, MAX]
4783 issue an error. PREFIX_OPT is true if the immediate prefix is
4784 optional. */
4785
4786 static int
4787 parse_immediate (char **str, int *val, int min, int max,
4788 bfd_boolean prefix_opt)
4789 {
4790 expressionS exp;
4791
4792 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4793 if (exp.X_op != O_constant)
4794 {
4795 inst.error = _("constant expression required");
4796 return FAIL;
4797 }
4798
4799 if (exp.X_add_number < min || exp.X_add_number > max)
4800 {
4801 inst.error = _("immediate value out of range");
4802 return FAIL;
4803 }
4804
4805 *val = exp.X_add_number;
4806 return SUCCESS;
4807 }
4808
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits in .imm, and for a 64-bit value the high 32 bits in .reg with
   .regisimm set.  IN_EXP, if non-NULL, also receives the parsed
   expression.  Returns SUCCESS or FAIL; *STR is advanced only on
   success.  */

static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number is the count of littlenums in
	 generic_bignum.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm and the next 32 into .reg,
	 littlenum by littlenum.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4881
/* Returns the pseudo-register number of an FPA immediate constant,
   or FAIL if there isn't a valid constant here.  On success the value
   returned is i + 8, where i is the index of the matched constant in
   the fp_values table.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Not at end of line: undo the advance and keep trying.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA
	 constant.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  /* Match: advance *str and restore the saved
		     input_line_pointer before returning.  */
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* No match: restore input_line_pointer and report failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4974
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000, i.e. the low 19 bits are
   zero and the exponent field matches the value implied by bit 29;
   0 otherwise.  */

static int
is_quarter_float (unsigned imm)
{
  /* Expected exponent bits (mask 0x7e000000) given the B bit.  */
  unsigned expected_exp = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;

  return (imm & 0x7e000000) == expected_exp;
}
4984
4985
4986 /* Detect the presence of a floating point or integer zero constant,
4987 i.e. #0.0 or #0. */
4988
4989 static bfd_boolean
4990 parse_ifimm_zero (char **in)
4991 {
4992 int error_code;
4993
4994 if (!is_immediate_prefix (**in))
4995 {
4996 /* In unified syntax, all prefixes are optional. */
4997 if (!unified_syntax)
4998 return FALSE;
4999 }
5000 else
5001 ++*in;
5002
5003 /* Accept #0x0 as a synonym for #0. */
5004 if (strncmp (*in, "0x", 2) == 0)
5005 {
5006 int val;
5007 if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
5008 return FALSE;
5009 return TRUE;
5010 }
5011
5012 error_code = atof_generic (in, ".", EXP_CHARS,
5013 &generic_floating_point_number);
5014
5015 if (!error_code
5016 && generic_floating_point_number.sign == '+'
5017 && (generic_floating_point_number.low
5018 > generic_floating_point_number.leader))
5019 return TRUE;
5020
5021 return FALSE;
5022 }
5023
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.  On success *IMMED receives the 32-bit
   single-precision bit pattern and *CCP is advanced.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan up to the end of the token for a character that marks a
	 floating-point literal.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept either a valid quarter-precision pattern or (minus)
	 zero, which is loadable as an integer constant.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5087
/* Shift operands.  */
enum shift_kind
{
  /* The ARM shift operations; SHIFT_RRX is rotate-right-with-extend.  */
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a shift mnemonic (looked up in arm_shift_hsh by parse_shift)
   to its shift kind.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
};
5109
5110 /* Parse a <shift> specifier on an ARM data processing instruction.
5111 This has three forms:
5112
5113 (LSL|LSR|ASL|ASR|ROR) Rs
5114 (LSL|LSR|ASL|ASR|ROR) #imm
5115 RRX
5116
5117 Note that ASL is assimilated to LSL in the instruction encoding, and
5118 RRX to ROR #0 (which cannot be written as such). */
5119
5120 static int
5121 parse_shift (char **str, int i, enum parse_shift_mode mode)
5122 {
5123 const struct asm_shift_name *shift_name;
5124 enum shift_kind shift;
5125 char *s = *str;
5126 char *p = s;
5127 int reg;
5128
5129 for (p = *str; ISALPHA (*p); p++)
5130 ;
5131
5132 if (p == *str)
5133 {
5134 inst.error = _("shift expression expected");
5135 return FAIL;
5136 }
5137
5138 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5139 p - *str);
5140
5141 if (shift_name == NULL)
5142 {
5143 inst.error = _("shift expression expected");
5144 return FAIL;
5145 }
5146
5147 shift = shift_name->kind;
5148
5149 switch (mode)
5150 {
5151 case NO_SHIFT_RESTRICT:
5152 case SHIFT_IMMEDIATE: break;
5153
5154 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5155 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5156 {
5157 inst.error = _("'LSL' or 'ASR' required");
5158 return FAIL;
5159 }
5160 break;
5161
5162 case SHIFT_LSL_IMMEDIATE:
5163 if (shift != SHIFT_LSL)
5164 {
5165 inst.error = _("'LSL' required");
5166 return FAIL;
5167 }
5168 break;
5169
5170 case SHIFT_ASR_IMMEDIATE:
5171 if (shift != SHIFT_ASR)
5172 {
5173 inst.error = _("'ASR' required");
5174 return FAIL;
5175 }
5176 break;
5177
5178 default: abort ();
5179 }
5180
5181 if (shift != SHIFT_RRX)
5182 {
5183 /* Whitespace can appear here if the next thing is a bare digit. */
5184 skip_whitespace (p);
5185
5186 if (mode == NO_SHIFT_RESTRICT
5187 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5188 {
5189 inst.operands[i].imm = reg;
5190 inst.operands[i].immisreg = 1;
5191 }
5192 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5193 return FAIL;
5194 }
5195 inst.operands[i].shift_kind = shift;
5196 inst.operands[i].shifted = 1;
5197 *str = p;
5198 return SUCCESS;
5199 }
5200
5201 /* Parse a <shifter_operand> for an ARM data processing instruction:
5202
5203 #<immediate>
5204 #<immediate>, <rotate>
5205 <Rm>
5206 <Rm>, <shift>
5207
5208 where <shift> is defined by parse_shift above, and <rotate> is a
5209 multiple of 2 between 0 and 30. Validation of immediate operands
5210 is deferred to md_apply_fix. */
5211
/* Parse a <shifter_operand> (see the forms documented above) into
   inst.operands[I] and inst.reloc.  Returns SUCCESS or FAIL with
   inst.error set.  Validation of plain immediate operands is deferred
   to md_apply_fix via BFD_RELOC_ARM_IMMEDIATE.  */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Not a register: must be an immediate expression.  */
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even amount in [0, 30] and the base
	 constant must fit in 8 bits, per the ARM immediate encoding.  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  */
      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain immediate: leave range checking and encoding to
     md_apply_fix.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
5270
5271 /* Group relocation information. Each entry in the table contains the
5272 textual name of the relocation as may appear in assembler source
5273 and must end with a colon.
5274 Along with this textual name are the relocation codes to be used if
5275 the corresponding instruction is an ALU instruction (ADD or SUB only),
5276 an LDR, an LDRS, or an LDC. */
5277
/* Maps a group-relocation name to the relocation codes for each
   instruction class; a code of 0 means the relocation is not
   permitted for that class.  */
struct group_reloc_table_entry
{
  const char *name;
  int alu_code;		/* Code for ALU instructions (ADD or SUB only).  */
  int ldr_code;		/* Code for LDR.  */
  int ldrs_code;	/* Code for LDRS.  */
  int ldc_code;		/* Code for LDC.  */
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
5295
/* Table of supported group relocations; a zero code means the
   relocation cannot be used with that instruction class.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5370
5371 /* Given the address of a pointer pointing to the textual name of a group
5372 relocation as may appear in assembler source, attempt to find its details
5373 in group_reloc_table. The pointer will be updated to the character after
5374 the trailing colon. On failure, FAIL will be returned; SUCCESS
5375 otherwise. On success, *entry will be updated to point at the relevant
5376 group_reloc_table entry. */
5377
5378 static int
5379 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5380 {
5381 unsigned int i;
5382 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5383 {
5384 int length = strlen (group_reloc_table[i].name);
5385
5386 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5387 && (*str)[length] == ':')
5388 {
5389 *out = &group_reloc_table[i];
5390 *str += (length + 1);
5391 return SUCCESS;
5392 }
5393 }
5394
5395 return FAIL;
5396 }
5397
5398 /* Parse a <shifter_operand> for an ARM data processing instruction
5399 (as for parse_shifter_operand) where group relocations are allowed:
5400
5401 #<immediate>
5402 #<immediate>, <rotate>
5403 #:<group_reloc>:<expression>
5404 <Rm>
5405 <Rm>, <shift>
5406
5407 where <group_reloc> is one of the strings defined in group_reloc_table.
5408 The hashes are optional.
5409
5410 Everything else is as for parse_shifter_operand. */
5411
5412 static parse_operand_result
5413 parse_shifter_operand_group_reloc (char **str, int i)
5414 {
5415 /* Determine if we have the sequence of characters #: or just :
5416 coming next. If we do, then we check for a group relocation.
5417 If we don't, punt the whole lot to parse_shifter_operand. */
5418
5419 if (((*str)[0] == '#' && (*str)[1] == ':')
5420 || (*str)[0] == ':')
5421 {
5422 struct group_reloc_table_entry *entry;
5423
5424 if ((*str)[0] == '#')
5425 (*str) += 2;
5426 else
5427 (*str)++;
5428
5429 /* Try to parse a group relocation. Anything else is an error. */
5430 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5431 {
5432 inst.error = _("unknown group relocation");
5433 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5434 }
5435
5436 /* We now have the group relocation table entry corresponding to
5437 the name in the assembler source. Next, we parse the expression. */
5438 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5439 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5440
5441 /* Record the relocation type (always the ALU variant here). */
5442 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5443 gas_assert (inst.reloc.type != 0);
5444
5445 return PARSE_OPERAND_SUCCESS;
5446 }
5447 else
5448 return parse_shifter_operand (str, i) == SUCCESS
5449 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5450
5451 /* Never reached. */
5452 }
5453
5454 /* Parse a Neon alignment expression. Information is written to
5455 inst.operands[i]. We assume the initial ':' has been skipped.
5456
5457 align .imm = align << 8, .immisalign=1, .preind=0 */
5458 static parse_operand_result
5459 parse_neon_alignment (char **str, int i)
5460 {
5461 char *p = *str;
5462 expressionS exp;
5463
5464 my_get_expression (&exp, &p, GE_NO_PREFIX);
5465
5466 if (exp.X_op != O_constant)
5467 {
5468 inst.error = _("alignment must be constant");
5469 return PARSE_OPERAND_FAIL;
5470 }
5471
5472 inst.operands[i].imm = exp.X_add_number << 8;
5473 inst.operands[i].immisalign = 1;
5474 /* Alignments are not pre-indexes. */
5475 inst.operands[i].preind = 0;
5476
5477 *str = p;
5478 return PARSE_OPERAND_SUCCESS;
5479 }
5480
5481 /* Parse all forms of an ARM address expression. Information is written
5482 to inst.operands[i] and/or inst.reloc.
5483
5484 Preindexed addressing (.preind=1):
5485
5486 [Rn, #offset] .reg=Rn .reloc.exp=offset
5487 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5488 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5489 .shift_kind=shift .reloc.exp=shift_imm
5490
5491 These three may have a trailing ! which causes .writeback to be set also.
5492
5493 Postindexed addressing (.postind=1, .writeback=1):
5494
5495 [Rn], #offset .reg=Rn .reloc.exp=offset
5496 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5497 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5498 .shift_kind=shift .reloc.exp=shift_imm
5499
5500 Unindexed addressing (.preind=0, .postind=0):
5501
5502 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5503
5504 Other:
5505
5506 [Rn]{!} shorthand for [Rn,#0]{!}
5507 =immediate .isreg=0 .reloc.exp=immediate
5508 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5509
5510 It is the caller's responsibility to check for addressing modes not
5511 supported by the instruction, and to set inst.reloc.type. */
5512
/* Parse all forms of an ARM address expression (see the table of
   forms documented above) into inst.operands[I] and/or inst.reloc.
   GROUP_RELOCATIONS enables #:name: group relocations, in which case
   GROUP_TYPE selects which relocation code of the table entry is
   recorded.  It is the caller's responsibility to check for addressing
   modes not supported by the instruction, and to set inst.reloc.type.  */

static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* The base register is mandatory inside '['.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* [Rn, ... : pre-indexed forms.  */
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm ... : register offset, optionally shifted.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here.  This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Not a register offset: undo the sign-consumption above so
	     the expression parser sees any '-' itself.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero code in the table means this relocation is not
		 valid for this instruction class.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;

	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here.  This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* [Rn], ... : post-indexed forms.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already.  If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;

	      /* Undo the sign-consumption above so the expression
		 parser sees any '-' itself.  */
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5766
5767 static int
5768 parse_address (char **str, int i)
5769 {
5770 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5771 ? SUCCESS : FAIL;
5772 }
5773
5774 static parse_operand_result
5775 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5776 {
5777 return parse_address_main (str, i, 1, type);
5778 }
5779
/* Parse an operand for a MOVW or MOVT instruction.  On success *STR is
   advanced past the operand and SUCCESS is returned; on failure
   inst.error is set and FAIL is returned.  A ":lower16:"/":upper16:"
   prefix selects a MOVW/MOVT relocation; otherwise the operand must be
   a constant in the range 0 .. 0xffff.
   NOTE(review): assumes the caller has reset inst.reloc.type to
   BFD_RELOC_UNUSED before calling — the prefix-detection test below
   depends on it.  */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  /* The immediate prefix '#' is optional here.  */
  skip_past_char (&p, '#');
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      /* Both prefixes are exactly 9 characters long.  */
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  /* Without a relocation prefix the value must be a halfword constant;
     with one, the linker/assembler fixup validates the value later.  */
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
5819
5820 /* Miscellaneous. */
5821
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when the operand is the destination of a write (msr) and
   FALSE when it is being read (mrs).  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile special registers are looked up by name in
	 arm_v7m_psr_hsh.  First scan the identifier.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For names ending in "psr" only match up to and including the
	 'r', so that any suffix (e.g. "_nzcvq") is left for the
	 bitfield handling below.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Skip past "SPSR"/"CPSR"/"APSR" (all four characters long).  */
  p += 4;
 check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* 0x20 (and 0x2 for 'g') act as poison values: specifying the
	     same bit twice sets them, which trips the "bad bitmask"
	     check after the loop.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v, q present maps to the PSR_f field.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated bits and partial nzcvq subsets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* Non-APSR suffixes name whole fields, looked up in
	     arm_psr_hsh.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;  /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6018
6019 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6020 value suitable for splatting into the AIF field of the instruction. */
6021
6022 static int
6023 parse_cps_flags (char **str)
6024 {
6025 int val = 0;
6026 int saw_a_flag = 0;
6027 char *s = *str;
6028
6029 for (;;)
6030 switch (*s++)
6031 {
6032 case '\0': case ',':
6033 goto done;
6034
6035 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6036 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6037 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6038
6039 default:
6040 inst.error = _("unrecognized CPS flag");
6041 return FAIL;
6042 }
6043
6044 done:
6045 if (saw_a_flag == 0)
6046 {
6047 inst.error = _("missing CPS flags");
6048 return FAIL;
6049 }
6050
6051 *str = s - 1;
6052 return val;
6053 }
6054
6055 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6056 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6057
6058 static int
6059 parse_endian_specifier (char **str)
6060 {
6061 int little_endian;
6062 char *s = *str;
6063
6064 if (strncasecmp (s, "BE", 2))
6065 little_endian = 0;
6066 else if (strncasecmp (s, "LE", 2))
6067 little_endian = 1;
6068 else
6069 {
6070 inst.error = _("valid endian specifiers are be or le");
6071 return FAIL;
6072 }
6073
6074 if (ISALNUM (s[2]) || s[2] == '_')
6075 {
6076 inst.error = _("valid endian specifiers are be or le");
6077 return FAIL;
6078 }
6079
6080 *str = s + 2;
6081 return little_endian;
6082 }
6083
6084 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6085 value suitable for poking into the rotate field of an sxt or sxta
6086 instruction, or FAIL on error. */
6087
6088 static int
6089 parse_ror (char **str)
6090 {
6091 int rot;
6092 char *s = *str;
6093
6094 if (strncasecmp (s, "ROR", 3) == 0)
6095 s += 3;
6096 else
6097 {
6098 inst.error = _("missing rotation field after comma");
6099 return FAIL;
6100 }
6101
6102 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6103 return FAIL;
6104
6105 switch (rot)
6106 {
6107 case 0: *str = s; return 0x0;
6108 case 8: *str = s; return 0x1;
6109 case 16: *str = s; return 0x2;
6110 case 24: *str = s; return 0x3;
6111
6112 default:
6113 inst.error = _("rotation can only be 0, 8, 16, or 24");
6114 return FAIL;
6115 }
6116 }
6117
6118 /* Parse a conditional code (from conds[] below). The value returned is in the
6119 range 0 .. 14, or FAIL. */
6120 static int
6121 parse_cond (char **str)
6122 {
6123 char *q;
6124 const struct asm_cond *c;
6125 int n;
6126 /* Condition codes are always 2 characters, so matching up to
6127 3 characters is sufficient. */
6128 char cond[3];
6129
6130 q = *str;
6131 n = 0;
6132 while (ISALPHA (*q) && n < 3)
6133 {
6134 cond[n] = TOLOWER (*q);
6135 q++;
6136 n++;
6137 }
6138
6139 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6140 if (!c)
6141 {
6142 inst.error = _("condition required");
6143 return FAIL;
6144 }
6145
6146 *str = q;
6147 return c->value;
6148 }
6149
6150 /* Record a use of the given feature. */
6151 static void
6152 record_feature_use (const arm_feature_set *feature)
6153 {
6154 if (thumb_mode)
6155 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6156 else
6157 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6158 }
6159
6160 /* If the given feature available in the selected CPU, mark it as used.
6161 Returns TRUE iff feature is available. */
6162 static bfd_boolean
6163 mark_feature_used (const arm_feature_set *feature)
6164 {
6165 /* Ensure the option is valid on the current architecture. */
6166 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6167 return FALSE;
6168
6169 /* Add the appropriate architecture feature for the barrier option used.
6170 */
6171 record_feature_use (feature);
6172
6173 return TRUE;
6174 }
6175
6176 /* Parse an option for a barrier instruction. Returns the encoding for the
6177 option, or FAIL. */
6178 static int
6179 parse_barrier (char **str)
6180 {
6181 char *p, *q;
6182 const struct asm_barrier_opt *o;
6183
6184 p = q = *str;
6185 while (ISALPHA (*q))
6186 q++;
6187
6188 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6189 q - p);
6190 if (!o)
6191 return FAIL;
6192
6193 if (!mark_feature_used (&o->arch))
6194 return FAIL;
6195
6196 *str = q;
6197 return o->value;
6198 }
6199
6200 /* Parse the operands of a table branch instruction. Similar to a memory
6201 operand. */
6202 static int
6203 parse_tb (char **str)
6204 {
6205 char * p = *str;
6206 int reg;
6207
6208 if (skip_past_char (&p, '[') == FAIL)
6209 {
6210 inst.error = _("'[' expected");
6211 return FAIL;
6212 }
6213
6214 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6215 {
6216 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6217 return FAIL;
6218 }
6219 inst.operands[0].reg = reg;
6220
6221 if (skip_past_comma (&p) == FAIL)
6222 {
6223 inst.error = _("',' expected");
6224 return FAIL;
6225 }
6226
6227 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6228 {
6229 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6230 return FAIL;
6231 }
6232 inst.operands[0].imm = reg;
6233
6234 if (skip_past_comma (&p) == SUCCESS)
6235 {
6236 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6237 return FAIL;
6238 if (inst.reloc.exp.X_add_number != 1)
6239 {
6240 inst.error = _("invalid shift");
6241 return FAIL;
6242 }
6243 inst.operands[0].shifted = 1;
6244 }
6245
6246 if (skip_past_char (&p, ']') == FAIL)
6247 {
6248 inst.error = _("']' expected");
6249 return FAIL;
6250 }
6251 *str = p;
6252 return SUCCESS;
6253 }
6254
/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.
   Note the pervasive "inst.operands[i++].present = 1" idiom: setting
   .present and advancing the operand counter in one step.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* The leading operand discriminates between the cases: a scalar, a
     vector register, or an ARM core register.  */
  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D register first: case 5 needs a second core register.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two more core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>
	     (parse_big_immediate fills in the operand itself).  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is an ARM core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second S register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6477
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM code goes in the low 16 bits
   and the Thumb code in the high 16 bits; parse_operands splits them
   apart again according to the current instruction state.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  Optional operands (names beginning
   "OP_o") must all sort at or after OP_FIRST_OPTIONAL, which is how
   parse_operands decides whether to record a backtrack position.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNSD,      /* Neon single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oI32z,      /*                             0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  OP_FIRST_OPTIONAL = OP_oI7b
};
6617
6618 /* Generic instruction operand parser. This does no encoding and no
6619 semantic validation; it merely squirrels values away in the inst
6620 structure. Returns SUCCESS or FAIL depending on whether the
6621 specified grammar matched. */
6622 static int
6623 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6624 {
6625 unsigned const int *upat = pattern;
6626 char *backtrack_pos = 0;
6627 const char *backtrack_error = 0;
6628 int i, val = 0, backtrack_index = 0;
6629 enum arm_reg_type rtype;
6630 parse_operand_result result;
6631 unsigned int op_parse_code;
6632
6633 #define po_char_or_fail(chr) \
6634 do \
6635 { \
6636 if (skip_past_char (&str, chr) == FAIL) \
6637 goto bad_args; \
6638 } \
6639 while (0)
6640
6641 #define po_reg_or_fail(regtype) \
6642 do \
6643 { \
6644 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6645 & inst.operands[i].vectype); \
6646 if (val == FAIL) \
6647 { \
6648 first_error (_(reg_expected_msgs[regtype])); \
6649 goto failure; \
6650 } \
6651 inst.operands[i].reg = val; \
6652 inst.operands[i].isreg = 1; \
6653 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6654 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6655 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6656 || rtype == REG_TYPE_VFD \
6657 || rtype == REG_TYPE_NQ); \
6658 } \
6659 while (0)
6660
6661 #define po_reg_or_goto(regtype, label) \
6662 do \
6663 { \
6664 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6665 & inst.operands[i].vectype); \
6666 if (val == FAIL) \
6667 goto label; \
6668 \
6669 inst.operands[i].reg = val; \
6670 inst.operands[i].isreg = 1; \
6671 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6672 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6673 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6674 || rtype == REG_TYPE_VFD \
6675 || rtype == REG_TYPE_NQ); \
6676 } \
6677 while (0)
6678
6679 #define po_imm_or_fail(min, max, popt) \
6680 do \
6681 { \
6682 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6683 goto failure; \
6684 inst.operands[i].imm = val; \
6685 } \
6686 while (0)
6687
6688 #define po_scalar_or_goto(elsz, label) \
6689 do \
6690 { \
6691 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6692 if (val == FAIL) \
6693 goto label; \
6694 inst.operands[i].reg = val; \
6695 inst.operands[i].isscalar = 1; \
6696 } \
6697 while (0)
6698
6699 #define po_misc_or_fail(expr) \
6700 do \
6701 { \
6702 if (expr) \
6703 goto failure; \
6704 } \
6705 while (0)
6706
6707 #define po_misc_or_fail_no_backtrack(expr) \
6708 do \
6709 { \
6710 result = expr; \
6711 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6712 backtrack_pos = 0; \
6713 if (result != PARSE_OPERAND_SUCCESS) \
6714 goto failure; \
6715 } \
6716 while (0)
6717
6718 #define po_barrier_or_imm(str) \
6719 do \
6720 { \
6721 val = parse_barrier (&str); \
6722 if (val == FAIL && ! ISALPHA (*str)) \
6723 goto immediate; \
6724 if (val == FAIL \
6725 /* ISB can only take SY as an option. */ \
6726 || ((inst.instruction & 0xf0) == 0x60 \
6727 && val != 0xf)) \
6728 { \
6729 inst.error = _("invalid barrier type"); \
6730 backtrack_pos = 0; \
6731 goto failure; \
6732 } \
6733 } \
6734 while (0)
6735
6736 skip_whitespace (str);
6737
6738 for (i = 0; upat[i] != OP_stop; i++)
6739 {
6740 op_parse_code = upat[i];
6741 if (op_parse_code >= 1<<16)
6742 op_parse_code = thumb ? (op_parse_code >> 16)
6743 : (op_parse_code & ((1<<16)-1));
6744
6745 if (op_parse_code >= OP_FIRST_OPTIONAL)
6746 {
6747 /* Remember where we are in case we need to backtrack. */
6748 gas_assert (!backtrack_pos);
6749 backtrack_pos = str;
6750 backtrack_error = inst.error;
6751 backtrack_index = i;
6752 }
6753
6754 if (i > 0 && (i > 1 || inst.operands[0].present))
6755 po_char_or_fail (',');
6756
6757 switch (op_parse_code)
6758 {
6759 /* Registers */
6760 case OP_oRRnpc:
6761 case OP_oRRnpcsp:
6762 case OP_RRnpc:
6763 case OP_RRnpcsp:
6764 case OP_oRR:
6765 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6766 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6767 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6768 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6769 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6770 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6771 case OP_oRND:
6772 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6773 case OP_RVC:
6774 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6775 break;
6776 /* Also accept generic coprocessor regs for unknown registers. */
6777 coproc_reg:
6778 po_reg_or_fail (REG_TYPE_CN);
6779 break;
6780 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6781 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6782 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6783 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6784 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6785 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6786 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6787 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6788 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6789 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6790 case OP_oRNQ:
6791 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6792 case OP_RNSD: po_reg_or_fail (REG_TYPE_NSD); break;
6793 case OP_oRNDQ:
6794 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6795 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6796 case OP_oRNSDQ:
6797 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6798
6799 /* Neon scalar. Using an element size of 8 means that some invalid
6800 scalars are accepted here, so deal with those in later code. */
6801 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6802
6803 case OP_RNDQ_I0:
6804 {
6805 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6806 break;
6807 try_imm0:
6808 po_imm_or_fail (0, 0, TRUE);
6809 }
6810 break;
6811
6812 case OP_RVSD_I0:
6813 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6814 break;
6815
6816 case OP_RSVD_FI0:
6817 {
6818 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6819 break;
6820 try_ifimm0:
6821 if (parse_ifimm_zero (&str))
6822 inst.operands[i].imm = 0;
6823 else
6824 {
6825 inst.error
6826 = _("only floating point zero is allowed as immediate value");
6827 goto failure;
6828 }
6829 }
6830 break;
6831
6832 case OP_RR_RNSC:
6833 {
6834 po_scalar_or_goto (8, try_rr);
6835 break;
6836 try_rr:
6837 po_reg_or_fail (REG_TYPE_RN);
6838 }
6839 break;
6840
6841 case OP_RNSDQ_RNSC:
6842 {
6843 po_scalar_or_goto (8, try_nsdq);
6844 break;
6845 try_nsdq:
6846 po_reg_or_fail (REG_TYPE_NSDQ);
6847 }
6848 break;
6849
6850 case OP_RNSD_RNSC:
6851 {
6852 po_scalar_or_goto (8, try_s_scalar);
6853 break;
6854 try_s_scalar:
6855 po_scalar_or_goto (4, try_nsd);
6856 break;
6857 try_nsd:
6858 po_reg_or_fail (REG_TYPE_NSD);
6859 }
6860 break;
6861
6862 case OP_RNDQ_RNSC:
6863 {
6864 po_scalar_or_goto (8, try_ndq);
6865 break;
6866 try_ndq:
6867 po_reg_or_fail (REG_TYPE_NDQ);
6868 }
6869 break;
6870
6871 case OP_RND_RNSC:
6872 {
6873 po_scalar_or_goto (8, try_vfd);
6874 break;
6875 try_vfd:
6876 po_reg_or_fail (REG_TYPE_VFD);
6877 }
6878 break;
6879
6880 case OP_VMOV:
6881 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6882 not careful then bad things might happen. */
6883 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6884 break;
6885
6886 case OP_RNDQ_Ibig:
6887 {
6888 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6889 break;
6890 try_immbig:
6891 /* There's a possibility of getting a 64-bit immediate here, so
6892 we need special handling. */
6893 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6894 == FAIL)
6895 {
6896 inst.error = _("immediate value is out of range");
6897 goto failure;
6898 }
6899 }
6900 break;
6901
6902 case OP_RNDQ_I63b:
6903 {
6904 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6905 break;
6906 try_shimm:
6907 po_imm_or_fail (0, 63, TRUE);
6908 }
6909 break;
6910
6911 case OP_RRnpcb:
6912 po_char_or_fail ('[');
6913 po_reg_or_fail (REG_TYPE_RN);
6914 po_char_or_fail (']');
6915 break;
6916
6917 case OP_RRnpctw:
6918 case OP_RRw:
6919 case OP_oRRw:
6920 po_reg_or_fail (REG_TYPE_RN);
6921 if (skip_past_char (&str, '!') == SUCCESS)
6922 inst.operands[i].writeback = 1;
6923 break;
6924
6925 /* Immediates */
6926 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6927 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6928 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6929 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6930 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6931 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6932 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6933 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6934 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6935 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6936 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6937 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6938
6939 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6940 case OP_oI7b:
6941 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6942 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6943 case OP_oI31b:
6944 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6945 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6946 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6947 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6948
6949 /* Immediate variants */
6950 case OP_oI255c:
6951 po_char_or_fail ('{');
6952 po_imm_or_fail (0, 255, TRUE);
6953 po_char_or_fail ('}');
6954 break;
6955
6956 case OP_I31w:
6957 /* The expression parser chokes on a trailing !, so we have
6958 to find it first and zap it. */
6959 {
6960 char *s = str;
6961 while (*s && *s != ',')
6962 s++;
6963 if (s[-1] == '!')
6964 {
6965 s[-1] = '\0';
6966 inst.operands[i].writeback = 1;
6967 }
6968 po_imm_or_fail (0, 31, TRUE);
6969 if (str == s - 1)
6970 str = s;
6971 }
6972 break;
6973
6974 /* Expressions */
6975 case OP_EXPi: EXPi:
6976 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6977 GE_OPT_PREFIX));
6978 break;
6979
6980 case OP_EXP:
6981 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6982 GE_NO_PREFIX));
6983 break;
6984
6985 case OP_EXPr: EXPr:
6986 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6987 GE_NO_PREFIX));
6988 if (inst.reloc.exp.X_op == O_symbol)
6989 {
6990 val = parse_reloc (&str);
6991 if (val == -1)
6992 {
6993 inst.error = _("unrecognized relocation suffix");
6994 goto failure;
6995 }
6996 else if (val != BFD_RELOC_UNUSED)
6997 {
6998 inst.operands[i].imm = val;
6999 inst.operands[i].hasreloc = 1;
7000 }
7001 }
7002 break;
7003
7004 /* Operand for MOVW or MOVT. */
7005 case OP_HALF:
7006 po_misc_or_fail (parse_half (&str));
7007 break;
7008
7009 /* Register or expression. */
7010 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
7011 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
7012
7013 /* Register or immediate. */
7014 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
7015 I0: po_imm_or_fail (0, 0, FALSE); break;
7016
7017 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
7018 IF:
7019 if (!is_immediate_prefix (*str))
7020 goto bad_args;
7021 str++;
7022 val = parse_fpa_immediate (&str);
7023 if (val == FAIL)
7024 goto failure;
7025 /* FPA immediates are encoded as registers 8-15.
7026 parse_fpa_immediate has already applied the offset. */
7027 inst.operands[i].reg = val;
7028 inst.operands[i].isreg = 1;
7029 break;
7030
7031 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
7032 I32z: po_imm_or_fail (0, 32, FALSE); break;
7033
7034 /* Two kinds of register. */
7035 case OP_RIWR_RIWC:
7036 {
7037 struct reg_entry *rege = arm_reg_parse_multi (&str);
7038 if (!rege
7039 || (rege->type != REG_TYPE_MMXWR
7040 && rege->type != REG_TYPE_MMXWC
7041 && rege->type != REG_TYPE_MMXWCG))
7042 {
7043 inst.error = _("iWMMXt data or control register expected");
7044 goto failure;
7045 }
7046 inst.operands[i].reg = rege->number;
7047 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7048 }
7049 break;
7050
7051 case OP_RIWC_RIWG:
7052 {
7053 struct reg_entry *rege = arm_reg_parse_multi (&str);
7054 if (!rege
7055 || (rege->type != REG_TYPE_MMXWC
7056 && rege->type != REG_TYPE_MMXWCG))
7057 {
7058 inst.error = _("iWMMXt control register expected");
7059 goto failure;
7060 }
7061 inst.operands[i].reg = rege->number;
7062 inst.operands[i].isreg = 1;
7063 }
7064 break;
7065
7066 /* Misc */
7067 case OP_CPSF: val = parse_cps_flags (&str); break;
7068 case OP_ENDI: val = parse_endian_specifier (&str); break;
7069 case OP_oROR: val = parse_ror (&str); break;
7070 case OP_COND: val = parse_cond (&str); break;
7071 case OP_oBARRIER_I15:
7072 po_barrier_or_imm (str); break;
7073 immediate:
7074 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7075 goto failure;
7076 break;
7077
7078 case OP_wPSR:
7079 case OP_rPSR:
7080 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7081 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7082 {
7083 inst.error = _("Banked registers are not available with this "
7084 "architecture.");
7085 goto failure;
7086 }
7087 break;
7088 try_psr:
7089 val = parse_psr (&str, op_parse_code == OP_wPSR);
7090 break;
7091
7092 case OP_APSR_RR:
7093 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7094 break;
7095 try_apsr:
7096 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7097 instruction). */
7098 if (strncasecmp (str, "APSR_", 5) == 0)
7099 {
7100 unsigned found = 0;
7101 str += 5;
7102 while (found < 15)
7103 switch (*str++)
7104 {
7105 case 'c': found = (found & 1) ? 16 : found | 1; break;
7106 case 'n': found = (found & 2) ? 16 : found | 2; break;
7107 case 'z': found = (found & 4) ? 16 : found | 4; break;
7108 case 'v': found = (found & 8) ? 16 : found | 8; break;
7109 default: found = 16;
7110 }
7111 if (found != 15)
7112 goto failure;
7113 inst.operands[i].isvec = 1;
7114 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7115 inst.operands[i].reg = REG_PC;
7116 }
7117 else
7118 goto failure;
7119 break;
7120
7121 case OP_TB:
7122 po_misc_or_fail (parse_tb (&str));
7123 break;
7124
7125 /* Register lists. */
7126 case OP_REGLST:
7127 val = parse_reg_list (&str);
7128 if (*str == '^')
7129 {
7130 inst.operands[i].writeback = 1;
7131 str++;
7132 }
7133 break;
7134
7135 case OP_VRSLST:
7136 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7137 break;
7138
7139 case OP_VRDLST:
7140 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7141 break;
7142
7143 case OP_VRSDLST:
7144 /* Allow Q registers too. */
7145 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7146 REGLIST_NEON_D);
7147 if (val == FAIL)
7148 {
7149 inst.error = NULL;
7150 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7151 REGLIST_VFP_S);
7152 inst.operands[i].issingle = 1;
7153 }
7154 break;
7155
7156 case OP_NRDLST:
7157 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7158 REGLIST_NEON_D);
7159 break;
7160
7161 case OP_NSTRLST:
7162 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7163 &inst.operands[i].vectype);
7164 break;
7165
7166 /* Addressing modes */
7167 case OP_ADDR:
7168 po_misc_or_fail (parse_address (&str, i));
7169 break;
7170
7171 case OP_ADDRGLDR:
7172 po_misc_or_fail_no_backtrack (
7173 parse_address_group_reloc (&str, i, GROUP_LDR));
7174 break;
7175
7176 case OP_ADDRGLDRS:
7177 po_misc_or_fail_no_backtrack (
7178 parse_address_group_reloc (&str, i, GROUP_LDRS));
7179 break;
7180
7181 case OP_ADDRGLDC:
7182 po_misc_or_fail_no_backtrack (
7183 parse_address_group_reloc (&str, i, GROUP_LDC));
7184 break;
7185
7186 case OP_SH:
7187 po_misc_or_fail (parse_shifter_operand (&str, i));
7188 break;
7189
7190 case OP_SHG:
7191 po_misc_or_fail_no_backtrack (
7192 parse_shifter_operand_group_reloc (&str, i));
7193 break;
7194
7195 case OP_oSHll:
7196 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7197 break;
7198
7199 case OP_oSHar:
7200 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7201 break;
7202
7203 case OP_oSHllar:
7204 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7205 break;
7206
7207 default:
7208 as_fatal (_("unhandled operand code %d"), op_parse_code);
7209 }
7210
7211 /* Various value-based sanity checks and shared operations. We
7212 do not signal immediate failures for the register constraints;
7213 this allows a syntax error to take precedence. */
7214 switch (op_parse_code)
7215 {
7216 case OP_oRRnpc:
7217 case OP_RRnpc:
7218 case OP_RRnpcb:
7219 case OP_RRw:
7220 case OP_oRRw:
7221 case OP_RRnpc_I0:
7222 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7223 inst.error = BAD_PC;
7224 break;
7225
7226 case OP_oRRnpcsp:
7227 case OP_RRnpcsp:
7228 if (inst.operands[i].isreg)
7229 {
7230 if (inst.operands[i].reg == REG_PC)
7231 inst.error = BAD_PC;
7232 else if (inst.operands[i].reg == REG_SP
7233 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7234 relaxed since ARMv8-A. */
7235 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
7236 {
7237 gas_assert (thumb);
7238 inst.error = BAD_SP;
7239 }
7240 }
7241 break;
7242
7243 case OP_RRnpctw:
7244 if (inst.operands[i].isreg
7245 && inst.operands[i].reg == REG_PC
7246 && (inst.operands[i].writeback || thumb))
7247 inst.error = BAD_PC;
7248 break;
7249
7250 case OP_CPSF:
7251 case OP_ENDI:
7252 case OP_oROR:
7253 case OP_wPSR:
7254 case OP_rPSR:
7255 case OP_COND:
7256 case OP_oBARRIER_I15:
7257 case OP_REGLST:
7258 case OP_VRSLST:
7259 case OP_VRDLST:
7260 case OP_VRSDLST:
7261 case OP_NRDLST:
7262 case OP_NSTRLST:
7263 if (val == FAIL)
7264 goto failure;
7265 inst.operands[i].imm = val;
7266 break;
7267
7268 default:
7269 break;
7270 }
7271
7272 /* If we get here, this operand was successfully parsed. */
7273 inst.operands[i].present = 1;
7274 continue;
7275
7276 bad_args:
7277 inst.error = BAD_ARGS;
7278
7279 failure:
7280 if (!backtrack_pos)
7281 {
7282 /* The parse routine should already have set inst.error, but set a
7283 default here just in case. */
7284 if (!inst.error)
7285 inst.error = _("syntax error");
7286 return FAIL;
7287 }
7288
7289 /* Do not backtrack over a trailing optional argument that
7290 absorbed some text. We will only fail again, with the
7291 'garbage following instruction' error message, which is
7292 probably less helpful than the current one. */
7293 if (backtrack_index == i && backtrack_pos != str
7294 && upat[i+1] == OP_stop)
7295 {
7296 if (!inst.error)
7297 inst.error = _("syntax error");
7298 return FAIL;
7299 }
7300
7301 /* Try again, skipping the optional argument at backtrack_pos. */
7302 str = backtrack_pos;
7303 inst.error = backtrack_error;
7304 inst.operands[backtrack_index].present = 0;
7305 i = backtrack_index;
7306 backtrack_pos = 0;
7307 }
7308
7309 /* Check that we have parsed all the arguments. */
7310 if (*str != '\0' && !inst.error)
7311 inst.error = _("garbage following instruction");
7312
7313 return inst.error ? FAIL : SUCCESS;
7314 }
7315
7316 #undef po_char_or_fail
7317 #undef po_reg_or_fail
7318 #undef po_reg_or_goto
7319 #undef po_imm_or_fail
7320 #undef po_scalar_or_fail
7321 #undef po_barrier_or_imm
7322
/* Shorthand used by the instruction encoding functions: if EXPR is
   true, record ERR as the instruction error and bail out of the
   current (void) function.  Wrapped in do-while(0) so it behaves as a
   single statement after a bare `if'.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7334
/* Reject "bad registers" for Thumb-2 instructions (the BadReg
   predicate in ARM's Thumb-2 documentation): many Thumb-2
   instructions are unpredictable if these registers are used.

   REG_PC is always rejected.  REG_SP is rejected only before ARMv8-A;
   the SP restriction was relaxed from ARMv8-A onwards.  */
#define reject_bad_reg(reg)					\
  do								\
    {								\
      if ((reg) == REG_PC)					\
	{							\
	  inst.error = BAD_PC;					\
	  return;						\
	}							\
      else if ((reg) == REG_SP					\
	       && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
	{							\
	  inst.error = BAD_SP;					\
	  return;						\
	}							\
    }								\
  while (0)
7355
/* Emit a deprecation diagnostic when REG is r13 (the stack pointer)
   and deprecation warnings are enabled.  */
#define warn_deprecated_sp(reg)					\
  do								\
    {								\
      if (warn_on_deprecated && (reg) == REG_SP)		\
	as_tsktsk (_("use of r13 is deprecated"));		\
    }								\
  while (0)
7363
7364 /* Functions for operand encoding. ARM, then Thumb. */
7365
/* Rotate the 32-bit value V left by N bit positions (N is reduced
   modulo 32, so N == 0 and N == 32 are both safe and yield V).
   Arguments are fully parenthesized so compound expressions expand
   correctly; note each argument is evaluated twice, so they must be
   free of side effects.  */
#define rotate_left(v, n) \
  ((v) << ((n) & 31) | (v) >> ((32 - (n)) & 31))
7367
7368 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7369
7370 The only binary encoding difference is the Coprocessor number. Coprocessor
7371 9 is used for half-precision calculations or conversions. The format of the
7372 instruction is the same as the equivalent Coprocessor 10 instruction that
7373 exists for Single-Precision operation. */
7374
7375 static void
7376 do_scalar_fp16_v82_encode (void)
7377 {
7378 if (inst.cond != COND_ALWAYS)
7379 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7380 " the behaviour is UNPREDICTABLE"));
7381 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
7382 _(BAD_FP16));
7383
7384 inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
7385 mark_feature_used (&arm_ext_fp16);
7386 }
7387
7388 /* If VAL can be encoded in the immediate field of an ARM instruction,
7389 return the encoded form. Otherwise, return FAIL. */
7390
7391 static unsigned int
7392 encode_arm_immediate (unsigned int val)
7393 {
7394 unsigned int a, i;
7395
7396 if (val <= 0xff)
7397 return val;
7398
7399 for (i = 2; i < 32; i += 2)
7400 if ((a = rotate_left (val, i)) <= 0xff)
7401 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7402
7403 return FAIL;
7404 }
7405
7406 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7407 return the encoded form. Otherwise, return FAIL. */
7408 static unsigned int
7409 encode_thumb32_immediate (unsigned int val)
7410 {
7411 unsigned int a, i;
7412
7413 if (val <= 0xff)
7414 return val;
7415
7416 for (i = 1; i <= 24; i++)
7417 {
7418 a = val >> i;
7419 if ((val & ~(0xff << i)) == 0)
7420 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7421 }
7422
7423 a = val & 0xff;
7424 if (val == ((a << 16) | a))
7425 return 0x100 | a;
7426 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7427 return 0x300 | a;
7428
7429 a = val & 0xff00;
7430 if (val == ((a << 16) | a))
7431 return 0x200 | (a >> 8);
7432
7433 return FAIL;
7434 }
7435 /* Encode a VFP SP or DP register number into inst.instruction. */
7436
7437 static void
7438 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
7439 {
7440 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
7441 && reg > 15)
7442 {
7443 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7444 {
7445 if (thumb_mode)
7446 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7447 fpu_vfp_ext_d32);
7448 else
7449 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7450 fpu_vfp_ext_d32);
7451 }
7452 else
7453 {
7454 first_error (_("D register out of range for selected VFP version"));
7455 return;
7456 }
7457 }
7458
7459 switch (pos)
7460 {
7461 case VFP_REG_Sd:
7462 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7463 break;
7464
7465 case VFP_REG_Sn:
7466 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7467 break;
7468
7469 case VFP_REG_Sm:
7470 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7471 break;
7472
7473 case VFP_REG_Dd:
7474 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7475 break;
7476
7477 case VFP_REG_Dn:
7478 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7479 break;
7480
7481 case VFP_REG_Dm:
7482 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7483 break;
7484
7485 default:
7486 abort ();
7487 }
7488 }
7489
7490 /* Encode a <shift> in an ARM-format instruction.  The immediate,
7491    if any, is handled by md_apply_fix.  */
7492 static void
7493 encode_arm_shift (int i)
7494 {
7495   /* register-shifted register.  */
7496   if (inst.operands[i].immisreg)
7497     {
7498       int op_index;
       /* Warn (not error) for any r15 in a register-shifted-register
	  form; such uses are UNPREDICTABLE.  */
7499       for (op_index = 0; op_index <= i; ++op_index)
7500 	{
7501 	  /* Check the operand only when it's presented.  In pre-UAL syntax,
7502 	     if the destination register is the same as the first operand, two
7503 	     register form of the instruction can be used.  */
7504 	  if (inst.operands[op_index].present && inst.operands[op_index].isreg
7505 	      && inst.operands[op_index].reg == REG_PC)
7506 	    as_warn (UNPRED_REG ("r15"));
7507 	}
7508
       /* The shift-amount register must not be r15 either.  */
7509       if (inst.operands[i].imm == REG_PC)
7510 	as_warn (UNPRED_REG ("r15"));
7511     }
7512
   /* RRX is encoded as ROR with a zero immediate shift amount.  */
7513   if (inst.operands[i].shift_kind == SHIFT_RRX)
7514     inst.instruction |= SHIFT_ROR << 5;
7515   else
7516     {
7517       inst.instruction |= inst.operands[i].shift_kind << 5;
7518       if (inst.operands[i].immisreg)
7519 	{
7520 	  inst.instruction |= SHIFT_BY_REG;
7521 	  inst.instruction |= inst.operands[i].imm << 8;
7522 	}
7523       else
	/* Immediate shift amount: deferred to md_apply_fix via reloc.  */
7524 	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7525     }
7526 }
7527
7528 static void
7529 encode_arm_shifter_operand (int i)
7530 {
7531 if (inst.operands[i].isreg)
7532 {
7533 inst.instruction |= inst.operands[i].reg;
7534 encode_arm_shift (i);
7535 }
7536 else
7537 {
7538 inst.instruction |= INST_IMMEDIATE;
7539 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7540 inst.instruction |= inst.operands[i].imm;
7541 }
7542 }
7543
7544 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */
7545 static void
7546 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
7547 {
7548   /* PR 14260:
7549      Generate an error if the operand is not a register.  */
7550   constraint (!inst.operands[i].isreg,
7551 	      _("Instruction does not support =N addresses"));
7552
   /* Base register goes in bits 16-19.  */
7553   inst.instruction |= inst.operands[i].reg << 16;
7554
7555   if (inst.operands[i].preind)
7556     {
       /* T-form (user-mode) loads/stores are post-indexed only.  */
7557       if (is_t)
7558 	{
7559 	  inst.error = _("instruction does not accept preindexed addressing");
7560 	  return;
7561 	}
7562       inst.instruction |= PRE_INDEX;
7563       if (inst.operands[i].writeback)
7564 	inst.instruction |= WRITE_BACK;
7565
7566     }
7567   else if (inst.operands[i].postind)
7568     {
7569       gas_assert (inst.operands[i].writeback);
       /* For T instructions the W bit selects the user-mode access.  */
7570       if (is_t)
7571 	inst.instruction |= WRITE_BACK;
7572     }
7573   else /* unindexed - only for coprocessor */
7574     {
7575       inst.error = _("instruction does not accept unindexed addressing");
7576       return;
7577     }
7578
   /* Warn when Rn (bits 16-19) equals Rd/Rt (bits 12-15) in any form
      that writes the base back: the transfer register aliases the
      updated base.  */
7579   if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
7580       && (((inst.instruction & 0x000f0000) >> 16)
7581 	  == ((inst.instruction & 0x0000f000) >> 12)))
7582     as_warn ((inst.instruction & LOAD_BIT)
7583 	     ? _("destination register same as write-back base")
7584 	     : _("source register same as write-back base"));
7585 }
7586
7587 /* inst.operands[i] was set up by parse_address.  Encode it into an
7588    ARM-format mode 2 load or store instruction.  If is_t is true,
7589    reject forms that cannot be used with a T instruction (i.e. not
7590    post-indexed).  */
7591 static void
7592 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
7593 {
7594   const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7595
7596   encode_arm_addr_mode_common (i, is_t);
7597
   /* Register offset form, optionally scaled by an immediate shift.  */
7598   if (inst.operands[i].immisreg)
7599     {
7600       constraint ((inst.operands[i].imm == REG_PC
7601 		   || (is_pc && inst.operands[i].writeback)),
7602 		  BAD_PC_ADDRESSING);
7603       inst.instruction |= INST_IMMEDIATE;	/* yes, this is backwards */
7604       inst.instruction |= inst.operands[i].imm;
7605       if (!inst.operands[i].negative)
7606 	inst.instruction |= INDEX_UP;
7607       if (inst.operands[i].shifted)
7608 	{
	  /* RRX encodes as ROR with zero shift amount.  */
7609 	  if (inst.operands[i].shift_kind == SHIFT_RRX)
7610 	    inst.instruction |= SHIFT_ROR << 5;
7611 	  else
7612 	    {
7613 	      inst.instruction |= inst.operands[i].shift_kind << 5;
7614 	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7615 	    }
7616 	}
7617     }
7618   else /* immediate offset in inst.reloc */
7619     {
7620       if (is_pc && !inst.reloc.pc_rel)
7621 	{
7622 	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
7623
7624 	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
7625 	     cannot use PC in addressing.
7626 	     PC cannot be used in writeback addressing, either.  */
7627 	  constraint ((is_t || inst.operands[i].writeback),
7628 		      BAD_PC_ADDRESSING);
7629
7630 	  /* Use of PC in str is deprecated for ARMv7.  */
7631 	  if (warn_on_deprecated
7632 	      && !is_load
7633 	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
7634 	    as_tsktsk (_("use of PC in this instruction is deprecated"));
7635 	}
7636
       /* No reloc chosen yet: use the generic 12-bit offset reloc and
	  pick the + direction for a zero offset.  */
7637       if (inst.reloc.type == BFD_RELOC_UNUSED)
7638 	{
7639 	  /* Prefer + for zero encoded value.  */
7640 	  if (!inst.operands[i].negative)
7641 	    inst.instruction |= INDEX_UP;
7642 	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
7643 	}
7644     }
7645 }
7646
7647 /* inst.operands[i] was set up by parse_address.  Encode it into an
7648    ARM-format mode 3 load or store instruction.  Reject forms that
7649    cannot be used with such instructions.  If is_t is true, reject
7650    forms that cannot be used with a T instruction (i.e. not
7651    post-indexed).  */
7652 static void
7653 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
7654 {
   /* Mode 3 has no barrel shifter: a scaled register offset cannot be
      encoded.  */
7655   if (inst.operands[i].immisreg && inst.operands[i].shifted)
7656     {
7657       inst.error = _("instruction does not accept scaled register index");
7658       return;
7659     }
7660
7661   encode_arm_addr_mode_common (i, is_t);
7662
7663   if (inst.operands[i].immisreg)
7664     {
7665       constraint ((inst.operands[i].imm == REG_PC
7666 		   || (is_t && inst.operands[i].reg == REG_PC)),
7667 		  BAD_PC_ADDRESSING);
7668       constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
7669 		  BAD_PC_WRITEBACK);
7670       inst.instruction |= inst.operands[i].imm;
7671       if (!inst.operands[i].negative)
7672 	inst.instruction |= INDEX_UP;
7673     }
7674   else /* immediate offset in inst.reloc */
7675     {
7676       constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
7677 		   && inst.operands[i].writeback),
7678 		  BAD_PC_WRITEBACK);
       /* Mode 3 uses a split 8-bit immediate offset.  */
7679       inst.instruction |= HWOFFSET_IMM;
7680       if (inst.reloc.type == BFD_RELOC_UNUSED)
7681 	{
7682 	  /* Prefer + for zero encoded value.  */
7683 	  if (!inst.operands[i].negative)
7684 	    inst.instruction |= INDEX_UP;
7685
7686 	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
7687 	}
7688     }
7689 }
7690
7691 /* Write immediate bits [7:0] to the following locations:
7692
7693 |28/24|23 19|18 16|15 4|3 0|
7694 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7695
7696 This function is used by VMOV/VMVN/VORR/VBIC. */
7697
7698 static void
7699 neon_write_immbits (unsigned immbits)
7700 {
7701 inst.instruction |= immbits & 0xf;
7702 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7703 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7704 }
7705
/* Invert the low-order SIZE bits of the pair XHI:XLO in place.
   Either pointer may be NULL, in which case that half is treated as
   zero and not written back.  SIZE must be 8, 16, 32 or 64.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  if (size == 8)
    lo = (~lo) & 0xff;
  else if (size == 16)
    lo = (~lo) & 0xffff;
  else if (size == 32)
    lo = (~lo) & 0xffffffff;
  else if (size == 64)
    {
      /* 64-bit inversion touches both halves.  */
      lo = (~lo) & 0xffffffff;
      hi = (~hi) & 0xffffffff;
    }
  else
    abort ();

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
7742
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D -- i.e. each byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned mask = 0xffu << (byte * 8);
      unsigned field = imm & mask;

      if (field != 0 && field != mask)
	return 0;
    }

  return 1;
}
7754
/* For an immediate of the form accepted by neon_bits_same_in_bytes,
   return 0bABCD: the low bit of each byte, packed into four bits.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    if (imm & (1u << (byte * 8)))
      result |= 1u << byte;

  return result;
}
7763
/* Compress a quarter-float (IEEE single with a restricted exponent
   and 4-bit mantissa) representation to 0b...000 abcdefgh: sign bit
   plus the top seven value bits.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned value_bits = (imm >> 19) & 0x7f;
  unsigned sign_bit = (imm >> 24) & 0x80;

  return value_bits | sign_bit;
}
7771
7772 /* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
7773    the instruction.  *OP is passed as the initial value of the op field, and
7774    may be set to a different value depending on the constant (i.e.
7775    "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7776    MVN).  If the immediate looks like a repeated pattern then also
7777    try smaller element sizes.  */
7778
7779 static int
7780 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
7781 			 unsigned *immbits, int *op, int size,
7782 			 enum neon_el_type type)
7783 {
7784   /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7785      float.  */
7786   if (type == NT_float && !float_p)
7787     return FAIL;
7788
   /* cmode 0xf: quarter-float immediate, 32-bit elements only.  */
7789   if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
7790     {
7791       if (size != 32 || *op == 1)
7792 	return FAIL;
7793       *immbits = neon_qfloat_bits (immlo);
7794       return 0xf;
7795     }
7796
7797   if (size == 64)
7798     {
       /* cmode 0xe with op forced to 1: each byte is all-zeros or
	  all-ones, squashed to one bit per byte.  */
7799       if (neon_bits_same_in_bytes (immhi)
7800 	  && neon_bits_same_in_bytes (immlo))
7801 	{
7802 	  if (*op == 1)
7803 	    return FAIL;
7804 	  *immbits = (neon_squash_bits (immhi) << 4)
7805 		     | neon_squash_bits (immlo);
7806 	  *op = 1;
7807 	  return 0xe;
7808 	}
7809
       /* Otherwise a 64-bit constant can only be handled as a
	  replicated 32-bit pattern.  */
7810       if (immhi != immlo)
7811 	return FAIL;
7812     }
7813
7814   if (size >= 32)
7815     {
       /* cmodes 0x0/0x2/0x4/0x6: a single byte in one of the four byte
	  positions of a 32-bit element.  */
7816       if (immlo == (immlo & 0x000000ff))
7817 	{
7818 	  *immbits = immlo;
7819 	  return 0x0;
7820 	}
7821       else if (immlo == (immlo & 0x0000ff00))
7822 	{
7823 	  *immbits = immlo >> 8;
7824 	  return 0x2;
7825 	}
7826       else if (immlo == (immlo & 0x00ff0000))
7827 	{
7828 	  *immbits = immlo >> 16;
7829 	  return 0x4;
7830 	}
7831       else if (immlo == (immlo & 0xff000000))
7832 	{
7833 	  *immbits = immlo >> 24;
7834 	  return 0x6;
7835 	}
       /* cmodes 0xc/0xd: a byte shifted left with ones shifted in.  */
7836       else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
7837 	{
7838 	  *immbits = (immlo >> 8) & 0xff;
7839 	  return 0xc;
7840 	}
7841       else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
7842 	{
7843 	  *immbits = (immlo >> 16) & 0xff;
7844 	  return 0xd;
7845 	}
7846
       /* If both halfwords match, retry as a 16-bit element pattern.  */
7847       if ((immlo & 0xffff) != (immlo >> 16))
7848 	return FAIL;
7849       immlo &= 0xffff;
7850     }
7851
7852   if (size >= 16)
7853     {
       /* cmodes 0x8/0xa: a byte in either half of a 16-bit element.  */
7854       if (immlo == (immlo & 0x000000ff))
7855 	{
7856 	  *immbits = immlo;
7857 	  return 0x8;
7858 	}
7859       else if (immlo == (immlo & 0x0000ff00))
7860 	{
7861 	  *immbits = immlo >> 8;
7862 	  return 0xa;
7863 	}
7864
       /* If both bytes match, retry as an 8-bit element pattern.  */
7865       if ((immlo & 0xff) != (immlo >> 8))
7866 	return FAIL;
7867       immlo &= 0xff;
7868     }
7869
   /* cmode 0xe (op 0): plain 8-bit immediate.  */
7870   if (immlo == (immlo & 0x000000ff))
7871     {
7872       /* Don't allow MVN with 8-bit immediate.  */
7873       if (*op == 1)
7874 	return FAIL;
7875       *immbits = immlo;
7876       return 0xe;
7877     }
7878
7879   return FAIL;
7880 }
7881
7882 #if defined BFD_HOST_64_BIT
7883 /* Returns TRUE if double precision value V may be cast
7884 to single precision without loss of accuracy. */
7885
7886 static bfd_boolean
7887 is_double_a_single (bfd_int64_t v)
7888 {
7889 int exp = (int)((v >> 52) & 0x7FF);
7890 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7891
7892 return (exp == 0 || exp == 0x7FF
7893 || (exp >= 1023 - 126 && exp <= 1023 + 127))
7894 && (mantissa & 0x1FFFFFFFl) == 0;
7895 }
7896
7897 /* Returns a double precision value casted to single precision
7898 (ignoring the least significant bits in exponent and mantissa). */
7899
7900 static int
7901 double_to_single (bfd_int64_t v)
7902 {
7903 int sign = (int) ((v >> 63) & 1l);
7904 int exp = (int) ((v >> 52) & 0x7FF);
7905 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7906
7907 if (exp == 0x7FF)
7908 exp = 0xFF;
7909 else
7910 {
7911 exp = exp - 1023 + 127;
7912 if (exp >= 0xFF)
7913 {
7914 /* Infinity. */
7915 exp = 0x7F;
7916 mantissa = 0;
7917 }
7918 else if (exp < 0)
7919 {
7920 /* No denormalized numbers. */
7921 exp = 0;
7922 mantissa = 0;
7923 }
7924 }
7925 mantissa >>= 29;
7926 return (sign << 31) | (exp << 23) | mantissa;
7927 }
7928 #endif /* BFD_HOST_64_BIT */
7929
/* Kind of "=expr" literal being loaded; selects which move-immediate
   forms move_or_literal_pool may try before falling back to a
   literal-pool load.  */
7930 enum lit_type
7931 {
7932   CONST_THUMB,	/* Thumb LDR literal.  */
7933   CONST_ARM,	/* ARM LDR literal.  */
7934   CONST_VEC	/* VLDR / Neon vector literal.  */
7935 };
7936
7937 static void do_vfp_nsyn_opcode (const char *);
7938
7939 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7940 Determine whether it can be performed with a move instruction; if
7941 it can, convert inst.instruction to that move instruction and
7942 return TRUE; if it can't, convert inst.instruction to a literal-pool
7943 load and return FALSE. If this is not a valid thing to do in the
7944 current context, set inst.error and return TRUE.
7945
7946 inst.operands[i] describes the destination register. */
7947
7948 static bfd_boolean
7949 move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
7950 {
7951 unsigned long tbit;
7952 bfd_boolean thumb_p = (t == CONST_THUMB);
7953 bfd_boolean arm_p = (t == CONST_ARM);
7954
7955 if (thumb_p)
7956 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
7957 else
7958 tbit = LOAD_BIT;
7959
7960 if ((inst.instruction & tbit) == 0)
7961 {
7962 inst.error = _("invalid pseudo operation");
7963 return TRUE;
7964 }
7965
7966 if (inst.reloc.exp.X_op != O_constant
7967 && inst.reloc.exp.X_op != O_symbol
7968 && inst.reloc.exp.X_op != O_big)
7969 {
7970 inst.error = _("constant expression expected");
7971 return TRUE;
7972 }
7973
7974 if (inst.reloc.exp.X_op == O_constant
7975 || inst.reloc.exp.X_op == O_big)
7976 {
7977 #if defined BFD_HOST_64_BIT
7978 bfd_int64_t v;
7979 #else
7980 offsetT v;
7981 #endif
7982 if (inst.reloc.exp.X_op == O_big)
7983 {
7984 LITTLENUM_TYPE w[X_PRECISION];
7985 LITTLENUM_TYPE * l;
7986
7987 if (inst.reloc.exp.X_add_number == -1)
7988 {
7989 gen_to_words (w, X_PRECISION, E_PRECISION);
7990 l = w;
7991 /* FIXME: Should we check words w[2..5] ? */
7992 }
7993 else
7994 l = generic_bignum;
7995
7996 #if defined BFD_HOST_64_BIT
7997 v =
7998 ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
7999 << LITTLENUM_NUMBER_OF_BITS)
8000 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
8001 << LITTLENUM_NUMBER_OF_BITS)
8002 | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
8003 << LITTLENUM_NUMBER_OF_BITS)
8004 | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
8005 #else
8006 v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
8007 | (l[0] & LITTLENUM_MASK);
8008 #endif
8009 }
8010 else
8011 v = inst.reloc.exp.X_add_number;
8012
8013 if (!inst.operands[i].issingle)
8014 {
8015 if (thumb_p)
8016 {
8017 /* LDR should not use lead in a flag-setting instruction being
8018 chosen so we do not check whether movs can be used. */
8019
8020 if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
8021 || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
8022 && inst.operands[i].reg != 13
8023 && inst.operands[i].reg != 15)
8024 {
8025 /* Check if on thumb2 it can be done with a mov.w, mvn or
8026 movw instruction. */
8027 unsigned int newimm;
8028 bfd_boolean isNegated;
8029
8030 newimm = encode_thumb32_immediate (v);
8031 if (newimm != (unsigned int) FAIL)
8032 isNegated = FALSE;
8033 else
8034 {
8035 newimm = encode_thumb32_immediate (~v);
8036 if (newimm != (unsigned int) FAIL)
8037 isNegated = TRUE;
8038 }
8039
8040 /* The number can be loaded with a mov.w or mvn
8041 instruction. */
8042 if (newimm != (unsigned int) FAIL
8043 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
8044 {
8045 inst.instruction = (0xf04f0000 /* MOV.W. */
8046 | (inst.operands[i].reg << 8));
8047 /* Change to MOVN. */
8048 inst.instruction |= (isNegated ? 0x200000 : 0);
8049 inst.instruction |= (newimm & 0x800) << 15;
8050 inst.instruction |= (newimm & 0x700) << 4;
8051 inst.instruction |= (newimm & 0x0ff);
8052 return TRUE;
8053 }
8054 /* The number can be loaded with a movw instruction. */
8055 else if ((v & ~0xFFFF) == 0
8056 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
8057 {
8058 int imm = v & 0xFFFF;
8059
8060 inst.instruction = 0xf2400000; /* MOVW. */
8061 inst.instruction |= (inst.operands[i].reg << 8);
8062 inst.instruction |= (imm & 0xf000) << 4;
8063 inst.instruction |= (imm & 0x0800) << 15;
8064 inst.instruction |= (imm & 0x0700) << 4;
8065 inst.instruction |= (imm & 0x00ff);
8066 return TRUE;
8067 }
8068 }
8069 }
8070 else if (arm_p)
8071 {
8072 int value = encode_arm_immediate (v);
8073
8074 if (value != FAIL)
8075 {
8076 /* This can be done with a mov instruction. */
8077 inst.instruction &= LITERAL_MASK;
8078 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
8079 inst.instruction |= value & 0xfff;
8080 return TRUE;
8081 }
8082
8083 value = encode_arm_immediate (~ v);
8084 if (value != FAIL)
8085 {
8086 /* This can be done with a mvn instruction. */
8087 inst.instruction &= LITERAL_MASK;
8088 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
8089 inst.instruction |= value & 0xfff;
8090 return TRUE;
8091 }
8092 }
8093 else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
8094 {
8095 int op = 0;
8096 unsigned immbits = 0;
8097 unsigned immlo = inst.operands[1].imm;
8098 unsigned immhi = inst.operands[1].regisimm
8099 ? inst.operands[1].reg
8100 : inst.reloc.exp.X_unsigned
8101 ? 0
8102 : ((bfd_int64_t)((int) immlo)) >> 32;
8103 int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
8104 &op, 64, NT_invtype);
8105
8106 if (cmode == FAIL)
8107 {
8108 neon_invert_size (&immlo, &immhi, 64);
8109 op = !op;
8110 cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
8111 &op, 64, NT_invtype);
8112 }
8113
8114 if (cmode != FAIL)
8115 {
8116 inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
8117 | (1 << 23)
8118 | (cmode << 8)
8119 | (op << 5)
8120 | (1 << 4);
8121
8122 /* Fill other bits in vmov encoding for both thumb and arm. */
8123 if (thumb_mode)
8124 inst.instruction |= (0x7U << 29) | (0xF << 24);
8125 else
8126 inst.instruction |= (0xFU << 28) | (0x1 << 25);
8127 neon_write_immbits (immbits);
8128 return TRUE;
8129 }
8130 }
8131 }
8132
8133 if (t == CONST_VEC)
8134 {
8135 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8136 if (inst.operands[i].issingle
8137 && is_quarter_float (inst.operands[1].imm)
8138 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
8139 {
8140 inst.operands[1].imm =
8141 neon_qfloat_bits (v);
8142 do_vfp_nsyn_opcode ("fconsts");
8143 return TRUE;
8144 }
8145
8146 /* If our host does not support a 64-bit type then we cannot perform
8147 the following optimization. This mean that there will be a
8148 discrepancy between the output produced by an assembler built for
8149 a 32-bit-only host and the output produced from a 64-bit host, but
8150 this cannot be helped. */
8151 #if defined BFD_HOST_64_BIT
8152 else if (!inst.operands[1].issingle
8153 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
8154 {
8155 if (is_double_a_single (v)
8156 && is_quarter_float (double_to_single (v)))
8157 {
8158 inst.operands[1].imm =
8159 neon_qfloat_bits (double_to_single (v));
8160 do_vfp_nsyn_opcode ("fconstd");
8161 return TRUE;
8162 }
8163 }
8164 #endif
8165 }
8166 }
8167
8168 if (add_to_lit_pool ((!inst.operands[i].isvec
8169 || inst.operands[i].issingle) ? 4 : 8) == FAIL)
8170 return TRUE;
8171
8172 inst.operands[1].reg = REG_PC;
8173 inst.operands[1].isreg = 1;
8174 inst.operands[1].preind = 1;
8175 inst.reloc.pc_rel = 1;
8176 inst.reloc.type = (thumb_p
8177 ? BFD_RELOC_ARM_THUMB_OFFSET
8178 : (mode_3
8179 ? BFD_RELOC_ARM_HWLITERAL
8180 : BFD_RELOC_ARM_LITERAL));
8181 return FALSE;
8182 }
8183
8184 /* inst.operands[i] was set up by parse_address. Encode it into an
8185 ARM-format instruction. Reject all forms which cannot be encoded
8186 into a coprocessor load/store instruction. If wb_ok is false,
8187 reject use of writeback; if unind_ok is false, reject use of
8188 unindexed addressing. If reloc_override is not 0, use it instead
8189 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8190 (in which case it is preserved). */
8191
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  /* A non-register operand (=imm or bare symbol) is only acceptable when
     the destination is a VFP/Neon register; try to satisfy it via the
     literal pool (possibly converting to an immediate-form vmov).  */
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* In unindexed mode the 8-bit field carries a coprocessor option
	 value rather than an offset, and the U bit must be set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  /* Group relocations (ALU_PC_G0_NC .. LDC_SB_G2) and LDR_PC_G0 set up
     by the parser are preserved; anything else is replaced with the
     default coprocessor-offset relocation for the current mode.  */
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8260
8261 /* Functions for instruction encoding, sorted by sub-architecture.
8262 First some generics; their names are taken from the conventional
8263 bit positions for register arguments in ARM format instructions. */
8264
/* Encoder for instructions with no operands: the opcode from the
   opcode table is already complete.  */
static void
do_noargs (void)
{
}
8269
/* Encode Rd (operand 0) in bits 12-15.  */
static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}
8275
/* Encode Rn (operand 0) in bits 16-19.  */
static void
do_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
}
8281
/* Encode Rd (bits 12-15) and Rm (bits 0-3).  */
static void
do_rd_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
}
8288
/* Encode Rm (bits 0-3) and Rn (bits 16-19).  */
static void
do_rm_rn (void)
{
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 16;
}
8295
/* Encode Rd (bits 12-15) and Rn (bits 16-19).  */
static void
do_rd_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
8302
/* Encode Rn (bits 16-19) and Rd (bits 12-15), with operand 0 being Rn.  */
static void
do_rn_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
}
8309
/* Encode operand 0 in bits 8-11 and operand 1 in bits 16-19 (used for
   the TT* test-target instructions).  */
static void
do_tt (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
}
8316
8317 static bfd_boolean
8318 check_obsolete (const arm_feature_set *feature, const char *msg)
8319 {
8320 if (ARM_CPU_IS_ANY (cpu_variant))
8321 {
8322 as_tsktsk ("%s", msg);
8323 return TRUE;
8324 }
8325 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8326 {
8327 as_bad ("%s", msg);
8328 return TRUE;
8329 }
8330
8331 return FALSE;
8332 }
8333
/* Encode Rd (bits 12-15), Rm (bits 0-3) and Rn (bits 16-19), with
   additional operand and architecture checks for SWP/SWPB.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8357
/* Encode Rd (bits 12-15), Rn (bits 16-19) and Rm (bits 0-3).  */
static void
do_rd_rn_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
}
8365
/* Encode Rm (bits 0-3), Rd (bits 12-15) and Rn (bits 16-19), where the
   address operand must be a plain register with no offset.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* Any offset expression the parser recorded must be a literal zero
     (or absent, leaving X_op == O_illegal).  */
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8378
/* Encode a single immediate operand into the low bits of the opcode.  */
static void
do_imm0 (void)
{
  inst.instruction |= inst.operands[0].imm;
}
8384
/* Encode Rd (bits 12-15) plus a coprocessor-style address (operand 1),
   allowing both writeback and unindexed forms.  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8391
8392 /* ARM instructions, in alphabetical order by function name (except
8393 that wrapper functions appear immediately after the function they
8394 wrap). */
8395
8396 /* This is a pseudo-op of the form "adr rd, label" to be converted
8397 into a relative address of the form "add rd, pc, #label-.-8". */
8398
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* In ARM state the PC reads as the instruction address plus 8.  */
  inst.reloc.exp.X_add_number -= 8;

  /* The address of a defined Thumb function must keep the low
     (interworking) bit set.  */
  if (inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_number += 1;
}
8416
8417 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8418 into a relative address of the form:
8419 add rd, pc, #low(label-.-8)"
8420 add rd, rd, #high(label-.-8)" */
8421
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* adrl expands to a two-instruction (add/add) sequence.  */
  inst.size = INSN_SIZE * 2;
  inst.reloc.exp.X_add_number -= 8;

  /* The address of a defined Thumb function must keep the low
     (interworking) bit set.  */
  if (inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_number += 1;
}
8440
/* Encode a data-processing (arithmetic) instruction: Rd, optional Rn
   (defaulting to Rd), and a shifter operand.  */
static void
do_arit (void)
{
  /* The THUMB_ALU_ABS group relocations are only valid on Thumb-1
     instructions (see THUMB1_RELOC_ONLY diagnostic).  */
  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  /* Two-operand form: the destination doubles as the first source.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
8453
8454 static void
8455 do_barrier (void)
8456 {
8457 if (inst.operands[0].present)
8458 inst.instruction |= inst.operands[0].imm;
8459 else
8460 inst.instruction |= 0xf;
8461 }
8462
8463 static void
8464 do_bfc (void)
8465 {
8466 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8467 constraint (msb > 32, _("bit-field extends past end of register"));
8468 /* The instruction encoding stores the LSB and MSB,
8469 not the LSB and width. */
8470 inst.instruction |= inst.operands[0].reg << 12;
8471 inst.instruction |= inst.operands[1].imm << 7;
8472 inst.instruction |= (msb - 1) << 16;
8473 }
8474
8475 static void
8476 do_bfi (void)
8477 {
8478 unsigned int msb;
8479
8480 /* #0 in second position is alternative syntax for bfc, which is
8481 the same instruction but with REG_PC in the Rm field. */
8482 if (!inst.operands[1].isreg)
8483 inst.operands[1].reg = REG_PC;
8484
8485 msb = inst.operands[2].imm + inst.operands[3].imm;
8486 constraint (msb > 32, _("bit-field extends past end of register"));
8487 /* The instruction encoding stores the LSB and MSB,
8488 not the LSB and width. */
8489 inst.instruction |= inst.operands[0].reg << 12;
8490 inst.instruction |= inst.operands[1].reg;
8491 inst.instruction |= inst.operands[2].imm << 7;
8492 inst.instruction |= (msb - 1) << 16;
8493 }
8494
8495 static void
8496 do_bfx (void)
8497 {
8498 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8499 _("bit-field extends past end of register"));
8500 inst.instruction |= inst.operands[0].reg << 12;
8501 inst.instruction |= inst.operands[1].reg;
8502 inst.instruction |= inst.operands[2].imm << 7;
8503 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8504 }
8505
8506 /* ARM V5 breakpoint instruction (argument parse)
8507 BKPT <16 bit unsigned immediate>
8508 Instruction is not conditional.
8509 The bit pattern given in insns[] has the COND_ALWAYS condition,
8510 and it is an error if the caller tried to override that. */
8511
8512 static void
8513 do_bkpt (void)
8514 {
8515 /* Top 12 of 16 bits to bits 19:8. */
8516 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8517
8518 /* Bottom 4 of 16 bits to bits 3:0. */
8519 inst.instruction |= inst.operands[0].imm & 0xf;
8520 }
8521
/* Set up inst.reloc for a branch.  DEFAULT_RELOC is used unless the
   operand carried an explicit (plt) or (tlscall) suffix, which forces
   the corresponding PLT32 / TLS_CALL relocation.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
8538
/* Encode an unconditional/conditional branch.  EABI v4 and later use
   the PCREL_JUMP relocation; earlier objects use PCREL_BRANCH.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8549
/* Encode BL.  For EABI v4+, an unconditional BL gets the PCREL_CALL
   relocation (the linker may convert it to BLX); a conditional BL gets
   PCREL_JUMP.  Older objects use PCREL_BRANCH.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8565
8566 /* ARM V5 branch-link-exchange instruction (argument parse)
8567 BLX <target_addr> ie BLX(1)
8568 BLX{<condition>} <Rm> ie BLX(2)
8569 Unfortunately, there are two different opcodes for this mnemonic.
8570 So, the insns[].value is not used, and the code here zaps values
8571 into inst.instruction.
8572 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8573
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* Replace the table opcode with the unconditional BLX(1) form.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8597
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* NOTE: the assignment below is conditional only when OBJ_ELF is
     defined; for non-ELF objects the reloc is unconditionally
     suppressed, and for ELF only EABI v4+ objects keep it.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8621
8622
8623 /* ARM v5TEJ. Jump to Jazelle code. */
8624
static void
do_bxj (void)
{
  /* Jumping to r15 is permitted but pointless; warn only.  */
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg;
}
8633
8634 /* Co-processor data operation:
8635 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8636 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
static void
do_cdp (void)
{
  /* Fields: coproc (bits 8-11), opcode_1 (20-23), CRd (12-15),
     CRn (16-19), CRm (0-3), opcode_2 (5-7).  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 20;
  inst.instruction |= inst.operands[2].reg << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8647
/* Encode a comparison instruction: Rn (bits 16-19) plus a shifter
   operand; there is no destination register.  */
static void
do_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}
8654
8655 /* Transfer between coprocessor and ARM registers.
8656 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8657 MRC2
8658 MCR{cond}
8659 MCR2
8660
8661 No special properties. */
8662
/* Describes one coprocessor register access whose use is deprecated
   and/or obsoleted from some architecture version onwards.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opcode_1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opcode_2 field.  */
  arm_feature_set deprecated;	/* Feature set where access is deprecated.  */
  arm_feature_set obsoleted;	/* Feature set where access is obsoleted.  */
  const char *dep_msg;		/* Diagnostic for deprecated access.  */
  const char *obs_msg;		/* Diagnostic for obsoleted access.  */
};
8675
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the table above; consulted by do_co_reg.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8703
/* Encode MRC/MRC2/MCR/MCR2, applying per-mode register restrictions and
   warning about accesses to deprecated coprocessor registers.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn when the access matches an entry in the deprecated-register
     table, unless assembling for "any" CPU or warnings are disabled.  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  /* Fields: coproc (bits 8-11), opcode_1 (21-23), Rd (12-15),
     CRn (16-19), CRm (0-3), opcode_2 (5-7).  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8753
8754 /* Transfer between coprocessor register and pair of ARM registers.
8755 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8756 MCRR2
8757 MRRC{cond}
8758 MRRC2
8759
8760 Two XScale instructions are special cases of these:
8761
8762 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8763 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8764
8765 Result unpredictable if Rd or Rn is R15. */
8766
/* Encode MCRR/MCRR2/MRRC/MRRC2 (coprocessor <-> ARM register pair).  */
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
       /* If Rd == Rn, error that the operation is
	  unpredictable (example MRRC p3,#1,r1,r1,c4).  */
       constraint (Rd == Rn, BAD_OVERLAP);
    }

  /* Fields: coproc (bits 8-11), opcode (4-7), Rd (12-15), Rn (16-19),
     CRm (0-3).  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
8800
8801 static void
8802 do_cpsi (void)
8803 {
8804 inst.instruction |= inst.operands[0].imm << 6;
8805 if (inst.operands[1].present)
8806 {
8807 inst.instruction |= CPSI_MMOD;
8808 inst.instruction |= inst.operands[1].imm;
8809 }
8810 }
8811
/* Encode DBG: the hint option goes in the low bits of the opcode.  */
static void
do_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
8817
8818 static void
8819 do_div (void)
8820 {
8821 unsigned Rd, Rn, Rm;
8822
8823 Rd = inst.operands[0].reg;
8824 Rn = (inst.operands[1].present
8825 ? inst.operands[1].reg : Rd);
8826 Rm = inst.operands[2].reg;
8827
8828 constraint ((Rd == REG_PC), BAD_PC);
8829 constraint ((Rn == REG_PC), BAD_PC);
8830 constraint ((Rm == REG_PC), BAD_PC);
8831
8832 inst.instruction |= Rd << 16;
8833 inst.instruction |= Rn << 0;
8834 inst.instruction |= Rm << 8;
8835 }
8836
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emit no bytes for this pseudo-instruction.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record the IT mask and condition for validating the following
	 instructions in the block.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8853
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* ffs returns 0 for an empty list, making I equal -1; the original
     code then evaluated (1 << -1), which is undefined behaviour.
     Reject empty or out-of-range lists before shifting.  */
  if (i < 0 || i > 15)
    return -1;
  return range == (1 << i) ? i : -1;
}
8862
/* Encode an LDM/STM.  Operand 0 is the base register, operand 1 the
   register list.  When FROM_PUSH_POP_MNEM is set and only one register
   is listed, the A2 single-register PUSH/POP encoding is used instead.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* Writeback flagged on the register list selects the type 2/3
     (LDM_TYPE_2_OR_3) form — presumably set by the parser for a
     '^'-suffixed list; verify against the operand parser.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      /* Keep only the condition field and rebuild the opcode.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8913
/* LDM/STM spelled directly (not via a PUSH/POP mnemonic).  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
8919
8920 /* ARMv5TE load-consecutive (argument parse)
8921 Mode is like LDRH.
8922
8923 LDRccD R, mode
8924 STRccD R, mode. */
8925
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* An even first register excludes PC, and r14 would make the implicit
     second register PC, so r14 is rejected too.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second register may be left implicit.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8961
/* Encode LDREX: Rt (bits 12-15), base Rn (bits 16-19).  Only a plain
   [Rn] or [Rn, #0] address is accepted.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* NOTE(review): redundant — a PC base is already rejected by the
     first constraint above, so this check can never fire.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8993
/* Encode LDREXD: even/odd register pair in Rt (bits 12-15), base
   register (operand 2) in bits 16-19.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9009
9010 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9011 which is not a multiple of four is UNPREDICTABLE. */
static void
check_ldr_r15_aligned (void)
{
  /* Only literal/immediate offsets are checked; a register offset
     cannot be validated at assembly time.  */
  constraint (!(inst.operands[1].immisreg)
	      && (inst.operands[0].reg == REG_PC
		  && inst.operands[1].reg == REG_PC
		  && (inst.reloc.exp.X_add_number & 0x3)),
	      _("ldr to register 15 must be 4-byte aligned"));
}
9021
/* Encode a word/byte load/store (LDR/STR family), diverting
   "ldr Rd, =imm" forms to the literal pool.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
9032
/* Encode the user-mode (translated) load/store forms LDRT/STRT.  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
9051
9052 /* Halfword and signed-byte load/store operations. */
9053
/* Encode an ARMv4 halfword/signed-byte load/store (addressing mode 3),
   diverting "ldrh Rd, =imm" forms to the literal pool.  */
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
9064
/* Encode the user-mode (translated) halfword/signed-byte forms.  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9083
9084 /* Co-processor register load/store.
9085 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
static void
do_lstc (void)
{
  /* Fields: coproc (bits 8-11), CRd (bits 12-15), plus a coprocessor
     address allowing writeback and unindexed forms.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9093
/* Encode MLA/MLS: Rd in bits 16-19, Rm in 0-3, Rs in 8-11, Rn in 12-15.  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).
     Bit 22 (0x00400000) being set distinguishes the MLS form here.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9108
/* Encode the ARM MOV family: Rd in bits 12-15 plus a shifter operand.
   Rejects the Thumb-1-only ALU group relocations.  */
static void
do_mov (void)
{
  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9118
9119 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 22 distinguishes MOVT (top) from MOVW (bottom).  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  /* With no relocation pending, encode the immediate directly.  */
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      imm = inst.reloc.exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
9140
/* Handle the VFP-syntax forms of MRS.  Returns SUCCESS if the operands
   matched a VFP form (and the instruction was re-dispatched), FAIL to
   let the caller fall back to the core-register encoding.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      /* APSR_nzcv destination: only FPSCR may be the source; rewrite
	 as FMSTAT, which takes no operands.  */
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9159
9160 static int
9161 do_vfp_nsyn_msr (void)
9162 {
9163 if (inst.operands[0].isvec)
9164 do_vfp_nsyn_opcode ("fmxr");
9165 else
9166 return FAIL;
9167
9168 return SUCCESS;
9169 }
9170
/* Encode VMRS: VFP system register (operand 1) to core register Rt.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  /* SP is not a valid destination in Thumb state.  */
  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* MVFR2 is only valid at ARMv8-A.  */
  if (inst.operands[1].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* APSR_ sets isvec.  All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9199
/* Encode VMSR: core register Rt into a VFP system register (operand 0).  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  /* Thumb rejects SP/PC as source; ARM rejects only PC.  */
  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* MVFR2 is only valid for ARMv8-A.  */
  if (inst.operands[0].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9223
/* Encode MRS, including the banked-register form.  */
static void
do_mrs (void)
{
  unsigned br;

  /* VFP-syntax spellings (e.g. FMRX) are handled first.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      br = inst.operands[1].reg;
      /* NOTE(review): the second test masks with 0xf0000 but compares
	 against 0xf000, so it can never be equal; in effect any value
	 without the 0x200 flag is rejected.  Confirm the intended mask
	 against the banked-register encodings before changing.  */
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9252
9253 /* Two possible forms:
9254 "{C|S}PSR_<field>, Rm",
9255 "{C|S}PSR_f, #expression". */
9256
static void
do_msr (void)
{
  /* VFP-syntax spellings (e.g. FMXR) are handled first.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* The PSR field mask was parsed into operand 0's immediate.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: leave the value to a fixup.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
9273
/* Encode MUL: Rd in bits 16-19, Rm in 0-3, Rs in 8-11.  An omitted
   third operand defaults to Rd (two-operand form).  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Before ARMv6 the result is unpredictable when Rd == Rm.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
9289
9290 /* Long Multiply Parser
9291 UMULL RdLo, RdHi, Rm, Rs
9292 SMULL RdLo, RdHi, Rm, Rs
9293 UMLAL RdLo, RdHi, Rm, Rs
9294 SMLAL RdLo, RdHi, Rm, Rs. */
9295
static void
do_mull (void)
{
  /* RdLo in bits 12-15, RdHi in 16-19, Rm in 0-3, Rs in 8-11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
9314
/* Encode NOP.  On v6K and later (or when a hint operand is given) emit
   the architectural hint encoding; otherwise leave the legacy encoding
   already in inst.instruction untouched.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* keep only the condition field */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
9328
9329 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9330 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9331 Condition defaults to COND_ALWAYS.
9332 Error if Rd, Rn or Rm are R15. */
9333
9334 static void
9335 do_pkhbt (void)
9336 {
9337 inst.instruction |= inst.operands[0].reg << 12;
9338 inst.instruction |= inst.operands[1].reg << 16;
9339 inst.instruction |= inst.operands[2].reg;
9340 if (inst.operands[3].present)
9341 encode_arm_shift (3);
9342 }
9343
9344 /* ARM V6 PKHTB (Argument Parse). */
9345
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note Rm and Rn swap positions.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      /* Normal PKHTB encoding with an ASR shift amount.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9366
9367 /* ARMv5TE: Preload-Cache
9368 MP Extensions: Preload for write
9369
9370 PLD(W) <addr_mode>
9371
9372 Syntactically, like LDR with B=1, W=0, L=1. */
9373
static void
do_pld (void)
{
  /* PLD accepts only a plain pre-indexed address: no post-index,
     no writeback, no unindexed form.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9387
9388 /* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* Same addressing restrictions as PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI does not set the P bit.  */
  inst.instruction &= ~PRE_INDEX;
}
9403
/* Encode PUSH/POP by rewriting them as LDM/STM with SP! as the base.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list to operand 1 and synthesise an SP-with-
     writeback base register as operand 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9416
9417 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9418 word at the specified address and the following word
9419 respectively.
9420 Unconditionally executed.
9421 Error if Rn is R15. */
9422
static void
do_rfe (void)
{
  /* Base register in bits 16-19; W bit for optional writeback.  */
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
9430
9431 /* ARM V6 ssat (argument parse). */
9432
9433 static void
9434 do_ssat (void)
9435 {
9436 inst.instruction |= inst.operands[0].reg << 12;
9437 inst.instruction |= (inst.operands[1].imm - 1) << 16;
9438 inst.instruction |= inst.operands[2].reg;
9439
9440 if (inst.operands[3].present)
9441 encode_arm_shift (3);
9442 }
9443
9444 /* ARM V6 usat (argument parse). */
9445
9446 static void
9447 do_usat (void)
9448 {
9449 inst.instruction |= inst.operands[0].reg << 12;
9450 inst.instruction |= inst.operands[1].imm << 16;
9451 inst.instruction |= inst.operands[2].reg;
9452
9453 if (inst.operands[3].present)
9454 encode_arm_shift (3);
9455 }
9456
9457 /* ARM V6 ssat16 (argument parse). */
9458
9459 static void
9460 do_ssat16 (void)
9461 {
9462 inst.instruction |= inst.operands[0].reg << 12;
9463 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9464 inst.instruction |= inst.operands[2].reg;
9465 }
9466
9467 static void
9468 do_usat16 (void)
9469 {
9470 inst.instruction |= inst.operands[0].reg << 12;
9471 inst.instruction |= inst.operands[1].imm << 16;
9472 inst.instruction |= inst.operands[2].reg;
9473 }
9474
9475 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9476 preserving the other bits.
9477
9478 setend <endian_specifier>, where <endian_specifier> is either
9479 BE or LE. */
9480
static void
do_setend (void)
{
  /* SETEND is deprecated from ARMv8-A onwards.  */
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* Operand imm is non-zero for BE; bit 9 is the E bit.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9491
/* Encode the shift pseudo-ops (LSL/LSR/ASR/ROR as MOV-with-shift).
   The two-operand form "Rd, Rs" uses Rd as the shifted source.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)	/* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate shift amount: resolved by a fixup.  */
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}
9512
/* SMC: the immediate is emitted via a dedicated fixup.  */
static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}
9519
/* HVC: the immediate is emitted via a dedicated fixup.  */
static void
do_hvc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_HVC;
  inst.reloc.pc_rel = 0;
}
9526
/* SWI/SVC: the immediate is emitted via a dedicated fixup.  */
static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
9533
/* ARM-state SETPAN: the PAN value goes in bit 9.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
9542
/* Thumb-state SETPAN: the PAN value goes in bit 3.  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9551
9552 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9553 SMLAxy{cond} Rd,Rm,Rs,Rn
9554 SMLAWy{cond} Rd,Rm,Rs,Rn
9555 Error if any register is R15. */
9556
9557 static void
9558 do_smla (void)
9559 {
9560 inst.instruction |= inst.operands[0].reg << 16;
9561 inst.instruction |= inst.operands[1].reg;
9562 inst.instruction |= inst.operands[2].reg << 8;
9563 inst.instruction |= inst.operands[3].reg << 12;
9564 }
9565
9566 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9567 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9568 Error if any register is R15.
9569 Warning if Rdlo == Rdhi. */
9570
9571 static void
9572 do_smlal (void)
9573 {
9574 inst.instruction |= inst.operands[0].reg << 12;
9575 inst.instruction |= inst.operands[1].reg << 16;
9576 inst.instruction |= inst.operands[2].reg;
9577 inst.instruction |= inst.operands[3].reg << 8;
9578
9579 if (inst.operands[0].reg == inst.operands[1].reg)
9580 as_tsktsk (_("rdhi and rdlo must be different"));
9581 }
9582
9583 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9584 SMULxy{cond} Rd,Rm,Rs
9585 Error if any register is R15. */
9586
9587 static void
9588 do_smul (void)
9589 {
9590 inst.instruction |= inst.operands[0].reg << 16;
9591 inst.instruction |= inst.operands[1].reg;
9592 inst.instruction |= inst.operands[2].reg << 8;
9593 }
9594
9595 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9596 the same for both ARM and Thumb-2. */
9597
static void
do_srs (void)
{
  int reg;

  /* An explicit base register must be SP; it defaults to SP when
     omitted.  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  /* The target mode number occupies the low bits.  */
  inst.instruction |= inst.operands[1].imm;
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
9616
9617 /* ARM V6 strex (argument parse). */
9618
static void
do_strex (void)
{
  /* Only a plain [Rn] (or [Rn, #0]) address is permitted.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register may overlap neither the value nor the base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9642
/* Thumb STREXB/STREXH: validate the address and overlap rules, then
   reuse the common Rm/Rd/Rn encoder.  */
static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9657
/* STREXD: store-exclusive of an even/odd register pair.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register must not overlap the pair or the base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
9679
/* ARM V8 STLEX (ARM encoding): status register must not overlap the
   value or base registers.  */
static void
do_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
9689
/* ARM V8 STLEX (Thumb encoding): same overlap rule, different field
   layout via do_rm_rd_rn.  */
static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9698
9699 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9700 extends it to 32-bits, and adds the result to a value in another
9701 register. You can specify a rotation by 0, 8, 16, or 24 bits
9702 before extracting the 16-bit value.
9703 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9704 Condition defaults to COND_ALWAYS.
9705 Error if any register uses R15. */
9706
9707 static void
9708 do_sxtah (void)
9709 {
9710 inst.instruction |= inst.operands[0].reg << 12;
9711 inst.instruction |= inst.operands[1].reg << 16;
9712 inst.instruction |= inst.operands[2].reg;
9713 inst.instruction |= inst.operands[3].imm << 10;
9714 }
9715
9716 /* ARM V6 SXTH.
9717
9718 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9719 Condition defaults to COND_ALWAYS.
9720 Error if any register uses R15. */
9721
9722 static void
9723 do_sxth (void)
9724 {
9725 inst.instruction |= inst.operands[0].reg << 12;
9726 inst.instruction |= inst.operands[1].reg;
9727 inst.instruction |= inst.operands[2].imm << 10;
9728 }
9729 \f
9730 /* VFP instructions. In a logical order: SP variant first, monad
9731 before dyad, arithmetic then move then load/store. */
9732
9733 static void
9734 do_vfp_sp_monadic (void)
9735 {
9736 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9737 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9738 }
9739
9740 static void
9741 do_vfp_sp_dyadic (void)
9742 {
9743 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9744 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9745 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9746 }
9747
/* Single-precision compare against zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
9753
9754 static void
9755 do_vfp_dp_sp_cvt (void)
9756 {
9757 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9758 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9759 }
9760
9761 static void
9762 do_vfp_sp_dp_cvt (void)
9763 {
9764 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9765 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9766 }
9767
9768 static void
9769 do_vfp_reg_from_sp (void)
9770 {
9771 inst.instruction |= inst.operands[0].reg << 12;
9772 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9773 }
9774
/* Two core registers from a consecutive SP-register pair
   (e.g. FMRRS): Rt in bits 12-15, Rt2 in 16-19, Sm pair in operand 2.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9784
9785 static void
9786 do_vfp_sp_from_reg (void)
9787 {
9788 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
9789 inst.instruction |= inst.operands[1].reg << 12;
9790 }
9791
/* Consecutive SP-register pair from two core registers
   (e.g. FMSRR): Sm pair in operand 0, Rt in bits 12-15, Rt2 in 16-19.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9801
/* Single-precision load/store: Sd plus a coprocessor address.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9808
/* Double-precision load/store: Dd plus a coprocessor address.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9815
9816
/* Common encoder for single-precision load/store-multiple; LDSTM_TYPE
   selects the addressing variant.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  /* Only the IA variant may omit writeback.  */
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  /* Register count in the low bits.  */
  inst.instruction |= inst.operands[1].imm;
}
9829
/* Common encoder for double-precision load/store-multiple; LDSTM_TYPE
   selects the addressing variant (the X forms use the FLDMX/FSTMX
   count encoding).  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  /* Only the IA variants may omit writeback.  */
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* Each D register counts as two words; the X forms add one extra
     word to mark the FLDMX/FSTMX encoding.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
9850
/* FLDMIAS/FSTMIAS wrapper.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
9856
/* FLDMDBS/FSTMDBS wrapper.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
9862
/* FLDMIAD/FSTMIAD wrapper.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
9868
/* FLDMDBD/FSTMDBD wrapper.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
9874
/* FLDMIAX/FSTMIAX wrapper (extended-precision count encoding).  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
9880
/* FLDMDBX/FSTMDBX wrapper (extended-precision count encoding).  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9886
9887 static void
9888 do_vfp_dp_rd_rm (void)
9889 {
9890 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9891 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9892 }
9893
9894 static void
9895 do_vfp_dp_rn_rd (void)
9896 {
9897 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
9898 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9899 }
9900
9901 static void
9902 do_vfp_dp_rd_rn (void)
9903 {
9904 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9905 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9906 }
9907
9908 static void
9909 do_vfp_dp_rd_rn_rm (void)
9910 {
9911 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9912 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9913 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
9914 }
9915
/* Double-precision single-register form: only Dd is encoded.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
9921
9922 static void
9923 do_vfp_dp_rm_rd_rn (void)
9924 {
9925 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
9926 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9927 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
9928 }
9929
9930 /* VFPv3 instructions. */
9931 static void
9932 do_vfp_sp_const (void)
9933 {
9934 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9935 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9936 inst.instruction |= (inst.operands[1].imm & 0x0f);
9937 }
9938
9939 static void
9940 do_vfp_dp_const (void)
9941 {
9942 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9943 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9944 inst.instruction |= (inst.operands[1].imm & 0x0f);
9945 }
9946
/* Encode the fraction-bits field of the VFPv3 fixed-point conversion
   instructions.  SRCSIZE is the fixed-point operand width (16 or 32);
   the field stored is SRCSIZE minus the supplied immediate, split into
   bit 5 (lsb) and bits 0-3.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
9970
/* Single-precision fixed-point conversion, 16-bit operand.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
9977
/* Double-precision fixed-point conversion, 16-bit operand.  */
static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
9984
/* Single-precision fixed-point conversion, 32-bit operand.  */
static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
9991
/* Double-precision fixed-point conversion, 32-bit operand.  */
static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
9998 \f
9999 /* FPA instructions. Also in a logical order. */
10000
10001 static void
10002 do_fpa_cmp (void)
10003 {
10004 inst.instruction |= inst.operands[0].reg << 16;
10005 inst.instruction |= inst.operands[1].reg;
10006 }
10007
/* FPA LFM/SFM: load/store 1-4 floating registers.  The "ea"/"fd" stack
   forms are emulated by fabricating an immediate offset, since the
   hardware only supports plain register-offset addressing.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* The register count is encoded in the CP_T_X/CP_T_Y bits.  */
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each register occupies 12 bytes in the FPA transfer format.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Descending, write-back forms become post-indexed.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
10046 \f
10047 /* iWMMXt instructions: strictly in alphabetical order. */
10048
/* iWMMXt TANDC/TORC/TEXTRC: the only legal destination is r15.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
10054
10055 static void
10056 do_iwmmxt_textrc (void)
10057 {
10058 inst.instruction |= inst.operands[0].reg << 12;
10059 inst.instruction |= inst.operands[1].imm;
10060 }
10061
10062 static void
10063 do_iwmmxt_textrm (void)
10064 {
10065 inst.instruction |= inst.operands[0].reg << 12;
10066 inst.instruction |= inst.operands[1].reg << 16;
10067 inst.instruction |= inst.operands[2].imm;
10068 }
10069
10070 static void
10071 do_iwmmxt_tinsr (void)
10072 {
10073 inst.instruction |= inst.operands[0].reg << 16;
10074 inst.instruction |= inst.operands[1].reg << 12;
10075 inst.instruction |= inst.operands[2].imm;
10076 }
10077
10078 static void
10079 do_iwmmxt_tmia (void)
10080 {
10081 inst.instruction |= inst.operands[0].reg << 5;
10082 inst.instruction |= inst.operands[1].reg;
10083 inst.instruction |= inst.operands[2].reg << 12;
10084 }
10085
10086 static void
10087 do_iwmmxt_waligni (void)
10088 {
10089 inst.instruction |= inst.operands[0].reg << 12;
10090 inst.instruction |= inst.operands[1].reg << 16;
10091 inst.instruction |= inst.operands[2].reg;
10092 inst.instruction |= inst.operands[3].imm << 20;
10093 }
10094
10095 static void
10096 do_iwmmxt_wmerge (void)
10097 {
10098 inst.instruction |= inst.operands[0].reg << 12;
10099 inst.instruction |= inst.operands[1].reg << 16;
10100 inst.instruction |= inst.operands[2].reg;
10101 inst.instruction |= inst.operands[3].imm << 21;
10102 }
10103
10104 static void
10105 do_iwmmxt_wmov (void)
10106 {
10107 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10108 inst.instruction |= inst.operands[0].reg << 12;
10109 inst.instruction |= inst.operands[1].reg << 16;
10110 inst.instruction |= inst.operands[1].reg;
10111 }
10112
/* iWMMXt byte/halfword load-store: the offset uses a scaled-by-2
   coprocessor relocation, which differs between ARM and Thumb mode.  */
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}
10124
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register transfers are unconditional (0xf condition).  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
10138
/* iWMMXt doubleword load-store.  iWMMXt2 additionally allows a
   register-offset form, which is encoded by hand here.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Rebuild the instruction for the iWMMXt2 register-offset
	 encoding: unconditional, with P/U/W bits set from the parsed
	 addressing form.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10161
10162 static void
10163 do_iwmmxt_wshufh (void)
10164 {
10165 inst.instruction |= inst.operands[0].reg << 12;
10166 inst.instruction |= inst.operands[1].reg << 16;
10167 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
10168 inst.instruction |= (inst.operands[2].imm & 0x0f);
10169 }
10170
10171 static void
10172 do_iwmmxt_wzero (void)
10173 {
10174 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10175 inst.instruction |= inst.operands[0].reg;
10176 inst.instruction |= inst.operands[0].reg << 12;
10177 inst.instruction |= inst.operands[0].reg << 16;
10178 }
10179
/* iWMMXt shift/rotate instructions that accept either a register or
   (iWMMXt2 only) a 5-bit immediate third operand.  A zero immediate is
   rewritten as the equivalent full-width rotate (or a WOR for the
   doubleword forms, which have no rotate).  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 select the operation width/kind.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Unconditional encoding with the immediate split across bit 8
       (high bit) and bits 0-3.  */
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10229 \f
10230 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10231 operations first, then control, shift, and load/store. */
10232
10233 /* Insns like "foo X,Y,Z". */
10234
10235 static void
10236 do_mav_triple (void)
10237 {
10238 inst.instruction |= inst.operands[0].reg << 16;
10239 inst.instruction |= inst.operands[1].reg;
10240 inst.instruction |= inst.operands[2].reg << 12;
10241 }
10242
10243 /* Insns like "foo W,X,Y,Z".
10244 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10245
10246 static void
10247 do_mav_quad (void)
10248 {
10249 inst.instruction |= inst.operands[0].reg << 5;
10250 inst.instruction |= inst.operands[1].reg << 12;
10251 inst.instruction |= inst.operands[2].reg << 16;
10252 inst.instruction |= inst.operands[3].reg;
10253 }
10254
10255 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10256 static void
10257 do_mav_dspsc (void)
10258 {
10259 inst.instruction |= inst.operands[1].reg << 12;
10260 }
10261
10262 /* Maverick shift immediate instructions.
10263 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10264 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10265
10266 static void
10267 do_mav_shift (void)
10268 {
10269 int imm = inst.operands[2].imm;
10270
10271 inst.instruction |= inst.operands[0].reg << 12;
10272 inst.instruction |= inst.operands[1].reg << 16;
10273
10274 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10275 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10276 Bit 4 should be 0. */
10277 imm = (imm & 0xf) | ((imm & 0x70) << 1);
10278
10279 inst.instruction |= imm;
10280 }
10281 \f
10282 /* XScale instructions. Also sorted arithmetic before move. */
10283
10284 /* Xscale multiply-accumulate (argument parse)
10285 MIAcc acc0,Rm,Rs
10286 MIAPHcc acc0,Rm,Rs
10287 MIAxycc acc0,Rm,Rs. */
10288
10289 static void
10290 do_xsc_mia (void)
10291 {
10292 inst.instruction |= inst.operands[1].reg;
10293 inst.instruction |= inst.operands[2].reg << 12;
10294 }
10295
10296 /* Xscale move-accumulator-register (argument parse)
10297
10298 MARcc acc0,RdLo,RdHi. */
10299
10300 static void
10301 do_xsc_mar (void)
10302 {
10303 inst.instruction |= inst.operands[1].reg << 12;
10304 inst.instruction |= inst.operands[2].reg << 16;
10305 }
10306
10307 /* Xscale move-register-accumulator (argument parse)
10308
10309 MRAcc RdLo,RdHi,acc0. */
10310
10311 static void
10312 do_xsc_mra (void)
10313 {
10314 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10315 inst.instruction |= inst.operands[0].reg << 12;
10316 inst.instruction |= inst.operands[1].reg << 16;
10317 }
10318 \f
10319 /* Encoding functions relevant only to Thumb. */
10320
10321 /* inst.operands[i] is a shifted-register operand; encode
10322 it into inst.instruction in the format used by Thumb32. */
10323
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  /* Thumb32 data-processing encodings only allow shift-by-immediate.  */
  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      /* Amounts up to 32 are representable, but LSL/ROR by 32 are not
	 (LSL max is 31; ROR #32 has no encoding).  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	/* A zero amount degenerates to LSL #0 regardless of kind.  */
	shift = SHIFT_LSL;
      else if (value == 32)
	/* LSR/ASR #32 are encoded with an amount field of 0.  */
	value = 0;

      /* Shift kind in bits 4-5; the 5-bit amount is split with bits
	 2-4 at insn bits 12-14 and bits 0-1 at insn bits 6-7.  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10355
10356
10357 /* inst.operands[i] was set up by parse_address. Encode it into a
10358 Thumb32 format load or store instruction. Reject forms that cannot
10359 be used with such instructions. If is_t is true, reject forms that
10360 cannot be used with a T instruction; if is_d is true, reject forms
10361 that cannot be used with a D instruction. If it is a store insn,
10362 reject PC in Rn. */
10363
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register always occupies bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #shift}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0..#3 is encodable, in bits 4-5.  */
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      /* No fixup needed for a pure register offset.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate pre-indexed (or plain offset) form.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      /* PC-relative addressing is only valid for loads.  */
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  /* Doubleword encodings keep P/W in the high half.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  /* Singleword encodings use the low-half P/U/W field.  */
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      /* Offset immediate is resolved by a later fixup.  */
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed form always writes back.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10435
10436 /* Table of Thumb instructions which exist in both 16- and 32-bit
10437 encodings (the latter only in post-V6T2 cores). The index is the
10438 value used in the insns table below. When there is more than one
10439 possible 16-bit encoding for the instruction, this table always
10440 holds variant (1).
10441 Also contains several pseudo-instructions used during relaxation. */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),                     \
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* First expansion: an enum of T_MNEM_* codes, one per table row.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: the 16-bit encodings, indexed by T_MNEM code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: the 32-bit encodings, indexed the same way.
   Bit 20 of the 32-bit form is the S (flag-setting) bit.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10541
10542 /* Thumb instruction encoders, in alphabetical order. */
10543
10544 /* ADDW or SUBW. */
10545
10546 static void
10547 do_t_add_sub_w (void)
10548 {
10549 int Rd, Rn;
10550
10551 Rd = inst.operands[0].reg;
10552 Rn = inst.operands[1].reg;
10553
10554 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10555 is the SP-{plus,minus}-immediate form of the instruction. */
10556 if (Rn == REG_SP)
10557 constraint (Rd == REG_PC, BAD_PC);
10558 else
10559 reject_bad_reg (Rd);
10560
10561 inst.instruction |= (Rn << 16) | (Rd << 8);
10562 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10563 }
10564
10565 /* Parse an add or subtract instruction. We get here with inst.instruction
10566 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
10567
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* The flag-setting forms are the narrow default outside an IT
	 block; the non-setting forms inside one.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate third operand.  */
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* The ALU_ABS group relocs keep their own reloc type;
		     everything else either gets a THUMB_ADD fixup (when
		     a 16-bit encoding was explicitly requested) or is
		     left relaxable.  */
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    {
		      if (inst.size_req == 2)
			inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		      else
			inst.relax = opcode;
		    }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* Emit a 32-bit encoding.  */
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Only SUBS PC, LR, #imm8 (exception return) exists.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register third operand, possibly shifted.  */
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Normalize so the non-destination source is Rn.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified syntax: only the pre-UAL 16-bit forms exist.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10785
/* Encode Thumb ADR (PC-relative address computation).  Chooses between
   the relaxable 16-bit form, the 32-bit ADD/SUB-PC form, and the forced
   16-bit form, depending on syntax mode and size requirement.  */
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* Taking the address of a defined Thumb function: add 1 so the
     computed address has the Thumb bit set.  */
  if (inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_number += 1;
}
10825
10826 /* Arithmetic instructions for which there is just one 16-bit
10827 instruction encoding, and it allows only two low registers.
10828 For maximal compatibility with ARM syntax, we allow three register
10829 operands even when Thumb-32 instructions are not available, as long
10830 as the first two are identical. For instance, both "sbc r0,r1" and
10831 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.
	     The narrow form is preferred when its flag-setting
	     behaviour matches the surrounding IT context.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit encoding requires the destination to equal the
	     first source.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
10914
10915 /* Similarly, but for instructions where the arithmetic operation is
10916 commutative, so we can allow either of them to be different from
10917 the destination operand in a 16-bit instruction. For instance, all
10918 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10919 accepted. */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutative operation: either source may coincide with
		 the destination in the 16-bit encoding.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
11015
11016 static void
11017 do_t_bfc (void)
11018 {
11019 unsigned Rd;
11020 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
11021 constraint (msb > 32, _("bit-field extends past end of register"));
11022 /* The instruction encoding stores the LSB and MSB,
11023 not the LSB and width. */
11024 Rd = inst.operands[0].reg;
11025 reject_bad_reg (Rd);
11026 inst.instruction |= Rd << 8;
11027 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
11028 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
11029 inst.instruction |= msb - 1;
11030 }
11031
11032 static void
11033 do_t_bfi (void)
11034 {
11035 int Rd, Rn;
11036 unsigned int msb;
11037
11038 Rd = inst.operands[0].reg;
11039 reject_bad_reg (Rd);
11040
11041 /* #0 in second position is alternative syntax for bfc, which is
11042 the same instruction but with REG_PC in the Rm field. */
11043 if (!inst.operands[1].isreg)
11044 Rn = REG_PC;
11045 else
11046 {
11047 Rn = inst.operands[1].reg;
11048 reject_bad_reg (Rn);
11049 }
11050
11051 msb = inst.operands[2].imm + inst.operands[3].imm;
11052 constraint (msb > 32, _("bit-field extends past end of register"));
11053 /* The instruction encoding stores the LSB and MSB,
11054 not the LSB and width. */
11055 inst.instruction |= Rd << 8;
11056 inst.instruction |= Rn << 16;
11057 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11058 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11059 inst.instruction |= msb - 1;
11060 }
11061
11062 static void
11063 do_t_bfx (void)
11064 {
11065 unsigned Rd, Rn;
11066
11067 Rd = inst.operands[0].reg;
11068 Rn = inst.operands[1].reg;
11069
11070 reject_bad_reg (Rd);
11071 reject_bad_reg (Rn);
11072
11073 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
11074 _("bit-field extends past end of register"));
11075 inst.instruction |= Rd << 8;
11076 inst.instruction |= Rn << 16;
11077 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11078 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11079 inst.instruction |= inst.operands[3].imm - 1;
11080 }
11081
11082 /* ARM V5 Thumb BLX (argument parse)
11083 BLX <target_addr> which is BLX(1)
11084 BLX <Rm> which is BLX(2)
11085 Unfortunately, there are two different opcodes for this mnemonic.
11086 So, the insns[].value is not used, and the code here zaps values
11087 into inst.instruction.
11088
11089 ??? How to take advantage of the additional two bits of displacement
11090 available in Thumb32 mode? Need new relocation? */
11091
11092 static void
11093 do_t_blx (void)
11094 {
11095 set_it_insn_type_last ();
11096
11097 if (inst.operands[0].isreg)
11098 {
11099 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
11100 /* We have a register, so this is BLX(2). */
11101 inst.instruction |= inst.operands[0].reg << 3;
11102 }
11103 else
11104 {
11105 /* No register. This must be BLX(1). */
11106 inst.instruction = 0xf000e800;
11107 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
11108 }
11109 }
11110
/* Encode a Thumb conditional or unconditional branch, choosing between
   the 16-bit and 32-bit forms and the matching PC-relative reloc.  */
static void
do_t_branch (void)
{
  int opcode;
  int cond;
  bfd_reloc_code_real_type reloc;

  cond = inst.cond;
  set_it_insn_type (IF_INSIDE_IT_LAST_INSN);

  if (in_it_block ())
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
    }
  else
    /* NOTE(review): cond is deliberately re-read here rather than kept
       from the assignment above — set_it_insn_type appears able to
       update inst.cond; confirm before simplifying.  */
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  /* Use the wide form when explicitly requested, or when the target is
     resolvable now and no narrow form was demanded.  */
  if (unified_syntax
      && (inst.size_req == 4
	  || (inst.size_req != 2
	      && (inst.operands[0].hasreloc
		  || inst.reloc.exp.X_op == O_constant))))
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
		      _("selected architecture does not support "
			"wide conditional branch instruction"));

	  gas_assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }
  inst.reloc.type = reloc;
  inst.reloc.pc_rel = 1;
}
11172
11173 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11174 between the two is the maximum immediate allowed - which is passed in
11175 RANGE. */
11176 static void
11177 do_t_bkpt_hlt1 (int range)
11178 {
11179 constraint (inst.cond != COND_ALWAYS,
11180 _("instruction is always unconditional"));
11181 if (inst.operands[0].present)
11182 {
11183 constraint (inst.operands[0].imm > range,
11184 _("immediate value out of range"));
11185 inst.instruction |= inst.operands[0].imm;
11186 }
11187
11188 set_it_insn_type (NEUTRAL_IT_INSN);
11189 }
11190
/* HLT accepts a 6-bit immediate (0..63).  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
11196
/* BKPT accepts an 8-bit immediate (0..255).  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11202
/* Encode Thumb BL/BLX with a 23-bit PC-relative displacement.  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
11230
/* Thumb BX (argument parse).  Rm occupies bits 3-6 of the 16-bit
   encoding.  Must be the last instruction of an IT block.  */
static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
11240
11241 static void
11242 do_t_bxj (void)
11243 {
11244 int Rm;
11245
11246 set_it_insn_type_last ();
11247 Rm = inst.operands[0].reg;
11248 reject_bad_reg (Rm);
11249 inst.instruction |= Rm << 16;
11250 }
11251
11252 static void
11253 do_t_clz (void)
11254 {
11255 unsigned Rd;
11256 unsigned Rm;
11257
11258 Rd = inst.operands[0].reg;
11259 Rm = inst.operands[1].reg;
11260
11261 reject_bad_reg (Rd);
11262 reject_bad_reg (Rm);
11263
11264 inst.instruction |= Rd << 8;
11265 inst.instruction |= Rm << 16;
11266 inst.instruction |= Rm;
11267 }
11268
/* Thumb CPS (argument parse).  Not permitted inside an IT block; the
   parsed immediate supplies the effect/flag bits directly.  */
static void
do_t_cps (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11275
/* Thumb CPSIE/CPSID (argument parse).  Chooses between the 32-bit
   Thumb-2 encoding (needed when a mode operand is present or .w was
   requested) and the 16-bit encoding.  */
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* 32-bit form: rebuild from the CPS.W base opcode, copying the
	 imod (enable/disable) bits out of the 16-bit template.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	/* 0x100 is the M bit indicating a mode change.  */
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11303
11304 /* THUMB CPY instruction (argument parse). */
11305
11306 static void
11307 do_t_cpy (void)
11308 {
11309 if (inst.size_req == 4)
11310 {
11311 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11312 inst.instruction |= inst.operands[0].reg << 8;
11313 inst.instruction |= inst.operands[1].reg;
11314 }
11315 else
11316 {
11317 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11318 inst.instruction |= (inst.operands[0].reg & 0x7);
11319 inst.instruction |= inst.operands[1].reg << 3;
11320 }
11321 }
11322
11323 static void
11324 do_t_cbz (void)
11325 {
11326 set_it_insn_type (OUTSIDE_IT_INSN);
11327 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11328 inst.instruction |= inst.operands[0].reg;
11329 inst.reloc.pc_rel = 1;
11330 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11331 }
11332
/* Thumb DBG hint (argument parse).  The 4-bit option goes straight
   into the low bits of the opcode.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
11338
11339 static void
11340 do_t_div (void)
11341 {
11342 unsigned Rd, Rn, Rm;
11343
11344 Rd = inst.operands[0].reg;
11345 Rn = (inst.operands[1].present
11346 ? inst.operands[1].reg : Rd);
11347 Rm = inst.operands[2].reg;
11348
11349 reject_bad_reg (Rd);
11350 reject_bad_reg (Rn);
11351 reject_bad_reg (Rm);
11352
11353 inst.instruction |= Rd << 8;
11354 inst.instruction |= Rn << 16;
11355 inst.instruction |= Rm;
11356 }
11357
11358 static void
11359 do_t_hint (void)
11360 {
11361 if (unified_syntax && inst.size_req == 4)
11362 inst.instruction = THUMB_OP32 (inst.instruction);
11363 else
11364 inst.instruction = THUMB_OP16 (inst.instruction);
11365 }
11366
/* Thumb IT instruction (argument parse).  Records the IT state in
   now_it and fixes up the mask field for negated base conditions.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the trailing 1-bit in the mask gives the
	 length of the IT block; the bits above it encode then/else
	 for the remaining slots and must be flipped when the base
	 condition's LSB is 0.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
11409
/* Helper function used for both push/pop and ldm/stm.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the 32-bit template distinguishes loads from stores.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error =  _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch, so must end any IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* The single register's number becomes the Rt field.  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11473
/* Thumb LDM/STM (argument parse).  Tries hard to select a 16-bit
   encoding (including rewriting to PUSH/POP or single-register
   LDR/STR) before falling back to the 32-bit form.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* 16-bit STMIA always writes back; 16-bit LDMIA writes
		 back exactly when the base is not in the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP-based forms become PUSH/POP or SP-relative LDR/STR.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
			            ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11601
11602 static void
11603 do_t_ldrex (void)
11604 {
11605 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11606 || inst.operands[1].postind || inst.operands[1].writeback
11607 || inst.operands[1].immisreg || inst.operands[1].shifted
11608 || inst.operands[1].negative,
11609 BAD_ADDR_MODE);
11610
11611 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11612
11613 inst.instruction |= inst.operands[0].reg << 12;
11614 inst.instruction |= inst.operands[1].reg << 16;
11615 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11616 }
11617
11618 static void
11619 do_t_ldrexd (void)
11620 {
11621 if (!inst.operands[1].present)
11622 {
11623 constraint (inst.operands[0].reg == REG_LR,
11624 _("r14 not allowed as first register "
11625 "when second register is omitted"));
11626 inst.operands[1].reg = inst.operands[0].reg + 1;
11627 }
11628 constraint (inst.operands[0].reg == inst.operands[1].reg,
11629 BAD_OVERLAP);
11630
11631 inst.instruction |= inst.operands[0].reg << 12;
11632 inst.instruction |= inst.operands[1].reg << 8;
11633 inst.instruction |= inst.operands[2].reg << 16;
11634 }
11635
/* Thumb single-register load/store (argument parse).  Covers LDR/STR
   and the byte/halfword/signed variants in both unified and divided
   syntax, selecting 16-bit or 32-bit encodings and handling literal
   pool loads, PC/SP-relative forms and instruction relaxation.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load that targets PC is a branch and must end any IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand: may be turned into a literal-pool load
	     or an equivalent MOV by move_or_literal_pool.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative forms use dedicated opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* Let relaxation widen to 32 bits if necessary.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Divided (pre-UAL) syntax from here on: 16-bit encodings only.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert immediate-offset templates to their register-offset
     counterparts.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11822
11823 static void
11824 do_t_ldstd (void)
11825 {
11826 if (!inst.operands[1].present)
11827 {
11828 inst.operands[1].reg = inst.operands[0].reg + 1;
11829 constraint (inst.operands[0].reg == REG_LR,
11830 _("r14 not allowed here"));
11831 constraint (inst.operands[0].reg == REG_R12,
11832 _("r12 not allowed here"));
11833 }
11834
11835 if (inst.operands[2].writeback
11836 && (inst.operands[0].reg == inst.operands[2].reg
11837 || inst.operands[1].reg == inst.operands[2].reg))
11838 as_warn (_("base register written back, and overlaps "
11839 "one of transfer registers"));
11840
11841 inst.instruction |= inst.operands[0].reg << 12;
11842 inst.instruction |= inst.operands[1].reg << 8;
11843 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11844 }
11845
/* Thumb-2 unprivileged load/store (LDRT/STRT etc., argument parse).  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
11852
11853 static void
11854 do_t_mla (void)
11855 {
11856 unsigned Rd, Rn, Rm, Ra;
11857
11858 Rd = inst.operands[0].reg;
11859 Rn = inst.operands[1].reg;
11860 Rm = inst.operands[2].reg;
11861 Ra = inst.operands[3].reg;
11862
11863 reject_bad_reg (Rd);
11864 reject_bad_reg (Rn);
11865 reject_bad_reg (Rm);
11866 reject_bad_reg (Ra);
11867
11868 inst.instruction |= Rd << 8;
11869 inst.instruction |= Rn << 16;
11870 inst.instruction |= Rm;
11871 inst.instruction |= Ra << 12;
11872 }
11873
11874 static void
11875 do_t_mlal (void)
11876 {
11877 unsigned RdLo, RdHi, Rn, Rm;
11878
11879 RdLo = inst.operands[0].reg;
11880 RdHi = inst.operands[1].reg;
11881 Rn = inst.operands[2].reg;
11882 Rm = inst.operands[3].reg;
11883
11884 reject_bad_reg (RdLo);
11885 reject_bad_reg (RdHi);
11886 reject_bad_reg (Rn);
11887 reject_bad_reg (Rm);
11888
11889 inst.instruction |= RdLo << 12;
11890 inst.instruction |= RdHi << 8;
11891 inst.instruction |= Rn << 16;
11892 inst.instruction |= Rm;
11893 }
11894
/* Thumb MOV/MOVS/CMP with a register or immediate operand (argument
   parse).  Chooses between 16-bit and 32-bit encodings, rewrites
   register-shifted moves as shift instructions and handles the
   special MOVS PC, LR case.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      /* MVN-family opcodes put the destination at bit 8; others at
	 bit 16.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				   "deprecated when r%u is the destination "
				   "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  if (inst.size_req == 2)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Divided (pre-UAL) syntax from here on.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
12192
/* Thumb-2 MOVW/MOVT (argument parse).  Bit 23 of the template
   distinguishes MOVT ("top") from MOVW.  */
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  top = (inst.instruction & 0x00800000) != 0;
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed in this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed in this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* No relocation needed: scatter the 16-bit immediate into the
	 imm4:i:imm3:imm8 fields of the encoding.  */
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
12225
/* Thumb MVN/MVNS/TST/CMN (argument parse).  Selects between 16-bit
   and 32-bit encodings in unified syntax.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN-family opcodes put the destination at bit 8; others at
	 bit 16.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12305
/* Thumb-2 MRS (argument parse).  Handles banked registers as well as
   the APSR/CPSR/SPSR forms; VFP system registers are dispatched to
   do_vfp_nsyn_mrs first.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register operand (encoded by the parser into a
	 register-like value with flag bits).  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
						   "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12353
/* Encode a Thumb-2 MSR instruction: move core register Rn (operand 1)
   into the special-purpose register selected by operand 0 (either a
   banked register or a PSR flags mask).  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* A VFP-syntax "msr" is handled elsewhere.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* Banked registers arrive as a reg operand, PSR masks as an imm.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698: The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      /* With DSP, _nzcvq and _g masks are allowed; without it only
	 APSR_nzcvq (PSR_f) is.  */
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
		"requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the flag bits into their instruction fields.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12400
/* Encode Thumb MUL/MULS, choosing between the 16-bit form (which
   requires the destination to overlap a source) and the 32-bit
   Thumb-2 form.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* Two-operand form: "mul Rd, Rm" means "mul Rd, Rd, Rm".  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* The 16-bit encoding needs low registers and Rd overlapping a
	 source; flag-setting also depends on IT-block context.  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      /* SP/PC are not valid in the 32-bit encoding.  */
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12463
12464 static void
12465 do_t_mull (void)
12466 {
12467 unsigned RdLo, RdHi, Rn, Rm;
12468
12469 RdLo = inst.operands[0].reg;
12470 RdHi = inst.operands[1].reg;
12471 Rn = inst.operands[2].reg;
12472 Rm = inst.operands[3].reg;
12473
12474 reject_bad_reg (RdLo);
12475 reject_bad_reg (RdHi);
12476 reject_bad_reg (Rn);
12477 reject_bad_reg (Rm);
12478
12479 inst.instruction |= RdLo << 12;
12480 inst.instruction |= RdHi << 8;
12481 inst.instruction |= Rn << 16;
12482 inst.instruction |= Rm;
12483
12484 if (RdLo == RdHi)
12485 as_tsktsk (_("rdhi and rdlo must be different"));
12486 }
12487
/* Encode Thumb NOP and hint instructions (YIELD, WFE, WFI, SEV...).
   Operand 0, when present, is the hint number.  */
static void
do_t_nop (void)
{
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  /* Hints above 15 only exist in the 32-bit encoding.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    /* Fall back to "mov r8, r8", the traditional Thumb-1 NOP.  */
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
12520
/* Encode Thumb NEG/NEGS (rsb from zero), choosing the 16-bit form when
   both registers are low and the IT context allows it.  */
static void
do_t_neg (void)
{
  if (unified_syntax)
    {
      bfd_boolean narrow;

      /* Inside an IT block the non-flag-setting form narrows;
	 outside, the flag-setting form does.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
12561
/* Encode Thumb-2 ORN (OR NOT): Rd = Rn | ~shifter_operand.  Only a
   32-bit encoding exists.  */
static void
do_t_orn (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  /* Two-operand form: "orn Rd, op2" means "orn Rd, Rd, op2".  */
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      /* Immediate form: flip to the immediate opcode space and let the
	 fixup encode the modified-immediate value.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      /* Register-shifted-register operands are not available here.  */
      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
12595
/* Encode Thumb-2 PKHBT (pack halfword bottom-top).  Operand 3, when
   present, is an LSL shift amount applied to Rm.  */
static void
do_t_pkhbt (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      /* The 5-bit shift amount is split: imm3 in bits 14:12,
	 imm2 in bits 7:6.  */
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}
12621
12622 static void
12623 do_t_pkhtb (void)
12624 {
12625 if (!inst.operands[3].present)
12626 {
12627 unsigned Rtmp;
12628
12629 inst.instruction &= ~0x00000020;
12630
12631 /* PR 10168. Swap the Rm and Rn registers. */
12632 Rtmp = inst.operands[1].reg;
12633 inst.operands[1].reg = inst.operands[2].reg;
12634 inst.operands[2].reg = Rtmp;
12635 }
12636 do_t_pkhbt ();
12637 }
12638
/* Encode a Thumb-2 preload (PLD/PLDW/PLI).  Operand 0 is the address.  */
static void
do_t_pld (void)
{
  /* In the register-offset form the index register may not be SP/PC.  */
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12647
/* Encode Thumb PUSH/POP, preferring the 16-bit encodings: plain
   low-register list, or low registers plus LR (push) / PC (pop);
   otherwise fall back to 32-bit LDM/STM on SP.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    /* Only low registers: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus exactly LR (push) or PC (pop): 16-bit
	 encoding with the PC/LR bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* General register list: 32-bit LDMIA/STMDB with SP writeback.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12680
/* Encode Thumb-2 RBIT (reverse bits).  */
static void
do_t_rbit (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  /* The T32 encoding requires Rm in both the Rn (19:16) and Rm (3:0)
     fields; writing it twice is deliberate.  */
  inst.instruction |= Rm << 16;
  inst.instruction |= Rm;
}
12696
/* Encode Thumb REV/REV16/REVSH (byte-reverse), 16-bit when both
   registers are low, 32-bit otherwise.  */
static void
do_t_rev (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      /* The T32 encoding duplicates Rm into both source fields.  */
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
12725
12726 static void
12727 do_t_rrx (void)
12728 {
12729 unsigned Rd, Rm;
12730
12731 Rd = inst.operands[0].reg;
12732 Rm = inst.operands[1].reg;
12733
12734 reject_bad_reg (Rd);
12735 reject_bad_reg (Rm);
12736
12737 inst.instruction |= Rd << 8;
12738 inst.instruction |= Rm;
12739 }
12740
/* Encode Thumb RSB (reverse subtract).  "rsbs Rd, Rs, #0" can narrow
   to the 16-bit NEG encoding; everything else is 32-bit.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit; narrowing interacts with
	 the IT block as usual.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Only an immediate of exactly zero can narrow (to NEG).  */
      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* 32-bit immediate form; the fixup encodes the value.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12795
12796 static void
12797 do_t_setend (void)
12798 {
12799 if (warn_on_deprecated
12800 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12801 as_tsktsk (_("setend use is deprecated for ARMv8"));
12802
12803 set_it_insn_type (OUTSIDE_IT_INSN);
12804 if (inst.operands[0].imm)
12805 inst.instruction |= 0x8;
12806 }
12807
/* Encode Thumb shift instructions (ASR, LSL, LSR, ROR and their
   flag-setting forms), selecting between the 16-bit register/immediate
   encodings and the 32-bit Thumb-2 encodings.  Immediate shifts in the
   wide form are emitted as MOV with a shifted operand.  */
static void
do_t_shift (void)
{
  /* Two-operand form: "lsl Rd, Rm" means "lsl Rd, Rd, Rm".  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Decide whether the 16-bit encoding is possible: IT context,
	 low registers, no immediate ROR, and for the register form the
	 destination must equal the first source.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 32-bit register-shift form.  */
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* 32-bit immediate form: emitted as MOV(S) Rd, Rm, shift #n.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 16-bit register-shift form.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* 16-bit immediate form; the fixup fills in the amount.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Non-unified (pre-UAL) syntax: 16-bit encodings only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12955
12956 static void
12957 do_t_simd (void)
12958 {
12959 unsigned Rd, Rn, Rm;
12960
12961 Rd = inst.operands[0].reg;
12962 Rn = inst.operands[1].reg;
12963 Rm = inst.operands[2].reg;
12964
12965 reject_bad_reg (Rd);
12966 reject_bad_reg (Rn);
12967 reject_bad_reg (Rm);
12968
12969 inst.instruction |= Rd << 8;
12970 inst.instruction |= Rn << 16;
12971 inst.instruction |= Rm;
12972 }
12973
/* As do_t_simd, but for mnemonics whose assembly operand order is the
   reverse of the encoding order: operand 1 goes in the Rm field and
   operand 2 in the Rn field.  */
static void
do_t_simd2 (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  /* Note the deliberate swap relative to do_t_simd.  */
  Rm = inst.operands[1].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
12991
/* Encode Thumb-2 SMC (secure monitor call) with a 16-bit immediate.  */
static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  inst.reloc.type = BFD_RELOC_UNUSED;
  /* The 16-bit immediate is split across three fields.  */
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_it_insn_type_last ();
}
13007
13008 static void
13009 do_t_hvc (void)
13010 {
13011 unsigned int value = inst.reloc.exp.X_add_number;
13012
13013 inst.reloc.type = BFD_RELOC_UNUSED;
13014 inst.instruction |= (value & 0x0fff);
13015 inst.instruction |= (value & 0xf000) << 4;
13016 }
13017
/* Common encoder for Thumb-2 SSAT and USAT.  BIAS is subtracted from
   the saturation-position immediate (1 for SSAT, 0 for USAT, matching
   the architectural encodings).  Operand 3 is an optional LSL/ASR
   shift on the source register.  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  /* sh bit distinguishes ASR from LSL.  */
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* 5-bit shift split into imm3 (14:12) and imm2 (7:6).  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
13055
/* SSAT: the saturation position is encoded as (imm - 1), hence bias 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
13061
13062 static void
13063 do_t_ssat16 (void)
13064 {
13065 unsigned Rd, Rn;
13066
13067 Rd = inst.operands[0].reg;
13068 Rn = inst.operands[2].reg;
13069
13070 reject_bad_reg (Rd);
13071 reject_bad_reg (Rn);
13072
13073 inst.instruction |= Rd << 8;
13074 inst.instruction |= inst.operands[1].imm - 1;
13075 inst.instruction |= Rn << 16;
13076 }
13077
/* Encode Thumb-2 STREX: store-exclusive of Rt (op 1) to [Rn + imm]
   (op 2), status into Rd (op 0).  */
static void
do_t_strex (void)
{
  /* Only a plain immediate-offset address is legal.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset is an unsigned byte scaled by 4; let the fixup encode it.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
13094
/* Encode Thumb-2 STREXD: store-exclusive of the pair Rt (op 1),
   Rt2 (op 2) to [Rn] (op 3), status into Rd (op 0).  */
static void
do_t_strexd (void)
{
  /* If Rt2 was omitted, default it to Rt + 1.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  /* The status register may not overlap either data register or the
     base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
13111
13112 static void
13113 do_t_sxtah (void)
13114 {
13115 unsigned Rd, Rn, Rm;
13116
13117 Rd = inst.operands[0].reg;
13118 Rn = inst.operands[1].reg;
13119 Rm = inst.operands[2].reg;
13120
13121 reject_bad_reg (Rd);
13122 reject_bad_reg (Rn);
13123 reject_bad_reg (Rm);
13124
13125 inst.instruction |= Rd << 8;
13126 inst.instruction |= Rn << 16;
13127 inst.instruction |= Rm;
13128 inst.instruction |= inst.operands[3].imm << 4;
13129 }
13130
/* Encode Thumb SXTH/SXTB/UXTH/UXTB: 16-bit when both registers are low
   and there is no rotation, 32-bit Thumb-2 otherwise.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      /* 16-bit encoding; only possible with no rotation.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Rotation goes in bits 5:4.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
13166
/* Encode Thumb SWI/SVC; the comment field is filled in by the fixup.  */
static void
do_t_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
13172
/* Encode Thumb-2 TBB/TBH (table branch byte/halfword).  The address
   operand must be a register-indexed form: [Rn, Rm] or [Rn, Rm, lsl #1].  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  /* Bit 4 distinguishes TBH (halfword table) from TBB.  */
  half = (inst.instruction & 0x10) != 0;
  /* Table branches must be the last instruction of an IT block.  */
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* Rn == SP only became valid with ARMv8.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only TBH takes (and requires) the lsl #1 index shift.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13195
/* Encode Thumb UDF (permanently undefined).  The immediate defaults to
   zero; values above 255 force the 32-bit encoding.  */
static void
do_t_udf (void)
{
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      /* imm16 is split: high 4 bits into 19:16, low 12 into 11:0.  */
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  set_it_insn_type (NEUTRAL_IT_INSN);
}
13218
13219
/* USAT: the saturation position is encoded directly, hence bias 0.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13225
13226 static void
13227 do_t_usat16 (void)
13228 {
13229 unsigned Rd, Rn;
13230
13231 Rd = inst.operands[0].reg;
13232 Rn = inst.operands[2].reg;
13233
13234 reject_bad_reg (Rd);
13235 reject_bad_reg (Rn);
13236
13237 inst.instruction |= Rd << 8;
13238 inst.instruction |= inst.operands[1].imm;
13239 inst.instruction |= Rn << 16;
13240 }
13241
13242 /* Neon instruction encoder helpers. */
13243
13244 /* Encodings for the different types for various Neon opcodes. */
13245
13246 /* An "invalid" code for the following tables. */
/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of the Neon opcode table: the base opcode bits for the
   integer, float-or-polynomial, and scalar-or-immediate variants of a
   mnemonic (N_INV where a variant does not exist).  */
struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};
13255
/* Map overloaded Neon opcodes to their respective encodings.  Each X()
   entry supplies the three variant encodings described by struct
   neon_tab_entry.  The table is expanded twice below: once to build the
   N_MNEM_* enumeration and once to build neon_enc_tab[], so the two
   stay in lockstep by construction.  */
#define NEON_ENC_TAB							\
  X(vabd,	0x0000700, 0x1200d00, N_INV),			\
  X(vmax,	0x0000600, 0x0000f00, N_INV),			\
  X(vmin,	0x0000610, 0x0200f00, N_INV),			\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),			\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),			\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),			\
  X(vadd,	0x0000800, 0x0000d00, N_INV),			\
  X(vsub,	0x1000800, 0x0200d00, N_INV),			\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),		\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),		\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),		\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */		\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),		\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),		\
  X(vfma,	N_INV, 0x0000c10, N_INV),			\
  X(vfms,	N_INV, 0x0200c10, N_INV),			\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),		\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),		\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),		\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),		\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),		\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),		\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),		\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),		\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),		\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),		\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),		\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),		\
  X(vshl,	0x0000400, N_INV,     0x0800510),		\
  X(vqshl,	0x0000410, N_INV,     0x0800710),		\
  X(vand,	0x0000110, N_INV,     0x0800030),		\
  X(vbic,	0x0100110, N_INV,     0x0800030),		\
  X(veor,	0x1000110, N_INV,     N_INV),			\
  X(vorn,	0x0300110, N_INV,     0x0800010),		\
  X(vorr,	0x0200110, N_INV,     0x0800010),		\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),		\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),			\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),		\
  X(vst2,	0x0000100, 0x0800100, N_INV),			\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),		\
  X(vst3,	0x0000200, 0x0800200, N_INV),			\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),		\
  X(vst4,	0x0000300, 0x0800300, N_INV),			\
  X(vmovn,	0x1b20200, N_INV,     N_INV),			\
  X(vtrn,	0x1b20080, N_INV,     N_INV),			\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),			\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),			\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),			\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),			\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),			\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),			\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),			\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),			\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),			\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),			\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),			\
  X(vseleq,	0xe000a00, N_INV,     N_INV),			\
  X(vselvs,	0xe100a00, N_INV,     N_INV),			\
  X(vselge,	0xe200a00, N_INV,     N_INV),			\
  X(vselgt,	0xe300a00, N_INV,     N_INV),			\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),			\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),			\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),			\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),			\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),			\
  X(aes,	0x3b00300, N_INV,     N_INV),			\
  X(sha3op,	0x2000c00, N_INV,     N_INV),			\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),			\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)

/* First expansion: an enumeration of Neon mnemonics, N_MNEM_<opc>.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* Second expansion: the encoding table itself, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
13347
/* Do not use these macros; instead, use NEON_ENCODE defined below.
   Each one looks up the appropriate variant column of neon_enc_tab for
   the mnemonic index held in the low 28 bits of X; the SINGLE/DOUBLE/
   FPV8 variants additionally preserve high condition/prefix bits of X.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction with the encoding for the requested variant
   of its mnemonic, and mark the instruction as Neon so that vector-type
   suffixes are accepted (see check_neon_suffixes).  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)
13372
/* Reject a vector-type suffix (e.g. ".i32") on an instruction that was
   never marked Neon via NEON_ENCODE.  Expands inside an encoder
   function, hence the bare "return".  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
13383
/* Define shapes for instruction operands.  The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(4, (D, D, S, I), DOUBLE),		\
  X(4, (Q, Q, S, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
/* Half float shape supported so far.  */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED),		\
  X (3, (D, H, H), MIXED),		\
  X (3, (D, H, S), MIXED)
13458
/* Helpers that paste N operand letters into a shape name NS_<letters>.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

/* Expand each shape-table entry to its NS_* enumerator.  */
#define X(N, L, C) S##N L

/* Enumeration of all supported operand shapes.  NS_NULL terminates the
   variable-length shape lists passed to neon_select_shape.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
13475
/* Classification of a shape: which register class (or mix of classes) its
   operands use.  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

/* Expand each shape-table entry to its classification.  */
#define X(N, L, C) SC_##C

/* Classification of each shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
13493
/* One element (operand kind) of a shape; mirrors the mnemonic letters used
   in NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  Indexed by enum neon_shape_el; zero for
   elements with no fixed register width (immediates and register lists).  */
static unsigned neon_shape_el_size[] =
{
  16,
  32,
  64,
  128,
  0,
  32,
  32,
  0
};

/* Per-shape operand count and the kind of each operand.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

/* Helpers that expand operand letters to SE_* initializer lists.  */
#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Shape details, indexed by enum neon_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13540
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64  = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  /* The modifier bits below deliberately alias the low "concrete type"
     bits: they are only interpreted when N_EQK is also set.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Frequently-used combinations of element types.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13601
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn; the first one whose every element
     matches the corresponding parsed operand wins.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  /* The shape needs more operands than were actually parsed.  */
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      /* Unlike SE_H, an untyped (NT_invtype) operand also matches a
		 single-precision element.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists are not checked further here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  /* Only report an error if the caller supplied at least one shape.  */
  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13744
13745 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13746 means the Q bit should be set). */
13747
13748 static int
13749 neon_quad (enum neon_shape shape)
13750 {
13751 return neon_shape_class[shape] == SC_QUAD;
13752 }
13753
13754 static void
13755 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13756 unsigned *g_size)
13757 {
13758 /* Allow modification to be made to types which are constrained to be
13759 based on the key element, based on bits set alongside N_EQK. */
13760 if ((typebits & N_EQK) != 0)
13761 {
13762 if ((typebits & N_HLF) != 0)
13763 *g_size /= 2;
13764 else if ((typebits & N_DBL) != 0)
13765 *g_size *= 2;
13766 if ((typebits & N_SGN) != 0)
13767 *g_type = NT_signed;
13768 else if ((typebits & N_UNS) != 0)
13769 *g_type = NT_unsigned;
13770 else if ((typebits & N_INT) != 0)
13771 *g_type = NT_integer;
13772 else if ((typebits & N_FLT) != 0)
13773 *g_type = NT_float;
13774 else if ((typebits & N_SIZ) != 0)
13775 *g_type = NT_untyped;
13776 }
13777 }
13778
13779 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13780 operand type, i.e. the single type specified in a Neon instruction when it
13781 is the only one given. */
13782
13783 static struct neon_type_el
13784 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13785 {
13786 struct neon_type_el dest = *key;
13787
13788 gas_assert ((thisarg & N_EQK) != 0);
13789
13790 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13791
13792 return dest;
13793 }
13794
13795 /* Convert Neon type and size into compact bitmask representation. */
13796
13797 static enum neon_type_mask
13798 type_chk_of_el_type (enum neon_el_type type, unsigned size)
13799 {
13800 switch (type)
13801 {
13802 case NT_untyped:
13803 switch (size)
13804 {
13805 case 8: return N_8;
13806 case 16: return N_16;
13807 case 32: return N_32;
13808 case 64: return N_64;
13809 default: ;
13810 }
13811 break;
13812
13813 case NT_integer:
13814 switch (size)
13815 {
13816 case 8: return N_I8;
13817 case 16: return N_I16;
13818 case 32: return N_I32;
13819 case 64: return N_I64;
13820 default: ;
13821 }
13822 break;
13823
13824 case NT_float:
13825 switch (size)
13826 {
13827 case 16: return N_F16;
13828 case 32: return N_F32;
13829 case 64: return N_F64;
13830 default: ;
13831 }
13832 break;
13833
13834 case NT_poly:
13835 switch (size)
13836 {
13837 case 8: return N_P8;
13838 case 16: return N_P16;
13839 case 64: return N_P64;
13840 default: ;
13841 }
13842 break;
13843
13844 case NT_signed:
13845 switch (size)
13846 {
13847 case 8: return N_S8;
13848 case 16: return N_S16;
13849 case 32: return N_S32;
13850 case 64: return N_S64;
13851 default: ;
13852 }
13853 break;
13854
13855 case NT_unsigned:
13856 switch (size)
13857 {
13858 case 8: return N_U8;
13859 case 16: return N_U16;
13860 case 32: return N_U32;
13861 case 64: return N_U64;
13862 default: ;
13863 }
13864 break;
13865
13866 default: ;
13867 }
13868
13869 return N_UTYP;
13870 }
13871
13872 /* Convert compact Neon bitmask type representation to a type and size. Only
13873 handles the case where a single bit is set in the mask. */
13874
13875 static int
13876 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13877 enum neon_type_mask mask)
13878 {
13879 if ((mask & N_EQK) != 0)
13880 return FAIL;
13881
13882 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13883 *size = 8;
13884 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13885 *size = 16;
13886 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13887 *size = 32;
13888 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13889 *size = 64;
13890 else
13891 return FAIL;
13892
13893 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13894 *type = NT_signed;
13895 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13896 *type = NT_unsigned;
13897 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13898 *type = NT_integer;
13899 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13900 *type = NT_untyped;
13901 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13902 *type = NT_poly;
13903 else if ((mask & (N_F_ALL)) != 0)
13904 *type = NT_float;
13905 else
13906 return FAIL;
13907
13908 return SUCCESS;
13909 }
13910
13911 /* Modify a bitmask of allowed types. This is only needed for type
13912 relaxation. */
13913
13914 static unsigned
13915 modify_types_allowed (unsigned allowed, unsigned mods)
13916 {
13917 unsigned size;
13918 enum neon_el_type type;
13919 unsigned destmask;
13920 int i;
13921
13922 destmask = 0;
13923
13924 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13925 {
13926 if (el_type_of_type_chk (&type, &size,
13927 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13928 {
13929 neon_modify_type_size (mods, &type, &size);
13930 destmask |= type_chk_of_el_type (type, size);
13931 }
13932 }
13933
13934 return destmask;
13935 }
13936
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Returned on any failure path; callers detect NT_invtype.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type given after the mnemonic and types given after individual
     operands are mutually exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes over the operands: pass 0 records the key element's type and
     size (and its allowed-type mask), pass 1 validates every operand against
     the key and the per-operand constraints.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* The operand is tied to the key: apply the modifiers to
		     the key type and require an exact match.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
14146
14147 /* Neon-style VFP instruction forwarding. */
14148
14149 /* Thumb VFP instructions have 0xE in the condition field. */
14150
14151 static void
14152 do_vfp_cond_or_thumb (void)
14153 {
14154 inst.is_neon = 1;
14155
14156 if (thumb_mode)
14157 inst.instruction |= 0xe0000000;
14158 else
14159 inst.instruction |= inst.cond << 28;
14160 }
14161
/* Look up and encode a simple mnemonic, for use as a helper function for the
   Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
   etc.  It is assumed that operand parsing has already been done, and that the
   operands are in the form expected by the given opcode (this isn't necessarily
   the same as the form in which they were parsed, hence some massaging must
   take place before this function is called).
   Checks current arch version against that in the looked-up opcode.  */

static void
do_vfp_nsyn_opcode (const char *opname)
{
  const struct asm_opcode *opcode;

  /* OPNAME must name an existing entry in the opcode hash table; anything
     else is an internal error.  */
  opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);

  if (!opcode)
    abort ();

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
		thumb_mode ? *opcode->tvariant : *opcode->avariant),
	      _(BAD_FPU));

  inst.is_neon = 1;

  if (thumb_mode)
    {
      inst.instruction = opcode->tvalue;
      opcode->tencode ();
    }
  else
    {
      /* ARM encodings carry the condition in the top nibble.  */
      inst.instruction = (inst.cond << 28) | opcode->avalue;
      opcode->aencode ();
    }
}
14197
14198 static void
14199 do_vfp_nsyn_add_sub (enum neon_shape rs)
14200 {
14201 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14202
14203 if (rs == NS_FFF || rs == NS_HHH)
14204 {
14205 if (is_add)
14206 do_vfp_nsyn_opcode ("fadds");
14207 else
14208 do_vfp_nsyn_opcode ("fsubs");
14209
14210 /* ARMv8.2 fp16 instruction. */
14211 if (rs == NS_HHH)
14212 do_scalar_fp16_v82_encode ();
14213 }
14214 else
14215 {
14216 if (is_add)
14217 do_vfp_nsyn_opcode ("faddd");
14218 else
14219 do_vfp_nsyn_opcode ("fsubd");
14220 }
14221 }
14222
14223 /* Check operand types to see if this is a VFP instruction, and if so call
14224 PFN (). */
14225
14226 static int
14227 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14228 {
14229 enum neon_shape rs;
14230 struct neon_type_el et;
14231
14232 switch (args)
14233 {
14234 case 2:
14235 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14236 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14237 break;
14238
14239 case 3:
14240 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14241 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14242 N_F_ALL | N_KEY | N_VFP);
14243 break;
14244
14245 default:
14246 abort ();
14247 }
14248
14249 if (et.type != NT_invtype)
14250 {
14251 pfn (rs);
14252 return SUCCESS;
14253 }
14254
14255 inst.error = NULL;
14256 return FAIL;
14257 }
14258
14259 static void
14260 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14261 {
14262 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14263
14264 if (rs == NS_FFF || rs == NS_HHH)
14265 {
14266 if (is_mla)
14267 do_vfp_nsyn_opcode ("fmacs");
14268 else
14269 do_vfp_nsyn_opcode ("fnmacs");
14270
14271 /* ARMv8.2 fp16 instruction. */
14272 if (rs == NS_HHH)
14273 do_scalar_fp16_v82_encode ();
14274 }
14275 else
14276 {
14277 if (is_mla)
14278 do_vfp_nsyn_opcode ("fmacd");
14279 else
14280 do_vfp_nsyn_opcode ("fnmacd");
14281 }
14282 }
14283
14284 static void
14285 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14286 {
14287 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14288
14289 if (rs == NS_FFF || rs == NS_HHH)
14290 {
14291 if (is_fma)
14292 do_vfp_nsyn_opcode ("ffmas");
14293 else
14294 do_vfp_nsyn_opcode ("ffnmas");
14295
14296 /* ARMv8.2 fp16 instruction. */
14297 if (rs == NS_HHH)
14298 do_scalar_fp16_v82_encode ();
14299 }
14300 else
14301 {
14302 if (is_fma)
14303 do_vfp_nsyn_opcode ("ffmad");
14304 else
14305 do_vfp_nsyn_opcode ("ffnmad");
14306 }
14307 }
14308
14309 static void
14310 do_vfp_nsyn_mul (enum neon_shape rs)
14311 {
14312 if (rs == NS_FFF || rs == NS_HHH)
14313 {
14314 do_vfp_nsyn_opcode ("fmuls");
14315
14316 /* ARMv8.2 fp16 instruction. */
14317 if (rs == NS_HHH)
14318 do_scalar_fp16_v82_encode ();
14319 }
14320 else
14321 do_vfp_nsyn_opcode ("fmuld");
14322 }
14323
14324 static void
14325 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14326 {
14327 int is_neg = (inst.instruction & 0x80) != 0;
14328 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14329
14330 if (rs == NS_FF || rs == NS_HH)
14331 {
14332 if (is_neg)
14333 do_vfp_nsyn_opcode ("fnegs");
14334 else
14335 do_vfp_nsyn_opcode ("fabss");
14336
14337 /* ARMv8.2 fp16 instruction. */
14338 if (rs == NS_HH)
14339 do_scalar_fp16_v82_encode ();
14340 }
14341 else
14342 {
14343 if (is_neg)
14344 do_vfp_nsyn_opcode ("fnegd");
14345 else
14346 do_vfp_nsyn_opcode ("fabsd");
14347 }
14348 }
14349
14350 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14351 insns belong to Neon, and are handled elsewhere. */
14352
14353 static void
14354 do_vfp_nsyn_ldm_stm (int is_dbmode)
14355 {
14356 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14357 if (is_ldm)
14358 {
14359 if (is_dbmode)
14360 do_vfp_nsyn_opcode ("fldmdbs");
14361 else
14362 do_vfp_nsyn_opcode ("fldmias");
14363 }
14364 else
14365 {
14366 if (is_dbmode)
14367 do_vfp_nsyn_opcode ("fstmdbs");
14368 else
14369 do_vfp_nsyn_opcode ("fstmias");
14370 }
14371 }
14372
14373 static void
14374 do_vfp_nsyn_sqrt (void)
14375 {
14376 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14377 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14378
14379 if (rs == NS_FF || rs == NS_HH)
14380 {
14381 do_vfp_nsyn_opcode ("fsqrts");
14382
14383 /* ARMv8.2 fp16 instruction. */
14384 if (rs == NS_HH)
14385 do_scalar_fp16_v82_encode ();
14386 }
14387 else
14388 do_vfp_nsyn_opcode ("fsqrtd");
14389 }
14390
14391 static void
14392 do_vfp_nsyn_div (void)
14393 {
14394 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14395 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14396 N_F_ALL | N_KEY | N_VFP);
14397
14398 if (rs == NS_FFF || rs == NS_HHH)
14399 {
14400 do_vfp_nsyn_opcode ("fdivs");
14401
14402 /* ARMv8.2 fp16 instruction. */
14403 if (rs == NS_HHH)
14404 do_scalar_fp16_v82_encode ();
14405 }
14406 else
14407 do_vfp_nsyn_opcode ("fdivd");
14408 }
14409
/* Encode a Neon-syntax VFP negated multiply, using the direct VFP dyadic
   encoders rather than a table lookup.  */
static void
do_vfp_nsyn_nmul (void)
{
  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
		   N_F_ALL | N_KEY | N_VFP);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      NEON_ENCODE (SINGLE, inst);
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      NEON_ENCODE (DOUBLE, inst);
      do_vfp_dp_rd_rn_rm ();
    }
  do_vfp_cond_or_thumb ();

}
14434
/* Encode a Neon-syntax VFP compare (vcmp/vcmpe), including the
   compare-with-zero forms.  */
static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      /* Register-register compare.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Second operand is an immediate: this is a compare with zero.
	 Rewrite vcmp/vcmpe to their "z" counterparts before encoding.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
14489
14490 static void
14491 nsyn_insert_sp (void)
14492 {
14493 inst.operands[1] = inst.operands[0];
14494 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14495 inst.operands[0].reg = REG_SP;
14496 inst.operands[0].isreg = 1;
14497 inst.operands[0].writeback = 1;
14498 inst.operands[0].present = 1;
14499 }
14500
14501 static void
14502 do_vfp_nsyn_push (void)
14503 {
14504 nsyn_insert_sp ();
14505
14506 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14507 _("register list must contain at least 1 and at most 16 "
14508 "registers"));
14509
14510 if (inst.operands[1].issingle)
14511 do_vfp_nsyn_opcode ("fstmdbs");
14512 else
14513 do_vfp_nsyn_opcode ("fstmdbd");
14514 }
14515
14516 static void
14517 do_vfp_nsyn_pop (void)
14518 {
14519 nsyn_insert_sp ();
14520
14521 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14522 _("register list must contain at least 1 and at most 16 "
14523 "registers"));
14524
14525 if (inst.operands[1].issingle)
14526 do_vfp_nsyn_opcode ("fldmias");
14527 else
14528 do_vfp_nsyn_opcode ("fldmiad");
14529 }
14530
14531 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14532 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14533
14534 static void
14535 neon_dp_fixup (struct arm_it* insn)
14536 {
14537 unsigned int i = insn->instruction;
14538 insn->is_neon = 1;
14539
14540 if (thumb_mode)
14541 {
14542 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14543 if (i & (1 << 24))
14544 i |= 1 << 28;
14545
14546 i &= ~(1 << 24);
14547
14548 i |= 0xef000000;
14549 }
14550 else
14551 i |= 0xf2000000;
14552
14553 insn->instruction = i;
14554 }
14555
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs gives the 1-based index of the lowest set bit; sizes are powers
     of two so that is the only set bit.  */
  int lowest = ffs (x);

  return (unsigned) (lowest - 4);
}
14564
/* Extract the low 4 bits of register number R (the Vd/Vn/Vm field) and the
   fifth bit (the D/N/M high-register bit) respectively.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
14567
14568 /* Encode insns with bit pattern:
14569
14570 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14571 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14572
14573 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14574 different meaning for some instruction. */
14575
static void
neon_three_same (int isquad, int ubit, int size)
{
  /* Pack the three register operands (each split into a low-4-bit field and
     a high bit), the Q bit, the U bit, and optionally the size field, then
     apply the ARM/Thumb fixup.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
14592
14593 /* Encode instructions of the form:
14594
14595 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14596 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14597
14598 Don't write size if SIZE == -1. */
14599
static void
neon_two_same (int qbit, int ubit, int size)
{
  /* Two-register form: pack Rd and Rm, the Q and U bits, and optionally the
     size field (at bits 19:18 here, unlike bits 21:20 in the three-register
     form), then apply the ARM/Thumb fixup.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  neon_dp_fixup (&inst);
}
14615
14616 /* Neon instruction encoders, in approximate order of appearance. */
14617
14618 static void
14619 do_neon_dyadic_i_su (void)
14620 {
14621 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14622 struct neon_type_el et = neon_check_type (3, rs,
14623 N_EQK, N_EQK, N_SU_32 | N_KEY);
14624 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14625 }
14626
14627 static void
14628 do_neon_dyadic_i64_su (void)
14629 {
14630 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14631 struct neon_type_el et = neon_check_type (3, rs,
14632 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14633 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14634 }
14635
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  /* Encode an immediate-shift instruction: Rd and Rm fields, the Q bit,
     the shift immediate, and the size encoding derived from the element
     size.  The U bit is written only when WRITE_UBIT is set, since some
     variants fix it in the opcode bitmask.  */
  unsigned size = et.size >> 3;	/* Element size in bytes (1, 2, 4 or 8).  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* Split the size across bit 7 and bits 21:19.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
14654
static void
do_neon_shl_imm (void)
{
  /* VSHL: the immediate form shifts by a constant; the register form
     shifts by per-element counts held in a register.  */
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      /* A left-shift amount must be strictly less than the element width.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed).  Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14690
static void
do_neon_qshl_imm (void)
{
  /* VQSHL: saturating shift left; immediate and register forms, mirroring
     do_neon_shl_imm but with signed/unsigned types and a written U bit in
     the immediate form.  */
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14720
14721 static void
14722 do_neon_rshl (void)
14723 {
14724 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14725 struct neon_type_el et = neon_check_type (3, rs,
14726 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14727 unsigned int tmp;
14728
14729 tmp = inst.operands[2].reg;
14730 inst.operands[2].reg = inst.operands[1].reg;
14731 inst.operands[1].reg = tmp;
14732 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14733 }
14734
/* Compute the "cmode" encoding for a logic-immediate operation (VBIC/VORR
   and their pseudo-instructions) whose elements are SIZE bits wide, storing
   the 8-bit encoded immediate through *IMMBITS.  Returns the cmode value,
   or FAIL if IMMEDIATE cannot be represented.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit elements: the byte may sit in any of the four byte lanes;
	 cmode selects which lane.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* A 32-bit value whose halves repeat can fall through to the 16-bit
	 lane encodings below.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit elements: the byte may sit in either byte lane.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
14790
/* Encode VAND/VBIC/VORR/VORN and friends.  The register-register form goes
   through the common three-same encoder; the immediate form computes a
   cmode encoding, with VAND and VORN handled as pseudo-instructions for
   VBIC and VORR on the inverted immediate.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Two-operand (Vd, #imm) and three-operand (Vd, Vn, #imm) immediate
	 forms are both accepted; the latter requires Vd == Vn.  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
14876
14877 static void
14878 do_neon_bitfield (void)
14879 {
14880 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14881 neon_check_type (3, rs, N_IGNORE_TYPE);
14882 neon_three_same (neon_quad (rs), 0, -1);
14883 }
14884
/* Common helper for three-register operations with both integer and float
   variants.  UBIT_MEANING names the element type (if any) that causes the
   U bit to be set in the integer encoding; TYPES is the allowed type mask
   for the key operand; DESTBITS is extra type bits for the destination.  */

static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
		  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
					    types | N_KEY);
  if (et.type == NT_float)
    {
      NEON_ENCODE (FLOAT, inst);
      /* Only a 16-bit float writes the size field; for 32-bit the field
	 comes from the opcode bitmask.  */
      neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
    }
  else
    {
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}
14903
14904 static void
14905 do_neon_dyadic_if_su (void)
14906 {
14907 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14908 }
14909
14910 static void
14911 do_neon_dyadic_if_su_d (void)
14912 {
14913 /* This version only allow D registers, but that constraint is enforced during
14914 operand parsing so we don't need to do anything extra here. */
14915 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14916 }
14917
14918 static void
14919 do_neon_dyadic_if_i_d (void)
14920 {
14921 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14922 affected if we specify unsigned args. */
14923 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14924 }
14925
/* Flag bits for vfp_or_neon_is_neon, below.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Reject/fix up a condition code.  */
  NEON_CHECK_ARCH = 2,	/* Require the Neon v1 extension.  */
  NEON_CHECK_ARCH8 = 4	/* Require the ARMv8 Neon extension.  */
};
14932
14933 /* Call this function if an instruction which may have belonged to the VFP or
14934 Neon instruction sets, but turned out to be a Neon instruction (due to the
14935 operand types involved, etc.). We have to check and/or fix-up a couple of
14936 things:
14937
14938 - Make sure the user hasn't attempted to make a Neon instruction
14939 conditional.
14940 - Alter the value in the condition code field if necessary.
14941 - Make sure that the arch supports Neon instructions.
14942
14943 Which of these operations take place depends on bits from enum
14944 vfp_or_neon_is_neon_bits.
14945
14946 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14947 current instruction's condition is COND_ALWAYS, the condition field is
14948 changed to inst.uncond_value. This is necessary because instructions shared
14949 between VFP and Neon may be conditional for the VFP variants only, and the
14950 unconditional Neon version must have, e.g., 0xF in the condition field. */
14951
static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      /* Force the unconditional encoding (see the comment above about
	 instructions shared between VFP and Neon).  */
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }

  /* Check that the required FPU extensions are available, recording them
     as used for the object attributes.  */
  if ((check & NEON_CHECK_ARCH)
      && !mark_feature_used (&fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  if ((check & NEON_CHECK_ARCH8)
      && !mark_feature_used (&fpu_neon_ext_armv8))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
14983
14984 static void
14985 do_neon_addsub_if_i (void)
14986 {
14987 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
14988 return;
14989
14990 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14991 return;
14992
14993 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14994 affected if we specify unsigned args. */
14995 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
14996 }
14997
14998 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14999 result to be:
15000 V<op> A,B (A is operand 0, B is operand 2)
15001 to mean:
15002 V<op> A,B,A
15003 not:
15004 V<op> A,B,B
15005 so handle that case specially. */
15006
15007 static void
15008 neon_exchange_operands (void)
15009 {
15010 if (inst.operands[1].present)
15011 {
15012 void *scratch = xmalloc (sizeof (inst.operands[0]));
15013
15014 /* Swap operands[1] and operands[2]. */
15015 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
15016 inst.operands[1] = inst.operands[2];
15017 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
15018 free (scratch);
15019 }
15020 else
15021 {
15022 inst.operands[1] = inst.operands[2];
15023 inst.operands[2] = inst.operands[0];
15024 }
15025 }
15026
/* Encode a comparison.  The register-register form goes through
   neon_dyadic_misc; the non-register form (the second source looks like a
   comparison against an immediate zero; no immediate bits are encoded) is
   handled here.  INVERT swaps the two source operands first, so that e.g.
   a "less than" can be encoded as the opposite comparison.  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Bit 10 marks the floating-point variant.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
15054
15055 static void
15056 do_neon_cmp (void)
15057 {
15058 neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
15059 }
15060
15061 static void
15062 do_neon_cmp_inv (void)
15063 {
15064 neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
15065 }
15066
15067 static void
15068 do_neon_ceq (void)
15069 {
15070 neon_compare (N_IF_32, N_IF_32, FALSE);
15071 }
15072
15073 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
15074 scalars, which are encoded in 5 bits, M : Rm.
15075 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
15076 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
15077 index in M.
15078
15079 Dot Product instructions are similar to multiply instructions except elsize
15080 should always be 32.
15081
15082 This function translates SCALAR, which is GAS's internal encoding of indexed
15083 scalar register, to raw encoding. There is also register and index range
15084 check based on ELSIZE. */
15085
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3]
     (see the comment above this function).  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Any other size, or an out-of-range register/index, is an error.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
15111
15112 /* Encode multiply / multiply-accumulate scalar instructions. */
15113
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Operand 2 is an indexed scalar; translate it to the raw M:Rm
     encoding (with range checking) before packing the fields.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  /* Bit 8 marks the floating-point variant.  */
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
15136
/* Multiply-accumulate: try the VFP encoding first, then dispatch on
   whether the third operand is an indexed scalar or a full register.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
15161
15162 static void
15163 do_neon_fmac (void)
15164 {
15165 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
15166 return;
15167
15168 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15169 return;
15170
15171 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
15172 }
15173
15174 static void
15175 do_neon_tst (void)
15176 {
15177 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15178 struct neon_type_el et = neon_check_type (3, rs,
15179 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
15180 neon_three_same (neon_quad (rs), 0, et.size);
15181 }
15182
15183 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15184 same types as the MAC equivalents. The polynomial type for this instruction
15185 is encoded the same as the integer type. */
15186
static void
do_neon_mul (void)
{
  /* See the comment above: the three-register form additionally accepts
     the P8 (polynomial) type, encoded like the integer types.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    /* The scalar form supports the same types as multiply-accumulate.  */
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
}
15201
/* Saturating doubling multiply (high half), scalar or three-register
   form; only signed 16/32-bit element types are valid.  */

static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15223
/* VQRDMLAH/VQRDMLSH (ARMv8.1 AdvSIMD): same operand handling as
   do_neon_qdmulh, plus an architecture check with a compatibility
   warning when only the base ARMv8 Neon feature was selected.  */

static void
do_neon_qrdmlah (void)
{
  /* Check we're on the correct architecture.  */
  if (!mark_feature_used (&fpu_neon_ext_armv8))
    inst.error =
      _("instruction form not available on this architecture.");
  else if (!mark_feature_used (&fpu_neon_ext_v8_1))
    {
      as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
      record_feature_use (&fpu_neon_ext_v8_1);
    }

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15255
15256 static void
15257 do_neon_fcmp_absolute (void)
15258 {
15259 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15260 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15261 N_F_16_32 | N_KEY);
15262 /* Size field comes from bit mask. */
15263 neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
15264 }
15265
static void
do_neon_fcmp_absolute_inv (void)
{
  /* Inverted form: exchange the source operands, then encode as the
     direct absolute comparison.  */
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15272
15273 static void
15274 do_neon_step (void)
15275 {
15276 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15277 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15278 N_F_16_32 | N_KEY);
15279 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
15280 }
15281
static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  /* Prefer the VFP encoding when the operands allow it.  */
  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  /* Two-register encoding: Rd, Rm, Q bit, float flag (bit 10) and size.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
15307
15308 static void
15309 do_neon_sli (void)
15310 {
15311 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15312 struct neon_type_el et = neon_check_type (2, rs,
15313 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15314 int imm = inst.operands[2].imm;
15315 constraint (imm < 0 || (unsigned)imm >= et.size,
15316 _("immediate out of range for insert"));
15317 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15318 }
15319
15320 static void
15321 do_neon_sri (void)
15322 {
15323 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15324 struct neon_type_el et = neon_check_type (2, rs,
15325 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15326 int imm = inst.operands[2].imm;
15327 constraint (imm < 1 || (unsigned)imm > et.size,
15328 _("immediate out of range for insert"));
15329 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
15330 }
15331
/* VQSHLU: saturating shift left with unsigned result on signed
   operands.  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
15348
static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  NEON_ENCODE (INTEGER, inst);
  /* Bits 7:6 select the signedness variant of the narrowing move.  */
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  /* The encoded size is that of the narrowed destination elements.  */
  neon_two_same (0, 1, et.size / 2);
}
15363
15364 static void
15365 do_neon_qmovun (void)
15366 {
15367 struct neon_type_el et = neon_check_type (2, NS_DQ,
15368 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15369 /* Saturating move with unsigned results. Operands must be signed. */
15370 NEON_ENCODE (INTEGER, inst);
15371 neon_two_same (0, 1, et.size / 2);
15372 }
15373
static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* A right shift of N is encoded as size - N in the immediate field.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
15400
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
15430
15431 static void
15432 do_neon_movn (void)
15433 {
15434 struct neon_type_el et = neon_check_type (2, NS_DQ,
15435 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15436 NEON_ENCODE (INTEGER, inst);
15437 neon_two_same (0, 1, et.size / 2);
15438 }
15439
static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm> */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  /* A right shift of N is encoded as size - N in the immediate field.  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
15464
/* VSHLL: shift left long.  The maximum-shift form (shift == element size)
   has a dedicated encoding; other shift amounts use the generic
   immediate-shift encoding.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
15494
15495 /* Check the various types for the VCVT instruction, and return which version
15496 the current instruction is. */
15497
/* X-macro table of conversion flavours.  Each entry is
   CVT_VAR (name, destination type, source type, register-shape flags,
   and three VFP opcode-name strings used by the Neon-syntax VFP
   encoding paths; NULL where no such opcode applies).  The table is
   expanded below to build enum neon_cvt_flavour and the matching logic
   in get_neon_cvt_flavour.  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */ \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
15533
/* Expand each CVT_FLAVOUR_VAR row into one enumerator named
   neon_cvt_flavour_<C> (e.g. neon_cvt_flavour_s16_f16).  Only the first
   macro argument is used here; the type masks and opcode-name columns are
   consumed by other expansions of CVT_VAR elsewhere in this file.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  /* Sentinel: no flavour matched the operand types.  */
  neon_cvt_flavour_invalid,
  /* Flavours from f32_f64 onwards are the VFP (not Advanced SIMD) ones;
     do_neon_cvt_1 compares against this to route to the VFP encoders.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15546
/* Work out which conversion flavour matches the operand types implied by
   shape RS (read from the global inst).  Tries every CVT_FLAVOUR_VAR row
   in order via neon_check_type and returns the first that matches, or
   neon_cvt_flavour_invalid if none does.  Clears inst.error on a match
   (earlier failed probes may have set it).  */
static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  /* Single-precision VFP shapes need the N_VFP qualifier on both types;
     the CVT_FLAVOUR_VAR rows reference this local as "whole_reg".  */
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  /* Expands to one neon_check_type probe per flavour, in table order.  */
  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
15572
/* Rounding/conversion mode requested by the mnemonic suffix.  The a/n/p/m
   members map directly to rounding-mode field values 0-3 in
   do_vfp_nsyn_cvt_fpv8; z is round-towards-zero VCVT and x is VCVTR
   (see the do_neon_cvt* wrappers below).  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
15583
15584 /* Neon-syntax VFP conversions. */
15585
/* Assemble a VFP conversion written in Neon syntax by looking up the
   traditional VFP mnemonic for FLAVOUR and re-assembling it through
   do_vfp_nsyn_opcode.  RS distinguishes the fixed-point (immediate
   bitshift) forms from the plain ones.  A flavour with no entry (NULL or
   past the end of the table) is silently ignored here.  */
static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  Table of VFP mnemonics
	 indexed by flavour (the BSN column of CVT_FLAVOUR_VAR).  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The bitshift form is written Vd, Vd, #imm; drop the duplicated
	     source register so the VFP encoder sees Vd, #imm.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift (the CN column of CVT_FLAVOUR_VAR).  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
15637
/* Assemble a round-towards-zero VCVT via the VFP "fto?iz?" mnemonics.
   Selects the flavour from the operand shape and looks up the ZN column
   of the flavour table; does nothing if no such form exists (NULL or
   out-of-range entry).  */
static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15654
15655 static void
15656 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
15657 enum neon_cvt_mode mode)
15658 {
15659 int sz, op;
15660 int rm;
15661
15662 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15663 D register operands. */
15664 if (flavour == neon_cvt_flavour_s32_f64
15665 || flavour == neon_cvt_flavour_u32_f64)
15666 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
15667 _(BAD_FPU));
15668
15669 if (flavour == neon_cvt_flavour_s32_f16
15670 || flavour == neon_cvt_flavour_u32_f16)
15671 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
15672 _(BAD_FP16));
15673
15674 set_it_insn_type (OUTSIDE_IT_INSN);
15675
15676 switch (flavour)
15677 {
15678 case neon_cvt_flavour_s32_f64:
15679 sz = 1;
15680 op = 1;
15681 break;
15682 case neon_cvt_flavour_s32_f32:
15683 sz = 0;
15684 op = 1;
15685 break;
15686 case neon_cvt_flavour_s32_f16:
15687 sz = 0;
15688 op = 1;
15689 break;
15690 case neon_cvt_flavour_u32_f64:
15691 sz = 1;
15692 op = 0;
15693 break;
15694 case neon_cvt_flavour_u32_f32:
15695 sz = 0;
15696 op = 0;
15697 break;
15698 case neon_cvt_flavour_u32_f16:
15699 sz = 0;
15700 op = 0;
15701 break;
15702 default:
15703 first_error (_("invalid instruction shape"));
15704 return;
15705 }
15706
15707 switch (mode)
15708 {
15709 case neon_cvt_mode_a: rm = 0; break;
15710 case neon_cvt_mode_n: rm = 1; break;
15711 case neon_cvt_mode_p: rm = 2; break;
15712 case neon_cvt_mode_m: rm = 3; break;
15713 default: first_error (_("invalid rounding mode")); return;
15714 }
15715
15716 NEON_ENCODE (FPV8, inst);
15717 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
15718 encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
15719 inst.instruction |= sz << 8;
15720
15721 /* ARMv8.2 fp16 VCVT instruction. */
15722 if (flavour == neon_cvt_flavour_s32_f16
15723 ||flavour == neon_cvt_flavour_u32_f16)
15724 do_scalar_fp16_v82_encode ();
15725 inst.instruction |= op << 7;
15726 inst.instruction |= rm << 16;
15727 inst.instruction |= 0xf0000000;
15728 inst.is_neon = TRUE;
15729 }
15730
/* Common worker for the VCVT family in Neon syntax.  MODE selects the
   rounding behaviour (see enum neon_cvt_mode).  Routes between the VFP
   encoders and the Advanced SIMD encodings according to the operand
   shape RS and the flavour derived from the operand types.  */
static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      /* Advanced SIMD fixed-point conversion (with #fbits immediate).  */
      {
	unsigned immbits;
	/* Opcode fragments indexed by flavour; relies on the
	   CVT_FLAVOUR_VAR enumerator ordering.  */
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* NOTE(review): bit 21 was already set just above, so this OR
	       is redundant (but harmless).  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    /* Half-precision flavours: different immediate base and
	       clear bit 9.  */
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* VCVT{A,N,P,M}: ARMv8 Advanced SIMD, not IT-predicable;
	     the rounding mode goes in bits 8-9.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
	  /* Also entered from NS_DDI/NS_QQI above when #fbits is zero.  */
    int_encode:
	  {
	    /* Opcode fragments indexed by flavour (same ordering
	       dependency as enctab above).  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
15922
/* VCVTR: convert using mode 'x' (routed to the non-"z" VFP mnemonics,
   e.g. "fsitos"/"ftosis", by do_vfp_nsyn_cvt).  */
static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
15928
/* VCVT: convert with round-towards-zero semantics for the
   float-to-integer forms (mode 'z', see PR11109 handling above).  */
static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
15934
/* VCVTA: explicit rounding mode 'a' (rm field 0).  */
static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
15940
/* VCVTN: explicit rounding mode 'n' (rm field 1).  */
static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
15946
/* VCVTP: explicit rounding mode 'p' (rm field 2).  */
static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
15952
/* VCVTM: explicit rounding mode 'm' (rm field 3).  */
static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15958
15959 static void
15960 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
15961 {
15962 if (is_double)
15963 mark_feature_used (&fpu_vfp_ext_armv8);
15964
15965 encode_arm_vfp_reg (inst.operands[0].reg,
15966 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
15967 encode_arm_vfp_reg (inst.operands[1].reg,
15968 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
15969 inst.instruction |= to ? 0x10000 : 0;
15970 inst.instruction |= t ? 0x80 : 0;
15971 inst.instruction |= is_double ? 0x100 : 0;
15972 do_vfp_cond_or_thumb ();
15973 }
15974
/* Assemble VCVTB (T == FALSE) or VCVTT (T == TRUE).  Probes each accepted
   operand-type pairing in turn — f16 <- f32, f32 <- f16, f16 <- f64,
   f64 <- f16 — and encodes the first match via do_neon_cvttb_2 (a
   half-precision destination means to = TRUE).  If no pairing matches,
   returns with inst.error left as set by the failed type checks.  */
static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    return;
}
16016
/* VCVTB: operate on the bottom half of the half-precision register.  */
static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
16022
16023
/* VCVTT: operate on the top half of the half-precision register.  */
static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
16029
/* Encode the immediate forms of VMOV/VMVN.  Finds a cmode/op encoding for
   the immediate; if none exists for the current instruction, retries with
   the bits inverted and the opposite of VMOV/VMVN, since some immediate
   classes are only available to one of the two.  Reports an error if
   neither attempt succeeds.  */
static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  /* A 64-bit immediate arrives split: low word in .imm and, when
     regisimm is set, high word in .reg.  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  /* neon_cmode_for_move_imm also updates op and yields the encoded
     immediate bits through immbits.  */
  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Re-write the MOV/MVN bit in case the second attempt flipped it.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
16081
16082 static void
16083 do_neon_mvn (void)
16084 {
16085 if (inst.operands[1].isreg)
16086 {
16087 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16088
16089 NEON_ENCODE (INTEGER, inst);
16090 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16091 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16092 inst.instruction |= LOW4 (inst.operands[1].reg);
16093 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16094 inst.instruction |= neon_quad (rs) << 6;
16095 }
16096 else
16097 {
16098 NEON_ENCODE (IMMED, inst);
16099 neon_move_immediate ();
16100 }
16101
16102 neon_dp_fixup (&inst);
16103 }
16104
16105 /* Encode instructions of form:
16106
16107 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16108 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16109
/* Encode a mixed-length operation per the field diagram above: Rd in
   bits 12-15/22, Rn in 16-19/7, Rm in 0-3/5, the U bit (24) from the
   unsignedness of ET, and log2(SIZE) in the size field at bit 20.  */
static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
16124
/* Lengthening three-operand op: Qd, Dn, Dm with signed/unsigned
   8/16/32-bit elements.  */
static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16133
/* Long accumulate op (Qd, Dn, Dm); integer element types only
   (per the N_INT mask on the destination).  */
static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16141
/* Encode a long multiply(-accumulate) whose third operand is either a
   scalar (Dm[x]) or a plain D register.  NOTE(review): the scalar branch
   consumes REGTYPES and the register branch SCALARTYPES — the parameter
   names look swapped relative to their use; callers pass positionally so
   behaviour is unaffected, but verify before renaming.  */
static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
16160
/* Long multiply-accumulate, scalar or register third operand;
   16/32-bit signed or unsigned element types.  */
static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
16166
16167 /* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
16168 internal SCALAR. QUAD_P is 1 if it's for Q format, otherwise it's 0. */
16169
/* Like neon_scalar_for_mul, generate the Rm encoding from GAS's internal
   SCALAR representation.  QUAD_P is 1 for the Q form, 0 for the D form.
   Reports an error and returns 0 when the register number or element
   index is out of range for the chosen form.  */
static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);
  /* Q form allows registers 0-7 with index 0-3; D form allows
     registers 0-15 with index 0-1.  */
  unsigned reg_limit = quad_p ? 7 : 15;
  unsigned el_limit = quad_p ? 3 : 1;

  if (regno > reg_limit || elno > el_limit)
    {
      first_error (_("scalar out of range for multiply instruction"));
      return 0;
    }

  if (quad_p)
    /* Register in bits 0-2, index bit 0 at bit 3, index bit 1 at bit 5.  */
    return ((regno & 0x7)
	    | ((elno & 0x1) << 3)
	    | (((elno >> 1) & 0x1) << 5));

  /* Register low bit at bit 5, remaining register bits at 0-2,
     index bit at bit 3.  */
  return (((regno & 0x1) << 5)
	  | ((regno >> 1) & 0x7)
	  | ((elno & 0x1) << 3));
}
16199
/* Assemble VFMAL (SUBTYPE == 0) or VFMSL (SUBTYPE == 1), the FP16
   multiply-accumulate-long instructions, in either the three-same D/Q
   register form or the scalar-indexed form.  Requires the FP16-FML
   extension and ARMv8 Advanced SIMD.  */
static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding.  The
     'size' field (bits[21:20]) has a different meaning: for the scalar index
     variant it is used to differentiate add and subtract, otherwise it has
     the fixed value 0x2.  */
  int size = -1;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	/* Non-scalar subtract is distinguished by bit 23 instead.  */
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }

  neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16);

  /* "opcode" from template has included "ubit", so simply pass 0 here.  Also,
     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
     so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup.  Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

/* Split a register number into its low bit and its remaining four bits.
   NOTE(review): these macros remain defined after this function.  */
#define LOW1(R) ((R) & 0x1)
#define HI4(R) (((R) >> 1) & 0xf)
  /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
16281
/* VFMAL: fused multiply-accumulate long (subtype 0).  */
static void
do_neon_vfmal (void)
{
  return do_neon_fmac_maybe_scalar_long (0);
}
16287
/* VFMSL: fused multiply-subtract long (subtype 1).  */
static void
do_neon_vfmsl (void)
{
  return do_neon_fmac_maybe_scalar_long (1);
}
16293
/* Wide three-operand op: Qd, Qn, Dm with signed/unsigned 8/16/32-bit
   elements.  */
static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16301
/* Narrowing three-operand op; encoded with half the checked element
   size.  */
static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
16312
/* Saturating long multiply, scalar or register third operand; signed
   16/32-bit element types only.  */
static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
16318
/* Assemble VMULL.  A scalar third operand goes through the scalar
   long-multiply path.  The register form additionally accepts polynomial
   types: P8, and P64 which requires the ARMv8 crypto extension and is
   encoded with size field value 0b10 (32).  */
static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
16350
/* Assemble VEXT.  The element-count immediate is converted to a byte
   offset (imm * element size / 8), which must fit within the vector:
   less than 16 bytes for Q registers, 8 for D registers.  */
static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
16372
/* Assemble VREV16/VREV32/VREV64.  The opcode field at bits 7-8 (0, 1, 2)
   selects a reversal-region width of 64, 32 or 16 bits respectively;
   the element size must be strictly smaller than the region.  */
static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask.  We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
16389
/* Assemble VDUP: either replicate one scalar lane (Dm[x]) into every lane
   of Dd/Qd, or broadcast an ARM core register into every lane.  */
static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar form.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Index and size-marker bits combine in the field at bit 16.  */
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8: inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
16440
16441 /* VMOV has particularly many variations. It can be one of:
16442 0. VMOV<c><q> <Qd>, <Qm>
16443 1. VMOV<c><q> <Dd>, <Dm>
16444 (Register operations, which are VORR with Rm = Rn.)
16445 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16446 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16447 (Immediate loads.)
16448 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16449 (ARM register to scalar.)
16450 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16451 (Two ARM registers to vector.)
16452 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16453 (Scalar to ARM register.)
16454 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16455 (Vector to two ARM registers.)
16456 8. VMOV.F32 <Sd>, <Sm>
16457 9. VMOV.F64 <Dd>, <Dm>
16458 (VFP register moves.)
16459 10. VMOV.F32 <Sd>, #imm
16460 11. VMOV.F64 <Dd>, #imm
16461 (VFP float immediate load.)
16462 12. VMOV <Rd>, <Sm>
16463 (VFP single to ARM reg.)
16464 13. VMOV <Sd>, <Rm>
16465 (ARM reg to VFP single.)
16466 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16467 (Two ARM regs to two VFP singles.)
16468 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16469 (Two VFP singles to two ARM regs.)
16470
16471 These cases can be disambiguated using neon_select_shape, except cases 1/9
16472 and 3/11 which depend on the operand type too.
16473
16474 All the encoded bits are hardcoded by this function.
16475
16476 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16477 Cases 5, 7 may be used with VFPv2 and above.
16478
16479 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16480 can specify a type where it doesn't make sense to, and is ignored). */
16481
16482 static void
16483 do_neon_mov (void)
16484 {
16485 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
16486 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
16487 NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
16488 NS_HR, NS_RH, NS_HI, NS_NULL);
16489 struct neon_type_el et;
16490 const char *ldconst = 0;
16491
16492 switch (rs)
16493 {
16494 case NS_DD: /* case 1/9. */
16495 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
16496 /* It is not an error here if no type is given. */
16497 inst.error = NULL;
16498 if (et.type == NT_float && et.size == 64)
16499 {
16500 do_vfp_nsyn_opcode ("fcpyd");
16501 break;
16502 }
16503 /* fall through. */
16504
16505 case NS_QQ: /* case 0/1. */
16506 {
16507 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
16508 return;
16509 /* The architecture manual I have doesn't explicitly state which
16510 value the U bit should have for register->register moves, but
16511 the equivalent VORR instruction has U = 0, so do that. */
16512 inst.instruction = 0x0200110;
16513 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16514 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16515 inst.instruction |= LOW4 (inst.operands[1].reg);
16516 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16517 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16518 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16519 inst.instruction |= neon_quad (rs) << 6;
16520
16521 neon_dp_fixup (&inst);
16522 }
16523 break;
16524
16525 case NS_DI: /* case 3/11. */
16526 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
16527 inst.error = NULL;
16528 if (et.type == NT_float && et.size == 64)
16529 {
16530 /* case 11 (fconstd). */
16531 ldconst = "fconstd";
16532 goto encode_fconstd;
16533 }
16534 /* fall through. */
16535
16536 case NS_QI: /* case 2/3. */
16537 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
16538 return;
16539 inst.instruction = 0x0800010;
16540 neon_move_immediate ();
16541 neon_dp_fixup (&inst);
16542 break;
16543
16544 case NS_SR: /* case 4. */
16545 {
16546 unsigned bcdebits = 0;
16547 int logsize;
16548 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
16549 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
16550
16551 /* .<size> is optional here, defaulting to .32. */
16552 if (inst.vectype.elems == 0
16553 && inst.operands[0].vectype.type == NT_invtype
16554 && inst.operands[1].vectype.type == NT_invtype)
16555 {
16556 inst.vectype.el[0].type = NT_untyped;
16557 inst.vectype.el[0].size = 32;
16558 inst.vectype.elems = 1;
16559 }
16560
16561 et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
16562 logsize = neon_logbits (et.size);
16563
16564 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
16565 _(BAD_FPU));
16566 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
16567 && et.size != 32, _(BAD_FPU));
16568 constraint (et.type == NT_invtype, _("bad type for scalar"));
16569 constraint (x >= 64 / et.size, _("scalar index out of range"));
16570
16571 switch (et.size)
16572 {
16573 case 8: bcdebits = 0x8; break;
16574 case 16: bcdebits = 0x1; break;
16575 case 32: bcdebits = 0x0; break;
16576 default: ;
16577 }
16578
16579 bcdebits |= x << logsize;
16580
16581 inst.instruction = 0xe000b10;
16582 do_vfp_cond_or_thumb ();
16583 inst.instruction |= LOW4 (dn) << 16;
16584 inst.instruction |= HI1 (dn) << 7;
16585 inst.instruction |= inst.operands[1].reg << 12;
16586 inst.instruction |= (bcdebits & 3) << 5;
16587 inst.instruction |= (bcdebits >> 2) << 21;
16588 }
16589 break;
16590
16591 case NS_DRR: /* case 5 (fmdrr). */
16592 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
16593 _(BAD_FPU));
16594
16595 inst.instruction = 0xc400b10;
16596 do_vfp_cond_or_thumb ();
16597 inst.instruction |= LOW4 (inst.operands[0].reg);
16598 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
16599 inst.instruction |= inst.operands[1].reg << 12;
16600 inst.instruction |= inst.operands[2].reg << 16;
16601 break;
16602
16603 case NS_RS: /* case 6. */
16604 {
16605 unsigned logsize;
16606 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
16607 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
16608 unsigned abcdebits = 0;
16609
16610 /* .<dt> is optional here, defaulting to .32. */
16611 if (inst.vectype.elems == 0
16612 && inst.operands[0].vectype.type == NT_invtype
16613 && inst.operands[1].vectype.type == NT_invtype)
16614 {
16615 inst.vectype.el[0].type = NT_untyped;
16616 inst.vectype.el[0].size = 32;
16617 inst.vectype.elems = 1;
16618 }
16619
16620 et = neon_check_type (2, NS_NULL,
16621 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
16622 logsize = neon_logbits (et.size);
16623
16624 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
16625 _(BAD_FPU));
16626 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
16627 && et.size != 32, _(BAD_FPU));
16628 constraint (et.type == NT_invtype, _("bad type for scalar"));
16629 constraint (x >= 64 / et.size, _("scalar index out of range"));
16630
16631 switch (et.size)
16632 {
16633 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
16634 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
16635 case 32: abcdebits = 0x00; break;
16636 default: ;
16637 }
16638
16639 abcdebits |= x << logsize;
16640 inst.instruction = 0xe100b10;
16641 do_vfp_cond_or_thumb ();
16642 inst.instruction |= LOW4 (dn) << 16;
16643 inst.instruction |= HI1 (dn) << 7;
16644 inst.instruction |= inst.operands[0].reg << 12;
16645 inst.instruction |= (abcdebits & 3) << 5;
16646 inst.instruction |= (abcdebits >> 2) << 21;
16647 }
16648 break;
16649
16650 case NS_RRD: /* case 7 (fmrrd). */
16651 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
16652 _(BAD_FPU));
16653
16654 inst.instruction = 0xc500b10;
16655 do_vfp_cond_or_thumb ();
16656 inst.instruction |= inst.operands[0].reg << 12;
16657 inst.instruction |= inst.operands[1].reg << 16;
16658 inst.instruction |= LOW4 (inst.operands[2].reg);
16659 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16660 break;
16661
16662 case NS_FF: /* case 8 (fcpys). */
16663 do_vfp_nsyn_opcode ("fcpys");
16664 break;
16665
16666 case NS_HI:
16667 case NS_FI: /* case 10 (fconsts). */
16668 ldconst = "fconsts";
16669 encode_fconstd:
16670 if (is_quarter_float (inst.operands[1].imm))
16671 {
16672 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
16673 do_vfp_nsyn_opcode (ldconst);
16674
16675 /* ARMv8.2 fp16 vmov.f16 instruction. */
16676 if (rs == NS_HI)
16677 do_scalar_fp16_v82_encode ();
16678 }
16679 else
16680 first_error (_("immediate out of range"));
16681 break;
16682
16683 case NS_RH:
16684 case NS_RF: /* case 12 (fmrs). */
16685 do_vfp_nsyn_opcode ("fmrs");
16686 /* ARMv8.2 fp16 vmov.f16 instruction. */
16687 if (rs == NS_RH)
16688 do_scalar_fp16_v82_encode ();
16689 break;
16690
16691 case NS_HR:
16692 case NS_FR: /* case 13 (fmsr). */
16693 do_vfp_nsyn_opcode ("fmsr");
16694 /* ARMv8.2 fp16 vmov.f16 instruction. */
16695 if (rs == NS_HR)
16696 do_scalar_fp16_v82_encode ();
16697 break;
16698
16699 /* The encoders for the fmrrs and fmsrr instructions expect three operands
16700 (one of which is a list), but we have parsed four. Do some fiddling to
16701 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
16702 expect. */
16703 case NS_RRFF: /* case 14 (fmrrs). */
16704 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
16705 _("VFP registers must be adjacent"));
16706 inst.operands[2].imm = 2;
16707 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
16708 do_vfp_nsyn_opcode ("fmrrs");
16709 break;
16710
16711 case NS_FFRR: /* case 15 (fmsrr). */
16712 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
16713 _("VFP registers must be adjacent"));
16714 inst.operands[1] = inst.operands[2];
16715 inst.operands[2] = inst.operands[3];
16716 inst.operands[0].imm = 2;
16717 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
16718 do_vfp_nsyn_opcode ("fmsrr");
16719 break;
16720
16721 case NS_NULL:
16722 /* neon_select_shape has determined that the instruction
16723 shape is wrong and has already set the error message. */
16724 break;
16725
16726 default:
16727 abort ();
16728 }
16729 }
16730
16731 static void
16732 do_neon_rshift_round_imm (void)
16733 {
16734 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16735 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
16736 int imm = inst.operands[2].imm;
16737
16738 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
16739 if (imm == 0)
16740 {
16741 inst.operands[2].present = 0;
16742 do_neon_mov ();
16743 return;
16744 }
16745
16746 constraint (imm < 1 || (unsigned)imm > et.size,
16747 _("immediate out of range for shift"));
16748 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
16749 et.size - imm);
16750 }
16751
16752 static void
16753 do_neon_movhf (void)
16754 {
16755 enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
16756 constraint (rs != NS_HH, _("invalid suffix"));
16757
16758 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
16759 _(BAD_FPU));
16760
16761 do_vfp_sp_monadic ();
16762
16763 inst.is_neon = 1;
16764 inst.instruction |= 0xf0000000;
16765 }
16766
16767 static void
16768 do_neon_movl (void)
16769 {
16770 struct neon_type_el et = neon_check_type (2, NS_QD,
16771 N_EQK | N_DBL, N_SU_32 | N_KEY);
16772 unsigned sizebits = et.size >> 3;
16773 inst.instruction |= sizebits << 19;
16774 neon_two_same (0, et.type == NT_unsigned, -1);
16775 }
16776
16777 static void
16778 do_neon_trn (void)
16779 {
16780 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16781 struct neon_type_el et = neon_check_type (2, rs,
16782 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16783 NEON_ENCODE (INTEGER, inst);
16784 neon_two_same (neon_quad (rs), 1, et.size);
16785 }
16786
16787 static void
16788 do_neon_zip_uzp (void)
16789 {
16790 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16791 struct neon_type_el et = neon_check_type (2, rs,
16792 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16793 if (rs == NS_DD && et.size == 32)
16794 {
16795 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
16796 inst.instruction = N_MNEM_vtrn;
16797 do_neon_trn ();
16798 return;
16799 }
16800 neon_two_same (neon_quad (rs), 1, et.size);
16801 }
16802
16803 static void
16804 do_neon_sat_abs_neg (void)
16805 {
16806 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16807 struct neon_type_el et = neon_check_type (2, rs,
16808 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16809 neon_two_same (neon_quad (rs), 1, et.size);
16810 }
16811
16812 static void
16813 do_neon_pair_long (void)
16814 {
16815 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16816 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
16817 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16818 inst.instruction |= (et.type == NT_unsigned) << 7;
16819 neon_two_same (neon_quad (rs), 1, et.size);
16820 }
16821
16822 static void
16823 do_neon_recip_est (void)
16824 {
16825 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16826 struct neon_type_el et = neon_check_type (2, rs,
16827 N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
16828 inst.instruction |= (et.type == NT_float) << 8;
16829 neon_two_same (neon_quad (rs), 1, et.size);
16830 }
16831
16832 static void
16833 do_neon_cls (void)
16834 {
16835 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16836 struct neon_type_el et = neon_check_type (2, rs,
16837 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16838 neon_two_same (neon_quad (rs), 1, et.size);
16839 }
16840
16841 static void
16842 do_neon_clz (void)
16843 {
16844 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16845 struct neon_type_el et = neon_check_type (2, rs,
16846 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16847 neon_two_same (neon_quad (rs), 1, et.size);
16848 }
16849
16850 static void
16851 do_neon_cnt (void)
16852 {
16853 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16854 struct neon_type_el et = neon_check_type (2, rs,
16855 N_EQK | N_INT, N_8 | N_KEY);
16856 neon_two_same (neon_quad (rs), 1, et.size);
16857 }
16858
16859 static void
16860 do_neon_swp (void)
16861 {
16862 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16863 neon_two_same (neon_quad (rs), 1, -1);
16864 }
16865
/* Encode VTBL/VTBX table lookup.  Operand 0 is the destination D
   register, operand 1 a list of 1-4 table D registers (its length,
   minus one, goes in bits [9:8]), operand 2 the index register.  */
static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
16889
/* Encode VLDM/VSTM (including the DB variants) for double-precision
   register lists.  Single-precision lists are delegated to the VFP
   nsyn encoder.  */
static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two transfer words.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
16919
/* Encode VLDR/VSTR of a single S or D register, via the flds/fsts or
   fldd/fstd nsyn opcodes.  Bit 20 of the initial bitmask is the L
   (load) bit.  */
static void
do_neon_ldr_str (void)
{
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
16956
/* "interleave" version also handles non-interleaving register VLD1/VST1
   instructions.  Encodes the multiple-structure VLD<n>/VST<n> forms:
   alignment bits, element size, and the "type" field looked up from
   the list shape.  */

static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
      -1, -1, 0x8, 0x9, -1, -1, 0x3, -1,  /* VLD2 / VST2.  */
      -1, -1, -1, -1, 0x4, 0x5, -1, -1,   /* VLD3 / VST3.  */
      -1, -1, -1, -1, -1, -1, 0x0, 0x1    /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Translate the :64/:128/:256 alignment specifier (held in the upper
     bits of operands[1].imm) into the two "align" bits, checking that
     it is legal for the register list length.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
17025
17026 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
17027 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
17028 otherwise. The variable arguments are a list of pairs of legal (size, align)
17029 values, terminated with -1. */
17030
17031 static int
17032 neon_alignment_bit (int size, int align, int *do_alignment, ...)
17033 {
17034 va_list ap;
17035 int result = FAIL, thissize, thisalign;
17036
17037 if (!inst.operands[1].immisalign)
17038 {
17039 *do_alignment = 0;
17040 return SUCCESS;
17041 }
17042
17043 va_start (ap, do_alignment);
17044
17045 do
17046 {
17047 thissize = va_arg (ap, int);
17048 if (thissize == -1)
17049 break;
17050 thisalign = va_arg (ap, int);
17051
17052 if (size == thissize && align == thisalign)
17053 result = SUCCESS;
17054 }
17055 while (result != SUCCESS);
17056
17057 va_end (ap);
17058
17059 if (result == SUCCESS)
17060 *do_alignment = 1;
17061 else
17062 first_error (_("unsupported alignment for instruction"));
17063
17064 return result;
17065 }
17066
/* Encode the single-lane VLD<n>/VST<n> forms (load/store one element
   to/from one lane of each register in the list).  <n> minus one is
   held in bits [9:8] of the initial bitmask.  */
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Each <n> has its own legal (size, align) pairs and its own way of
     packing the alignment into bits starting at bit 4.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				       16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
17151
/* Encode single n-element structure to all lanes VLD<n> instructions.
   <n> minus one is held in bits [9:8] of the initial bitmask; each
   case validates its own (size, align) pairs and list length.  */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with :128 alignment use the special size
	   encoding 0x3.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* Set the alignment ("a") bit if a legal alignment was accepted.  */
  inst.instruction |= do_alignment << 4;
}
17226
17227 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
17228 apart from bits [11:4]. */
17229
17230 static void
17231 do_neon_ldx_stx (void)
17232 {
17233 if (inst.operands[1].isreg)
17234 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
17235
17236 switch (NEON_LANE (inst.operands[0].imm))
17237 {
17238 case NEON_INTERLEAVE_LANES:
17239 NEON_ENCODE (INTERLV, inst);
17240 do_neon_ld_st_interleave ();
17241 break;
17242
17243 case NEON_ALL_LANES:
17244 NEON_ENCODE (DUP, inst);
17245 if (inst.instruction == N_INV)
17246 {
17247 first_error ("only loads support such operands");
17248 break;
17249 }
17250 do_neon_ld_dup ();
17251 break;
17252
17253 default:
17254 NEON_ENCODE (LANE, inst);
17255 do_neon_ld_st_lane ();
17256 }
17257
17258 /* L bit comes from bit mask. */
17259 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17260 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17261 inst.instruction |= inst.operands[1].reg << 16;
17262
17263 if (inst.operands[1].postind)
17264 {
17265 int postreg = inst.operands[1].imm & 0xf;
17266 constraint (!inst.operands[1].immisreg,
17267 _("post-index must be a register"));
17268 constraint (postreg == 0xd || postreg == 0xf,
17269 _("bad register for post-index"));
17270 inst.instruction |= postreg;
17271 }
17272 else
17273 {
17274 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
17275 constraint (inst.reloc.exp.X_op != O_constant
17276 || inst.reloc.exp.X_add_number != 0,
17277 BAD_ADDR_MODE);
17278
17279 if (inst.operands[1].writeback)
17280 {
17281 inst.instruction |= 0xd;
17282 }
17283 else
17284 inst.instruction |= 0xf;
17285 }
17286
17287 if (thumb_mode)
17288 inst.instruction |= 0xf9000000;
17289 else
17290 inst.instruction |= 0xf4000000;
17291 }
17292
/* FP v8.  */

/* Shared encoder for three-operand FP v8 VFP instructions.  RS is the
   selected shape; half- and single-precision use the SP dyadic
   encoder, double precision the DP one (with bit 8 set).  */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    do_vfp_dp_rd_rn_rm ();

  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  inst.instruction |= 0xf0000000;
}
17321
17322 static void
17323 do_vsel (void)
17324 {
17325 set_it_insn_type (OUTSIDE_IT_INSN);
17326
17327 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
17328 first_error (_("invalid instruction shape"));
17329 }
17330
17331 static void
17332 do_vmaxnm (void)
17333 {
17334 set_it_insn_type (OUTSIDE_IT_INSN);
17335
17336 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
17337 return;
17338
17339 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
17340 return;
17341
17342 neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
17343 }
17344
/* Common encoder for the VRINT family.  MODE selects the rounding mode
   and thereby the concrete mnemonic.  The scalar VFP encoding is tried
   first; if the type check fails, the Neon vector encoding is used
   instead.  */
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      /* Rounding-mode bits for the VFP form.  */
      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* Rounding-mode bits for the Neon form (no mode-R variant).  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
17436
/* VRINTX: rounding mode X (round to integral, signalling inexact per
   the ARM ARM).  */
static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

/* VRINTZ: rounding mode Z (towards zero).  */
static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

/* VRINTR: rounding mode R (use the current FPSCR rounding mode).  */
static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

/* VRINTA: rounding mode A (to nearest, ties away from zero).  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

/* VRINTN: rounding mode N (to nearest, ties to even).  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

/* VRINTP: rounding mode P (towards plus infinity).  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

/* VRINTM: rounding mode M (towards minus infinity).  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
17478
/* Return the encoded scalar operand for VCMLA, or 0 after reporting an
   error when the (register, index) pair encoded in OPND is out of
   range for element size ELSIZE.  */
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (opnd);
  unsigned idx = NEON_SCALAR_INDEX (opnd);

  if (elsize == 16)
    {
      /* 16-bit elements: two indexable lanes, index goes in bit 4.  */
      if (idx < 2 && reg < 16)
	return reg | (idx << 4);
    }
  else if (elsize == 32 && idx == 0)
    return reg;

  first_error (_("scalar out of range"));
  return 0;
}
17493
/* Encode VCMLA.  The rotation (0/90/180/270, parsed into
   inst.reloc.exp) is divided by 90 to form a two-bit field.  Operand 2
   may be a by-element scalar (indexed form) or a plain register
   (three-same form); the two forms use different base encodings.  */
static void
do_vcmla (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
  unsigned rot = inst.reloc.exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  rot /= 90;
  if (inst.operands[2].isscalar)
    {
      /* Indexed (by-element) form.  */
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      /* Three-same (vector) form.  */
      enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
17534
/* Encode VCADD.  Only rotations of 90 and 270 are valid; the choice is
   encoded in bit 24.  */
static void
do_vcadd (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
  unsigned rot = inst.reloc.exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				   N_KEY | N_F16 | N_F32).size;
  neon_three_same (neon_quad (rs), 0, -1);
  inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
  inst.instruction |= 0xfc800800;
  inst.instruction |= (rot == 270) << 24;
  inst.instruction |= (size == 32) << 20;
}
17552
/* Dot Product instructions encoding support.  */

/* Common encoder for the Dot Product instructions.  UNSIGNED_P non-zero
   selects the unsigned form, zero the signed form.  */
static void
do_neon_dotproduct (int unsigned_p)
{
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
17609
17610 /* Dot Product instructions for signed integer. */
17611
static void
do_neon_dotproduct_s (void)
{
  /* Plain call rather than 'return do_neon_dotproduct (0);' -- ISO C
     forbids a return statement with an expression in a function whose
     return type is void (C11 6.8.6.4).  */
  do_neon_dotproduct (0);
}
17617
17618 /* Dot Product instructions for unsigned integer. */
17619
static void
do_neon_dotproduct_u (void)
{
  /* Plain call rather than 'return do_neon_dotproduct (1);' -- ISO C
     forbids a return statement with an expression in a function whose
     return type is void (C11 6.8.6.4).  */
  do_neon_dotproduct (1);
}
17625
17626 /* Crypto v1 instructions. */
/* Encode a two-operand (Q, Q) crypto instruction (AES*, SHA1H, SHA1SU1,
   SHA256SU0).  ELTTYPE is the element type the operands must have (N_8 or
   N_32); OP is placed in bits <7:6> of the encoding, or -1 when the base
   opcode already carries those bits.  */

static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  /* The type check succeeded; discard any stale diagnostic.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  /* Unconditional encoding: the top byte differs between Thumb and ARM.  */
  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
17651
/* Encode a three-operand (Q, Q, Q) crypto instruction (SHA1*, SHA256*).
   U is passed through as the "ubit" argument of neon_three_same; OP is
   shifted into the size field (8 << op).  All operands must be untyped
   32-bit.  */

static void
do_crypto_3op_1 (int u, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  /* The type check succeeded; discard any stale diagnostic.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
17666
/* AESE: element type N_8, op field 0.  */
static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

/* AESD: element type N_8, op field 1.  */
static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

/* AESMC: element type N_8, op field 2.  */
static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

/* AESIMC: element type N_8, op field 3.  */
static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}
17690
/* SHA1C: u = 0, op = 0.  */
static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

/* SHA1P: u = 0, op = 1.  */
static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

/* SHA1M: u = 0, op = 2.  */
static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

/* SHA1SU0: u = 0, op = 3.  */
static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

/* SHA256H: u = 1, op = 0.  */
static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

/* SHA256H2: u = 1, op = 1.  */
static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

/* SHA256SU1: u = 1, op = 2.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

/* SHA1H: two-operand form, op bits already in the base opcode.  */
static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

/* SHA1SU1: two-operand form, op field 0.  */
static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

/* SHA256SU0: two-operand form, op field 1.  */
static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
17750
/* Encode a CRC32 instruction.  POLY selects the polynomial variant
   (0: CRC32, 1: CRC32C -- see the do_crc32* wrappers below) and SZ the
   size field (0: byte, 1: halfword, 2: word).  The bit positions of the
   Rd, size and polynomial fields differ between the ARM and Thumb
   encodings.  */
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* Warn: use of r15 in any operand position is unpredictable.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
}
17768
/* CRC32B: CRC32 polynomial, byte.  */
static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

/* CRC32H: CRC32 polynomial, halfword.  */
static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

/* CRC32W: CRC32 polynomial, word.  */
static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

/* CRC32CB: CRC32C polynomial, byte.  */
static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

/* CRC32CH: CRC32C polynomial, halfword.  */
static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

/* CRC32CW: CRC32C polynomial, word.  */
static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
17804
/* Encode VJCVT: a double-precision (F64) to signed 32-bit (S32)
   conversion.  Requires the Armv8 VFP extension.  */
static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
17814
17815 \f
17816 /* Overall per-instruction processing. */
17817
17818 /* We need to be able to fix up arbitrary expressions in some statements.
17819 This is so that we can handle symbols that are an arbitrary distance from
17820 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17821 which returns part of an address in a form which will be valid for
17822 a data instruction. We do this by pushing the expression into a symbol
17823 in the expr_section, and creating a fix for that. */
17824
/* Create a fix recording relocation RELOC of SIZE bytes at FRAG/WHERE
   against expression EXP.  PC_REL is non-zero for PC-relative fixes.
   Expressions that fix_new_exp cannot represent directly are wrapped in
   a symbol first.  */
static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  /* Rewrite EXP as symbol + 0 so the O_symbol case below applies.  */
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex: push the expression into a symbol of its
	 own and fix against that.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
17878
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Reduce the relocation expression to the symbol + offset pair that
     frag_var expects; anything more complex is wrapped in an expression
     symbol.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  /* Emit the short (Thumb-size) form now; relaxation may later grow it
     up to INSN_SIZE.  */
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
17910
/* Write a 32-bit thumb instruction to buf: most significant halfword
   first, each halfword in target byte order.  */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
17918
/* Emit the instruction assembled in INST: report any pending error,
   hand relaxable instructions to output_relax_insn, otherwise write the
   bytes, create a fix if a relocation is pending, and emit debug info.
   STR is the original source line, used only for diagnostics.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  /* Some pseudo-instructions produce no output.  */
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb: two halfwords, high halfword first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Doubled ARM instruction: emit the same word twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
17965
17966 static char *
17967 output_it_inst (int cond, int mask, char * to)
17968 {
17969 unsigned long instruction = 0xbf00;
17970
17971 mask &= 0xf;
17972 instruction |= mask;
17973 instruction |= cond << 4;
17974
17975 if (to == NULL)
17976 {
17977 to = frag_more (2);
17978 #ifdef OBJ_ELF
17979 dwarf2_emit_insn (2);
17980 #endif
17981 }
17982
17983 md_number_to_chars (to, instruction, 2);
17984
17985 return to;
17986 }
17987
17988 /* Tag values used in struct asm_opcode's tag field. */
/* See opcode_lookup, below, for how these tags drive the parsing of
   conditional affixes.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
18021
18022 /* Subroutine of md_assemble, responsible for looking up the primary
18023 opcode from the mnemonic the user wrote. STR points to the
18024 beginning of the mnemonic.
18025
18026 This is not simply a hash table lookup, because of conditional
18027 variants. Most instructions have conditional variants, which are
18028 expressed with a _conditional affix_ to the mnemonic. If we were
18029 to encode each conditional variant as a literal string in the opcode
18030 table, it would have approximately 20,000 entries.
18031
18032 Most mnemonics take this affix as a suffix, and in unified syntax,
18033 'most' is upgraded to 'all'. However, in the divided syntax, some
18034 instructions take the affix as an infix, notably the s-variants of
18035 the arithmetic instructions. Of those instructions, all but six
18036 have the infix appear after the third character of the mnemonic.
18037
18038 Accordingly, the algorithm for looking up primary opcodes given
18039 an identifier is:
18040
18041 1. Look up the identifier in the opcode table.
18042 If we find a match, go to step U.
18043
18044 2. Look up the last two characters of the identifier in the
18045 conditions table. If we find a match, look up the first N-2
18046 characters of the identifier in the opcode table. If we
18047 find a match, go to step CE.
18048
18049 3. Look up the fourth and fifth characters of the identifier in
18050 the conditions table. If we find a match, extract those
18051 characters from the identifier, and look up the remaining
18052 characters in the opcode table. If we find a match, go
18053 to step CM.
18054
18055 4. Fail.
18056
18057 U. Examine the tag field of the opcode structure, in case this is
18058 one of the six instructions with its conditional infix in an
18059 unusual place. If it is, the tag tells us where to find the
18060 infix; look it up in the conditions table and set inst.cond
18061 accordingly. Otherwise, this is an unconditional instruction.
18062 Again set inst.cond accordingly. Return the opcode structure.
18063
18064 CE. Examine the tag field to make sure this is an instruction that
18065 should receive a conditional suffix. If it is not, fail.
18066 Otherwise, set inst.cond from the suffix we already looked up,
18067 and return the opcode structure.
18068
18069 CM. Examine the tag field to make sure this is an instruction that
18070 should receive a conditional infix after the third character.
18071 If it is not, fail. Otherwise, undo the edits to the current
18072 line of input and proceed as for case CE. */
18073
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than three
     characters: one character of base mnemonic plus the two-character
     suffix.  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily splice the two infix characters out of the buffer, look
     the shortened mnemonic up, then restore the input line.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
18230
18231 /* This function generates an initial IT instruction, leaving its block
18232 virtually open for the new instructions. Eventually,
18233 the mask will be updated by now_it_add_mask () each time
18234 a new instruction needs to be included in the IT block.
18235 Finally, the block is closed with close_automatic_it_block ().
18236 The block closure can be requested either from md_assemble (),
18237 a tencode (), or due to a label hook. */
18238
static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  /* Initial mask for a one-instruction block; output_it_inst truncates
     it to its low four bits, and now_it_add_mask widens it as further
     instructions join the block.  */
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  mapping_state (MAP_THUMB);
  /* Remember where the IT instruction was written so it can be patched
     in place as the block grows.  */
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
  now_it.warn_deprecated = FALSE;
  now_it.insn_cond = TRUE;
}
18251
18252 /* Close an automatic IT block.
18253 See comments in new_automatic_it_block (). */
18254
static void
close_automatic_it_block (void)
{
  /* 0x10 marks the last slot of the block as used; it_fsm_post_encode
     tests for exactly this value to leave IT-block state.  */
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
18261
18262 /* Update the mask of the current automatically-generated IT
18263 instruction. See comments in new_automatic_it_block (). */
18264
static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit)	((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit)	(CLEAR_BIT (value, nbit) \
						 | ((bitvalue) << (nbit)))
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  /* Record COND's low bit at the mask position for this instruction,
     then move the terminating '1' down one slot.  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       resulting_bit,
			      (5 - now_it.block_length));
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       1,
			       ((5 - now_it.block_length) - 1) );
  /* Re-emit the IT instruction in place with the updated mask
     (now_it.insn remembers where it was first written).  */
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
18285
/* The IT blocks handling machinery is accessed through these functions:
18287 it_fsm_pre_encode () from md_assemble ()
18288 set_it_insn_type () optional, from the tencode functions
18289 set_it_insn_type_last () ditto
18290 in_it_block () ditto
18291 it_fsm_post_encode () from md_assemble ()
18292 force_automatic_it_block_close () from label handling functions
18293
18294 Rationale:
18295 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
18296 initializing the IT insn type with a generic initial value depending
18297 on the inst.condition.
18298 2) During the tencode function, two things may happen:
18299 a) The tencode function overrides the IT insn type by
18300 calling either set_it_insn_type (type) or set_it_insn_type_last ().
18301 b) The tencode function queries the IT block state by
18302 calling in_it_block () (i.e. to determine narrow/not narrow mode).
18303
18304 Both set_it_insn_type and in_it_block run the internal FSM state
18305 handling function (handle_it_state), because: a) setting the IT insn
18306 type may incur in an invalid state (exiting the function),
18307 and b) querying the state requires the FSM to be updated.
18308 Specifically we want to avoid creating an IT block for conditional
18309 branches, so it_fsm_pre_encode is actually a guess and we can't
18310 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
18312 Because of this, if set_it_insn_type and in_it_block have to be used,
18313 set_it_insn_type has to be called first.
18314
18315 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
18316 determines the insn IT type depending on the inst.cond code.
18317 When a tencode () routine encodes an instruction that can be
18318 either outside an IT block, or, in the case of being inside, has to be
18319 the last one, set_it_insn_type_last () will determine the proper
18320 IT instruction type based on the inst.cond code. Otherwise,
18321 set_it_insn_type can be called for overriding that logic or
18322 for covering other cases.
18323
18324 Calling handle_it_state () may not transition the IT block state to
18325 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
18326 still queried. Instead, if the FSM determines that the state should
18327 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
18328 after the tencode () function: that's what it_fsm_post_encode () does.
18329
18330 Since in_it_block () calls the state handling function to get an
18331 updated state, an error may occur (due to invalid insns combination).
18332 In that case, inst.error is set.
18333 Therefore, inst.error has to be checked after the execution of
18334 the tencode () routine.
18335
18336 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
18337 any pending state change (if any) that didn't take place in
18338 handle_it_state () as explained above. */
18339
18340 static void
18341 it_fsm_pre_encode (void)
18342 {
18343 if (inst.cond != COND_ALWAYS)
18344 inst.it_insn_type = INSIDE_IT_INSN;
18345 else
18346 inst.it_insn_type = OUTSIDE_IT_INSN;
18347
18348 now_it.state_handled = 0;
18349 }
18350
18351 /* IT state FSM handling function. */
18352
/* Run one step of the IT FSM for the current instruction, based on
   now_it.state and inst.it_insn_type.  Returns SUCCESS, or FAIL with
   inst.error set on an invalid combination.  */
static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	/* Advance to the next slot of the user-written IT mask.  */
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
18514
/* A deprecated-instruction matcher: an instruction matches when
   (insn & mask) == pattern; DESCRIPTION names the instruction class for
   the diagnostic.  Used by it_fsm_post_encode.  */
struct depr_insn_mask
{
  unsigned long pattern;
  unsigned long mask;
  const char* description;
};
18521
18522 /* List of 16-bit instruction patterns deprecated in an IT block in
18523 ARMv8. */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode.  'tvalue' is used at the stage this check happens.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }	/* Terminator.  */
};
18536
/* Called after each instruction is encoded: issue the ARMv8 IT-block
   deprecation warnings (at most one per block, tracked via
   now_it.warn_deprecated) and leave IT-block state once the last slot
   of the block has been consumed.  */
static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_it.state_handled)
    handle_it_state ();

  if (now_it.insn_cond
      && !now_it.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    {
      /* Values >= 0x10000 are 32-bit Thumb encodings here.  */
      if (inst.instruction >= 0x10000)
	{
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		       "deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
      else
	{
	  /* Scan the table of deprecated 16-bit patterns.  */
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
			       "of the following class are deprecated in ARMv8: "
			       "%s"), p->description);
		  now_it.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      if (now_it.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		       "instruction are deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
    }

  /* Mask value 0x10 means the block's final slot has been used.  */
  is_last = (now_it.mask == 0x10);
  if (is_last)
    {
      now_it.state = OUTSIDE_IT_BLOCK;
      now_it.mask = 0;
    }
}
18590
18591 static void
18592 force_automatic_it_block_close (void)
18593 {
18594 if (now_it.state == AUTOMATIC_IT_BLOCK)
18595 {
18596 close_automatic_it_block ();
18597 now_it.state = OUTSIDE_IT_BLOCK;
18598 now_it.mask = 0;
18599 }
18600 }
18601
/* Report whether we are currently inside an IT block, first running the
   IT FSM for this instruction if it has not been run yet so the answer
   reflects the current instruction.  */
static int
in_it_block (void)
{
  if (!now_it.state_handled)
    handle_it_state ();

  return now_it.state != OUTSIDE_IT_BLOCK;
}
18610
18611 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18612 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18613 here, hence the "known" in the function name. */
18614
static bfd_boolean
known_t32_only_insn (const struct asm_opcode *opcode)
{
  /* Original Thumb-1 wide instruction.  */
  if (opcode->tencode == do_t_blx
      || opcode->tencode == do_t_branch23
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
    return TRUE;

  /* Wide-only instruction added to ARMv8-M Baseline.  */
  if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
    return TRUE;

  /* Not one of the known wide-only opcodes.  */
  return FALSE;
}
18634
18635 /* Whether wide instruction variant can be used if available for a valid OPCODE
18636 in ARCH. */
18637
static bfd_boolean
t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
{
  /* Known T32-only opcodes are trivially OK in wide form.  */
  if (known_t32_only_insn (opcode))
    return TRUE;

  /* Instruction with narrow and wide encoding added to ARMv8-M.  Availability
     of variant T3 of B.W is checked in do_t_branch.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
      && opcode->tencode == do_t_branch)
    return TRUE;

  /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
      && opcode->tencode == do_t_mov_cmp
      /* Make sure CMP instruction is not affected.  */
      && opcode->aencode == do_mov)
    return TRUE;

  /* Wide instruction variants of all instructions with narrow *and* wide
     variants become available with ARMv6t2.  Other opcodes are either
     narrow-only or wide-only and are thus available if OPCODE is valid.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
    return TRUE;

  /* OPCODE with narrow only instruction variant or wide variant not
     available.  */
  return FALSE;
}
18667
/* Main entry point of the assembler proper for one instruction.  STR is
   the full instruction text (mnemonic plus operands).  Looks the mnemonic
   up, validates it against the selected CPU/architecture in the current
   (ARM or Thumb) mode, parses the operands into the global `inst',
   invokes the per-opcode encoding routine, and finally emits the result
   via output_inst.  Errors are reported with as_bad and abort emission.  */

void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction scratch state before encoding.  */
  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 16-bit Thumb encodings never start with 0xe8-0xff; anything
	     above 0xffff must be a 32-bit encoding.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
18860
18861 static void
18862 check_it_blocks_finished (void)
18863 {
18864 #ifdef OBJ_ELF
18865 asection *sect;
18866
18867 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
18868 if (seg_info (sect)->tc_segment_info_data.current_it.state
18869 == MANUAL_IT_BLOCK)
18870 {
18871 as_warn (_("section '%s' finished with an open IT block."),
18872 sect->name);
18873 }
18874 #else
18875 if (now_it.state == MANUAL_IT_BLOCK)
18876 as_warn (_("file finished with an open IT block."));
18877 #endif
18878 }
18879
18880 /* Various frobbings of labels and their addresses. */
18881
/* Hook run at the start of every input line: forget the label recorded
   by arm_frob_label so stale labels from earlier lines are not
   re-aligned by md_assemble.  */

void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
18887
/* Process a freshly-defined label SYM: remember it for md_assemble's
   alignment handling, tag it with the current ARM/Thumb (and
   interworking) state, close any automatic IT block (a label is a
   potential branch target), possibly mark it as a Thumb function, and
   emit DWARF line info for it.  */

void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:   .word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      /* Consume the flag so only the first label after .thumb_func
	 is marked.  */
      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
18946
18947 bfd_boolean
18948 arm_data_in_code (void)
18949 {
18950 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
18951 {
18952 *input_line_pointer = '/';
18953 input_line_pointer += 5;
18954 *input_line_pointer = 0;
18955 return TRUE;
18956 }
18957
18958 return FALSE;
18959 }
18960
18961 char *
18962 arm_canonicalize_symbol_name (char * name)
18963 {
18964 int len;
18965
18966 if (thumb_mode && (len = strlen (name)) > 5
18967 && streq (name + len - 5, "/data"))
18968 *(name + len - 5) = 0;
18969
18970 return name;
18971 }
18972 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* Build one reg_entry: the stringified S is the register name, N its
   number and REG_TYPE_##t its class.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* Name is prefix P glued to number N (e.g. r0..r15).  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* As REGNUM but the stored number is doubled -- used for Q registers,
   which are counted in D-register units.  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Sixteen consecutive registers P0..P15.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* The upper half P16..P31 (VFP d/s registers).  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* LR/SP/SPSR triple (upper and lowercase) for one banked mode.
   NOTE(review): the 768 marker and the <<16 field appear to encode
   "banked register" plus its number -- confirm against the consumers
   of reg_entry before relying on the exact layout.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
19003
/* The default register-name table; entry order is not semantically
   significant to readers of this table (lookup is by name).  */
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers (numbered in D-register units via REGNUM2).  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
/* NOTE(review): only REGDEF/REGNUM/REGSET are undefined here; REGNUM2,
   REGSETH, REGSET2 and SPLRBANK stay defined -- confirm nothing later
   in the file depends on that before tidying.  */
#undef REGDEF
#undef REGNUM
#undef REGSET
19122
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every ordering of each subset of the four
   flag letters f/s/x/c is listed explicitly, so lookup does not need
   to canonicalize the user's spelling.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
19201
/* Table of V7M psr names, mapping each name (both cases) to its SYSm
   encoding value.  Entries with the 0x80/0x90 bits set are the
   Non-secure (_NS) views added for ARMv8-M Security Extensions.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	   0x0 }, {"APSR",	   0x0 },
  {"iapsr",	   0x1 }, {"IAPSR",	   0x1 },
  {"eapsr",	   0x2 }, {"EAPSR",	   0x2 },
  {"psr",	   0x3 }, {"PSR",	   0x3 },
  {"xpsr",	   0x3 }, {"XPSR",	   0x3 }, {"xPSR",	  3 },
  {"ipsr",	   0x5 }, {"IPSR",	   0x5 },
  {"epsr",	   0x6 }, {"EPSR",	   0x6 },
  {"iepsr",	   0x7 }, {"IEPSR",	   0x7 },
  {"msp",	   0x8 }, {"MSP",	   0x8 },
  {"psp",	   0x9 }, {"PSP",	   0x9 },
  {"msplim",	   0xa }, {"MSPLIM",	   0xa },
  {"psplim",	   0xb }, {"PSPLIM",	   0xb },
  {"primask",	   0x10}, {"PRIMASK",	   0x10},
  {"basepri",	   0x11}, {"BASEPRI",	   0x11},
  {"basepri_max",  0x12}, {"BASEPRI_MAX",  0x12},
  {"faultmask",	   0x13}, {"FAULTMASK",	   0x13},
  {"control",	   0x14}, {"CONTROL",	   0x14},
  {"msp_ns",	   0x88}, {"MSP_NS",	   0x88},
  {"psp_ns",	   0x89}, {"PSP_NS",	   0x89},
  {"msplim_ns",	   0x8a}, {"MSPLIM_NS",	   0x8a},
  {"psplim_ns",	   0x8b}, {"PSPLIM_NS",	   0x8b},
  {"primask_ns",   0x90}, {"PRIMASK_NS",   0x90},
  {"basepri_ns",   0x91}, {"BASEPRI_NS",   0x91},
  {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
  {"control_ns",   0x94}, {"CONTROL_NS",   0x94},
  {"sp_ns",	   0x98}, {"SP_NS",	   0x98 }
};
19232
/* Table of all shift-in-operand names.  "asl" is accepted as a synonym
   for "lsl".  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
19243
/* Table of all explicit relocation names (the ":name:" operand
   prefixes), mapping each to its BFD relocation type.  ELF only.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
#endif
19268
/* Table of all conditional affixes.  0xF is not defined as a condition code.
   hs/cs and cc/ul/lo are synonyms for the same condition values.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
19288
/* Emit a lower- and an uppercase entry for one DSB/DMB barrier option,
   gated on the feature bit FEAT that introduced it.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Barrier option names and their 4-bit encodings; load-only ("ld")
   variants require ARMv8.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
19314
19315 /* Table of ARM-format instructions. */
19316
19317 /* Macros for gluing together operand strings. N.B. In all cases
19318 other than OPS0, the trailing OP_stop comes from default
19319 zero-initialization of the unspecified elements of the array. */
19320 #define OPS0() { OP_stop, }
19321 #define OPS1(a) { OP_##a, }
19322 #define OPS2(a,b) { OP_##a,OP_##b, }
19323 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
19324 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
19325 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
19326 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
19327
19328 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
19329 This is useful when mixing operands for ARM and THUMB, i.e. using the
19330 MIX_ARM_THUMB_OPERANDS macro.
19331 In order to use these macros, prefix the number of operands with _
19332 e.g. _3. */
19333 #define OPS_1(a) { a, }
19334 #define OPS_2(a,b) { a,b, }
19335 #define OPS_3(a,b,c) { a,b,c, }
19336 #define OPS_4(a,b,c,d) { a,b,c,d, }
19337 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
19338 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
19339
19340 /* These macros abstract out the exact format of the mnemonic table and
19341 save some repeated characters. */
19342
19343 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
19344 #define TxCE(mnem, op, top, nops, ops, ae, te) \
19345 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
19346 THUMB_VARIANT, do_##ae, do_##te }
19347
19348 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
19349 a T_MNEM_xyz enumerator. */
19350 #define TCE(mnem, aop, top, nops, ops, ae, te) \
19351 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
19352 #define tCE(mnem, aop, top, nops, ops, ae, te) \
19353 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19354
19355 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
19356 infix after the third character. */
19357 #define TxC3(mnem, op, top, nops, ops, ae, te) \
19358 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
19359 THUMB_VARIANT, do_##ae, do_##te }
19360 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
19361 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
19362 THUMB_VARIANT, do_##ae, do_##te }
19363 #define TC3(mnem, aop, top, nops, ops, ae, te) \
19364 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
19365 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
19366 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
19367 #define tC3(mnem, aop, top, nops, ops, ae, te) \
19368 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19369 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
19370 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19371
19372 /* Mnemonic that cannot be conditionalized. The ARM condition-code
19373 field is still 0xE. Many of the Thumb variants can be executed
19374 conditionally, so this is checked separately. */
19375 #define TUE(mnem, op, top, nops, ops, ae, te) \
19376 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
19377 THUMB_VARIANT, do_##ae, do_##te }
19378
19379 /* Same as TUE but the encoding function for ARM and Thumb modes is the same.
19380 Used by mnemonics that have very minimal differences in the encoding for
19381 ARM and Thumb variants and can be handled in a common function. */
19382 #define TUEc(mnem, op, top, nops, ops, en) \
19383 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
19384 THUMB_VARIANT, do_##en, do_##en }
19385
19386 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
19387 condition code field. */
19388 #define TUF(mnem, op, top, nops, ops, ae, te) \
19389 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
19390 THUMB_VARIANT, do_##ae, do_##te }
19391
19392 /* ARM-only variants of all the above. */
19393 #define CE(mnem, op, nops, ops, ae) \
19394 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19395
19396 #define C3(mnem, op, nops, ops, ae) \
19397 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19398
19399 /* Legacy mnemonics that always have conditional infix after the third
19400 character. */
19401 #define CL(mnem, op, nops, ops, ae) \
19402 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
19403 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19404
19405 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
19406 #define cCE(mnem, op, nops, ops, ae) \
19407 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19408
19409 /* Legacy coprocessor instructions where conditional infix and conditional
19410 suffix are ambiguous. For consistency this includes all FPA instructions,
19411 not just the potentially ambiguous ones. */
19412 #define cCL(mnem, op, nops, ops, ae) \
19413 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
19414 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19415
19416 /* Coprocessor, takes either a suffix or a position-3 infix
19417 (for an FPA corner case). */
19418 #define C3E(mnem, op, nops, ops, ae) \
19419 { mnem, OPS##nops ops, OT_csuf_or_in3, \
19420 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19421
19422 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
19423 { m1 #m2 m3, OPS##nops ops, \
19424 sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
19425 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19426
19427 #define CM(m1, m2, op, nops, ops, ae) \
19428 xCM_ (m1, , m2, op, nops, ops, ae), \
19429 xCM_ (m1, eq, m2, op, nops, ops, ae), \
19430 xCM_ (m1, ne, m2, op, nops, ops, ae), \
19431 xCM_ (m1, cs, m2, op, nops, ops, ae), \
19432 xCM_ (m1, hs, m2, op, nops, ops, ae), \
19433 xCM_ (m1, cc, m2, op, nops, ops, ae), \
19434 xCM_ (m1, ul, m2, op, nops, ops, ae), \
19435 xCM_ (m1, lo, m2, op, nops, ops, ae), \
19436 xCM_ (m1, mi, m2, op, nops, ops, ae), \
19437 xCM_ (m1, pl, m2, op, nops, ops, ae), \
19438 xCM_ (m1, vs, m2, op, nops, ops, ae), \
19439 xCM_ (m1, vc, m2, op, nops, ops, ae), \
19440 xCM_ (m1, hi, m2, op, nops, ops, ae), \
19441 xCM_ (m1, ls, m2, op, nops, ops, ae), \
19442 xCM_ (m1, ge, m2, op, nops, ops, ae), \
19443 xCM_ (m1, lt, m2, op, nops, ops, ae), \
19444 xCM_ (m1, gt, m2, op, nops, ops, ae), \
19445 xCM_ (m1, le, m2, op, nops, ops, ae), \
19446 xCM_ (m1, al, m2, op, nops, ops, ae)
19447
19448 #define UE(mnem, op, nops, ops, ae) \
19449 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
19450
19451 #define UF(mnem, op, nops, ops, ae) \
19452 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
19453
19454 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
19455 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
19456 use the same encoding function for each. */
19457 #define NUF(mnem, op, nops, ops, enc) \
19458 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
19459 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19460
19461 /* Neon data processing, version which indirects through neon_enc_tab for
19462    the various overloaded versions of opcodes. */
/* Instead of a literal 0x opcode, both opcode fields hold the
   N_MNEM##op enumerator; the encoder resolves the real encoding from
   it at assembly time.  */
19463 #define nUF(mnem, op, nops, ops, enc) \
19464 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
19465 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19466
19467 /* Neon insn with conditional suffix for the ARM version, non-overloaded
19468    version. */
/* TAG selects how the condition attaches to the mnemonic; NCE/NCEF
   below instantiate it with OT_csuffix / OT_csuffixF.  ARM and Thumb
   share the opcode value and the encoder.  */
19469 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
19470 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
19471 THUMB_VARIANT, do_##enc, do_##enc }
19472
/* Non-overloaded Neon insn taking a plain conditional suffix
   (tag OT_csuffix).  */
19473 #define NCE(mnem, op, nops, ops, enc) \
19474 NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
19475
/* As NCE, but tagged OT_csuffixF -- the F-flavoured conditional-suffix
   variant (cf. OT_unconditionalF above).  */
19476 #define NCEF(mnem, op, nops, ops, enc) \
19477 NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19478
19479 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
/* Overloaded counterpart of NCE_tag: both opcode fields hold the
   N_MNEM##op enumerator, resolved through neon_enc_tab by the
   encoder.  */
19480 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
19481 { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
19482 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19483
/* Overloaded Neon insn taking a plain conditional suffix
   (tag OT_csuffix).  */
19484 #define nCE(mnem, op, nops, ops, enc) \
19485 nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
19486
/* As nCE, but tagged OT_csuffixF -- the F-flavoured conditional-suffix
   variant.  */
19487 #define nCEF(mnem, op, nops, ops, enc) \
19488 nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19489
19490 #define do_0 0
19491
19492 static const struct asm_opcode insns[] =
19493 {
19494 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19495 #define THUMB_VARIANT & arm_ext_v4t
19496 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
19497 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
19498 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
19499 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
19500 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
19501 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
19502 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
19503 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
19504 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
19505 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
19506 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
19507 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
19508 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
19509 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
19510 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
19511 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
19512
19513 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19514 for setting PSR flag bits. They are obsolete in V6 and do not
19515 have Thumb equivalents. */
19516 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19517 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19518 CL("tstp", 110f000, 2, (RR, SH), cmp),
19519 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19520 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19521 CL("cmpp", 150f000, 2, (RR, SH), cmp),
19522 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19523 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19524 CL("cmnp", 170f000, 2, (RR, SH), cmp),
19525
19526 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
19527 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
19528 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
19529 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
19530
19531 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
19532 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19533 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
19534 OP_RRnpc),
19535 OP_ADDRGLDR),ldst, t_ldst),
19536 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19537
19538 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19539 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19540 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19541 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19542 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19543 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19544
19545 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
19546 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
19547
19548 /* Pseudo ops. */
19549 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
19550 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
19551 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
19552 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
19553
19554 /* Thumb-compatibility pseudo ops. */
19555 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
19556 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
19557 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
19558 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
19559 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
19560 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
19561 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
19562 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
19563 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
19564 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
19565 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
19566 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
19567
19568 /* These may simplify to neg. */
19569 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
19570 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
19571
19572 #undef THUMB_VARIANT
19573 #define THUMB_VARIANT & arm_ext_os
19574
19575 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
19576 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
19577
19578 #undef THUMB_VARIANT
19579 #define THUMB_VARIANT & arm_ext_v6
19580
19581 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
19582
19583 /* V1 instructions with no Thumb analogue prior to V6T2. */
19584 #undef THUMB_VARIANT
19585 #define THUMB_VARIANT & arm_ext_v6t2
19586
19587 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19588 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19589 CL("teqp", 130f000, 2, (RR, SH), cmp),
19590
19591 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19592 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19593 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
19594 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19595
19596 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19597 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19598
19599 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19600 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19601
19602 /* V1 instructions with no Thumb analogue at all. */
19603 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
19604 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
19605
19606 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
19607 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
19608 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
19609 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
19610 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
19611 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
19612 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
19613 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
19614
19615 #undef ARM_VARIANT
19616 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19617 #undef THUMB_VARIANT
19618 #define THUMB_VARIANT & arm_ext_v4t
19619
19620 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19621 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19622
19623 #undef THUMB_VARIANT
19624 #define THUMB_VARIANT & arm_ext_v6t2
19625
19626 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19627 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
19628
19629 /* Generic coprocessor instructions. */
19630 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19631 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19632 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19633 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19634 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19635 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19636 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
19637
19638 #undef ARM_VARIANT
19639 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19640
19641 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19642 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19643
19644 #undef ARM_VARIANT
19645 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19646 #undef THUMB_VARIANT
19647 #define THUMB_VARIANT & arm_ext_msr
19648
19649 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
19650 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
19651
19652 #undef ARM_VARIANT
19653 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19654 #undef THUMB_VARIANT
19655 #define THUMB_VARIANT & arm_ext_v6t2
19656
19657 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19658 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19659 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19660 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19661 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19662 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19663 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19664 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19665
19666 #undef ARM_VARIANT
19667 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19668 #undef THUMB_VARIANT
19669 #define THUMB_VARIANT & arm_ext_v4t
19670
19671 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19672 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19673 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19674 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19675 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19676 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19677
19678 #undef ARM_VARIANT
19679 #define ARM_VARIANT & arm_ext_v4t_5
19680
19681 /* ARM Architecture 4T. */
19682 /* Note: bx (and blx) are required on V5, even if the processor does
19683 not support Thumb. */
19684 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
19685
19686 #undef ARM_VARIANT
19687 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19688 #undef THUMB_VARIANT
19689 #define THUMB_VARIANT & arm_ext_v5t
19690
19691 /* Note: blx has 2 variants; the .value coded here is for
19692 BLX(2). Only this variant has conditional execution. */
19693 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
19694 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
19695
19696 #undef THUMB_VARIANT
19697 #define THUMB_VARIANT & arm_ext_v6t2
19698
19699 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
19700 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19701 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19702 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19703 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19704 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19705 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19706 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19707
19708 #undef ARM_VARIANT
19709 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19710 #undef THUMB_VARIANT
19711 #define THUMB_VARIANT & arm_ext_v5exp
19712
19713 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19714 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19715 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19716 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19717
19718 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19719 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19720
19721 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19722 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19723 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19724 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19725
19726 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19727 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19728 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19729 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19730
19731 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19732 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19733
19734 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19735 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19736 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19737 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19738
19739 #undef ARM_VARIANT
19740 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19741 #undef THUMB_VARIANT
19742 #define THUMB_VARIANT & arm_ext_v6t2
19743
19744 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
19745 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
19746 ldrd, t_ldstd),
19747 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
19748 ADDRGLDRS), ldrd, t_ldstd),
19749
19750 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19751 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19752
19753 #undef ARM_VARIANT
19754 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19755
19756 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
19757
19758 #undef ARM_VARIANT
19759 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19760 #undef THUMB_VARIANT
19761 #define THUMB_VARIANT & arm_ext_v6
19762
19763 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
19764 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
19765 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19766 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19767 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19768 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19769 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19770 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19771 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19772 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
19773
19774 #undef THUMB_VARIANT
19775 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19776
19777 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
19778 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19779 strex, t_strex),
19780 #undef THUMB_VARIANT
19781 #define THUMB_VARIANT & arm_ext_v6t2
19782
19783 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19784 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19785
19786 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
19787 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
19788
19789 /* ARM V6 not included in V7M. */
19790 #undef THUMB_VARIANT
19791 #define THUMB_VARIANT & arm_ext_v6_notm
19792 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19793 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19794 UF(rfeib, 9900a00, 1, (RRw), rfe),
19795 UF(rfeda, 8100a00, 1, (RRw), rfe),
19796 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19797 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19798 UF(rfefa, 8100a00, 1, (RRw), rfe),
19799 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19800 UF(rfeed, 9900a00, 1, (RRw), rfe),
19801 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19802 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19803 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19804 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
19805 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
19806 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
19807 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
19808 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19809 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19810 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
19811
19812 /* ARM V6 not included in V7M (eg. integer SIMD). */
19813 #undef THUMB_VARIANT
19814 #define THUMB_VARIANT & arm_ext_v6_dsp
19815 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
19816 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
19817 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19818 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19819 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19820 /* Old name for QASX. */
19821 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19822 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19823 /* Old name for QSAX. */
19824 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19825 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19826 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19827 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19828 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19829 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19830 /* Old name for SASX. */
19831 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19832 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19833 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19834 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19835 /* Old name for SHASX. */
19836 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19837 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19838 /* Old name for SHSAX. */
19839 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19840 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19841 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19842 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19843 /* Old name for SSAX. */
19844 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19845 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19846 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19847 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19848 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19849 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19850 /* Old name for UASX. */
19851 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19852 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19853 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19854 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19855 /* Old name for UHASX. */
19856 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19857 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19858 /* Old name for UHSAX. */
19859 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19860 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19861 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19862 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19863 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19864 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19865 /* Old name for UQASX. */
19866 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19867 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19868 /* Old name for UQSAX. */
19869 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19870 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19871 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19872 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19873 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19874 /* Old name for USAX. */
19875 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19876 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19877 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19878 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19879 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19880 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19881 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19882 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19883 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19884 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19885 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19886 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19887 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19888 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19889 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19890 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19891 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19892 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19893 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19894 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19895 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19896 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19897 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19898 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19899 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19900 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19901 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19902 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19903 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19904 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
19905 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
19906 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19907 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19908 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
19909
19910 #undef ARM_VARIANT
19911 #define ARM_VARIANT & arm_ext_v6k
19912 #undef THUMB_VARIANT
19913 #define THUMB_VARIANT & arm_ext_v6k
19914
19915 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
19916 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
19917 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
19918 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
19919
19920 #undef THUMB_VARIANT
19921 #define THUMB_VARIANT & arm_ext_v6_notm
19922 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
19923 ldrexd, t_ldrexd),
19924 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
19925 RRnpcb), strexd, t_strexd),
19926
19927 #undef THUMB_VARIANT
19928 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19929 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
19930 rd_rn, rd_rn),
19931 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
19932 rd_rn, rd_rn),
19933 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19934 strex, t_strexbh),
19935 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19936 strex, t_strexbh),
19937 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
19938
19939 #undef ARM_VARIANT
19940 #define ARM_VARIANT & arm_ext_sec
19941 #undef THUMB_VARIANT
19942 #define THUMB_VARIANT & arm_ext_sec
19943
19944 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
19945
19946 #undef ARM_VARIANT
19947 #define ARM_VARIANT & arm_ext_virt
19948 #undef THUMB_VARIANT
19949 #define THUMB_VARIANT & arm_ext_virt
19950
19951 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
19952 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
19953
19954 #undef ARM_VARIANT
19955 #define ARM_VARIANT & arm_ext_pan
19956 #undef THUMB_VARIANT
19957 #define THUMB_VARIANT & arm_ext_pan
19958
19959 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
19960
19961 #undef ARM_VARIANT
19962 #define ARM_VARIANT & arm_ext_v6t2
19963 #undef THUMB_VARIANT
19964 #define THUMB_VARIANT & arm_ext_v6t2
19965
19966 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
19967 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
19968 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19969 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19970
19971 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19972 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
19973
19974 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19975 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19976 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19977 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19978
19979 #undef THUMB_VARIANT
19980 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19981 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
19982 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
19983
19984 /* Thumb-only instructions. */
19985 #undef ARM_VARIANT
19986 #define ARM_VARIANT NULL
19987 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
19988 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
19989
19990 /* ARM does not really have an IT instruction, so always allow it.
19991 The opcode is copied from Thumb in order to allow warnings in
19992 -mimplicit-it=[never | arm] modes. */
19993 #undef ARM_VARIANT
19994 #define ARM_VARIANT & arm_ext_v1
19995 #undef THUMB_VARIANT
19996 #define THUMB_VARIANT & arm_ext_v6t2
19997
19998 TUE("it", bf08, bf08, 1, (COND), it, t_it),
19999 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
20000 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
20001 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
20002 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
20003 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
20004 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
20005 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
20006 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
20007 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
20008 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
20009 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
20010 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
20011 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
20012 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
20013 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
20014 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
20015 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
20016
20017 /* Thumb2 only instructions. */
20018 #undef ARM_VARIANT
20019 #define ARM_VARIANT NULL
20020
20021 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
20022 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
20023 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
20024 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
20025 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
20026 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
20027
20028 /* Hardware division instructions. */
20029 #undef ARM_VARIANT
20030 #define ARM_VARIANT & arm_ext_adiv
20031 #undef THUMB_VARIANT
20032 #define THUMB_VARIANT & arm_ext_div
20033
20034 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
20035 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
20036
20037 /* ARM V6M/V7 instructions. */
20038 #undef ARM_VARIANT
20039 #define ARM_VARIANT & arm_ext_barrier
20040 #undef THUMB_VARIANT
20041 #define THUMB_VARIANT & arm_ext_barrier
20042
20043 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
20044 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
20045 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
20046
20047 /* ARM V7 instructions. */
20048 #undef ARM_VARIANT
20049 #define ARM_VARIANT & arm_ext_v7
20050 #undef THUMB_VARIANT
20051 #define THUMB_VARIANT & arm_ext_v7
20052
20053 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
20054 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
20055
20056 #undef ARM_VARIANT
20057 #define ARM_VARIANT & arm_ext_mp
20058 #undef THUMB_VARIANT
20059 #define THUMB_VARIANT & arm_ext_mp
20060
20061 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
20062
20063 /* AArchv8 instructions. */
20064 #undef ARM_VARIANT
20065 #define ARM_VARIANT & arm_ext_v8
20066
20067 /* Instructions shared between armv8-a and armv8-m. */
20068 #undef THUMB_VARIANT
20069 #define THUMB_VARIANT & arm_ext_atomics
20070
20071 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20072 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20073 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20074 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20075 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20076 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20077 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20078 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
20079 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20080 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
20081 stlex, t_stlex),
20082 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
20083 stlex, t_stlex),
20084 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
20085 stlex, t_stlex),
20086 #undef THUMB_VARIANT
20087 #define THUMB_VARIANT & arm_ext_v8
20088
20089 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
20090 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
20091 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
20092 ldrexd, t_ldrexd),
20093 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
20094 strexd, t_strexd),
20095 /* ARMv8 T32 only. */
20096 #undef ARM_VARIANT
20097 #define ARM_VARIANT NULL
20098 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
20099 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
20100 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
20101
20102 /* FP for ARMv8. */
20103 #undef ARM_VARIANT
20104 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
20105 #undef THUMB_VARIANT
20106 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
20107
20108 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
20109 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
20110 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
20111 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
20112 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
20113 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
20114 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
20115 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
20116 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
20117 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
20118 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
20119 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
20120 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
20121 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
20122 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
20123 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
20124 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
20125
20126 /* Crypto v1 extensions. */
20127 #undef ARM_VARIANT
20128 #define ARM_VARIANT & fpu_crypto_ext_armv8
20129 #undef THUMB_VARIANT
20130 #define THUMB_VARIANT & fpu_crypto_ext_armv8
20131
20132 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
20133 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
20134 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
20135 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
20136 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
20137 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
20138 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
20139 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
20140 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
20141 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
20142 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
20143 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
20144 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
20145 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
20146
20147 #undef ARM_VARIANT
20148 #define ARM_VARIANT & crc_ext_armv8
20149 #undef THUMB_VARIANT
20150 #define THUMB_VARIANT & crc_ext_armv8
20151 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
20152 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
20153 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
20154 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
20155 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
20156 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
20157
20158 /* ARMv8.2 RAS extension. */
20159 #undef ARM_VARIANT
20160 #define ARM_VARIANT & arm_ext_ras
20161 #undef THUMB_VARIANT
20162 #define THUMB_VARIANT & arm_ext_ras
20163 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
20164
20165 #undef ARM_VARIANT
20166 #define ARM_VARIANT & arm_ext_v8_3
20167 #undef THUMB_VARIANT
20168 #define THUMB_VARIANT & arm_ext_v8_3
20169 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
20170 NUF (vcmla, 0, 4, (RNDQ, RNDQ, RNDQ_RNSC, EXPi), vcmla),
20171 NUF (vcadd, 0, 4, (RNDQ, RNDQ, RNDQ, EXPi), vcadd),
20172
20173 #undef ARM_VARIANT
20174 #define ARM_VARIANT & fpu_neon_ext_dotprod
20175 #undef THUMB_VARIANT
20176 #define THUMB_VARIANT & fpu_neon_ext_dotprod
20177 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
20178 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
20179
20180 #undef ARM_VARIANT
20181 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
20182 #undef THUMB_VARIANT
20183 #define THUMB_VARIANT NULL
20184
20185 cCE("wfs", e200110, 1, (RR), rd),
20186 cCE("rfs", e300110, 1, (RR), rd),
20187 cCE("wfc", e400110, 1, (RR), rd),
20188 cCE("rfc", e500110, 1, (RR), rd),
20189
20190 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
20191 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
20192 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
20193 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
20194
20195 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
20196 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
20197 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
20198 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
20199
20200 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
20201 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
20202 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
20203 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
20204 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
20205 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
20206 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
20207 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
20208 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
20209 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
20210 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
20211 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
20212
20213 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
20214 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
20215 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
20216 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
20217 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
20218 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
20219 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
20220 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
20221 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
20222 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
20223 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
20224 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
20225
20226 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
20227 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
20228 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
20229 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
20230 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
20231 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
20232 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
20233 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
20234 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
20235 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
20236 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
20237 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
20238
20239 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
20240 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
20241 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
20242 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
20243 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
20244 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
20245 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
20246 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
20247 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
20248 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
20249 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
20250 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
20251
20252 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
20253 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
20254 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
20255 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
20256 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
20257 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
20258 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
20259 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
20260 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
20261 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
20262 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
20263 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
20264
20265 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
20266 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
20267 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
20268 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
20269 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
20270 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
20271 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
20272 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
20273 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
20274 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
20275 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
20276 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
20277
20278 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
20279 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
20280 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
20281 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
20282 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
20283 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
20284 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
20285 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
20286 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
20287 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
20288 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
20289 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
20290
20291  cCL("exps",	e708100, 2, (RF, RF_IF),	      rd_rm),
20292  cCL("expsp",	e708120, 2, (RF, RF_IF),	      rd_rm),
20293  cCL("expsm",	e708140, 2, (RF, RF_IF),	      rd_rm),
20294  cCL("expsz",	e708160, 2, (RF, RF_IF),	      rd_rm),
20295  cCL("expd",	e708180, 2, (RF, RF_IF),	      rd_rm),
20296  cCL("expdp",	e7081a0, 2, (RF, RF_IF),	      rd_rm),
20297  cCL("expdm",	e7081c0, 2, (RF, RF_IF),	      rd_rm),
20298  cCL("expdz",	e7081e0, 2, (RF, RF_IF),	      rd_rm),
20299  cCL("expe",	e788100, 2, (RF, RF_IF),	      rd_rm),
20300  cCL("expep",	e788120, 2, (RF, RF_IF),	      rd_rm),
20301  cCL("expem",	e788140, 2, (RF, RF_IF),	      rd_rm),
20302  /* Was "expdz" — a duplicate of the entry above for opcode e7081e0.
	 Opcode e788160 is the extended-precision round-towards-zero
	 variant, so the mnemonic must be "expez", matching the
	 s/sp/sm/sz, d/dp/dm/dz, e/ep/em/ez suffix pattern used by every
	 other FPA monadic group in this table (logs..logez, sins..sinez,
	 etc.).  */
       cCL("expez",	e788160, 2, (RF, RF_IF),	      rd_rm),
20303
20304 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
20305 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
20306 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
20307 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
20308 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
20309 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
20310 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
20311 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
20312 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
20313 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
20314 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
20315 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
20316
20317 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
20318 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
20319 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
20320 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
20321 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
20322 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
20323 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
20324 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
20325 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
20326 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
20327 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
20328 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
20329
20330 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
20331 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
20332 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
20333 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
20334 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
20335 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
20336 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
20337 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
20338 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
20339 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
20340 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
20341 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
20342
20343 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
20344 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
20345 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
20346 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
20347 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
20348 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
20349 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
20350 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
20351 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
20352 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
20353 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
20354 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
20355
20356 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
20357 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
20358 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
20359 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
20360 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
20361 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
20362 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
20363 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
20364 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
20365 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
20366 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
20367 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
20368
20369 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
20370 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
20371 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
20372 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
20373 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
20374 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
20375 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
20376 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
20377 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
20378 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
20379 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
20380 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
20381
20382 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
20383 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
20384 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
20385 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
20386 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
20387 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
20388 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
20389 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
20390 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
20391 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
20392 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
20393 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
20394
20395 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
20396 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
20397 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
20398 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
20399 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
20400 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
20401 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
20402 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
20403 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
20404 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
20405 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
20406 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
20407
20408 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
20409 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
20410 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
20411 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
20412 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
20413 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20414 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20415 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20416 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
20417 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
20418 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
20419 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
20420
20421 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
20422 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
20423 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
20424 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
20425 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
20426 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20427 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20428 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20429 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
20430 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
20431 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
20432 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
20433
20434 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
20435 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
20436 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
20437 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
20438 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
20439 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20440 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20441 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20442 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
20443 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
20444 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
20445 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
20446
20447 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
20448 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
20449 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
20450 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
20451 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
20452 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20453 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20454 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20455 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
20456 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
20457 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
20458 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
20459
20460 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
20461 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
20462 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
20463 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
20464 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
20465 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20466 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20467 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20468 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
20469 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
20470 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
20471 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
20472
20473 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
20474 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
20475 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
20476 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
20477 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
20478 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20479 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20480 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20481 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
20482 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
20483 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
20484 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
20485
20486 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
20487 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
20488 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
20489 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
20490 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
20491 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20492 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20493 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20494 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
20495 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
20496 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
20497 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
20498
20499 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
20500 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
20501 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
20502 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
20503 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
20504 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20505 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20506 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20507 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
20508 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
20509 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
20510 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
20511
20512 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
20513 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
20514 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
20515 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
20516 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
20517 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20518 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20519 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20520 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
20521 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
20522 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
20523 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
20524
20525 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
20526 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
20527 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
20528 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
20529 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
20530 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20531 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20532 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20533 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
20534 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
20535 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
20536 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
20537
20538 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20539 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20540 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20541 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20542 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20543 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20544 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20545 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20546 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20547 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20548 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20549 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20550
20551 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20552 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20553 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20554 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20555 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20556 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20557 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20558 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20559 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20560 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20561 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20562 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20563
20564 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20565 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20566 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20567 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20568 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20569 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20570 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20571 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20572 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20573 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20574 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20575 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20576
20577 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
20578 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
20579 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
20580 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
20581
20582 cCL("flts", e000110, 2, (RF, RR), rn_rd),
20583 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
20584 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
20585 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
20586 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
20587 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
20588 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
20589 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
20590 cCL("flte", e080110, 2, (RF, RR), rn_rd),
20591 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
20592 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
20593 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
20594
20595 /* The implementation of the FIX instruction is broken on some
20596 assemblers, in that it accepts a precision specifier as well as a
20597 rounding specifier, despite the fact that this is meaningless.
20598 To be more compatible, we accept it as well, though of course it
20599 does not set any bits. */
20600 cCE("fix", e100110, 2, (RR, RF), rd_rm),
20601 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
20602 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
20603 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
20604 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
20605 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
20606 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
20607 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
20608 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
20609 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
20610 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
20611 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
20612 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
20613
20614 /* Instructions that were new with the real FPA, call them V2. */
20615 #undef ARM_VARIANT
20616 #define ARM_VARIANT & fpu_fpa_ext_v2
20617
20618 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20619 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20620 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20621 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20622 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20623 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20624
20625 #undef ARM_VARIANT
20626 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20627
20628 /* Moves and type conversions. */
20629 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
20630 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
20631 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
20632 cCE("fmstat", ef1fa10, 0, (), noargs),
20633 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
20634 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
20635 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
20636 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
20637 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
20638 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20639 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
20640 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20641 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
20642 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
20643
20644 /* Memory operations. */
20645 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20646 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20647 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20648 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20649 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20650 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20651 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20652 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20653 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20654 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20655 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20656 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20657 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20658 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20659 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20660 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20661 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20662 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20663
20664 /* Monadic operations. */
20665 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
20666 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
20667 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
20668
20669 /* Dyadic operations. */
20670 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20671 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20672 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20673 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20674 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20675 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20676 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20677 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20678 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20679
20680 /* Comparisons. */
20681 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
20682 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
20683 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
20684 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
20685
20686 /* Double precision load/store are still present on single precision
20687 implementations. */
20688 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20689 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20690 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20691 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20692 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20693 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20694 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20695 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20696 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20697 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20698
20699 #undef ARM_VARIANT
20700 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20701
20702 /* Moves and type conversions. */
20703 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20704 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20705 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20706 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
20707 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
20708 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
20709 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
20710 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20711 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
20712 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20713 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20714 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20715 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20716
20717 /* Monadic operations. */
20718 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20719 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20720 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20721
20722 /* Dyadic operations. */
20723 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20724 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20725 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20726 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20727 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20728 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20729 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20730 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20731 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20732
20733 /* Comparisons. */
20734 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20735 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
20736 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20737 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
20738
20739 #undef ARM_VARIANT
20740 #define ARM_VARIANT & fpu_vfp_ext_v2
20741
20742 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
20743 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
20744 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
20745 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
20746
20747 /* Instructions which may belong to either the Neon or VFP instruction sets.
20748 Individual encoder functions perform additional architecture checks. */
20749 #undef ARM_VARIANT
20750 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20751 #undef THUMB_VARIANT
20752 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20753
20754 /* These mnemonics are unique to VFP. */
20755 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
20756 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
20757 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20758 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20759 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20760 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20761 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20762 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
20763 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
20764 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
20765
20766 /* Mnemonics shared by Neon and VFP. */
20767 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
20768 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20769 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20770
20771 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20772 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20773
20774 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20775 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20776
20777 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20778 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20779 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20780 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20781 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20782 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20783 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20784 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20785
20786 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
20787 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
20788 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
20789 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
20790
20791
20792 /* NOTE: All VMOV encoding is special-cased! */
20793 NCE(vmov, 0, 1, (VMOV), neon_mov),
20794 NCE(vmovq, 0, 1, (VMOV), neon_mov),
20795
20796 #undef ARM_VARIANT
20797 #define ARM_VARIANT & arm_ext_fp16
20798 #undef THUMB_VARIANT
20799 #define THUMB_VARIANT & arm_ext_fp16
20800 /* New instructions added from v8.2, allowing the extraction and insertion of
20801 the upper 16 bits of a 32-bit vector register. */
20802 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
20803 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
20804
20805 /* New backported fma/fms instructions optional in v8.2. */
20806 NCE (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
20807 NCE (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
20808
20809 #undef THUMB_VARIANT
20810 #define THUMB_VARIANT & fpu_neon_ext_v1
20811 #undef ARM_VARIANT
20812 #define ARM_VARIANT & fpu_neon_ext_v1
20813
20814 /* Data processing with three registers of the same length. */
20815 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20816 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
20817 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
20818 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20819 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20820 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20821 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20822 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20823 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20824 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20825 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20826 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20827 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20828 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20829 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20830 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20831 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20832 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20833 /* If not immediate, fall back to neon_dyadic_i64_su.
20834 shl_imm should accept I8 I16 I32 I64,
20835 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20836 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
20837 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
20838 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
20839 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
20840 /* Logic ops, types optional & ignored. */
20841 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20842 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20843 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20844 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20845 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20846 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20847 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20848 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20849 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
20850 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
20851 /* Bitfield ops, untyped. */
20852 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20853 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20854 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20855 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20856 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20857 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20858 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
20859 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20860 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20861 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20862 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20863 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20864 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20865 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20866 back to neon_dyadic_if_su. */
20867 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20868 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20869 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20870 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20871 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20872 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20873 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20874 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20875 /* Comparison. Type I8 I16 I32 F32. */
20876 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20877 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
20878 /* As above, D registers only. */
20879 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20880 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20881 /* Int and float variants, signedness unimportant. */
20882 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20883 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20884 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
20885 /* Add/sub take types I8 I16 I32 I64 F32. */
20886 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20887 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20888 /* vtst takes sizes 8, 16, 32. */
20889 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20890 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
20891 /* VMUL takes I8 I16 I32 F32 P8. */
20892 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
20893 /* VQD{R}MULH takes S16 S32. */
20894 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20895 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20896 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20897 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20898 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20899 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20900 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20901 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20902 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20903 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20904 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20905 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20906 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20907 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20908 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20909 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20910 /* ARM v8.1 extension. */
20911 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20912 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
20913 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20914 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
20915
20916 /* Two address, int/float. Types S8 S16 S32 F32. */
20917 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
20918 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
20919
20920 /* Data processing with two registers and a shift amount. */
20921 /* Right shifts, and variants with rounding.
20922 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20923 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20924 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20925 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20926 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20927 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20928 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20929 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20930 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20931 /* Shift and insert. Sizes accepted 8 16 32 64. */
20932 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
20933 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
20934 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
20935 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
20936 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20937 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
20938 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
20939 /* Right shift immediate, saturating & narrowing, with rounding variants.
20940 Types accepted S16 S32 S64 U16 U32 U64. */
20941 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20942 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20943 /* As above, unsigned. Types accepted S16 S32 S64. */
20944 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20945 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20946 /* Right shift narrowing. Types accepted I16 I32 I64. */
20947 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20948 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20949 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20950 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
20951 /* CVT with optional immediate for fixed-point variant. */
20952 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
20953
20954 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
20955 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
20956
20957 /* Data processing, three registers of different lengths. */
20958 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20959 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
20960 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
20961 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
20962 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
20963 /* If not scalar, fall back to neon_dyadic_long.
20964 Vector types as above, scalar types S16 S32 U16 U32. */
20965 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20966 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20967 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20968 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20969 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20970 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20971 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20972 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20973 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20974 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20975 /* Saturating doubling multiplies. Types S16 S32. */
20976 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20977 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20978 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20979 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20980 S16 S32 U16 U32. */
20981 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
20982
20983 /* Extract. Size 8. */
20984 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
20985 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
20986
20987 /* Two registers, miscellaneous. */
20988 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20989 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
20990 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
20991 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
20992 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
20993 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
20994 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
20995 /* Vector replicate. Sizes 8 16 32. */
20996 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
20997 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
20998 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20999 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
21000 /* VMOVN. Types I16 I32 I64. */
21001 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
21002 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
21003 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
21004 /* VQMOVUN. Types S16 S32 S64. */
21005 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
21006 /* VZIP / VUZP. Sizes 8 16 32. */
21007 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
21008 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
21009 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
21010 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
21011 /* VQABS / VQNEG. Types S8 S16 S32. */
21012 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
21013 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
21014 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
21015 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
21016 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
21017 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
21018 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
21019 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
21020 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
21021 /* Reciprocal estimates. Types U32 F16 F32. */
21022 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
21023 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
21024 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
21025 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
21026 /* VCLS. Types S8 S16 S32. */
21027 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
21028 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
21029 /* VCLZ. Types I8 I16 I32. */
21030 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
21031 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
21032 /* VCNT. Size 8. */
21033 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
21034 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
21035 /* Two address, untyped. */
21036 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
21037 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
21038 /* VTRN. Sizes 8 16 32. */
21039 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
21040 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
21041
21042 /* Table lookup. Size 8. */
21043 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
21044 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
21045
21046 #undef THUMB_VARIANT
21047 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
21048 #undef ARM_VARIANT
21049 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
21050
21051 /* Neon element/structure load/store. */
21052 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
21053 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
21054 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
21055 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
21056 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
21057 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
21058 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
21059 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
21060
21061 #undef THUMB_VARIANT
21062 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
21063 #undef ARM_VARIANT
21064 #define ARM_VARIANT & fpu_vfp_ext_v3xd
21065 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
21066 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21067 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21068 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21069 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21070 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21071 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21072 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21073 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21074
21075 #undef THUMB_VARIANT
21076 #define THUMB_VARIANT & fpu_vfp_ext_v3
21077 #undef ARM_VARIANT
21078 #define ARM_VARIANT & fpu_vfp_ext_v3
21079
21080 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
21081 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21082 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21083 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21084 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21085 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21086 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21087 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21088 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21089
21090 #undef ARM_VARIANT
21091 #define ARM_VARIANT & fpu_vfp_ext_fma
21092 #undef THUMB_VARIANT
21093 #define THUMB_VARIANT & fpu_vfp_ext_fma
21094 /* Mnemonics shared by Neon and VFP. These are included in the
21095 VFP FMA variant; NEON and VFP FMA always includes the NEON
21096 FMA instructions. */
21097 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
21098 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
21099 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
21100 the v form should always be used. */
21101 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21102 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21103 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21104 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21105 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21106 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21107
21108 #undef THUMB_VARIANT
21109 #undef ARM_VARIANT
21110 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
21111
21112 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21113 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21114 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21115 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21116 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21117 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21118 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
21119 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
21120
21121 #undef ARM_VARIANT
21122 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
21123
21124 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
21125 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
21126 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
21127 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
21128 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
21129 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
21130 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
21131 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
21132 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
21133 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21134 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21135 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21136 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21137 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21138 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21139 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21140 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21141 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21142 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
21143 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
21144 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21145 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21146 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21147 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21148 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21149 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21150 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
21151 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
21152 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
21153 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
21154 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
21155 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
21156 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
21157 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
21158 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
21159 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
21160 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
21161 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21162 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21163 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21164 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21165 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21166 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21167 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21168 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21169 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21170 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
21171 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21172 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21173 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21174 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21175 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21176 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21177 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21178 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21179 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21180 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21181 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21182 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21183 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21184 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21185 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21186 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21187 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21188 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21189 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21190 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21191 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21192 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21193 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21194 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21195 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21196 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21197 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21198 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21199 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21200 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21201 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21202 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21203 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21204 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21205 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21206 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21207 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21208 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21209 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21210 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21211 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21212 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
21213 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21214 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21215 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21216 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21217 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21218 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21219 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21220 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21221 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21222 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21223 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21224 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21225 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21226 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21227 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21228 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21229 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21230 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21231 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21232 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21233 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21234 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
21235 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21236 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21237 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21238 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21239 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21240 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21241 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21242 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21243 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21244 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21245 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21246 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21247 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21248 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21249 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21250 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21251 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21252 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21253 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21254 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21255 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21256 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21257 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21258 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21259 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21260 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21261 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21262 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21263 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21264 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21265 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21266 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
21267 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
21268 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
21269 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
21270 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
21271 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
21272 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21273 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21274 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21275 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
21276 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
21277 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
21278 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
21279 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
21280 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
21281 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21282 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21283 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21284 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21285 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
21286
21287 #undef ARM_VARIANT
21288 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
21289
21290 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
21291 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
21292 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
21293 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
21294 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
21295 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
21296 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21297 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21298 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21299 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21300 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21301 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21302 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21303 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21304 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21305 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21306 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21307 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21308 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21309 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21310 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
21311 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21312 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21313 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21314 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21315 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21316 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21317 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21318 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21319 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21320 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21321 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21322 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21323 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21324 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21325 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21326 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21327 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21328 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21329 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21330 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21331 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21332 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21333 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21334 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21335 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21336 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21337 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21338 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21339 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21340 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21341 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21342 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21343 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21344 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21345 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21346 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21347
21348 #undef ARM_VARIANT
21349 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
21350
21351 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21352 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21353 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21354 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21355 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21356 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21357 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21358 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21359 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
21360 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
21361 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
21362 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
21363 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
21364 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
21365 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
21366 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
21367 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
21368 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
21369 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
21370 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
21371 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
21372 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
21373 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
21374 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
21375 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
21376 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
21377 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
21378 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
21379 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
21380 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
21381 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
21382 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
21383 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
21384 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
21385 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
21386 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
21387 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
21388 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
21389 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
21390 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
21391 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
21392 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
21393 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
21394 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
21395 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
21396 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
21397 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
21398 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
21399 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
21400 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
21401 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
21402 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
21403 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
21404 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
21405 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
21406 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
21407 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
21408 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
21409 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
21410 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
21411 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
21412 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
21413 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
21414 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
21415 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21416 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21417 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21418 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21419 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21420 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21421 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21422 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21423 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21424 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21425 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21426 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21427
21428 /* ARMv8-M instructions. */
21429 #undef ARM_VARIANT
21430 #define ARM_VARIANT NULL
21431 #undef THUMB_VARIANT
21432 #define THUMB_VARIANT & arm_ext_v8m
21433 TUE("sg", 0, e97fe97f, 0, (), 0, noargs),
21434 TUE("blxns", 0, 4784, 1, (RRnpc), 0, t_blx),
21435 TUE("bxns", 0, 4704, 1, (RRnpc), 0, t_bx),
21436 TUE("tt", 0, e840f000, 2, (RRnpc, RRnpc), 0, tt),
21437 TUE("ttt", 0, e840f040, 2, (RRnpc, RRnpc), 0, tt),
21438 TUE("tta", 0, e840f080, 2, (RRnpc, RRnpc), 0, tt),
21439 TUE("ttat", 0, e840f0c0, 2, (RRnpc, RRnpc), 0, tt),
21440
21441 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21442 instructions behave as nop if no VFP is present. */
21443 #undef THUMB_VARIANT
21444 #define THUMB_VARIANT & arm_ext_v8m_main
21445 TUEc("vlldm", 0, ec300a00, 1, (RRnpc), rn),
21446 TUEc("vlstm", 0, ec200a00, 1, (RRnpc), rn),
21447 };
21448 #undef ARM_VARIANT
21449 #undef THUMB_VARIANT
21450 #undef TCE
21451 #undef TUE
21452 #undef TUF
21453 #undef TCC
21454 #undef cCE
21455 #undef cCL
21456 #undef C3E
21457 #undef CE
21458 #undef CM
21459 #undef UE
21460 #undef UF
21461 #undef UT
21462 #undef NUF
21463 #undef nUF
21464 #undef NCE
21465 #undef nCE
21466 #undef OPS0
21467 #undef OPS1
21468 #undef OPS2
21469 #undef OPS3
21470 #undef OPS4
21471 #undef OPS5
21472 #undef OPS6
21473 #undef do_0
21474 \f
21475 /* MD interface: bits in the object file. */
21476
/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
   for use in the object file, and store them in the array pointed to by buf.
   This knows about the endian-ness of the target machine and does
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
   2 (short) and 4 (long).  Floating-point numbers are put out as a series
   of LITTLENUMS (shorts, here at least).  */
21483
21484 void
21485 md_number_to_chars (char * buf, valueT val, int n)
21486 {
21487 if (target_big_endian)
21488 number_to_chars_bigendian (buf, val, n);
21489 else
21490 number_to_chars_littleendian (buf, val, n);
21491 }
21492
21493 static valueT
21494 md_chars_to_number (char * buf, int n)
21495 {
21496 valueT result = 0;
21497 unsigned char * where = (unsigned char *) buf;
21498
21499 if (target_big_endian)
21500 {
21501 while (n--)
21502 {
21503 result <<= 8;
21504 result |= (*where++ & 255);
21505 }
21506 }
21507 else
21508 {
21509 while (n--)
21510 {
21511 result <<= 8;
21512 result |= (where[n] & 255);
21513 }
21514 }
21515
21516 return result;
21517 }
21518
21519 /* MD interface: Sections. */
21520
21521 /* Calculate the maximum variable size (i.e., excluding fr_fix)
21522 that an rs_machine_dependent frag may reach. */
21523
unsigned int
arm_frag_max_var (fragS *fragp)
{
  /* We only use rs_machine_dependent for variable-size Thumb instructions,
     which are either THUMB_SIZE (2) or INSN_SIZE (4).

     Note that we generate relaxable instructions even for cases that don't
     really need it, like an immediate that's a trivial constant.  So we're
     overestimating the instruction size for some of those cases.  Rather
     than putting more intelligence here, it would probably be better to
     avoid generating a relaxation frag in the first place when it can be
     determined up front that a short instruction will suffice.  */

  /* Only machine-dependent frags should ever reach this query.  */
  gas_assert (fragp->fr_type == rs_machine_dependent);
  /* Worst case: the instruction widens to the full 4-byte encoding.  */
  return INSN_SIZE;
}
21540
21541 /* Estimate the size of a frag before relaxing. Assume everything fits in
21542 2 bytes. */
21543
int
md_estimate_size_before_relax (fragS * fragp,
			       segT segtype ATTRIBUTE_UNUSED)
{
  /* Start every relaxable frag at the narrow (2-byte) Thumb encoding;
     arm_relax_frag will grow it to 4 bytes if required.  */
  fragp->fr_var = 2;
  return 2;
}
21551
21552 /* Convert a machine dependent frag. */
21553
void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The relaxable instruction lives at the end of the fixed part of
     the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* Fetch the original narrow (16-bit) encoding so its register fields
     can be carried over into a widened encoding below.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  /* Frag relaxed to the 32-bit encoding: rebuild the insn and
	     transplant the register fields from the narrow form.  */
	  insn = THUMB_OP32 (opcode);
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      /* Narrow forms with the register in bits 8-10.  */
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      /* Narrow forms with Rd in bits 0-2 and Rn in bits 3-5.  */
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Of these, only the ldr-via-pc pseudo is PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Carry the register field across to the wide encoding.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Narrow adr is relative to the aligned PC, 4 bytes ahead.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs keep the register in the low position of the wide
	     encoding; cmp/cmn need it shifted up by a further 8 bits.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Move the condition field into the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Transplant both register fields into the wide encoding.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 distinguishes the flag-setting variants, which take a
	     different relocation.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  /* Attach the fix-up that fills in the immediate/offset field later.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
21725
21726 /* Return the size of a relaxable immediate operand instruction.
21727 SHIFT and SIZE specify the form of the allowable immediate. */
21728 static int
21729 relax_immediate (fragS *fragp, int size, int shift)
21730 {
21731 offsetT offset;
21732 offsetT mask;
21733 offsetT low;
21734
21735 /* ??? Should be able to do better than this. */
21736 if (fragp->fr_symbol)
21737 return 4;
21738
21739 low = (1 << shift) - 1;
21740 mask = (1 << (shift + size)) - (1 << shift);
21741 offset = fragp->fr_offset;
21742 /* Force misaligned offsets to 32-bit variant. */
21743 if (offset & low)
21744 return 4;
21745 if (offset & ~mask)
21746 return 4;
21747 return 2;
21748 }
21749
21750 /* Get the address of a symbol during relaxation. */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  /* Base address: symbol value plus the frag's constant offset.  */
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* An intervening alignment frag absorbs part of the
		 stretch; round it down to the alignment boundary.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Nothing left of the stretch: the symbol won't move.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* f == NULL means sym_frag precedes fragp, so the symbol has
	 already been placed on this pass and needs no adjustment.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
21799
21800 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
21801 load. */
21802 static int
21803 relax_adr (fragS *fragp, asection *sec, long stretch)
21804 {
21805 addressT addr;
21806 offsetT val;
21807
21808 /* Assume worst case for symbols not known to be in the same section. */
21809 if (fragp->fr_symbol == NULL
21810 || !S_IS_DEFINED (fragp->fr_symbol)
21811 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21812 || S_IS_WEAK (fragp->fr_symbol))
21813 return 4;
21814
21815 val = relaxed_symbol_addr (fragp, stretch);
21816 addr = fragp->fr_address + fragp->fr_fix;
21817 addr = (addr + 4) & ~3;
21818 /* Force misaligned targets to 32-bit variant. */
21819 if (val & 3)
21820 return 4;
21821 val -= addr;
21822 if (val < 0 || val > 1020)
21823 return 4;
21824 return 2;
21825 }
21826
21827 /* Return the size of a relaxable add/sub immediate instruction. */
21828 static int
21829 relax_addsub (fragS *fragp, asection *sec)
21830 {
21831 char *buf;
21832 int op;
21833
21834 buf = fragp->fr_literal + fragp->fr_fix;
21835 op = bfd_get_16(sec->owner, buf);
21836 if ((op & 0xf) == ((op >> 4) & 0xf))
21837 return relax_immediate (fragp, 8, 0);
21838 else
21839 return relax_immediate (fragp, 3, 0);
21840 }
21841
21842 /* Return TRUE iff the definition of symbol S could be pre-empted
21843 (overridden) at link or load time. */
21844 static bfd_boolean
21845 symbol_preemptible (symbolS *s)
21846 {
21847 /* Weak symbols can always be pre-empted. */
21848 if (S_IS_WEAK (s))
21849 return TRUE;
21850
21851 /* Non-global symbols cannot be pre-empted. */
21852 if (! S_IS_EXTERNAL (s))
21853 return FALSE;
21854
21855 #ifdef OBJ_ELF
21856 /* In ELF, a global symbol can be marked protected, or private. In that
21857 case it can't be pre-empted (other definitions in the same link unit
21858 would violate the ODR). */
21859 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
21860 return FALSE;
21861 #endif
21862
21863 /* Other global symbols might be pre-empted. */
21864 return TRUE;
21865 }
21866
21867 /* Return the size of a relaxable branch instruction. BITS is the
21868 size of the offset field in the narrow instruction. */
21869
21870 static int
21871 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
21872 {
21873 addressT addr;
21874 offsetT val;
21875 offsetT limit;
21876
21877 /* Assume worst case for symbols not known to be in the same section. */
21878 if (!S_IS_DEFINED (fragp->fr_symbol)
21879 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21880 || S_IS_WEAK (fragp->fr_symbol))
21881 return 4;
21882
21883 #ifdef OBJ_ELF
21884 /* A branch to a function in ARM state will require interworking. */
21885 if (S_IS_DEFINED (fragp->fr_symbol)
21886 && ARM_IS_FUNC (fragp->fr_symbol))
21887 return 4;
21888 #endif
21889
21890 if (symbol_preemptible (fragp->fr_symbol))
21891 return 4;
21892
21893 val = relaxed_symbol_addr (fragp, stretch);
21894 addr = fragp->fr_address + fragp->fr_fix + 4;
21895 val -= addr;
21896
21897 /* Offset is a signed value *2 */
21898 limit = 1 << bits;
21899 if (val >= limit || val < -limit)
21900 return 4;
21901 return 2;
21902 }
21903
21904
21905 /* Relax a machine dependent frag. This returns the amount by which
21906 the current size of the frag should change. */
21907
21908 int
21909 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
21910 {
21911 int oldsize;
21912 int newsize;
21913
21914 oldsize = fragp->fr_var;
21915 switch (fragp->fr_subtype)
21916 {
21917 case T_MNEM_ldr_pc2:
21918 newsize = relax_adr (fragp, sec, stretch);
21919 break;
21920 case T_MNEM_ldr_pc:
21921 case T_MNEM_ldr_sp:
21922 case T_MNEM_str_sp:
21923 newsize = relax_immediate (fragp, 8, 2);
21924 break;
21925 case T_MNEM_ldr:
21926 case T_MNEM_str:
21927 newsize = relax_immediate (fragp, 5, 2);
21928 break;
21929 case T_MNEM_ldrh:
21930 case T_MNEM_strh:
21931 newsize = relax_immediate (fragp, 5, 1);
21932 break;
21933 case T_MNEM_ldrb:
21934 case T_MNEM_strb:
21935 newsize = relax_immediate (fragp, 5, 0);
21936 break;
21937 case T_MNEM_adr:
21938 newsize = relax_adr (fragp, sec, stretch);
21939 break;
21940 case T_MNEM_mov:
21941 case T_MNEM_movs:
21942 case T_MNEM_cmp:
21943 case T_MNEM_cmn:
21944 newsize = relax_immediate (fragp, 8, 0);
21945 break;
21946 case T_MNEM_b:
21947 newsize = relax_branch (fragp, sec, 11, stretch);
21948 break;
21949 case T_MNEM_bcond:
21950 newsize = relax_branch (fragp, sec, 8, stretch);
21951 break;
21952 case T_MNEM_add_sp:
21953 case T_MNEM_add_pc:
21954 newsize = relax_immediate (fragp, 8, 2);
21955 break;
21956 case T_MNEM_inc_sp:
21957 case T_MNEM_dec_sp:
21958 newsize = relax_immediate (fragp, 7, 2);
21959 break;
21960 case T_MNEM_addi:
21961 case T_MNEM_addis:
21962 case T_MNEM_subi:
21963 case T_MNEM_subis:
21964 newsize = relax_addsub (fragp, sec);
21965 break;
21966 default:
21967 abort ();
21968 }
21969
21970 fragp->fr_var = newsize;
21971 /* Freeze wide instructions that are at or before the same location as
21972 in the previous pass. This avoids infinite loops.
21973 Don't freeze them unconditionally because targets may be artificially
21974 misaligned by the expansion of preceding frags. */
21975 if (stretch <= 0 && newsize > 2)
21976 {
21977 md_convert_frag (sec->owner, sec, fragp);
21978 frag_wane (fragp);
21979 }
21980
21981 return newsize - oldsize;
21982 }
21983
21984 /* Round up a section size to the appropriate boundary. */
21985
21986 valueT
21987 md_section_align (segT segment ATTRIBUTE_UNUSED,
21988 valueT size)
21989 {
21990 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21991 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
21992 {
21993 /* For a.out, force the section size to be aligned. If we don't do
21994 this, BFD will align it for us, but it will not write out the
21995 final bytes of the section. This may be a bug in BFD, but it is
21996 easier to fix it here since that is how the other a.out targets
21997 work. */
21998 int align;
21999
22000 align = bfd_get_section_alignment (stdoutput, segment);
22001 size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
22002 }
22003 #endif
22004
22005 return size;
22006 }
22007
22008 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
22009 of an rs_align_code fragment. */
22010
void
arm_handle_align (fragS * fragP)
{
  /* No-op encodings, indexed by [architecture variant][endianness].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this alignment frag must provide.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  /* Choose the no-op pattern: Thumb frags may additionally use narrow
     no-ops when Thumb-2 is available; ARM frags pick the encoding by
     whether the selected CPU has v6k.  */
  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Pad any leading odd bytes with zeros (data, not instructions), and
     mark them with a data mapping symbol on ELF.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  /* Fill the rest of the gap with whole no-ops.  */
  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
22127
22128 /* Called from md_do_align. Used to create an alignment
22129 frag in a code section. */
22130
22131 void
22132 arm_frag_align_code (int n, int max)
22133 {
22134 char * p;
22135
22136 /* We assume that there will never be a requirement
22137 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
22138 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
22139 {
22140 char err_msg[128];
22141
22142 sprintf (err_msg,
22143 _("alignments greater than %d bytes not supported in .text sections."),
22144 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
22145 as_fatal ("%s", err_msg);
22146 }
22147
22148 p = frag_var (rs_align_code,
22149 MAX_MEM_FOR_RS_ALIGN_CODE,
22150 1,
22151 (relax_substateT) max,
22152 (symbolS *) NULL,
22153 (offsetT) n,
22154 (char *) NULL);
22155 *p = 0;
22156 }
22157
22158 /* Perform target specific initialisation of a frag.
22159 Note - despite the name this initialisation is not done when the frag
22160 is created, but only when its type is assigned. A frag can be created
22161 and used a long time before its type is set, so beware of assuming that
22162 this initialisation is performed first. */
22163
22164 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  MODE_RECORDED
     marks the field as valid so later readers can trust it.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
22171
22172 #else /* OBJ_ELF is defined. */
22173 void
22174 arm_init_frag (fragS * fragP, int max_chars)
22175 {
22176 bfd_boolean frag_thumb_mode;
22177
22178 /* If the current ARM vs THUMB mode has not already
22179 been recorded into this frag then do so now. */
22180 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
22181 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
22182
22183 /* PR 21809: Do not set a mapping state for debug sections
22184 - it just confuses other tools. */
22185 if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
22186 return;
22187
22188 frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;
22189
22190 /* Record a mapping symbol for alignment frags. We will delete this
22191 later if the alignment ends up empty. */
22192 switch (fragP->fr_type)
22193 {
22194 case rs_align:
22195 case rs_align_test:
22196 case rs_fill:
22197 mapping_state_2 (MAP_DATA, max_chars);
22198 break;
22199 case rs_align_code:
22200 mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
22201 break;
22202 default:
22203 break;
22204 }
22205 }
22206
22207 /* When we change sections we need to issue a new mapping symbol. */
22208
void
arm_elf_change_section (void)
{
  /* Link an unlinked unwind index table section to the .text section.
     This keeps the exception index associated with the code it covers
     when sections are laid out.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;
}
22217
22218 int
22219 arm_elf_section_type (const char * str, size_t len)
22220 {
22221 if (len == 5 && strncmp (str, "exidx", 5) == 0)
22222 return SHT_ARM_EXIDX;
22223
22224 return -1;
22225 }
22226 \f
22227 /* Code to deal with unwinding tables. */
22228
22229 static void add_unwind_adjustsp (offsetT);
22230
22231 /* Generate any deferred unwind frame offset. */
22232
22233 static void
22234 flush_pending_unwind (void)
22235 {
22236 offsetT offset;
22237
22238 offset = unwind.pending_offset;
22239 unwind.pending_offset = 0;
22240 if (offset != 0)
22241 add_unwind_adjustsp (offset);
22242 }
22243
22244 /* Add an opcode to this list for this function. Two-byte opcodes should
22245 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
22246 order. */
22247
22248 static void
22249 add_unwind_opcode (valueT op, int length)
22250 {
22251 /* Add any deferred stack adjustment. */
22252 if (unwind.pending_offset)
22253 flush_pending_unwind ();
22254
22255 unwind.sp_restored = 0;
22256
22257 if (unwind.opcode_count + length > unwind.opcode_alloc)
22258 {
22259 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
22260 if (unwind.opcodes)
22261 unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
22262 unwind.opcode_alloc);
22263 else
22264 unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
22265 }
22266 while (length > 0)
22267 {
22268 length--;
22269 unwind.opcodes[unwind.opcode_count] = op & 0xff;
22270 op >>= 8;
22271 unwind.opcode_count++;
22272 }
22273 }
22274
22275 /* Add unwind opcodes to adjust the stack pointer. */
22276
static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  /* OFFSET is the net stack pointer change in bytes; opcodes encode the
     adjustment in units of 4 with a bias of 4 (opcode 0 means sp += 4).
     Remember the opcode list is built in reverse order throughout.  */
  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      /* The bias is 0x204: offsets up to 0x200 are handled by the one
	 and two byte short forms below.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  The first pops the maximum 0x100 bytes, the
	 second the remainder.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit 0x7f (sp -= 0x100) opcodes until the
	 remainder fits in one 0x40-0x7f short opcode.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
22336
22337 /* Finish the list of unwind opcodes for this function. */
22338
22339 static void
22340 finish_unwind_opcodes (void)
22341 {
22342 valueT op;
22343
22344 if (unwind.fp_used)
22345 {
22346 /* Adjust sp as necessary. */
22347 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
22348 flush_pending_unwind ();
22349
22350 /* After restoring sp from the frame pointer. */
22351 op = 0x90 | unwind.fp_reg;
22352 add_unwind_opcode (op, 1);
22353 }
22354 else
22355 flush_pending_unwind ();
22356 }
22357
22358
22359 /* Start an exception table entry. If idx is nonzero this is an index table
22360 entry. */
22361
22362 static void
22363 start_unwind_section (const segT text_seg, int idx)
22364 {
22365 const char * text_name;
22366 const char * prefix;
22367 const char * prefix_once;
22368 const char * group_name;
22369 char * sec_name;
22370 int type;
22371 int flags;
22372 int linkonce;
22373
22374 if (idx)
22375 {
22376 prefix = ELF_STRING_ARM_unwind;
22377 prefix_once = ELF_STRING_ARM_unwind_once;
22378 type = SHT_ARM_EXIDX;
22379 }
22380 else
22381 {
22382 prefix = ELF_STRING_ARM_unwind_info;
22383 prefix_once = ELF_STRING_ARM_unwind_info_once;
22384 type = SHT_PROGBITS;
22385 }
22386
22387 text_name = segment_name (text_seg);
22388 if (streq (text_name, ".text"))
22389 text_name = "";
22390
22391 if (strncmp (text_name, ".gnu.linkonce.t.",
22392 strlen (".gnu.linkonce.t.")) == 0)
22393 {
22394 prefix = prefix_once;
22395 text_name += strlen (".gnu.linkonce.t.");
22396 }
22397
22398 sec_name = concat (prefix, text_name, (char *) NULL);
22399
22400 flags = SHF_ALLOC;
22401 linkonce = 0;
22402 group_name = 0;
22403
22404 /* Handle COMDAT group. */
22405 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
22406 {
22407 group_name = elf_group_name (text_seg);
22408 if (group_name == NULL)
22409 {
22410 as_bad (_("Group section `%s' has no group signature"),
22411 segment_name (text_seg));
22412 ignore_rest_of_line ();
22413 return;
22414 }
22415 flags |= SHF_GROUP;
22416 linkonce = 1;
22417 }
22418
22419 obj_elf_change_section (sec_name, type, 0, flags, 0, group_name,
22420 linkonce, 0);
22421
22422 /* Set the section link for index tables. */
22423 if (idx)
22424 elf_linked_to_section (now_seg) = text_seg;
22425 }
22426
22427
22428 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
22429 personality routine data. Returns zero, or the index table value for
22430 an inline entry. */
22431
static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  /* Flush any deferred stack adjustment and complete the opcode list.  */
  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  /* Personality routine 0 only has room for three opcode bytes
	     in total.  */
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  The leading
		 0x80 byte marks an inline (compact) entry; the other
		 three bytes hold the opcodes.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round up to a whole number of 32-bit words; the byte count field
     only has room for 0xff words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Align the table entry to a 4 byte (2**2) boundary.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
22596
22597
22598 /* Initialize the DWARF-2 unwind information for this procedure. */
22599
void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
22605 #endif /* OBJ_ELF */
22606
22607 /* Convert REGNAME to a DWARF-2 register number. */
22608
22609 int
22610 tc_arm_regname_to_dw2regnum (char *regname)
22611 {
22612 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
22613 if (reg != FAIL)
22614 return reg;
22615
22616 /* PR 16694: Allow VFP registers as well. */
22617 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
22618 if (reg != FAIL)
22619 return 64 + reg;
22620
22621 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
22622 if (reg != FAIL)
22623 return reg + 256;
22624
22625 return FAIL;
22626 }
22627
22628 #ifdef TE_PE
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  /* Emit a SIZE byte section-relative offset to SYMBOL, as used by
     PE/COFF DWARF debug information.  */
  expressionS exp;

  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
22639 #endif
22640
22641 /* MD interface: Symbol and relocation handling. */
22642
22643 /* Return the address within the segment that a PC-relative fixup is
22644 relative to. For ARM, PC-relative fixups applied to instructions
22645 are generally relative to the location of the fixup plus 8 bytes.
22646 Thumb branches are offset by 4, and Thumb loads relative to PC
22647 require special handling. */
22648
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* The raw location of the fixup within SEG.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* NOTE(review): for a same-section function symbol on a v5t+
	 target the calculated base is restored even though a
	 relocation may be emitted, presumably so interworking
	 (BL <-> BLX) handling sees the real address — confirm against
	 md_apply_fix.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
22770
22771 static bfd_boolean flag_warn_syms = TRUE;
22772
22773 bfd_boolean
22774 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
22775 {
22776 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22777 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22778 does mean that the resulting code might be very confusing to the reader.
22779 Also this warning can be triggered if the user omits an operand before
22780 an immediate address, eg:
22781
22782 LDR =foo
22783
22784 GAS treats this as an assignment of the value of the symbol foo to a
22785 symbol LDR, and so (without this code) it will not issue any kind of
22786 warning or error message.
22787
22788 Note - ARM instructions are case-insensitive but the strings in the hash
22789 table are all stored in lower case, so we must first ensure that name is
22790 lower case too. */
22791 if (flag_warn_syms && arm_ops_hsh)
22792 {
22793 char * nbuf = strdup (name);
22794 char * p;
22795
22796 for (p = nbuf; *p; p++)
22797 *p = TOLOWER (*p);
22798 if (hash_find (arm_ops_hsh, nbuf) != NULL)
22799 {
22800 static struct hash_control * already_warned = NULL;
22801
22802 if (already_warned == NULL)
22803 already_warned = hash_new ();
22804 /* Only warn about the symbol once. To keep the code
22805 simple we let hash_insert do the lookup for us. */
22806 if (hash_insert (already_warned, name, NULL) == NULL)
22807 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
22808 }
22809 else
22810 free (nbuf);
22811 }
22812
22813 return FALSE;
22814 }
22815
22816 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22817 Otherwise we have no need to default values of symbols. */
22818
22819 symbolS *
22820 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
22821 {
22822 #ifdef OBJ_ELF
22823 if (name[0] == '_' && name[1] == 'G'
22824 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
22825 {
22826 if (!GOT_symbol)
22827 {
22828 if (symbol_find (name))
22829 as_bad (_("GOT already in the symbol table"));
22830
22831 GOT_symbol = symbol_new (name, undefined_section,
22832 (valueT) 0, & zero_address_frag);
22833 }
22834
22835 return GOT_symbol;
22836 }
22837 #endif
22838
22839 return NULL;
22840 }
22841
22842 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22843 computed as two separate immediate values, added together. We
22844 already know that this value cannot be computed by just one ARM
22845 instruction. */
22846
22847 static unsigned int
22848 validate_immediate_twopart (unsigned int val,
22849 unsigned int * highpart)
22850 {
22851 unsigned int a;
22852 unsigned int i;
22853
22854 for (i = 0; i < 32; i += 2)
22855 if (((a = rotate_left (val, i)) & 0xff) != 0)
22856 {
22857 if (a & 0xff00)
22858 {
22859 if (a & ~ 0xffff)
22860 continue;
22861 * highpart = (a >> 8) | ((i + 24) << 7);
22862 }
22863 else if (a & 0xff0000)
22864 {
22865 if (a & 0xff000000)
22866 continue;
22867 * highpart = (a >> 16) | ((i + 16) << 7);
22868 }
22869 else
22870 {
22871 gas_assert (a & 0xff000000);
22872 * highpart = (a >> 24) | ((i + 8) << 7);
22873 }
22874
22875 return (a & 0xff) | (i << 7);
22876 }
22877
22878 return FAIL;
22879 }
22880
22881 static int
22882 validate_offset_imm (unsigned int val, int hwse)
22883 {
22884 if ((hwse && val > 255) || val > 4095)
22885 return FAIL;
22886 return val;
22887 }
22888
22889 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22890 negative immediate constant by altering the instruction. A bit of
22891 a hack really.
22892 MOV <-> MVN
22893 AND <-> BIC
22894 ADC <-> SBC
22895 by inverting the second operand, and
22896 ADD <-> SUB
22897 CMP <-> CMN
22898 by negating the second operand. */
22899
22900 static int
22901 negate_data_op (unsigned long * instruction,
22902 unsigned long value)
22903 {
22904 int op, new_inst;
22905 unsigned long negated, inverted;
22906
22907 negated = encode_arm_immediate (-value);
22908 inverted = encode_arm_immediate (~value);
22909
22910 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
22911 switch (op)
22912 {
22913 /* First negates. */
22914 case OPCODE_SUB: /* ADD <-> SUB */
22915 new_inst = OPCODE_ADD;
22916 value = negated;
22917 break;
22918
22919 case OPCODE_ADD:
22920 new_inst = OPCODE_SUB;
22921 value = negated;
22922 break;
22923
22924 case OPCODE_CMP: /* CMP <-> CMN */
22925 new_inst = OPCODE_CMN;
22926 value = negated;
22927 break;
22928
22929 case OPCODE_CMN:
22930 new_inst = OPCODE_CMP;
22931 value = negated;
22932 break;
22933
22934 /* Now Inverted ops. */
22935 case OPCODE_MOV: /* MOV <-> MVN */
22936 new_inst = OPCODE_MVN;
22937 value = inverted;
22938 break;
22939
22940 case OPCODE_MVN:
22941 new_inst = OPCODE_MOV;
22942 value = inverted;
22943 break;
22944
22945 case OPCODE_AND: /* AND <-> BIC */
22946 new_inst = OPCODE_BIC;
22947 value = inverted;
22948 break;
22949
22950 case OPCODE_BIC:
22951 new_inst = OPCODE_AND;
22952 value = inverted;
22953 break;
22954
22955 case OPCODE_ADC: /* ADC <-> SBC */
22956 new_inst = OPCODE_SBC;
22957 value = inverted;
22958 break;
22959
22960 case OPCODE_SBC:
22961 new_inst = OPCODE_ADC;
22962 value = inverted;
22963 break;
22964
22965 /* We cannot do anything. */
22966 default:
22967 return FAIL;
22968 }
22969
22970 if (value == (unsigned) FAIL)
22971 return FAIL;
22972
22973 *instruction &= OPCODE_MASK;
22974 *instruction |= new_inst << DATA_OP_SHIFT;
22975 return value;
22976 }
22977
22978 /* Like negate_data_op, but for Thumb-2. */
22979
22980 static unsigned int
22981 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
22982 {
22983 int op, new_inst;
22984 int rd;
22985 unsigned int negated, inverted;
22986
22987 negated = encode_thumb32_immediate (-value);
22988 inverted = encode_thumb32_immediate (~value);
22989
22990 rd = (*instruction >> 8) & 0xf;
22991 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
22992 switch (op)
22993 {
22994 /* ADD <-> SUB. Includes CMP <-> CMN. */
22995 case T2_OPCODE_SUB:
22996 new_inst = T2_OPCODE_ADD;
22997 value = negated;
22998 break;
22999
23000 case T2_OPCODE_ADD:
23001 new_inst = T2_OPCODE_SUB;
23002 value = negated;
23003 break;
23004
23005 /* ORR <-> ORN. Includes MOV <-> MVN. */
23006 case T2_OPCODE_ORR:
23007 new_inst = T2_OPCODE_ORN;
23008 value = inverted;
23009 break;
23010
23011 case T2_OPCODE_ORN:
23012 new_inst = T2_OPCODE_ORR;
23013 value = inverted;
23014 break;
23015
23016 /* AND <-> BIC. TST has no inverted equivalent. */
23017 case T2_OPCODE_AND:
23018 new_inst = T2_OPCODE_BIC;
23019 if (rd == 15)
23020 value = FAIL;
23021 else
23022 value = inverted;
23023 break;
23024
23025 case T2_OPCODE_BIC:
23026 new_inst = T2_OPCODE_AND;
23027 value = inverted;
23028 break;
23029
23030 /* ADC <-> SBC */
23031 case T2_OPCODE_ADC:
23032 new_inst = T2_OPCODE_SBC;
23033 value = inverted;
23034 break;
23035
23036 case T2_OPCODE_SBC:
23037 new_inst = T2_OPCODE_ADC;
23038 value = inverted;
23039 break;
23040
23041 /* We cannot do anything. */
23042 default:
23043 return FAIL;
23044 }
23045
23046 if (value == (unsigned int)FAIL)
23047 return FAIL;
23048
23049 *instruction &= T2_OPCODE_MASK;
23050 *instruction |= new_inst << T2_DATA_OP_SHIFT;
23051 return value;
23052 }
23053
23054 /* Read a 32-bit thumb instruction from buf. */
23055
23056 static unsigned long
23057 get_thumb32_insn (char * buf)
23058 {
23059 unsigned long insn;
23060 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
23061 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23062
23063 return insn;
23064 }
23065
23066 /* We usually want to set the low bit on the address of thumb function
23067 symbols. In particular .word foo - . should have the low bit set.
23068 Generic code tries to fold the difference of two symbols to
23069 a constant. Prevent this and force a relocation when the first symbols
23070 is a thumb function. */
23071
23072 bfd_boolean
23073 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
23074 {
23075 if (op == O_subtract
23076 && l->X_op == O_symbol
23077 && r->X_op == O_symbol
23078 && THUMB_IS_FUNC (l->X_add_symbol))
23079 {
23080 l->X_op = O_subtract;
23081 l->X_op_symbol = r->X_add_symbol;
23082 l->X_add_number -= r->X_add_number;
23083 return TRUE;
23084 }
23085
23086 /* Process as normal. */
23087 return FALSE;
23088 }
23089
23090 /* Encode Thumb2 unconditional branches and calls. The encoding
23091 for the 2 are identical for the immediate values. */
23092
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
  /* VALUE is the byte offset of the branch target; split it into the
     S, I1, I2, imm10 (hi) and imm11 (lo) fields of the 32-bit T32
     B/BL encoding and merge them into the two halfwords at BUF.  */
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* The instruction stores J1 = I1 EOR (NOT S) and J2 = I2 EOR (NOT S);
     the final XOR with T2I1I2MASK supplies the NOT.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
23114
23115 void
23116 md_apply_fix (fixS * fixP,
23117 valueT * valP,
23118 segT seg)
23119 {
23120 offsetT value = * valP;
23121 offsetT newval;
23122 unsigned int newimm;
23123 unsigned long temp;
23124 int sign;
23125 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
23126
23127 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
23128
23129 /* Note whether this will delete the relocation. */
23130
23131 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
23132 fixP->fx_done = 1;
23133
23134 /* On a 64-bit host, silently truncate 'value' to 32 bits for
23135 consistency with the behaviour on 32-bit hosts. Remember value
23136 for emit_reloc. */
23137 value &= 0xffffffff;
23138 value ^= 0x80000000;
23139 value -= 0x80000000;
23140
23141 *valP = value;
23142 fixP->fx_addnumber = value;
23143
23144 /* Same treatment for fixP->fx_offset. */
23145 fixP->fx_offset &= 0xffffffff;
23146 fixP->fx_offset ^= 0x80000000;
23147 fixP->fx_offset -= 0x80000000;
23148
23149 switch (fixP->fx_r_type)
23150 {
23151 case BFD_RELOC_NONE:
23152 /* This will need to go in the object file. */
23153 fixP->fx_done = 0;
23154 break;
23155
23156 case BFD_RELOC_ARM_IMMEDIATE:
23157 /* We claim that this fixup has been processed here,
23158 even if in fact we generate an error because we do
23159 not have a reloc for it, so tc_gen_reloc will reject it. */
23160 fixP->fx_done = 1;
23161
23162 if (fixP->fx_addsy)
23163 {
23164 const char *msg = 0;
23165
23166 if (! S_IS_DEFINED (fixP->fx_addsy))
23167 msg = _("undefined symbol %s used as an immediate value");
23168 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23169 msg = _("symbol %s is in a different section");
23170 else if (S_IS_WEAK (fixP->fx_addsy))
23171 msg = _("symbol %s is weak and may be overridden later");
23172
23173 if (msg)
23174 {
23175 as_bad_where (fixP->fx_file, fixP->fx_line,
23176 msg, S_GET_NAME (fixP->fx_addsy));
23177 break;
23178 }
23179 }
23180
23181 temp = md_chars_to_number (buf, INSN_SIZE);
23182
23183 /* If the offset is negative, we should use encoding A2 for ADR. */
23184 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
23185 newimm = negate_data_op (&temp, value);
23186 else
23187 {
23188 newimm = encode_arm_immediate (value);
23189
23190 /* If the instruction will fail, see if we can fix things up by
23191 changing the opcode. */
23192 if (newimm == (unsigned int) FAIL)
23193 newimm = negate_data_op (&temp, value);
23194 /* MOV accepts both ARM modified immediate (A1 encoding) and
23195 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
23196 When disassembling, MOV is preferred when there is no encoding
23197 overlap. */
23198 if (newimm == (unsigned int) FAIL
23199 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
23200 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
23201 && !((temp >> SBIT_SHIFT) & 0x1)
23202 && value >= 0 && value <= 0xffff)
23203 {
23204 /* Clear bits[23:20] to change encoding from A1 to A2. */
23205 temp &= 0xff0fffff;
23206 /* Encoding high 4bits imm. Code below will encode the remaining
23207 low 12bits. */
23208 temp |= (value & 0x0000f000) << 4;
23209 newimm = value & 0x00000fff;
23210 }
23211 }
23212
23213 if (newimm == (unsigned int) FAIL)
23214 {
23215 as_bad_where (fixP->fx_file, fixP->fx_line,
23216 _("invalid constant (%lx) after fixup"),
23217 (unsigned long) value);
23218 break;
23219 }
23220
23221 newimm |= (temp & 0xfffff000);
23222 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23223 break;
23224
23225 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
23226 {
23227 unsigned int highpart = 0;
23228 unsigned int newinsn = 0xe1a00000; /* nop. */
23229
23230 if (fixP->fx_addsy)
23231 {
23232 const char *msg = 0;
23233
23234 if (! S_IS_DEFINED (fixP->fx_addsy))
23235 msg = _("undefined symbol %s used as an immediate value");
23236 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23237 msg = _("symbol %s is in a different section");
23238 else if (S_IS_WEAK (fixP->fx_addsy))
23239 msg = _("symbol %s is weak and may be overridden later");
23240
23241 if (msg)
23242 {
23243 as_bad_where (fixP->fx_file, fixP->fx_line,
23244 msg, S_GET_NAME (fixP->fx_addsy));
23245 break;
23246 }
23247 }
23248
23249 newimm = encode_arm_immediate (value);
23250 temp = md_chars_to_number (buf, INSN_SIZE);
23251
23252 /* If the instruction will fail, see if we can fix things up by
23253 changing the opcode. */
23254 if (newimm == (unsigned int) FAIL
23255 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
23256 {
23257 /* No ? OK - try using two ADD instructions to generate
23258 the value. */
23259 newimm = validate_immediate_twopart (value, & highpart);
23260
23261 /* Yes - then make sure that the second instruction is
23262 also an add. */
23263 if (newimm != (unsigned int) FAIL)
23264 newinsn = temp;
23265 /* Still No ? Try using a negated value. */
23266 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
23267 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
23268 /* Otherwise - give up. */
23269 else
23270 {
23271 as_bad_where (fixP->fx_file, fixP->fx_line,
23272 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
23273 (long) value);
23274 break;
23275 }
23276
23277 /* Replace the first operand in the 2nd instruction (which
23278 is the PC) with the destination register. We have
23279 already added in the PC in the first instruction and we
23280 do not want to do it again. */
23281 newinsn &= ~ 0xf0000;
23282 newinsn |= ((newinsn & 0x0f000) << 4);
23283 }
23284
23285 newimm |= (temp & 0xfffff000);
23286 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23287
23288 highpart |= (newinsn & 0xfffff000);
23289 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
23290 }
23291 break;
23292
23293 case BFD_RELOC_ARM_OFFSET_IMM:
23294 if (!fixP->fx_done && seg->use_rela_p)
23295 value = 0;
23296 /* Fall through. */
23297
23298 case BFD_RELOC_ARM_LITERAL:
23299 sign = value > 0;
23300
23301 if (value < 0)
23302 value = - value;
23303
23304 if (validate_offset_imm (value, 0) == FAIL)
23305 {
23306 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
23307 as_bad_where (fixP->fx_file, fixP->fx_line,
23308 _("invalid literal constant: pool needs to be closer"));
23309 else
23310 as_bad_where (fixP->fx_file, fixP->fx_line,
23311 _("bad immediate value for offset (%ld)"),
23312 (long) value);
23313 break;
23314 }
23315
23316 newval = md_chars_to_number (buf, INSN_SIZE);
23317 if (value == 0)
23318 newval &= 0xfffff000;
23319 else
23320 {
23321 newval &= 0xff7ff000;
23322 newval |= value | (sign ? INDEX_UP : 0);
23323 }
23324 md_number_to_chars (buf, newval, INSN_SIZE);
23325 break;
23326
23327 case BFD_RELOC_ARM_OFFSET_IMM8:
23328 case BFD_RELOC_ARM_HWLITERAL:
23329 sign = value > 0;
23330
23331 if (value < 0)
23332 value = - value;
23333
23334 if (validate_offset_imm (value, 1) == FAIL)
23335 {
23336 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
23337 as_bad_where (fixP->fx_file, fixP->fx_line,
23338 _("invalid literal constant: pool needs to be closer"));
23339 else
23340 as_bad_where (fixP->fx_file, fixP->fx_line,
23341 _("bad immediate value for 8-bit offset (%ld)"),
23342 (long) value);
23343 break;
23344 }
23345
23346 newval = md_chars_to_number (buf, INSN_SIZE);
23347 if (value == 0)
23348 newval &= 0xfffff0f0;
23349 else
23350 {
23351 newval &= 0xff7ff0f0;
23352 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
23353 }
23354 md_number_to_chars (buf, newval, INSN_SIZE);
23355 break;
23356
23357 case BFD_RELOC_ARM_T32_OFFSET_U8:
23358 if (value < 0 || value > 1020 || value % 4 != 0)
23359 as_bad_where (fixP->fx_file, fixP->fx_line,
23360 _("bad immediate value for offset (%ld)"), (long) value);
23361 value /= 4;
23362
23363 newval = md_chars_to_number (buf+2, THUMB_SIZE);
23364 newval |= value;
23365 md_number_to_chars (buf+2, newval, THUMB_SIZE);
23366 break;
23367
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
      /* This is a complicated relocation used for all varieties of Thumb32
	 load/store instruction with immediate offset:

	 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
						   *4, optional writeback(W)
						   (doubleword load/store)

	 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
	 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
	 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
	 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
	 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit

	 Uppercase letters indicate bits that are already encoded at
	 this point.  Lowercase letters are our problem.  For the
	 second block of instructions, the secondary opcode nybble
	 (bits 8..11) is present, and bit 23 is zero, even if this is
	 a PC-relative operation.  */
      /* Reassemble the full 32-bit Thumb encoding from its two
	 halfwords so the variant tests below can look at any bit.  */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      newval <<= 16;
      newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);

      if ((newval & 0xf0000000) == 0xe0000000)
	{
	  /* Doubleword load/store: 8-bit offset, scaled by 4.  */
	  if (value >= 0)
	    newval |= (1 << 23);	/* U bit: add the offset.  */
	  else
	    value = -value;
	  if (value % 4 != 0)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset not a multiple of 4"));
	      break;
	    }
	  value /= 4;
	  if (value > 0xff)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~0xff;
	}
      else if ((newval & 0x000f0000) == 0x000f0000)
	{
	  /* PC-relative, 12-bit offset.  */
	  if (value >= 0)
	    newval |= (1 << 23);
	  else
	    value = -value;
	  if (value > 0xfff)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~0xfff;
	}
      else if ((newval & 0x00000100) == 0x00000100)
	{
	  /* Writeback: 8-bit, +/- offset.  */
	  if (value >= 0)
	    newval |= (1 << 9);		/* U bit for this encoding.  */
	  else
	    value = -value;
	  if (value > 0xff)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~0xff;
	}
      else if ((newval & 0x00000f00) == 0x00000e00)
	{
	  /* T-instruction: positive 8-bit offset.  */
	  if (value < 0 || value > 0xff)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~0xff;
	  newval |= value;
	}
      else
	{
	  /* Positive 12-bit or negative 8-bit offset.  */
	  int limit;
	  if (value >= 0)
	    {
	      newval |= (1 << 23);
	      limit = 0xfff;
	    }
	  else
	    {
	      value = -value;
	      limit = 0xff;
	    }
	  if (value > limit)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~limit;
	}

      newval |= value;
      /* Write the two halfwords back in Thumb order.  */
      md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
      md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
      break;

    case BFD_RELOC_ARM_SHIFT_IMM:
      /* ARM data-processing shift amount in bits 7-11.  A shift of 32
	 is only valid for LSR/ASR (encoded as 0); 0x60 masks the shift
	 type field, where 0 is LSL and 0x60 is ROR.  */
      newval = md_chars_to_number (buf, INSN_SIZE);
      if (((unsigned long) value) > 32
	  || (value == 32
	      && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("shift expression is too large"));
	  break;
	}

      if (value == 0)
	/* Shifts of zero must be done as lsl.  */
	newval &= ~0x60;
      else if (value == 32)
	value = 0;
      newval &= 0xfffff07f;
      newval |= (value & 0x1f) << 7;
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
23503
    case BFD_RELOC_ARM_T32_IMMEDIATE:
    case BFD_RELOC_ARM_T32_ADD_IMM:
    case BFD_RELOC_ARM_T32_IMM12:
    case BFD_RELOC_ARM_T32_ADD_PC12:
      /* We claim that this fixup has been processed here,
	 even if in fact we generate an error because we do
	 not have a reloc for it, so tc_gen_reloc will reject it.  */
      fixP->fx_done = 1;

      if (fixP->fx_addsy
	  && ! S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  break;
	}

      newval = md_chars_to_number (buf, THUMB_SIZE);
      newval <<= 16;
      newval |= md_chars_to_number (buf+2, THUMB_SIZE);

      newimm = FAIL;
      /* First try the Thumb-2 modified-immediate (T2) encoding, and if
	 that fails, try negating the data-processing operation.  */
      if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
	   /* ARMv8-M Baseline MOV will reach here, but it doesn't support
	      Thumb2 modified immediate encoding (T2).  */
	   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
	  || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
	{
	  newimm = encode_thumb32_immediate (value);
	  if (newimm == (unsigned int) FAIL)
	    newimm = thumb32_negate_data_op (&newval, value);
	}
      if (newimm == (unsigned int) FAIL)
	{
	  if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
	    {
	      /* Turn add/sub into addw/subw.  */
	      if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
		newval = (newval & 0xfeffffff) | 0x02000000;
	      /* No flat 12-bit imm encoding for addsw/subsw.  */
	      if ((newval & 0x00100000) == 0)
		{
		  /* 12 bit immediate for addw/subw.  */
		  if (value < 0)
		    {
		      /* Negative offset: flip ADD<->SUB opcode bits.  */
		      value = -value;
		      newval ^= 0x00a00000;
		    }
		  if (value > 0xfff)
		    newimm = (unsigned int) FAIL;
		  else
		    newimm = value;
		}
	    }
	  else
	    {
	      /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
		 UINT16 (T3 encoding), MOVW only accepts UINT16.  When
		 disassembling, MOV is preferred when there is no encoding
		 overlap.
		 NOTE: MOV is using ORR opcode under Thumb 2 mode.  */
	      if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
		  && !((newval >> T2_SBIT_SHIFT) & 0x1)
		  && value >= 0 && value <=0xffff)
		{
		  /* Toggle bit[25] to change encoding from T2 to T3.  */
		  newval ^= 1 << 25;
		  /* Clear bits[19:16].  */
		  newval &= 0xfff0ffff;
		  /* Encoding high 4bits imm.  Code below will encode the
		     remaining low 12bits.  */
		  newval |= (value & 0x0000f000) << 4;
		  newimm = value & 0x00000fff;
		}
	    }
	}

      if (newimm == (unsigned int)FAIL)
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("invalid constant (%lx) after fixup"),
			(unsigned long) value);
	  break;
	}

      /* Splice the 12-bit immediate into its i:imm3:imm8 fields.  */
      newval |= (newimm & 0x800) << 15;
      newval |= (newimm & 0x700) << 4;
      newval |= (newimm & 0x0ff);

      md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
      md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
      break;
23598
    case BFD_RELOC_ARM_SMC:
      /* SMC: 16-bit immediate split across bits 0-3 and 8-19.  */
      if (((unsigned long) value) > 0xffff)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid smc expression"));
      newval = md_chars_to_number (buf, INSN_SIZE);
      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;

    case BFD_RELOC_ARM_HVC:
      /* HVC: same 16-bit split-immediate layout as SMC.  */
      if (((unsigned long) value) > 0xffff)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid hvc expression"));
      newval = md_chars_to_number (buf, INSN_SIZE);
      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;

    case BFD_RELOC_ARM_SWI:
      /* tc_fix_data non-zero selects the Thumb form (8-bit comment
	 field) over the ARM form (24-bit).  */
      if (fixP->tc_fix_data != 0)
	{
	  if (((unsigned long) value) > 0xff)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid swi expression"));
	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval |= value;
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	}
      else
	{
	  if (((unsigned long) value) > 0x00ffffff)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid swi expression"));
	  newval = md_chars_to_number (buf, INSN_SIZE);
	  newval |= value;
	  md_number_to_chars (buf, newval, INSN_SIZE);
	}
      break;

    case BFD_RELOC_ARM_MULTI:
      /* LDM/STM register list: 16-bit mask OR-ed into the low half.  */
      if (((unsigned long) value) > 0xffff)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid expression in load/store multiple"));
      newval = value | md_chars_to_number (buf, INSN_SIZE);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
23645
#ifdef OBJ_ELF
    case BFD_RELOC_ARM_PCREL_CALL:

      /* An unconditional BL to a Thumb function in the same section
	 can be converted to BLX in place (ARMv5T+).  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
	  && fixP->fx_addsy
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && THUMB_IS_FUNC (fixP->fx_addsy))
	/* Flip the bl to blx.  This is a simple flip
	   bit here because we generate PCREL_CALL for
	   unconditional bls.  */
	{
	  newval = md_chars_to_number (buf, INSN_SIZE);
	  newval = newval | 0x10000000;
	  md_number_to_chars (buf, newval, INSN_SIZE);
	  temp = 1;	/* BLX: halfword-aligned target allowed.  */
	  fixP->fx_done = 1;
	}
      else
	temp = 3;	/* BL: target must be word-aligned.  */
      goto arm_branch_common;

    case BFD_RELOC_ARM_PCREL_JUMP:
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
	  && fixP->fx_addsy
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && THUMB_IS_FUNC (fixP->fx_addsy))
	{
	  /* This would map to a bl<cond>, b<cond>,
	     b<always> to a Thumb function.  We
	     need to force a relocation for this particular
	     case.  */
	  newval = md_chars_to_number (buf, INSN_SIZE);
	  fixP->fx_done = 0;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_PLT32:
#endif
    case BFD_RELOC_ARM_PCREL_BRANCH:
      temp = 3;
      goto arm_branch_common;

    case BFD_RELOC_ARM_PCREL_BLX:

      temp = 1;
      /* A BLX to an ARM-state function is pointless; flip it back to
	 BL and warn.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
	  && fixP->fx_addsy
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && ARM_IS_FUNC (fixP->fx_addsy))
	{
	  /* Flip the blx to a bl and warn.  */
	  const char *name = S_GET_NAME (fixP->fx_addsy);
	  newval = 0xeb000000;
	  as_warn_where (fixP->fx_file, fixP->fx_line,
			 _("blx to '%s' an ARM ISA state function changed to bl"),
			 name);
	  md_number_to_chars (buf, newval, INSN_SIZE);
	  temp = 3;
	  fixP->fx_done = 1;
	}

#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
#endif

    arm_branch_common:
      /* We are going to store value (shifted right by two) in the
	 instruction, in a 24 bit, signed field.  Bits 26 through 32 either
	 all clear or all set and bit 0 must be clear.  For B/BL bit 1 must
	 also be clear.  */
      /* 'temp' is the low-bit alignment mask set above: 1 for BLX
	 (bit 1 becomes the H bit), 3 for B/BL.  */
      if (value & temp)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("misaligned branch destination"));
      if ((value & (offsetT)0xfe000000) != (offsetT)0
	  && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

      if (fixP->fx_done || !seg->use_rela_p)
	{
	  newval = md_chars_to_number (buf, INSN_SIZE);
	  newval |= (value >> 2) & 0x00ffffff;
	  /* Set the H bit on BLX instructions.  */
	  if (temp == 1)
	    {
	      if (value & 2)
		newval |= 0x01000000;
	      else
		newval &= ~0x01000000;
	    }
	  md_number_to_chars (buf, newval, INSN_SIZE);
	}
      break;
23742
    case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
      /* CBZ can only branch forward.  */

      /* Attempts to use CBZ to branch to the next instruction
	 (which, strictly speaking, are prohibited) will be turned into
	 no-ops.

	 FIXME: It may be better to remove the instruction completely and
	 perform relaxation.  */
      if (value == -2)
	{
	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval = 0xbf00; /* NOP encoding T1 */
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	}
      else
	{
	  /* Forward offset 0..126, even: i:imm5 field.  */
	  if (value & ~0x7e)
	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

	  if (fixP->fx_done || !seg->use_rela_p)
	    {
	      newval = md_chars_to_number (buf, THUMB_SIZE);
	      newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
	      md_number_to_chars (buf, newval, THUMB_SIZE);
	    }
	}
      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.  */
      /* Signed 9-bit offset (bit 0 clear), stored >>1 in 8 bits.  */
      if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

      if (fixP->fx_done || !seg->use_rela_p)
	{
	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval |= (value & 0x1ff) >> 1;
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	}
      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
      /* Signed 12-bit offset (bit 0 clear), stored >>1 in 11 bits.  */
      if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

      if (fixP->fx_done || !seg->use_rela_p)
	{
	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval |= (value & 0xfff) >> 1;
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	}
      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH20:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	{
	  /* Force a relocation for a branch 20 bits wide.  */
	  fixP->fx_done = 0;
	}
      if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("conditional branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
	{
	  offsetT newval2;
	  addressT S, J1, J2, lo, hi;

	  /* Thumb-2 conditional branch T3: S:J2:J1:imm6:imm11.  */
	  S = (value & 0x00100000) >> 20;
	  J2 = (value & 0x00080000) >> 19;
	  J1 = (value & 0x00040000) >> 18;
	  hi = (value & 0x0003f000) >> 12;
	  lo = (value & 0x00000ffe) >> 1;

	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
	  newval |= (S << 10) | hi;
	  newval2 |= (J1 << 13) | (J2 << 11) | lo;
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
	}
      break;
23829
    case BFD_RELOC_THUMB_PCREL_BLX:
      /* If there is a blx from a thumb state function to
	 another thumb function flip this to a bl and warn
	 about it.  */

      if (fixP->fx_addsy
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && THUMB_IS_FUNC (fixP->fx_addsy))
	{
	  const char *name = S_GET_NAME (fixP->fx_addsy);
	  as_warn_where (fixP->fx_file, fixP->fx_line,
			 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
			 name);
	  /* Set bit 12 of the second halfword: BLX -> BL.  */
	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
	  newval = newval | 0x1000;
	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
	  fixP->fx_done = 1;
	}


      goto thumb_bl_common;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* A bl from Thumb state ISA to an internal ARM state function
	 is converted to a blx.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	{
	  /* Clear bit 12 of the second halfword: BL -> BLX.  */
	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
	  newval = newval & ~0x1000;
	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
	  fixP->fx_done = 1;
	}

    thumb_bl_common:

      if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
	/* For a BLX instruction, make sure that the relocation is rounded up
	   to a word boundary.  This follows the semantics of the instruction
	   which specifies that bit 1 of the target address will come from bit
	   1 of the base address.  */
	value = (value + 3) & ~ 3;

#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
	  && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
	fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
#endif

      /* Pre-Thumb-2 BL has a 22-bit range; Thumb-2 extends it to 25
	 bits, checked by the inner test.  */
      if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
	{
	  if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
	  else if ((value & ~0x1ffffff)
		   && ((value & ~0x1ffffff) != ~0x1ffffff))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("Thumb2 branch out of range"));
	}

      if (fixP->fx_done || !seg->use_rela_p)
	encode_thumb2_b_bl_offset (buf, value);

      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

      if (fixP->fx_done || !seg->use_rela_p)
	encode_thumb2_b_bl_offset (buf, value);

      break;

    case BFD_RELOC_8:
      if (fixP->fx_done || !seg->use_rela_p)
	*buf = value;
      break;

    case BFD_RELOC_16:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;
23918
#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDO32:
      /* TLS relocations are resolved by the linker; just mark the
	 symbol thread-local here.  */
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      break;

    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
      /* Left entirely to the linker.  */
      break;

    case BFD_RELOC_ARM_GOT_PREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_ARM_TARGET2:
      /* TARGET2 is not partial-inplace, so we need to write the
	 addend here for REL targets, because it won't be written out
	 during reloc processing later.  */
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, fixP->fx_offset, 4);
      break;
#endif

    case BFD_RELOC_RVA:
    case BFD_RELOC_32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_32_PCREL:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      if (fixP->fx_done || !seg->use_rela_p)
#ifdef TE_WINCE
	/* For WinCE we only do this for pcrel fixups.  */
	if (fixP->fx_done || fixP->fx_pcrel)
#endif
	  md_number_to_chars (buf, value, 4);
      break;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_PREL31:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* Keep the inherited sign bit (bit 31); overflow if bits 30
	     and 31 of the value disagree.  */
	  newval = md_chars_to_number (buf, 4) & 0x80000000;
	  if ((value ^ (value >> 1)) & 0x40000000)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("rel31 relocation overflow"));
	    }
	  newval |= value & 0x7fffffff;
	  md_number_to_chars (buf, newval, 4);
	}
      break;
#endif
23983
    case BFD_RELOC_ARM_CP_OFF_IMM:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      /* Coprocessor load/store offset: 8-bit immediate scaled by 4
	 (or by 2 for the fp16 vldr/vstr form detected below).  */
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
	newval = md_chars_to_number (buf, INSN_SIZE);
      else
	newval = get_thumb32_insn (buf);
      if ((newval & 0x0f200f00) == 0x0d000900)
	{
	  /* This is a fp16 vstr/vldr.  The immediate offset in the mnemonic
	     has permitted values that are multiples of 2, in the range 0
	     to 510.  */
	  if (value < -510 || value > 510 || (value & 1))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("co-processor offset out of range"));
	}
      else if (value < -1023 || value > 1023 || (value & 3))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("co-processor offset out of range"));
    cp_off_common:
      /* Shared tail: also reached from the _S2 cases below (with value
	 pre-multiplied by 4).  */
      sign = value > 0;
      if (value < 0)
	value = -value;
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
	newval = md_chars_to_number (buf, INSN_SIZE);
      else
	newval = get_thumb32_insn (buf);
      if (value == 0)
	newval &= 0xffffff00;
      else
	{
	  newval &= 0xff7fff00;
	  if ((newval & 0x0f200f00) == 0x0d000900)
	    {
	      /* This is a fp16 vstr/vldr.

		 It requires the immediate offset in the instruction is shifted
		 left by 1 to be a half-word offset.

		 Here, left shift by 1 first, and later right shift by 2
		 should get the right offset.  */
	      value <<= 1;
	    }
	  newval |= (value >> 2) | (sign ? INDEX_UP : 0);
	}
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
	md_number_to_chars (buf, newval, INSN_SIZE);
      else
	put_thumb32_insn (buf, newval);
      break;

    case BFD_RELOC_ARM_CP_OFF_IMM_S2:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
      /* Pre-scaled variant: range-check the word count, then convert
	 to a byte offset and share the common encoding path.  */
      if (value < -255 || value > 255)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("co-processor offset out of range"));
      value *= 4;
      goto cp_off_common;
24043
24044 case BFD_RELOC_ARM_THUMB_OFFSET:
24045 newval = md_chars_to_number (buf, THUMB_SIZE);
24046 /* Exactly what ranges, and where the offset is inserted depends
24047 on the type of instruction, we can establish this from the
24048 top 4 bits. */
24049 switch (newval >> 12)
24050 {
24051 case 4: /* PC load. */
24052 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
24053 forced to zero for these loads; md_pcrel_from has already
24054 compensated for this. */
24055 if (value & 3)
24056 as_bad_where (fixP->fx_file, fixP->fx_line,
24057 _("invalid offset, target not word aligned (0x%08lX)"),
24058 (((unsigned long) fixP->fx_frag->fr_address
24059 + (unsigned long) fixP->fx_where) & ~3)
24060 + (unsigned long) value);
24061
24062 if (value & ~0x3fc)
24063 as_bad_where (fixP->fx_file, fixP->fx_line,
24064 _("invalid offset, value too big (0x%08lX)"),
24065 (long) value);
24066
24067 newval |= value >> 2;
24068 break;
24069
24070 case 9: /* SP load/store. */
24071 if (value & ~0x3fc)
24072 as_bad_where (fixP->fx_file, fixP->fx_line,
24073 _("invalid offset, value too big (0x%08lX)"),
24074 (long) value);
24075 newval |= value >> 2;
24076 break;
24077
24078 case 6: /* Word load/store. */
24079 if (value & ~0x7c)
24080 as_bad_where (fixP->fx_file, fixP->fx_line,
24081 _("invalid offset, value too big (0x%08lX)"),
24082 (long) value);
24083 newval |= value << 4; /* 6 - 2. */
24084 break;
24085
24086 case 7: /* Byte load/store. */
24087 if (value & ~0x1f)
24088 as_bad_where (fixP->fx_file, fixP->fx_line,
24089 _("invalid offset, value too big (0x%08lX)"),
24090 (long) value);
24091 newval |= value << 6;
24092 break;
24093
24094 case 8: /* Halfword load/store. */
24095 if (value & ~0x3e)
24096 as_bad_where (fixP->fx_file, fixP->fx_line,
24097 _("invalid offset, value too big (0x%08lX)"),
24098 (long) value);
24099 newval |= value << 5; /* 6 - 1. */
24100 break;
24101
24102 default:
24103 as_bad_where (fixP->fx_file, fixP->fx_line,
24104 "Unable to process relocation for thumb opcode: %lx",
24105 (unsigned long) newval);
24106 break;
24107 }
24108 md_number_to_chars (buf, newval, THUMB_SIZE);
24109 break;
24110
    case BFD_RELOC_ARM_THUMB_ADD:
      /* This is a complicated relocation, since we use it for all of
	 the following immediate relocations:

	    3bit ADD/SUB
	    8bit ADD/SUB
	    9bit ADD/SUB SP word-aligned
	   10bit ADD PC/SP word-aligned

	 The type of instruction being processed is encoded in the
	 instruction field:

	   0x8000  SUB
	   0x00F0  Rd
	   0x000F  Rs
      */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      {
	int rd = (newval >> 4) & 0xf;
	int rs = newval & 0xf;
	int subtract = !!(newval & 0x8000);

	/* Check for HI regs, only very restricted cases allowed:
	   Adjusting SP, and using PC or SP to get an address.  */
	if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
	    || (rs > 7 && rs != REG_SP && rs != REG_PC))
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("invalid Hi register with immediate"));

	/* If value is negative, choose the opposite instruction.  */
	if (value < 0)
	  {
	    value = -value;
	    subtract = !subtract;
	    /* Still negative after negation means the most-negative
	       offsetT value, which cannot be represented.  */
	    if (value < 0)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("immediate value out of range"));
	  }

	if (rd == REG_SP)
	  {
	    /* 9-bit word-aligned SP adjustment.  */
	    if (value & ~0x1fc)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("invalid immediate for stack address calculation"));
	    newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
	    newval |= value >> 2;
	  }
	else if (rs == REG_PC || rs == REG_SP)
	  {
	    /* PR gas/18541.  If the addition is for a defined symbol
	       within range of an ADR instruction then accept it.  */
	    if (subtract
		&& value == 4
		&& fixP->fx_addsy != NULL)
	      {
		subtract = 0;

		if (! S_IS_DEFINED (fixP->fx_addsy)
		    || S_GET_SEGMENT (fixP->fx_addsy) != seg
		    || S_IS_WEAK (fixP->fx_addsy))
		  {
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("address calculation needs a strongly defined nearby symbol"));
		  }
		else
		  {
		    offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;

		    /* Round up to the next 4-byte boundary.  */
		    if (v & 3)
		      v = (v + 3) & ~ 3;
		    else
		      v += 4;
		    v = S_GET_VALUE (fixP->fx_addsy) - v;

		    if (v & ~0x3fc)
		      {
			as_bad_where (fixP->fx_file, fixP->fx_line,
				      _("symbol too far away"));
		      }
		    else
		      {
			fixP->fx_done = 1;
			value = v;
		      }
		  }
	      }

	    if (subtract || value & ~0x3fc)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("invalid immediate for address calculation (value = 0x%08lX)"),
			    (unsigned long) (subtract ? - value : value));
	    newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
	    newval |= rd << 8;
	    newval |= value >> 2;
	  }
	else if (rs == rd)
	  {
	    /* 8-bit immediate ADD/SUB (Rd == Rs).  */
	    if (value & ~0xff)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("immediate value out of range"));
	    newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
	    newval |= (rd << 8) | value;
	  }
	else
	  {
	    /* 3-bit immediate three-register ADD/SUB.  */
	    if (value & ~0x7)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("immediate value out of range"));
	    newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
	    newval |= rd | (rs << 3) | (value << 6);
	  }
      }
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;
24226
    case BFD_RELOC_ARM_THUMB_IMM:
      /* Plain 8-bit immediate in the low byte (e.g. Thumb MOV Rd,#imm8).  */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      if (value < 0 || value > 255)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate: %ld is out of range"),
		      (long) value);
      newval |= value;
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;

    case BFD_RELOC_ARM_THUMB_SHIFT:
      /* 5bit shift value (0..32).  LSL cannot take 32.  */
      newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
      temp = newval & 0xf800;	/* Opcode field identifies the shift kind.  */
      if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid shift value: %ld"), (long) value);
      /* Shifts of zero must be encoded as LSL.  */
      if (value == 0)
	newval = (newval & 0x003f) | T_OPCODE_LSL_I;
      /* Shifts of 32 are encoded as zero.  */
      else if (value == 32)
	value = 0;
      newval |= value << 6;
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;

    case BFD_RELOC_VTABLE_INHERIT:
    case BFD_RELOC_VTABLE_ENTRY:
      /* Keep these fixups for the linker's vtable garbage collection.  */
      fixP->fx_done = 0;
      return;
24258
    case BFD_RELOC_ARM_MOVW:
    case BFD_RELOC_ARM_MOVT:
    case BFD_RELOC_ARM_THUMB_MOVW:
    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* REL format relocations are limited to a 16-bit addend.  */
	  if (!fixP->fx_done)
	    {
	      if (value < -0x8000 || value > 0x7fff)
		  as_bad_where (fixP->fx_file, fixP->fx_line,
				_("offset out of range"));
	    }
	  else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
		   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
	    {
	      /* MOVT takes the high halfword of the resolved value.  */
	      value >>= 16;
	    }

	  if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
	      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
	    {
	      /* Thumb-2 encoding: imm4:i:imm3:imm8 fields.  */
	      newval = get_thumb32_insn (buf);
	      newval &= 0xfbf08f00;
	      newval |= (value & 0xf000) << 4;
	      newval |= (value & 0x0800) << 15;
	      newval |= (value & 0x0700) << 4;
	      newval |= (value & 0x00ff);
	      put_thumb32_insn (buf, newval);
	    }
	  else
	    {
	      /* ARM encoding: imm4 in bits 16-19, imm12 in bits 0-11.  */
	      newval = md_chars_to_number (buf, 4);
	      newval &= 0xfff0f000;
	      newval |= value & 0x0fff;
	      newval |= (value & 0xf000) << 4;
	      md_number_to_chars (buf, newval, 4);
	    }
	}
      return;
24299
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
      /* Group relocations: each selects one byte of an absolute
	 address for a Thumb-1 MOV or ADD immediate.  Always emitted
	 as a relocation (never resolved here).  */
      gas_assert (!fixP->fx_done);
      {
	bfd_vma insn;
	bfd_boolean is_mov;
	bfd_vma encoded_addend = value;

	/* Check that addend can be encoded in instruction.  */
	if (!seg->use_rela_p && (value < 0 || value > 255))
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("the offset 0x%08lX is not representable"),
			(unsigned long) encoded_addend);

	/* Extract the instruction.  */
	insn = md_chars_to_number (buf, THUMB_SIZE);
	is_mov = (insn & 0xf800) == 0x2000;

	/* Encode insn.  */
	if (is_mov)
	  {
	    if (!seg->use_rela_p)
	      insn |= encoded_addend;
	  }
	else
	  {
	    int rd, rs;

	    /* Extract the instruction.  */
	    /* Encoding is the following
	       0x8000  SUB
	       0x00F0  Rd
	       0x000F  Rs
	    */
	    /* The following conditions must be true :
	       - ADD
	       - Rd == Rs
	       - Rd <= 7
	    */
	    rd = (insn >> 4) & 0xf;
	    rs = insn & 0xf;
	    if ((insn & 0x8000) || (rd != rs) || rd > 7)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("Unable to process relocation for thumb opcode: %lx"),
			    (unsigned long) insn);

	    /* Encode as ADD immediate8 thumb 1 code.  */
	    insn = 0x3000 | (rd << 8);

	    /* Place the encoded addend into the first 8 bits of the
	       instruction.  */
	    if (!seg->use_rela_p)
	      insn |= encoded_addend;
	  }

	/* Update the instruction.  */
	md_number_to_chars (buf, insn, THUMB_SIZE);
      }
      break;
24361
24362 case BFD_RELOC_ARM_ALU_PC_G0_NC:
24363 case BFD_RELOC_ARM_ALU_PC_G0:
24364 case BFD_RELOC_ARM_ALU_PC_G1_NC:
24365 case BFD_RELOC_ARM_ALU_PC_G1:
24366 case BFD_RELOC_ARM_ALU_PC_G2:
24367 case BFD_RELOC_ARM_ALU_SB_G0_NC:
24368 case BFD_RELOC_ARM_ALU_SB_G0:
24369 case BFD_RELOC_ARM_ALU_SB_G1_NC:
24370 case BFD_RELOC_ARM_ALU_SB_G1:
24371 case BFD_RELOC_ARM_ALU_SB_G2:
24372 gas_assert (!fixP->fx_done);
24373 if (!seg->use_rela_p)
24374 {
24375 bfd_vma insn;
24376 bfd_vma encoded_addend;
24377 bfd_vma addend_abs = abs (value);
24378
24379 /* Check that the absolute value of the addend can be
24380 expressed as an 8-bit constant plus a rotation. */
24381 encoded_addend = encode_arm_immediate (addend_abs);
24382 if (encoded_addend == (unsigned int) FAIL)
24383 as_bad_where (fixP->fx_file, fixP->fx_line,
24384 _("the offset 0x%08lX is not representable"),
24385 (unsigned long) addend_abs);
24386
24387 /* Extract the instruction. */
24388 insn = md_chars_to_number (buf, INSN_SIZE);
24389
24390 /* If the addend is positive, use an ADD instruction.
24391 Otherwise use a SUB. Take care not to destroy the S bit. */
24392 insn &= 0xff1fffff;
24393 if (value < 0)
24394 insn |= 1 << 22;
24395 else
24396 insn |= 1 << 23;
24397
24398 /* Place the encoded addend into the first 12 bits of the
24399 instruction. */
24400 insn &= 0xfffff000;
24401 insn |= encoded_addend;
24402
24403 /* Update the instruction. */
24404 md_number_to_chars (buf, insn, INSN_SIZE);
24405 }
24406 break;
24407
24408 case BFD_RELOC_ARM_LDR_PC_G0:
24409 case BFD_RELOC_ARM_LDR_PC_G1:
24410 case BFD_RELOC_ARM_LDR_PC_G2:
24411 case BFD_RELOC_ARM_LDR_SB_G0:
24412 case BFD_RELOC_ARM_LDR_SB_G1:
24413 case BFD_RELOC_ARM_LDR_SB_G2:
24414 gas_assert (!fixP->fx_done);
24415 if (!seg->use_rela_p)
24416 {
24417 bfd_vma insn;
24418 bfd_vma addend_abs = abs (value);
24419
24420 /* Check that the absolute value of the addend can be
24421 encoded in 12 bits. */
24422 if (addend_abs >= 0x1000)
24423 as_bad_where (fixP->fx_file, fixP->fx_line,
24424 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
24425 (unsigned long) addend_abs);
24426
24427 /* Extract the instruction. */
24428 insn = md_chars_to_number (buf, INSN_SIZE);
24429
24430 /* If the addend is negative, clear bit 23 of the instruction.
24431 Otherwise set it. */
24432 if (value < 0)
24433 insn &= ~(1 << 23);
24434 else
24435 insn |= 1 << 23;
24436
24437 /* Place the absolute value of the addend into the first 12 bits
24438 of the instruction. */
24439 insn &= 0xfffff000;
24440 insn |= addend_abs;
24441
24442 /* Update the instruction. */
24443 md_number_to_chars (buf, insn, INSN_SIZE);
24444 }
24445 break;
24446
24447 case BFD_RELOC_ARM_LDRS_PC_G0:
24448 case BFD_RELOC_ARM_LDRS_PC_G1:
24449 case BFD_RELOC_ARM_LDRS_PC_G2:
24450 case BFD_RELOC_ARM_LDRS_SB_G0:
24451 case BFD_RELOC_ARM_LDRS_SB_G1:
24452 case BFD_RELOC_ARM_LDRS_SB_G2:
24453 gas_assert (!fixP->fx_done);
24454 if (!seg->use_rela_p)
24455 {
24456 bfd_vma insn;
24457 bfd_vma addend_abs = abs (value);
24458
24459 /* Check that the absolute value of the addend can be
24460 encoded in 8 bits. */
24461 if (addend_abs >= 0x100)
24462 as_bad_where (fixP->fx_file, fixP->fx_line,
24463 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
24464 (unsigned long) addend_abs);
24465
24466 /* Extract the instruction. */
24467 insn = md_chars_to_number (buf, INSN_SIZE);
24468
24469 /* If the addend is negative, clear bit 23 of the instruction.
24470 Otherwise set it. */
24471 if (value < 0)
24472 insn &= ~(1 << 23);
24473 else
24474 insn |= 1 << 23;
24475
24476 /* Place the first four bits of the absolute value of the addend
24477 into the first 4 bits of the instruction, and the remaining
24478 four into bits 8 .. 11. */
24479 insn &= 0xfffff0f0;
24480 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
24481
24482 /* Update the instruction. */
24483 md_number_to_chars (buf, insn, INSN_SIZE);
24484 }
24485 break;
24486
24487 case BFD_RELOC_ARM_LDC_PC_G0:
24488 case BFD_RELOC_ARM_LDC_PC_G1:
24489 case BFD_RELOC_ARM_LDC_PC_G2:
24490 case BFD_RELOC_ARM_LDC_SB_G0:
24491 case BFD_RELOC_ARM_LDC_SB_G1:
24492 case BFD_RELOC_ARM_LDC_SB_G2:
24493 gas_assert (!fixP->fx_done);
24494 if (!seg->use_rela_p)
24495 {
24496 bfd_vma insn;
24497 bfd_vma addend_abs = abs (value);
24498
24499 /* Check that the absolute value of the addend is a multiple of
24500 four and, when divided by four, fits in 8 bits. */
24501 if (addend_abs & 0x3)
24502 as_bad_where (fixP->fx_file, fixP->fx_line,
24503 _("bad offset 0x%08lX (must be word-aligned)"),
24504 (unsigned long) addend_abs);
24505
24506 if ((addend_abs >> 2) > 0xff)
24507 as_bad_where (fixP->fx_file, fixP->fx_line,
24508 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24509 (unsigned long) addend_abs);
24510
24511 /* Extract the instruction. */
24512 insn = md_chars_to_number (buf, INSN_SIZE);
24513
24514 /* If the addend is negative, clear bit 23 of the instruction.
24515 Otherwise set it. */
24516 if (value < 0)
24517 insn &= ~(1 << 23);
24518 else
24519 insn |= 1 << 23;
24520
24521 /* Place the addend (divided by four) into the first eight
24522 bits of the instruction. */
24523 insn &= 0xfffffff0;
24524 insn |= addend_abs >> 2;
24525
24526 /* Update the instruction. */
24527 md_number_to_chars (buf, insn, INSN_SIZE);
24528 }
24529 break;
24530
24531 case BFD_RELOC_ARM_V4BX:
24532 /* This will need to go in the object file. */
24533 fixP->fx_done = 0;
24534 break;
24535
24536 case BFD_RELOC_UNUSED:
24537 default:
24538 as_bad_where (fixP->fx_file, fixP->fx_line,
24539 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
24540 }
24541 }
24542
24543 /* Translate internal representation of relocation info to BFD target
24544 format. */
24545
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = XNEW (arelent);

  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type onto a BFD relocation code.  The first
     few cases form a deliberate fall-through cascade: each one only
     handles the PC-relative flavour and otherwise falls into the next
     case, ending in the shared "code = fixp->fx_r_type" default.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      /* These map straight through unchanged.  */
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* For EABI v4 and later, BLX fixups are emitted as plain
	 BRANCH23 relocations.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
      /* ELF-only relocations that map straight through.  */
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      /* Internal fixups should have been resolved in md_apply_fix;
	 reaching here means the symbol could not be resolved.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Anything else cannot be expressed in the object format;
	   report the internal fixup type by name where we know it.  */
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to the GOT symbol itself becomes a GOTPC
     relocation.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
24812
24813 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24814
24815 void
24816 cons_fix_new_arm (fragS * frag,
24817 int where,
24818 int size,
24819 expressionS * exp,
24820 bfd_reloc_code_real_type reloc)
24821 {
24822 int pcrel = 0;
24823
24824 /* Pick a reloc.
24825 FIXME: @@ Should look at CPU word size. */
24826 switch (size)
24827 {
24828 case 1:
24829 reloc = BFD_RELOC_8;
24830 break;
24831 case 2:
24832 reloc = BFD_RELOC_16;
24833 break;
24834 case 4:
24835 default:
24836 reloc = BFD_RELOC_32;
24837 break;
24838 case 8:
24839 reloc = BFD_RELOC_64;
24840 break;
24841 }
24842
24843 #ifdef TE_PE
24844 if (exp->X_op == O_secrel)
24845 {
24846 exp->X_op = O_symbol;
24847 reloc = BFD_RELOC_32_SECREL;
24848 }
24849 #endif
24850
24851 fix_new_exp (frag, where, size, exp, pcrel, reloc);
24852 }
24853
#if defined (OBJ_COFF)
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (fixP->fx_r_type != BFD_RELOC_THUMB_PCREL_BRANCH23
      || fixP->fx_addsy == NULL
      || ! S_IS_DEFINED (fixP->fx_addsy)
      || THUMB_IS_FUNC (fixP->fx_addsy))
    return;

  fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
24871
24872
24873 int
24874 arm_force_relocation (struct fix * fixp)
24875 {
24876 #if defined (OBJ_COFF) && defined (TE_PE)
24877 if (fixp->fx_r_type == BFD_RELOC_RVA)
24878 return 1;
24879 #endif
24880
24881 /* In case we have a call or a branch to a function in ARM ISA mode from
24882 a thumb function or vice-versa force the relocation. These relocations
24883 are cleared off for some cores that might have blx and simple transformations
24884 are possible. */
24885
24886 #ifdef OBJ_ELF
24887 switch (fixp->fx_r_type)
24888 {
24889 case BFD_RELOC_ARM_PCREL_JUMP:
24890 case BFD_RELOC_ARM_PCREL_CALL:
24891 case BFD_RELOC_THUMB_PCREL_BLX:
24892 if (THUMB_IS_FUNC (fixp->fx_addsy))
24893 return 1;
24894 break;
24895
24896 case BFD_RELOC_ARM_PCREL_BLX:
24897 case BFD_RELOC_THUMB_PCREL_BRANCH25:
24898 case BFD_RELOC_THUMB_PCREL_BRANCH20:
24899 case BFD_RELOC_THUMB_PCREL_BRANCH23:
24900 if (ARM_IS_FUNC (fixp->fx_addsy))
24901 return 1;
24902 break;
24903
24904 default:
24905 break;
24906 }
24907 #endif
24908
24909 /* Resolve these relocations even if the symbol is extern or weak.
24910 Technically this is probably wrong due to symbol preemption.
24911 In practice these relocations do not have enough range to be useful
24912 at dynamic link time, and some code (e.g. in the Linux kernel)
24913 expects these references to be resolved. */
24914 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
24915 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
24916 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
24917 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
24918 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24919 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
24920 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
24921 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
24922 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
24923 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
24924 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
24925 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
24926 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
24927 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
24928 return 0;
24929
24930 /* Always leave these relocations for the linker. */
24931 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24932 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24933 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24934 return 1;
24935
24936 /* Always generate relocations against function symbols. */
24937 if (fixp->fx_r_type == BFD_RELOC_32
24938 && fixp->fx_addsy
24939 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
24940 return 1;
24941
24942 return generic_force_reloc (fixp);
24943 }
24944
24945 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24946 /* Relocations against function names must be left unadjusted,
24947 so that the linker can use this information to generate interworking
24948 stubs. The MIPS version of this function
24949 also prevents relocations that are mips-16 specific, but I do not
24950 know why it does this.
24951
24952 FIXME:
24953 There is one other problem that ought to be addressed here, but
24954 which currently is not: Taking the address of a label (rather
24955 than a function) and then later jumping to that address. Such
24956 addresses also ought to have their bottom bit set (assuming that
24957 they reside in Thumb code), but at the moment they will not. */
24958
24959 bfd_boolean
24960 arm_fix_adjustable (fixS * fixP)
24961 {
24962 if (fixP->fx_addsy == NULL)
24963 return 1;
24964
24965 /* Preserve relocations against symbols with function type. */
24966 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
24967 return FALSE;
24968
24969 if (THUMB_IS_FUNC (fixP->fx_addsy)
24970 && fixP->fx_subsy == NULL)
24971 return FALSE;
24972
24973 /* We need the symbol name for the VTABLE entries. */
24974 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
24975 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
24976 return FALSE;
24977
24978 /* Don't allow symbols to be discarded on GOT related relocs. */
24979 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
24980 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
24981 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
24982 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
24983 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
24984 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
24985 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
24986 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
24987 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
24988 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
24989 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
24990 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
24991 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
24992 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
24993 return FALSE;
24994
24995 /* Similarly for group relocations. */
24996 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24997 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24998 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24999 return FALSE;
25000
25001 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
25002 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
25003 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
25004 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
25005 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
25006 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
25007 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
25008 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
25009 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
25010 return FALSE;
25011
25012 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
25013 offsets, so keep these symbols. */
25014 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
25015 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
25016 return FALSE;
25017
25018 return TRUE;
25019 }
25020 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
25021
25022 #ifdef OBJ_ELF
25023 const char *
25024 elf32_arm_target_format (void)
25025 {
25026 #ifdef TE_SYMBIAN
25027 return (target_big_endian
25028 ? "elf32-bigarm-symbian"
25029 : "elf32-littlearm-symbian");
25030 #elif defined (TE_VXWORKS)
25031 return (target_big_endian
25032 ? "elf32-bigarm-vxworks"
25033 : "elf32-littlearm-vxworks");
25034 #elif defined (TE_NACL)
25035 return (target_big_endian
25036 ? "elf32-bigarm-nacl"
25037 : "elf32-littlearm-nacl");
25038 #else
25039 if (target_big_endian)
25040 return "elf32-bigarm";
25041 else
25042 return "elf32-littlearm";
25043 #endif
25044 }
25045
25046 void
25047 armelf_frob_symbol (symbolS * symp,
25048 int * puntp)
25049 {
25050 elf_frob_symbol (symp, puntp);
25051 }
25052 #endif
25053
25054 /* MD interface: Finalization. */
25055
25056 void
25057 arm_cleanup (void)
25058 {
25059 literal_pool * pool;
25060
25061 /* Ensure that all the IT blocks are properly closed. */
25062 check_it_blocks_finished ();
25063
25064 for (pool = list_of_pools; pool; pool = pool->next)
25065 {
25066 /* Put it at the end of the relevant section. */
25067 subseg_set (pool->section, pool->sub_section);
25068 #ifdef OBJ_ELF
25069 arm_elf_change_section ();
25070 #endif
25071 s_ltorg (0);
25072 }
25073 }
25074
25075 #ifdef OBJ_ELF
25076 /* Remove any excess mapping symbols generated for alignment frags in
25077 SEC. We may have created a mapping symbol before a zero byte
25078 alignment; remove it if there's a mapping symbol after the
25079 alignment. */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag, looking at the last mapping symbol recorded in
     each; discard it when a following frag makes it redundant.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      /* The symbol sits exactly on the boundary with the next frag.  */
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward over empty frags to decide whether SYM is needed.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
25140 #endif
25141
25142 /* Adjust the symbol table. This marks Thumb symbols as distinct from
25143 ARM ones. */
25144
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* COFF: rewrite the storage class of every Thumb symbol so the
     linker can tell Thumb code/functions from ARM ones.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* NOTE(review): 0xFF appears to be the COFF flag value used to
	 mark interworking-capable symbols here.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  /* ELF: tag Thumb symbols so disassemblers and the linker know their
     instruction set state.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a, $t, $d, ...) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
25223
25224 /* MD interface: Initialization. */
25225
25226 static void
25227 set_constant_flonums (void)
25228 {
25229 int i;
25230
25231 for (i = 0; i < NUM_FLOAT_VALS; i++)
25232 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
25233 abort ();
25234 }
25235
25236 /* Auto-select Thumb mode if it's the only available instruction set for the
25237 given architecture. */
25238
25239 static void
25240 autoselect_thumb_from_cpu_variant (void)
25241 {
25242 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
25243 opcode_select (16);
25244 }
25245
25246 void
25247 md_begin (void)
25248 {
25249 unsigned mach;
25250 unsigned int i;
25251
25252 if ( (arm_ops_hsh = hash_new ()) == NULL
25253 || (arm_cond_hsh = hash_new ()) == NULL
25254 || (arm_shift_hsh = hash_new ()) == NULL
25255 || (arm_psr_hsh = hash_new ()) == NULL
25256 || (arm_v7m_psr_hsh = hash_new ()) == NULL
25257 || (arm_reg_hsh = hash_new ()) == NULL
25258 || (arm_reloc_hsh = hash_new ()) == NULL
25259 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
25260 as_fatal (_("virtual memory exhausted"));
25261
25262 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
25263 hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
25264 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
25265 hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
25266 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
25267 hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
25268 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
25269 hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
25270 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
25271 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
25272 (void *) (v7m_psrs + i));
25273 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
25274 hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
25275 for (i = 0;
25276 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
25277 i++)
25278 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
25279 (void *) (barrier_opt_names + i));
25280 #ifdef OBJ_ELF
25281 for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
25282 {
25283 struct reloc_entry * entry = reloc_names + i;
25284
25285 if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
25286 /* This makes encode_branch() use the EABI versions of this relocation. */
25287 entry->reloc = BFD_RELOC_UNUSED;
25288
25289 hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
25290 }
25291 #endif
25292
25293 set_constant_flonums ();
25294
25295 /* Set the cpu variant based on the command-line options. We prefer
25296 -mcpu= over -march= if both are set (as for GCC); and we prefer
25297 -mfpu= over any other way of setting the floating point unit.
25298 Use of legacy options with new options are faulted. */
25299 if (legacy_cpu)
25300 {
25301 if (mcpu_cpu_opt || march_cpu_opt)
25302 as_bad (_("use of old and new-style options to set CPU type"));
25303
25304 mcpu_cpu_opt = legacy_cpu;
25305 }
25306 else if (!mcpu_cpu_opt)
25307 {
25308 mcpu_cpu_opt = march_cpu_opt;
25309 dyn_mcpu_ext_opt = dyn_march_ext_opt;
25310 /* Avoid double free in arm_md_end. */
25311 dyn_march_ext_opt = NULL;
25312 }
25313
25314 if (legacy_fpu)
25315 {
25316 if (mfpu_opt)
25317 as_bad (_("use of old and new-style options to set FPU type"));
25318
25319 mfpu_opt = legacy_fpu;
25320 }
25321 else if (!mfpu_opt)
25322 {
25323 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
25324 || defined (TE_NetBSD) || defined (TE_VXWORKS))
25325 /* Some environments specify a default FPU. If they don't, infer it
25326 from the processor. */
25327 if (mcpu_fpu_opt)
25328 mfpu_opt = mcpu_fpu_opt;
25329 else
25330 mfpu_opt = march_fpu_opt;
25331 #else
25332 mfpu_opt = &fpu_default;
25333 #endif
25334 }
25335
25336 if (!mfpu_opt)
25337 {
25338 if (mcpu_cpu_opt != NULL)
25339 mfpu_opt = &fpu_default;
25340 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
25341 mfpu_opt = &fpu_arch_vfp_v2;
25342 else
25343 mfpu_opt = &fpu_arch_fpa;
25344 }
25345
25346 #ifdef CPU_DEFAULT
25347 if (!mcpu_cpu_opt)
25348 {
25349 mcpu_cpu_opt = &cpu_default;
25350 selected_cpu = cpu_default;
25351 }
25352 else if (dyn_mcpu_ext_opt)
25353 ARM_MERGE_FEATURE_SETS (selected_cpu, *mcpu_cpu_opt, *dyn_mcpu_ext_opt);
25354 else
25355 selected_cpu = *mcpu_cpu_opt;
25356 #else
25357 if (mcpu_cpu_opt && dyn_mcpu_ext_opt)
25358 ARM_MERGE_FEATURE_SETS (selected_cpu, *mcpu_cpu_opt, *dyn_mcpu_ext_opt);
25359 else if (mcpu_cpu_opt)
25360 selected_cpu = *mcpu_cpu_opt;
25361 else
25362 mcpu_cpu_opt = &arm_arch_any;
25363 #endif
25364
25365 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25366 if (dyn_mcpu_ext_opt)
25367 ARM_MERGE_FEATURE_SETS (cpu_variant, cpu_variant, *dyn_mcpu_ext_opt);
25368
25369 autoselect_thumb_from_cpu_variant ();
25370
25371 arm_arch_used = thumb_arch_used = arm_arch_none;
25372
25373 #if defined OBJ_COFF || defined OBJ_ELF
25374 {
25375 unsigned int flags = 0;
25376
25377 #if defined OBJ_ELF
25378 flags = meabi_flags;
25379
25380 switch (meabi_flags)
25381 {
25382 case EF_ARM_EABI_UNKNOWN:
25383 #endif
25384 /* Set the flags in the private structure. */
25385 if (uses_apcs_26) flags |= F_APCS26;
25386 if (support_interwork) flags |= F_INTERWORK;
25387 if (uses_apcs_float) flags |= F_APCS_FLOAT;
25388 if (pic_code) flags |= F_PIC;
25389 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
25390 flags |= F_SOFT_FLOAT;
25391
25392 switch (mfloat_abi_opt)
25393 {
25394 case ARM_FLOAT_ABI_SOFT:
25395 case ARM_FLOAT_ABI_SOFTFP:
25396 flags |= F_SOFT_FLOAT;
25397 break;
25398
25399 case ARM_FLOAT_ABI_HARD:
25400 if (flags & F_SOFT_FLOAT)
25401 as_bad (_("hard-float conflicts with specified fpu"));
25402 break;
25403 }
25404
25405 /* Using pure-endian doubles (even if soft-float). */
25406 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
25407 flags |= F_VFP_FLOAT;
25408
25409 #if defined OBJ_ELF
25410 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
25411 flags |= EF_ARM_MAVERICK_FLOAT;
25412 break;
25413
25414 case EF_ARM_EABI_VER4:
25415 case EF_ARM_EABI_VER5:
25416 /* No additional flags to set. */
25417 break;
25418
25419 default:
25420 abort ();
25421 }
25422 #endif
25423 bfd_set_private_flags (stdoutput, flags);
25424
    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
25428 if (atpcs)
25429 {
25430 asection * sec;
25431
25432 sec = bfd_make_section (stdoutput, ".arm.atpcs");
25433
25434 if (sec != NULL)
25435 {
25436 bfd_set_section_flags
25437 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
25438 bfd_set_section_size (stdoutput, sec, 0);
25439 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
25440 }
25441 }
25442 }
25443 #endif
25444
25445 /* Record the CPU type as well. */
25446 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
25447 mach = bfd_mach_arm_iWMMXt2;
25448 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
25449 mach = bfd_mach_arm_iWMMXt;
25450 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
25451 mach = bfd_mach_arm_XScale;
25452 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
25453 mach = bfd_mach_arm_ep9312;
25454 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
25455 mach = bfd_mach_arm_5TE;
25456 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
25457 {
25458 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
25459 mach = bfd_mach_arm_5T;
25460 else
25461 mach = bfd_mach_arm_5;
25462 }
25463 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
25464 {
25465 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
25466 mach = bfd_mach_arm_4T;
25467 else
25468 mach = bfd_mach_arm_4;
25469 }
25470 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
25471 mach = bfd_mach_arm_3M;
25472 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
25473 mach = bfd_mach_arm_3;
25474 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
25475 mach = bfd_mach_arm_2a;
25476 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
25477 mach = bfd_mach_arm_2;
25478 else
25479 mach = bfd_mach_arm_unknown;
25480
25481 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
25482 }
25483
25484 /* Command line processing. */
25485
25486 /* md_parse_option
25487 Invocation line includes a switch not recognized by the base assembler.
25488 See if it's a processor-specific option.
25489
25490 This routine is somewhat complicated by the need for backwards
25491 compatibility (since older releases of gcc can't be changed).
25492 The new options try to make the interface as compatible as
25493 possible with GCC.
25494
25495 New options (supported) are:
25496
25497 -mcpu=<cpu name> Assemble for selected processor
25498 -march=<architecture name> Assemble for selected architecture
25499 -mfpu=<fpu architecture> Assemble for selected FPU.
25500 -EB/-mbig-endian Big-endian
25501 -EL/-mlittle-endian Little-endian
25502 -k Generate PIC code
25503 -mthumb Start in Thumb mode
25504 -mthumb-interwork Code supports ARM/Thumb interworking
25505
25506 -m[no-]warn-deprecated Warn about deprecated features
25507 -m[no-]warn-syms Warn when symbols match instructions
25508
25509 For now we will also provide support for:
25510
25511 -mapcs-32 32-bit Program counter
25512 -mapcs-26 26-bit Program counter
   -mapcs-float		  Floats passed in FP registers
25514 -mapcs-reentrant Reentrant code
25515 -matpcs
   (sometimes these will probably be replaced with -mapcs=<list of options>
   and -matpcs=<list of options>)
25518
   The remaining options are only supported for backwards compatibility.
25520 Cpu variants, the arm part is optional:
25521 -m[arm]1 Currently not supported.
25522 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
25523 -m[arm]3 Arm 3 processor
25524 -m[arm]6[xx], Arm 6 processors
25525 -m[arm]7[xx][t][[d]m] Arm 7 processors
25526 -m[arm]8[10] Arm 8 processors
25527 -m[arm]9[20][tdmi] Arm 9 processors
25528 -mstrongarm[110[0]] StrongARM processors
25529 -mxscale XScale processors
25530 -m[arm]v[2345[t[e]]] Arm architectures
25531 -mall All (except the ARM1)
25532 FP variants:
25533 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
25534 -mfpe-old (No float load/store multiples)
25535 -mvfpxd VFP Single precision
25536 -mvfp All VFP
25537 -mno-fpu Disable all floating point instructions
25538
25539 The following CPU names are recognized:
25540 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
25541 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
   arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
25543 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
25544 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
25545 arm10t arm10e, arm1020t, arm1020e, arm10200e,
25546 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
25547
25548 */
25549
/* Short options recognized by this backend: -m<...> (CPU/arch/FPU and
   mode selection, decoded in md_parse_option) and -k (generate PIC).  */
const char * md_shortopts = "m:k";
25551
/* Long-option codes for getopt.  Bi-endian targets accept both -EB and
   -EL; a single-endian target only defines the code matching its native
   byte order, so the other spelling is rejected at option-parse time.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
/* Rewrite ARMv4 "bx rN" into "mov pc, rN" (see --fix-v4bx).  */
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
25563
/* Long options handled directly by md_parse_option (options starting
   with -m are routed through the tables below instead).  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
25577
/* Entry for a simple boolean/int command-line option: when OPTION is
   seen, *VAR is set to VALUE (VAR may be NULL for accepted-but-ignored
   options).  */
struct arm_option_table
{
  const char * option;		/* Option name to match.  */
  const char * help;		/* Help information.  */
  int *	       var;		/* Variable to change.	*/
  int	       value;		/* What to change it to.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
25586
25587 struct arm_option_table arm_opts[] =
25588 {
25589 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
25590 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
25591 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25592 &support_interwork, 1, NULL},
25593 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
25594 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
25595 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
25596 1, NULL},
25597 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
25598 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
25599 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
25600 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
25601 NULL},
25602
25603 /* These are recognized by the assembler, but have no affect on code. */
25604 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
25605 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
25606
25607 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
25608 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25609 &warn_on_deprecated, 0, NULL},
25610 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
25611 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
25612 {NULL, NULL, NULL, 0, NULL}
25613 };
25614
/* Entry for an obsolete option spelling: when OPTION is seen, *VAR is
   pointed at VALUE (a feature set), and DEPRECATED names the modern
   -mcpu=/-march=/-mfpu= replacement to suggest to the user.  */
struct arm_legacy_option_table
{
  const char * option;			/* Option name to match.  */
  const arm_feature_set ** var;		/* Variable to change.	*/
  const arm_feature_set	value;		/* What to change it to.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
25622
/* Translation table for the deprecated -m<cpu>/-m<arch>/-m<fpu>
   spellings.  Kept only for backwards compatibility; every entry names
   its modern replacement.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.	*/
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
25735
/* Entry describing one -mcpu= value: the base architecture feature set
   (VALUE), any CPU-specific extensions on top of it (EXT), and the FPU
   assumed when the user gives no explicit -mfpu=.  */
struct arm_cpu_option_table
{
  const char * name;
  size_t name_len;		/* strlen (name), precomputed for matching.  */
  const arm_feature_set	value;
  const arm_feature_set	ext;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char * canonical_name;
};
25749
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* Each ARM_CPU_OPT entry is (name, canonical-name, arch feature set,
   extra extensions, default FPU when no -mfpu= is given).  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }

static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all", NULL, ARM_ANY,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm1", NULL, ARM_ARCH_V1,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm2", NULL, ARM_ARCH_V2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm250", NULL, ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm3", NULL, ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm6", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm60", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm600", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm610", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm620", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7m", NULL, ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7d", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dm", NULL, ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7di", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dmi", NULL, ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm70", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700i", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm740t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710c", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7100", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500fe", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi-s", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm8", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm810", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm110", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1100", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1110", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm922t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm940t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9tdmi", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa526", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa626", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),

  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0", NULL, ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm9e", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej-s", NULL, ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-r0", NULL, ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-s", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-r0", NULL, ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-s", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm968e-s", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm10t", NULL, ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10tdmi", NULL, ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10e", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020t", NULL, ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm1020e", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1022e", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ej-s", NULL, ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa606te", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa616te", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa626te", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fmp626", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa726te", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136j-s", NULL, ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136jf-s", NULL, ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2-s", NULL, ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2f-s", NULL, ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1176jz-s", NULL, ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1176jzf-s", NULL, ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  /* Armv8.2-A cores: the FP16 instructions are an extension on top of
     the base architecture, hence the ARM_EXT2_FP16_INST ext field.  */
  ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),

  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale", NULL, ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt", NULL, ARM_ARCH_IWMMXT,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("iwmmxt2", NULL, ARM_ARCH_IWMMXT2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("i80200", NULL, ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* Maverick.	*/
  ARM_CPU_OPT ("ep9312", "ARM920T",
	       ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
	       ARM_ARCH_NONE, FPU_ARCH_MAVERICK),

  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4", NULL, ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("marvell-whitney", NULL, ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_NEON_VFP_V4),

  /* APM X-Gene family.	 */
  ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A,
	       ARM_ARCH_NONE,
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),

  /* Sentinel.	*/
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
26136
/* Entry describing one -march= value: the architecture feature set and
   the FPU assumed when the user gives no explicit -mfpu=.  */
struct arm_arch_option_table
{
  const char * name;
  size_t name_len;		/* strlen (name), precomputed for matching.  */
  const arm_feature_set	value;
  const arm_feature_set	default_fpu;
};
26144
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
/* Each ARM_ARCH_OPT entry is (name, arch feature set, default FPU).  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }

static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kz",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kzt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.	 */
  ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.base",	ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.main",	ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.1-a",	ARM_ARCH_V8_1A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.2-a",	ARM_ARCH_V8_2A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.3-a",	ARM_ARCH_V8_3A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-r",	ARM_ARCH_V8R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.4-a",	ARM_ARCH_V8_4A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  /* Sentinel.	*/
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_ARCH_OPT
26210
/* ISA extensions in the co-processor and main instruction set space.  */

struct arm_option_extension_value_table
{
  /* Extension name as it appears after '+' on the command line.  */
  const char * name;
  /* strlen (NAME), precomputed so lookups can compare without a strlen.  */
  size_t name_len;
  /* Feature bits merged in when the extension is enabled.  */
  const arm_feature_set merge_value;
  /* Feature bits cleared when the extension is disabled via "no<name>".  */
  const arm_feature_set clear_value;
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};
26224
/* The following table must be in alphabetical order with a NULL last entry.
   arm_parse_extension relies on this ordering to enforce that users list
   extensions alphabetically, and on duplicate names (eg. "idiv") appearing
   after the entry command-line parsing should match.  */

/* Entry allowed on a single architecture family (AA); second slot empty.  */
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
/* Entry allowed on two architecture families (AA1, AA2).  */
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }

static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
	       ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
	       ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
  ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       ARM_ARCH_V8_2A),
  /* FP16 multiply-accumulate long: implies the base FP16 instructions.  */
  ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						 | ARM_EXT2_FP16_FML),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
				      | ARM_EXT2_FP16_FML),
	       ARM_ARCH_V8_2A),
  ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
		ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
     Thumb divide instruction.  Due to this having the same name as the
     previous entry, this will be ignored when doing command-line parsing and
     only considered by build attribute selection code.  */
  ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
	       ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
	       ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
	       ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
  ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
		ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
	       ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
	       ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1,
	       ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
		ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
	       ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
					     | ARM_EXT_DIV),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
	       ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
};
#undef ARM_EXT_OPT
26302
/* ISA floating-point and Advanced SIMD extensions.  */
struct arm_option_fpu_value_table
{
  const char * name;		/* FPU name as accepted by -mfpu=.  */
  const arm_feature_set value;	/* Feature bits this FPU provides.  */
};
26309
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Several entries are undocumented legacy or
   per-CPU aliases kept for backwards compatibility.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}
};
26360
/* Generic name/value pair used by the simple option tables below
   (float ABI selection, EABI version selection).  */
struct arm_option_value_table
{
  const char *name;	/* Value name as given on the command line.  */
  long value;		/* Corresponding internal constant.  */
};
26366
/* Values accepted by -mfloat-abi=.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
26374
26375 #ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.
   Values accepted by -meabi=.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
26384 #endif
26385
/* Entry for a long option taking an argument (eg. -mcpu=<name>), matched
   and dispatched by md_parse_option.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
26393
26394 static bfd_boolean
26395 arm_parse_extension (const char *str, const arm_feature_set *opt_set,
26396 arm_feature_set **ext_set_p)
26397 {
26398 /* We insist on extensions being specified in alphabetical order, and with
26399 extensions being added before being removed. We achieve this by having
26400 the global ARM_EXTENSIONS table in alphabetical order, and using the
26401 ADDING_VALUE variable to indicate whether we are adding an extension (1)
26402 or removing it (0) and only allowing it to change in the order
26403 -1 -> 1 -> 0. */
26404 const struct arm_option_extension_value_table * opt = NULL;
26405 const arm_feature_set arm_any = ARM_ANY;
26406 int adding_value = -1;
26407
26408 if (!*ext_set_p)
26409 {
26410 *ext_set_p = XNEW (arm_feature_set);
26411 **ext_set_p = arm_arch_none;
26412 }
26413
26414 while (str != NULL && *str != 0)
26415 {
26416 const char *ext;
26417 size_t len;
26418
26419 if (*str != '+')
26420 {
26421 as_bad (_("invalid architectural extension"));
26422 return FALSE;
26423 }
26424
26425 str++;
26426 ext = strchr (str, '+');
26427
26428 if (ext != NULL)
26429 len = ext - str;
26430 else
26431 len = strlen (str);
26432
26433 if (len >= 2 && strncmp (str, "no", 2) == 0)
26434 {
26435 if (adding_value != 0)
26436 {
26437 adding_value = 0;
26438 opt = arm_extensions;
26439 }
26440
26441 len -= 2;
26442 str += 2;
26443 }
26444 else if (len > 0)
26445 {
26446 if (adding_value == -1)
26447 {
26448 adding_value = 1;
26449 opt = arm_extensions;
26450 }
26451 else if (adding_value != 1)
26452 {
26453 as_bad (_("must specify extensions to add before specifying "
26454 "those to remove"));
26455 return FALSE;
26456 }
26457 }
26458
26459 if (len == 0)
26460 {
26461 as_bad (_("missing architectural extension"));
26462 return FALSE;
26463 }
26464
26465 gas_assert (adding_value != -1);
26466 gas_assert (opt != NULL);
26467
26468 /* Scan over the options table trying to find an exact match. */
26469 for (; opt->name != NULL; opt++)
26470 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
26471 {
26472 int i, nb_allowed_archs =
26473 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
26474 /* Check we can apply the extension to this architecture. */
26475 for (i = 0; i < nb_allowed_archs; i++)
26476 {
26477 /* Empty entry. */
26478 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
26479 continue;
26480 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
26481 break;
26482 }
26483 if (i == nb_allowed_archs)
26484 {
26485 as_bad (_("extension does not apply to the base architecture"));
26486 return FALSE;
26487 }
26488
26489 /* Add or remove the extension. */
26490 if (adding_value)
26491 ARM_MERGE_FEATURE_SETS (**ext_set_p, **ext_set_p,
26492 opt->merge_value);
26493 else
26494 ARM_CLEAR_FEATURE (**ext_set_p, **ext_set_p, opt->clear_value);
26495
26496 /* Allowing Thumb division instructions for ARMv7 in autodetection
26497 rely on this break so that duplicate extensions (extensions
26498 with the same name as a previous extension in the list) are not
26499 considered for command-line parsing. */
26500 break;
26501 }
26502
26503 if (opt->name == NULL)
26504 {
26505 /* Did we fail to find an extension because it wasn't specified in
26506 alphabetical order, or because it does not exist? */
26507
26508 for (opt = arm_extensions; opt->name != NULL; opt++)
26509 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
26510 break;
26511
26512 if (opt->name == NULL)
26513 as_bad (_("unknown architectural extension `%s'"), str);
26514 else
26515 as_bad (_("architectural extensions must be specified in "
26516 "alphabetical order"));
26517
26518 return FALSE;
26519 }
26520 else
26521 {
26522 /* We should skip the extension we've just matched the next time
26523 round. */
26524 opt++;
26525 }
26526
26527 str = ext;
26528 };
26529
26530 return TRUE;
26531 }
26532
/* Parse a -mcpu= option value in STR: a CPU name optionally followed by
   "+ext" architectural extensions which are delegated to
   arm_parse_extension.  On success sets MCPU_CPU_OPT, DYN_MCPU_EXT_OPT and
   MCPU_FPU_OPT, records the CPU name in SELECTED_CPU_NAME and returns
   TRUE; otherwise reports an error and returns FALSE.  */

static bfd_boolean
arm_parse_cpu (const char *str)
{
  const struct arm_cpu_option_table *opt;
  /* Start of the "+ext..." suffix, if present.  */
  const char *ext = strchr (str, '+');
  size_t len;

  if (ext != NULL)
    len = ext - str;
  else
    len = strlen (str);

  if (len == 0)
    {
      as_bad (_("missing cpu name `%s'"), str);
      return FALSE;
    }

  for (opt = arm_cpus; opt->name != NULL; opt++)
    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	if (!dyn_mcpu_ext_opt)
	  dyn_mcpu_ext_opt = XNEW (arm_feature_set);
	*dyn_mcpu_ext_opt = opt->ext;
	mcpu_fpu_opt = &opt->default_fpu;
	if (opt->canonical_name)
	  {
	    /* Canonical names come from the static table and must fit.  */
	    gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
	    strcpy (selected_cpu_name, opt->canonical_name);
	  }
	else
	  {
	    size_t i;

	    /* No canonical name: record the matched name upper-cased,
	       truncated to the buffer size.  */
	    if (len >= sizeof selected_cpu_name)
	      len = (sizeof selected_cpu_name) - 1;

	    for (i = 0; i < len; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }

	if (ext != NULL)
	  return arm_parse_extension (ext, mcpu_cpu_opt, &dyn_mcpu_ext_opt);

	return TRUE;
      }

  as_bad (_("unknown cpu `%s'"), str);
  return FALSE;
}
26585
26586 static bfd_boolean
26587 arm_parse_arch (const char *str)
26588 {
26589 const struct arm_arch_option_table *opt;
26590 const char *ext = strchr (str, '+');
26591 size_t len;
26592
26593 if (ext != NULL)
26594 len = ext - str;
26595 else
26596 len = strlen (str);
26597
26598 if (len == 0)
26599 {
26600 as_bad (_("missing architecture name `%s'"), str);
26601 return FALSE;
26602 }
26603
26604 for (opt = arm_archs; opt->name != NULL; opt++)
26605 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
26606 {
26607 march_cpu_opt = &opt->value;
26608 march_fpu_opt = &opt->default_fpu;
26609 strcpy (selected_cpu_name, opt->name);
26610
26611 if (ext != NULL)
26612 return arm_parse_extension (ext, march_cpu_opt, &dyn_march_ext_opt);
26613
26614 return TRUE;
26615 }
26616
26617 as_bad (_("unknown architecture `%s'\n"), str);
26618 return FALSE;
26619 }
26620
26621 static bfd_boolean
26622 arm_parse_fpu (const char * str)
26623 {
26624 const struct arm_option_fpu_value_table * opt;
26625
26626 for (opt = arm_fpus; opt->name != NULL; opt++)
26627 if (streq (opt->name, str))
26628 {
26629 mfpu_opt = &opt->value;
26630 return TRUE;
26631 }
26632
26633 as_bad (_("unknown floating point format `%s'\n"), str);
26634 return FALSE;
26635 }
26636
26637 static bfd_boolean
26638 arm_parse_float_abi (const char * str)
26639 {
26640 const struct arm_option_value_table * opt;
26641
26642 for (opt = arm_float_abis; opt->name != NULL; opt++)
26643 if (streq (opt->name, str))
26644 {
26645 mfloat_abi_opt = opt->value;
26646 return TRUE;
26647 }
26648
26649 as_bad (_("unknown floating point abi `%s'\n"), str);
26650 return FALSE;
26651 }
26652
26653 #ifdef OBJ_ELF
26654 static bfd_boolean
26655 arm_parse_eabi (const char * str)
26656 {
26657 const struct arm_option_value_table *opt;
26658
26659 for (opt = arm_eabis; opt->name != NULL; opt++)
26660 if (streq (opt->name, str))
26661 {
26662 meabi_flags = opt->value;
26663 return TRUE;
26664 }
26665 as_bad (_("unknown EABI `%s'\n"), str);
26666 return FALSE;
26667 }
26668 #endif
26669
26670 static bfd_boolean
26671 arm_parse_it_mode (const char * str)
26672 {
26673 bfd_boolean ret = TRUE;
26674
26675 if (streq ("arm", str))
26676 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
26677 else if (streq ("thumb", str))
26678 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
26679 else if (streq ("always", str))
26680 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
26681 else if (streq ("never", str))
26682 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
26683 else
26684 {
26685 as_bad (_("unknown implicit IT mode `%s', should be "\
26686 "arm, thumb, always, or never."), str);
26687 ret = FALSE;
26688 }
26689
26690 return ret;
26691 }
26692
26693 static bfd_boolean
26694 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
26695 {
26696 codecomposer_syntax = TRUE;
26697 arm_comment_chars[0] = ';';
26698 arm_line_separator_chars[0] = 0;
26699 return TRUE;
26700 }
26701
/* Long options handled by md_parse_option; must end with a NULL entry.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
26722
/* Handle an ARM-specific command-line option.  C is the option character
   (or a constant from md_longopts) and ARG its argument, if any.  Returns
   non-zero if the option was recognised and handled, 0 otherwise.  */

int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Simple flag options (arm_opts table).  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy CPU/FPU selection options (arm_legacy_opts table).  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      /* Long options with an argument, eg. -mcpu=<name> (arm_long_opts).  */
      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with the text following the
		 "option=" prefix (C holds the first letter of the option,
		 ARG starts at the second, hence the -1).  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
26813
26814 void
26815 md_show_usage (FILE * fp)
26816 {
26817 struct arm_option_table *opt;
26818 struct arm_long_option_table *lopt;
26819
26820 fprintf (fp, _(" ARM-specific assembler options:\n"));
26821
26822 for (opt = arm_opts; opt->option != NULL; opt++)
26823 if (opt->help != NULL)
26824 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
26825
26826 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
26827 if (lopt->help != NULL)
26828 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
26829
26830 #ifdef OPTION_EB
26831 fprintf (fp, _("\
26832 -EB assemble code for a big-endian cpu\n"));
26833 #endif
26834
26835 #ifdef OPTION_EL
26836 fprintf (fp, _("\
26837 -EL assemble code for a little-endian cpu\n"));
26838 #endif
26839
26840 fprintf (fp, _("\
26841 --fix-v4bx Allow BX in ARMv4 code\n"));
26842 }
26843
26844 #ifdef OBJ_ELF
26845
/* Maps a feature set to the EABI Tag_CPU_arch build attribute value.  */
typedef struct
{
  int val;			/* Tag_CPU_arch value; -1 terminates.  */
  arm_feature_set flags;	/* Feature set of the architecture.  */
} cpu_arch_ver_table;
26851
26852 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
26853 chronologically for architectures, with an exception for ARMv6-M and
26854 ARMv6S-M due to legacy reasons. No new architecture should have a
26855 special case. This allows for build attribute selection results to be
26856 stable when new architectures are added. */
26857 static const cpu_arch_ver_table cpu_arch_ver[] =
26858 {
26859 {0, ARM_ARCH_V1},
26860 {0, ARM_ARCH_V2},
26861 {0, ARM_ARCH_V2S},
26862 {0, ARM_ARCH_V3},
26863 {0, ARM_ARCH_V3M},
26864 {1, ARM_ARCH_V4xM},
26865 {1, ARM_ARCH_V4},
26866 {2, ARM_ARCH_V4TxM},
26867 {2, ARM_ARCH_V4T},
26868 {3, ARM_ARCH_V5xM},
26869 {3, ARM_ARCH_V5},
26870 {3, ARM_ARCH_V5TxM},
26871 {3, ARM_ARCH_V5T},
26872 {4, ARM_ARCH_V5TExP},
26873 {4, ARM_ARCH_V5TE},
26874 {5, ARM_ARCH_V5TEJ},
26875 {6, ARM_ARCH_V6},
26876 {7, ARM_ARCH_V6Z},
26877 {7, ARM_ARCH_V6KZ},
26878 {9, ARM_ARCH_V6K},
26879 {8, ARM_ARCH_V6T2},
26880 {8, ARM_ARCH_V6KT2},
26881 {8, ARM_ARCH_V6ZT2},
26882 {8, ARM_ARCH_V6KZT2},
26883
26884 /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
26885 always selected build attributes to match those of ARMv6-M
26886 (resp. ARMv6S-M). However, due to these architectures being a strict
26887 subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
26888 would be selected when fully respecting chronology of architectures.
26889 It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
26890 move them before ARMv7 architectures. */
26891 {11, ARM_ARCH_V6M},
26892 {12, ARM_ARCH_V6SM},
26893
26894 {10, ARM_ARCH_V7},
26895 {10, ARM_ARCH_V7A},
26896 {10, ARM_ARCH_V7R},
26897 {10, ARM_ARCH_V7M},
26898 {10, ARM_ARCH_V7VE},
26899 {13, ARM_ARCH_V7EM},
26900 {14, ARM_ARCH_V8A},
26901 {14, ARM_ARCH_V8_1A},
26902 {14, ARM_ARCH_V8_2A},
26903 {14, ARM_ARCH_V8_3A},
26904 {16, ARM_ARCH_V8M_BASE},
26905 {17, ARM_ARCH_V8M_MAIN},
26906 {15, ARM_ARCH_V8R},
26907 {16, ARM_ARCH_V8_4A},
26908 {-1, ARM_ARCH_NONE}
26909 };
26910
26911 /* Set an attribute if it has not already been set by the user. */
26912
26913 static void
26914 aeabi_set_attribute_int (int tag, int value)
26915 {
26916 if (tag < 1
26917 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26918 || !attributes_set_explicitly[tag])
26919 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
26920 }
26921
26922 static void
26923 aeabi_set_attribute_string (int tag, const char *value)
26924 {
26925 if (tag < 1
26926 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26927 || !attributes_set_explicitly[tag])
26928 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
26929 }
26930
/* Return whether features in the *NEEDED feature set are available via
   extensions for the architecture whose feature set is *ARCH_FSET.  */

static bfd_boolean
have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
			    const arm_feature_set *needed)
{
  int i, nb_allowed_archs;
  arm_feature_set ext_fset;
  const struct arm_option_extension_value_table *opt;

  /* Accumulate in EXT_FSET every feature that some extension applicable to
     *ARCH_FSET could provide.  */
  ext_fset = arm_arch_none;
  for (opt = arm_extensions; opt->name != NULL; opt++)
    {
      /* Extension does not provide any feature we need.  */
      if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
	continue;

      nb_allowed_archs =
	sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
      for (i = 0; i < nb_allowed_archs; i++)
	{
	  /* Empty entry.  */
	  if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
	    break;

	  /* Extension is available, add it.  */
	  if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
	    ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
	}
    }

  /* Can we enable all features in *needed?  */
  return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
}
26966
/* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
   a given architecture feature set *ARCH_EXT_FSET including extension feature
   set *EXT_FSET.  Selection logic used depend on EXACT_MATCH:
   - if true, check for an exact match of the architecture modulo extensions;
   - otherwise, select build attribute value of the first superset
   architecture released so that results remains stable when new architectures
   are added.
   For -march/-mcpu=all the build attribute value of the most featureful
   architecture is returned.  Tag_CPU_arch_profile result is returned in
   PROFILE.  Returns -1 when no suitable architecture is found.  */

static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* Base architecture = requested features minus the extension features.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      /* Compare CPU feature bits only; FPU bits are not relevant here.  */
      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  if (p_ver_ret == NULL)
    return -1;

found:
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
27078
27079 /* Set the public EABI object attributes. */
27080
27081 static void
27082 aeabi_set_public_attributes (void)
27083 {
27084 char profile;
27085 int arch = -1;
27086 int virt_sec = 0;
27087 int fp16_optional = 0;
27088 int skip_exact_match = 0;
27089 arm_feature_set flags, flags_arch, flags_ext;
27090
27091 /* Autodetection mode, choose the architecture based the instructions
27092 actually used. */
27093 if (no_cpu_selected ())
27094 {
27095 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
27096
27097 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
27098 ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);
27099
27100 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
27101 ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
27102
27103 /* Code run during relaxation relies on selected_cpu being set. */
27104 selected_cpu = flags;
27105 }
27106 /* Otherwise, choose the architecture based on the capabilities of the
27107 requested cpu. */
27108 else
27109 flags = selected_cpu;
27110 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
27111
27112 /* Allow the user to override the reported architecture. */
27113 if (object_arch)
27114 {
27115 ARM_CLEAR_FEATURE (flags_arch, *object_arch, fpu_any);
27116 flags_ext = arm_arch_none;
27117 }
27118 else
27119 {
27120 ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
27121 flags_ext = dyn_mcpu_ext_opt ? *dyn_mcpu_ext_opt : arm_arch_none;
27122 skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);
27123 }
27124
27125 /* When this function is run again after relaxation has happened there is no
27126 way to determine whether an architecture or CPU was specified by the user:
27127 - selected_cpu is set above for relaxation to work;
27128 - march_cpu_opt is not set if only -mcpu or .cpu is used;
27129 - mcpu_cpu_opt is set to arm_arch_any for autodetection.
27130 Therefore, if not in -march=all case we first try an exact match and fall
27131 back to autodetection. */
27132 if (!skip_exact_match)
27133 arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
27134 if (arch == -1)
27135 arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
27136 if (arch == -1)
27137 as_bad (_("no architecture contains all the instructions used\n"));
27138
27139 /* Tag_CPU_name. */
27140 if (selected_cpu_name[0])
27141 {
27142 char *q;
27143
27144 q = selected_cpu_name;
27145 if (strncmp (q, "armv", 4) == 0)
27146 {
27147 int i;
27148
27149 q += 4;
27150 for (i = 0; q[i]; i++)
27151 q[i] = TOUPPER (q[i]);
27152 }
27153 aeabi_set_attribute_string (Tag_CPU_name, q);
27154 }
27155
27156 /* Tag_CPU_arch. */
27157 aeabi_set_attribute_int (Tag_CPU_arch, arch);
27158
27159 /* Tag_CPU_arch_profile. */
27160 if (profile != '\0')
27161 aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);
27162
27163 /* Tag_DSP_extension. */
27164 if (dyn_mcpu_ext_opt && ARM_CPU_HAS_FEATURE (*dyn_mcpu_ext_opt, arm_ext_dsp))
27165 aeabi_set_attribute_int (Tag_DSP_extension, 1);
27166
27167 ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
27168 /* Tag_ARM_ISA_use. */
27169 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
27170 || ARM_FEATURE_ZERO (flags_arch))
27171 aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
27172
27173 /* Tag_THUMB_ISA_use. */
27174 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
27175 || ARM_FEATURE_ZERO (flags_arch))
27176 {
27177 int thumb_isa_use;
27178
27179 if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
27180 && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
27181 thumb_isa_use = 3;
27182 else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
27183 thumb_isa_use = 2;
27184 else
27185 thumb_isa_use = 1;
27186 aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
27187 }
27188
27189 /* Tag_VFP_arch. */
27190 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
27191 aeabi_set_attribute_int (Tag_VFP_arch,
27192 ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
27193 ? 7 : 8);
27194 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
27195 aeabi_set_attribute_int (Tag_VFP_arch,
27196 ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
27197 ? 5 : 6);
27198 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
27199 {
27200 fp16_optional = 1;
27201 aeabi_set_attribute_int (Tag_VFP_arch, 3);
27202 }
27203 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
27204 {
27205 aeabi_set_attribute_int (Tag_VFP_arch, 4);
27206 fp16_optional = 1;
27207 }
27208 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
27209 aeabi_set_attribute_int (Tag_VFP_arch, 2);
27210 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
27211 || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
27212 aeabi_set_attribute_int (Tag_VFP_arch, 1);
27213
27214 /* Tag_ABI_HardFP_use. */
27215 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
27216 && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
27217 aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
27218
27219 /* Tag_WMMX_arch. */
27220 if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
27221 aeabi_set_attribute_int (Tag_WMMX_arch, 2);
27222 else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
27223 aeabi_set_attribute_int (Tag_WMMX_arch, 1);
27224
27225 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
27226 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
27227 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
27228 else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
27229 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
27230 else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
27231 {
27232 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
27233 {
27234 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
27235 }
27236 else
27237 {
27238 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
27239 fp16_optional = 1;
27240 }
27241 }
27242
27243 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
27244 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
27245 aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
27246
27247 /* Tag_DIV_use.
27248
27249 We set Tag_DIV_use to two when integer divide instructions have been used
27250 in ARM state, or when Thumb integer divide instructions have been used,
27251 but we have no architecture profile set, nor have we any ARM instructions.
27252
27253 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
27254 by the base architecture.
27255
27256 For new architectures we will have to check these tests. */
27257 gas_assert (arch <= TAG_CPU_ARCH_V8M_MAIN);
27258 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
27259 || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
27260 aeabi_set_attribute_int (Tag_DIV_use, 0);
27261 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
27262 || (profile == '\0'
27263 && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
27264 && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
27265 aeabi_set_attribute_int (Tag_DIV_use, 2);
27266
27267 /* Tag_MP_extension_use. */
27268 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
27269 aeabi_set_attribute_int (Tag_MPextension_use, 1);
27270
27271 /* Tag Virtualization_use. */
27272 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
27273 virt_sec |= 1;
27274 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
27275 virt_sec |= 2;
27276 if (virt_sec != 0)
27277 aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
27278 }
27279
27280 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
27281 finished and free extension feature bits which will not be used anymore. */
27282
27283 void
27284 arm_md_post_relax (void)
27285 {
27286 aeabi_set_public_attributes ();
27287 XDELETE (dyn_mcpu_ext_opt);
27288 dyn_mcpu_ext_opt = NULL;
27289 XDELETE (dyn_march_ext_opt);
27290 dyn_march_ext_opt = NULL;
27291 }
27292
27293 /* Add the default contents for the .ARM.attributes section. */
27294
27295 void
27296 arm_md_end (void)
27297 {
27298 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
27299 return;
27300
27301 aeabi_set_public_attributes ();
27302 }
27303 #endif /* OBJ_ELF */
27304
27305 /* Parse a .cpu directive. */
27306
static void
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_cpu_option_table *opt;
  char *name;
  char saved_char;

  /* Isolate the CPU name by NUL-terminating it in place; the clobbered
     character is restored before returning.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	/* Record the selected CPU and its implied extension bits.  The
	   extension set is heap-allocated lazily and reused on subsequent
	   .cpu directives.  */
	mcpu_cpu_opt = &opt->value;
	if (!dyn_mcpu_ext_opt)
	  dyn_mcpu_ext_opt = XNEW (arm_feature_set);
	*dyn_mcpu_ext_opt = opt->ext;
	ARM_MERGE_FEATURE_SETS (selected_cpu, *mcpu_cpu_opt, *dyn_mcpu_ext_opt);
	/* Prefer the table's canonical spelling; otherwise upper-case the
	   name as given (used later for the Tag_CPU_name attribute).  */
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;
	    for (i = 0; opt->name[i]; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);

	    selected_cpu_name[i] = 0;
	  }
	/* Rebuild the full feature set used for instruction acceptance:
	   CPU features plus FPU features plus any extensions.  */
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	if (dyn_mcpu_ext_opt)
	  ARM_MERGE_FEATURE_SETS (cpu_variant, cpu_variant, *dyn_mcpu_ext_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  /* NAME is still NUL-terminated here; restore SAVED_CHAR only after the
     diagnostic has been issued.  */
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
27350
27351 /* Parse a .arch directive. */
27352
27353 static void
27354 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
27355 {
27356 const struct arm_arch_option_table *opt;
27357 char saved_char;
27358 char *name;
27359
27360 name = input_line_pointer;
27361 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27362 input_line_pointer++;
27363 saved_char = *input_line_pointer;
27364 *input_line_pointer = 0;
27365
27366 /* Skip the first "all" entry. */
27367 for (opt = arm_archs + 1; opt->name != NULL; opt++)
27368 if (streq (opt->name, name))
27369 {
27370 mcpu_cpu_opt = &opt->value;
27371 XDELETE (dyn_mcpu_ext_opt);
27372 dyn_mcpu_ext_opt = NULL;
27373 selected_cpu = *mcpu_cpu_opt;
27374 strcpy (selected_cpu_name, opt->name);
27375 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, *mfpu_opt);
27376 *input_line_pointer = saved_char;
27377 demand_empty_rest_of_line ();
27378 return;
27379 }
27380
27381 as_bad (_("unknown architecture `%s'\n"), name);
27382 *input_line_pointer = saved_char;
27383 ignore_rest_of_line ();
27384 }
27385
27386 /* Parse a .object_arch directive. */
27387
27388 static void
27389 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
27390 {
27391 const struct arm_arch_option_table *opt;
27392 char saved_char;
27393 char *name;
27394
27395 name = input_line_pointer;
27396 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27397 input_line_pointer++;
27398 saved_char = *input_line_pointer;
27399 *input_line_pointer = 0;
27400
27401 /* Skip the first "all" entry. */
27402 for (opt = arm_archs + 1; opt->name != NULL; opt++)
27403 if (streq (opt->name, name))
27404 {
27405 object_arch = &opt->value;
27406 *input_line_pointer = saved_char;
27407 demand_empty_rest_of_line ();
27408 return;
27409 }
27410
27411 as_bad (_("unknown architecture `%s'\n"), name);
27412 *input_line_pointer = saved_char;
27413 ignore_rest_of_line ();
27414 }
27415
27416 /* Parse a .arch_extension directive. */
27417
27418 static void
27419 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
27420 {
27421 const struct arm_option_extension_value_table *opt;
27422 const arm_feature_set arm_any = ARM_ANY;
27423 char saved_char;
27424 char *name;
27425 int adding_value = 1;
27426
27427 name = input_line_pointer;
27428 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27429 input_line_pointer++;
27430 saved_char = *input_line_pointer;
27431 *input_line_pointer = 0;
27432
27433 if (strlen (name) >= 2
27434 && strncmp (name, "no", 2) == 0)
27435 {
27436 adding_value = 0;
27437 name += 2;
27438 }
27439
27440 for (opt = arm_extensions; opt->name != NULL; opt++)
27441 if (streq (opt->name, name))
27442 {
27443 int i, nb_allowed_archs =
27444 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
27445 for (i = 0; i < nb_allowed_archs; i++)
27446 {
27447 /* Empty entry. */
27448 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
27449 continue;
27450 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *mcpu_cpu_opt))
27451 break;
27452 }
27453
27454 if (i == nb_allowed_archs)
27455 {
27456 as_bad (_("architectural extension `%s' is not allowed for the "
27457 "current base architecture"), name);
27458 break;
27459 }
27460
27461 if (!dyn_mcpu_ext_opt)
27462 {
27463 dyn_mcpu_ext_opt = XNEW (arm_feature_set);
27464 *dyn_mcpu_ext_opt = arm_arch_none;
27465 }
27466 if (adding_value)
27467 ARM_MERGE_FEATURE_SETS (*dyn_mcpu_ext_opt, *dyn_mcpu_ext_opt,
27468 opt->merge_value);
27469 else
27470 ARM_CLEAR_FEATURE (*dyn_mcpu_ext_opt, *dyn_mcpu_ext_opt,
27471 opt->clear_value);
27472
27473 ARM_MERGE_FEATURE_SETS (selected_cpu, *mcpu_cpu_opt, *dyn_mcpu_ext_opt);
27474 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, *mfpu_opt);
27475 *input_line_pointer = saved_char;
27476 demand_empty_rest_of_line ();
27477 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
27478 on this return so that duplicate extensions (extensions with the
27479 same name as a previous extension in the list) are not considered
27480 for command-line parsing. */
27481 return;
27482 }
27483
27484 if (opt->name == NULL)
27485 as_bad (_("unknown architecture extension `%s'\n"), name);
27486
27487 *input_line_pointer = saved_char;
27488 ignore_rest_of_line ();
27489 }
27490
27491 /* Parse a .fpu directive. */
27492
27493 static void
27494 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
27495 {
27496 const struct arm_option_fpu_value_table *opt;
27497 char saved_char;
27498 char *name;
27499
27500 name = input_line_pointer;
27501 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27502 input_line_pointer++;
27503 saved_char = *input_line_pointer;
27504 *input_line_pointer = 0;
27505
27506 for (opt = arm_fpus; opt->name != NULL; opt++)
27507 if (streq (opt->name, name))
27508 {
27509 mfpu_opt = &opt->value;
27510 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
27511 if (dyn_mcpu_ext_opt)
27512 ARM_MERGE_FEATURE_SETS (cpu_variant, cpu_variant, *dyn_mcpu_ext_opt);
27513 *input_line_pointer = saved_char;
27514 demand_empty_rest_of_line ();
27515 return;
27516 }
27517
27518 as_bad (_("unknown floating point format `%s'\n"), name);
27519 *input_line_pointer = saved_char;
27520 ignore_rest_of_line ();
27521 }
27522
27523 /* Copy symbol information. */
27524
void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  /* Propagate the target-specific symbol flag bits from SRC to DEST so
     that an alias inherits them (presumably the ARM/Thumb state marking
     -- exact semantics live in the ARM_GET_FLAG definition).  */
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
27530
27531 #ifdef OBJ_ELF
27532 /* Given a symbolic attribute NAME, return the proper integer value.
27533 Returns -1 if the attribute is not known. */
27534
int
arm_convert_symbolic_attribute (const char *name)
{
  /* Mapping from the symbolic attribute names accepted by .eabi_attribute
     to their numeric EABI tag values.  */
  static const struct
  {
    const char * name;
    const int tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
	 also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_name),
      T (Tag_CPU_arch),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_FP_arch),
      T (Tag_VFP_arch),
      T (Tag_WMMX_arch),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align_needed),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align_preserved),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_FP_HP_extension),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_MPextension_use),
      T (Tag_DIV_use),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_T2EE_use),
      T (Tag_Virtualization_use),
      T (Tag_DSP_extension),
      /* We deliberately do not include Tag_MPextension_use_legacy.  */
#undef T
    };
  unsigned int i;

  if (name == NULL)
    return -1;

  /* Linear search is adequate: this runs once per symbolic attribute
     directive, not per instruction.  */
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
27606
27607 /* Apply sym value for relocations only in the case that they are for
27608 local symbols in the same segment as the fixup and you have the
27609 respective architectural feature for blx and simple switches. */
27610
27611 int
27612 arm_apply_sym_value (struct fix * fixP, segT this_seg)
27613 {
27614 if (fixP->fx_addsy
27615 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
27616 /* PR 17444: If the local symbol is in a different section then a reloc
27617 will always be generated for it, so applying the symbol value now
27618 will result in a double offset being stored in the relocation. */
27619 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
27620 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
27621 {
27622 switch (fixP->fx_r_type)
27623 {
27624 case BFD_RELOC_ARM_PCREL_BLX:
27625 case BFD_RELOC_THUMB_PCREL_BRANCH23:
27626 if (ARM_IS_FUNC (fixP->fx_addsy))
27627 return 1;
27628 break;
27629
27630 case BFD_RELOC_ARM_PCREL_CALL:
27631 case BFD_RELOC_THUMB_PCREL_BLX:
27632 if (THUMB_IS_FUNC (fixP->fx_addsy))
27633 return 1;
27634 break;
27635
27636 default:
27637 break;
27638 }
27639
27640 }
27641 return 0;
27642 }
27643 #endif /* OBJ_ELF */