/* NOTE: imported from the binutils-gdb repository, file gas/config/tc-arm.c
   ("Remove support for creating ARM NOREAD sections" revision).  */
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2016 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
static struct
{
  /* Symbol marking the start of the function being unwound.  */
  symbolS *	proc_start;
  /* Symbol for this function's entry in the exception index table.
     NOTE(review): presumably set when the table entry is emitted — confirm.  */
  symbolS *	table_entry;
  /* Personality routine symbol and/or pre-defined personality index;
     set by unwinding directives handled elsewhere in this file.  */
  symbolS *	personality_routine;
  int		personality_index;
  /* The segment containing the function.  */
  segT		saved_seg;
  subsegT	saved_subseg;
  /* Opcodes generated from this function: a growable byte buffer with
     OPCODE_COUNT bytes used out of OPCODE_ALLOC allocated.  */
  unsigned char * opcodes;
  int		opcode_count;
  int		opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT	frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT	pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT	fp_offset;
  int		fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned	fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned	sp_restored:1;
} unwind;
77
78 #endif /* OBJ_ELF */
79
80 /* Results from operand parsing worker functions. */
81
typedef enum
{
  PARSE_OPERAND_SUCCESS,	     /* Operand parsed successfully.  */
  PARSE_OPERAND_FAIL,		     /* Parse failed.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK    /* Parse failed and the caller must not
					try an alternative parse.
					NOTE(review): semantics inferred from
					the name — confirm at use sites.  */
} parse_operand_result;
88
/* Floating-point ABI selected with -mfloat-abi= (stored in
   mfloat_abi_opt below).  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
95
96 /* Types of processor to assemble for. */
97 #ifndef CPU_DEFAULT
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
101
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
104 #endif
105
106 #ifndef FPU_DEFAULT
107 # ifdef TE_LINUX
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
110 # ifdef OBJ_ELF
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
112 # else
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
115 # endif
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
118 # else
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
121 # endif
122 #endif /* ifndef FPU_DEFAULT */
123
124 #define streq(a, b) (strcmp (a, b) == 0)
125
126 static arm_feature_set cpu_variant;
127 static arm_feature_set arm_arch_used;
128 static arm_feature_set thumb_arch_used;
129
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26 = FALSE;
132 static int atpcs = FALSE;
133 static int support_interwork = FALSE;
134 static int uses_apcs_float = FALSE;
135 static int pic_code = FALSE;
136 static int fix_v4bx = FALSE;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated = TRUE;
139
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax = FALSE;
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
155
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
166
167 #ifdef CPU_DEFAULT
168 static const arm_feature_set cpu_default = CPU_DEFAULT;
169 #endif
170
171 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
172 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
173 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
174 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
175 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
176 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
177 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
178 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
179 static const arm_feature_set arm_ext_v4t_5 =
180 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
188 static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
189 static const arm_feature_set arm_ext_v6_notm =
190 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
191 static const arm_feature_set arm_ext_v6_dsp =
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
193 static const arm_feature_set arm_ext_barrier =
194 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
195 static const arm_feature_set arm_ext_msr =
196 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
197 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
198 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
199 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
200 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
201 static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
202 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
203 static const arm_feature_set arm_ext_m =
204 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, ARM_EXT2_V8M);
205 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
206 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
207 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
208 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
209 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
210 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
211 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
212 static const arm_feature_set arm_ext_v6t2_v8m =
213 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
214 /* Instructions shared between ARMv8-A and ARMv8-M. */
215 static const arm_feature_set arm_ext_atomics =
216 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
217 static const arm_feature_set arm_ext_v8_2 =
218 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
219
220 static const arm_feature_set arm_arch_any = ARM_ANY;
221 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1, -1);
222 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
223 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
224 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
225
226 static const arm_feature_set arm_cext_iwmmxt2 =
227 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
228 static const arm_feature_set arm_cext_iwmmxt =
229 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
230 static const arm_feature_set arm_cext_xscale =
231 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
232 static const arm_feature_set arm_cext_maverick =
233 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
234 static const arm_feature_set fpu_fpa_ext_v1 =
235 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
236 static const arm_feature_set fpu_fpa_ext_v2 =
237 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
238 static const arm_feature_set fpu_vfp_ext_v1xd =
239 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
240 static const arm_feature_set fpu_vfp_ext_v1 =
241 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
242 static const arm_feature_set fpu_vfp_ext_v2 =
243 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
244 static const arm_feature_set fpu_vfp_ext_v3xd =
245 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
246 static const arm_feature_set fpu_vfp_ext_v3 =
247 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
248 static const arm_feature_set fpu_vfp_ext_d32 =
249 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
250 static const arm_feature_set fpu_neon_ext_v1 =
251 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
252 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
253 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
254 static const arm_feature_set fpu_vfp_fp16 =
255 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
256 static const arm_feature_set fpu_neon_ext_fma =
257 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
258 static const arm_feature_set fpu_vfp_ext_fma =
259 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
260 static const arm_feature_set fpu_vfp_ext_armv8 =
261 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
262 static const arm_feature_set fpu_vfp_ext_armv8xd =
263 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
264 static const arm_feature_set fpu_neon_ext_armv8 =
265 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
266 static const arm_feature_set fpu_crypto_ext_armv8 =
267 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
268 static const arm_feature_set crc_ext_armv8 =
269 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
270 static const arm_feature_set fpu_neon_ext_v8_1 =
271 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8 | FPU_NEON_EXT_RDMA);
272
273 static int mfloat_abi_opt = -1;
274 /* Record user cpu selection for object attributes. */
275 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
276 /* Must be long enough to hold any of the names in arm_cpus. */
277 static char selected_cpu_name[20];
278
279 extern FLONUM_TYPE generic_floating_point_number;
280
281 /* Return if no cpu was selected on command-line. */
282 static bfd_boolean
283 no_cpu_selected (void)
284 {
285 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
286 }
287
288 #ifdef OBJ_ELF
289 # ifdef EABI_DEFAULT
290 static int meabi_flags = EABI_DEFAULT;
291 # else
292 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
293 # endif
294
295 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
296
297 bfd_boolean
298 arm_is_eabi (void)
299 {
300 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
301 }
302 #endif
303
304 #ifdef OBJ_ELF
305 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
306 symbolS * GOT_symbol;
307 #endif
308
309 /* 0: assemble for ARM,
310 1: assemble for Thumb,
311 2: assemble for Thumb even though target CPU does not support thumb
312 instructions. */
313 static int thumb_mode = 0;
314 /* A value distinct from the possible values for thumb_mode that we
315 can use to record whether thumb_mode has been copied into the
316 tc_frag_data field of a frag. */
317 #define MODE_RECORDED (1 << 4)
318
319 /* Specifies the intrinsic IT insn behavior mode. */
320 enum implicit_it_mode
321 {
322 IMPLICIT_IT_MODE_NEVER = 0x00,
323 IMPLICIT_IT_MODE_ARM = 0x01,
324 IMPLICIT_IT_MODE_THUMB = 0x02,
325 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
326 };
327 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
328
329 /* If unified_syntax is true, we are processing the new unified
330 ARM/Thumb syntax. Important differences from the old ARM mode:
331
332 - Immediate operands do not require a # prefix.
333 - Conditional affixes always appear at the end of the
334 instruction. (For backward compatibility, those instructions
335 that formerly had them in the middle, continue to accept them
336 there.)
337 - The IT instruction may appear, and if it does is validated
338 against subsequent conditional affixes. It does not generate
339 machine code.
340
341 Important differences from the old Thumb mode:
342
343 - Immediate operands do not require a # prefix.
344 - Most of the V6T2 instructions are only available in unified mode.
345 - The .N and .W suffixes are recognized and honored (it is an error
346 if they cannot be honored).
347 - All instructions set the flags if and only if they have an 's' affix.
348 - Conditional affixes may be used. They are validated against
349 preceding IT instructions. Unlike ARM mode, you cannot use a
350 conditional affix except in the scope of an IT instruction. */
351
352 static bfd_boolean unified_syntax = FALSE;
353
354 /* An immediate operand can start with #, and ld*, st*, pld operands
355 can contain [ and ]. We need to tell APP not to elide whitespace
356 before a [, which can appear as the first operand for pld.
357 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
358 const char arm_symbol_chars[] = "#[]{}";
359
360 enum neon_el_type
361 {
362 NT_invtype,
363 NT_untyped,
364 NT_integer,
365 NT_float,
366 NT_poly,
367 NT_signed,
368 NT_unsigned
369 };
370
371 struct neon_type_el
372 {
373 enum neon_el_type type;
374 unsigned size;
375 };
376
377 #define NEON_MAX_TYPE_ELS 4
378
379 struct neon_type
380 {
381 struct neon_type_el el[NEON_MAX_TYPE_ELS];
382 unsigned elems;
383 };
384
385 enum it_instruction_type
386 {
387 OUTSIDE_IT_INSN,
388 INSIDE_IT_INSN,
389 INSIDE_IT_LAST_INSN,
390 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
391 if inside, should be the last one. */
392 NEUTRAL_IT_INSN, /* This could be either inside or outside,
393 i.e. BKPT and NOP. */
394 IT_INSN /* The IT insn has been parsed. */
395 };
396
397 /* The maximum number of operands we need. */
398 #define ARM_IT_MAX_OPERANDS 6
399
400 struct arm_it
401 {
402 const char * error;
403 unsigned long instruction;
404 int size;
405 int size_req;
406 int cond;
407 /* "uncond_value" is set to the value in place of the conditional field in
408 unconditional versions of the instruction, or -1 if nothing is
409 appropriate. */
410 int uncond_value;
411 struct neon_type vectype;
412 /* This does not indicate an actual NEON instruction, only that
413 the mnemonic accepts neon-style type suffixes. */
414 int is_neon;
415 /* Set to the opcode if the instruction needs relaxation.
416 Zero if the instruction is not relaxed. */
417 unsigned long relax;
418 struct
419 {
420 bfd_reloc_code_real_type type;
421 expressionS exp;
422 int pc_rel;
423 } reloc;
424
425 enum it_instruction_type it_insn_type;
426
427 struct
428 {
429 unsigned reg;
430 signed int imm;
431 struct neon_type_el vectype;
432 unsigned present : 1; /* Operand present. */
433 unsigned isreg : 1; /* Operand was a register. */
434 unsigned immisreg : 1; /* .imm field is a second register. */
435 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
436 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
437 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
438 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
439 instructions. This allows us to disambiguate ARM <-> vector insns. */
440 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
441 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
442 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
443 unsigned issingle : 1; /* Operand is VFP single-precision register. */
444 unsigned hasreloc : 1; /* Operand has relocation suffix. */
445 unsigned writeback : 1; /* Operand has trailing ! */
446 unsigned preind : 1; /* Preindexed address. */
447 unsigned postind : 1; /* Postindexed address. */
448 unsigned negative : 1; /* Index register was negated. */
449 unsigned shifted : 1; /* Shift applied to operation. */
450 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
451 } operands[ARM_IT_MAX_OPERANDS];
452 };
453
454 static struct arm_it inst;
455
456 #define NUM_FLOAT_VALS 8
457
458 const char * fp_const[] =
459 {
460 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
461 };
462
463 /* Number of littlenums required to hold an extended precision number. */
464 #define MAX_LITTLENUMS 6
465
466 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
467
468 #define FAIL (-1)
469 #define SUCCESS (0)
470
471 #define SUFF_S 1
472 #define SUFF_D 2
473 #define SUFF_E 3
474 #define SUFF_P 4
475
476 #define CP_T_X 0x00008000
477 #define CP_T_Y 0x00400000
478
479 #define CONDS_BIT 0x00100000
480 #define LOAD_BIT 0x00100000
481
482 #define DOUBLE_LOAD_FLAG 0x00000001
483
484 struct asm_cond
485 {
486 const char * template_name;
487 unsigned long value;
488 };
489
490 #define COND_ALWAYS 0xE
491
492 struct asm_psr
493 {
494 const char * template_name;
495 unsigned long field;
496 };
497
498 struct asm_barrier_opt
499 {
500 const char * template_name;
501 unsigned long value;
502 const arm_feature_set arch;
503 };
504
505 /* The bit that distinguishes CPSR and SPSR. */
506 #define SPSR_BIT (1 << 22)
507
508 /* The individual PSR flag bits. */
509 #define PSR_c (1 << 16)
510 #define PSR_x (1 << 17)
511 #define PSR_s (1 << 18)
512 #define PSR_f (1 << 19)
513
514 struct reloc_entry
515 {
516 char * name;
517 bfd_reloc_code_real_type reloc;
518 };
519
520 enum vfp_reg_pos
521 {
522 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
523 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
524 };
525
526 enum vfp_ldstm_type
527 {
528 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
529 };
530
531 /* Bits for DEFINED field in neon_typed_alias. */
532 #define NTA_HASTYPE 1
533 #define NTA_HASINDEX 2
534
535 struct neon_typed_alias
536 {
537 unsigned char defined;
538 unsigned char index;
539 struct neon_type_el eltype;
540 };
541
542 /* ARM register categories. This includes coprocessor numbers and various
543 architecture extensions' registers. */
/* Each enumerator maps positionally onto reg_expected_msgs below
   (except REG_TYPE_RNB, which has no diagnostic entry).  */
enum arm_reg_type
{
  REG_TYPE_RN,		/* ARM core register.  */
  REG_TYPE_CP,		/* Co-processor number.  */
  REG_TYPE_CN,		/* Co-processor register.  */
  REG_TYPE_FN,		/* FPA register.  */
  REG_TYPE_VFS,		/* VFP single-precision register.  */
  REG_TYPE_VFD,		/* VFP/Neon double-precision register.  */
  REG_TYPE_NQ,		/* Neon quad-precision register.  */
  REG_TYPE_VFSD,	/* VFP single or double precision register.  */
  REG_TYPE_NDQ,		/* Neon double or quad precision register.  */
  REG_TYPE_NSDQ,	/* VFP single, double or Neon quad precision.  */
  REG_TYPE_VFC,		/* VFP system register.  */
  REG_TYPE_MVF,		/* Maverick MVF register.  */
  REG_TYPE_MVD,		/* Maverick MVD register.  */
  REG_TYPE_MVFX,	/* Maverick MVFX register.  */
  REG_TYPE_MVDX,	/* Maverick MVDX register.  */
  REG_TYPE_MVAX,	/* Maverick MVAX register.  */
  REG_TYPE_DSPSC,	/* Maverick DSPSC register.  */
  REG_TYPE_MMXWR,	/* iWMMXt data register.  */
  REG_TYPE_MMXWC,	/* iWMMXt control register.  */
  REG_TYPE_MMXWCG,	/* iWMMXt scalar register.  */
  REG_TYPE_XSCALE,	/* XScale accumulator register.  */
  REG_TYPE_RNB		/* No diagnostic message in reg_expected_msgs.
			   NOTE(review): presumably handled specially — confirm.  */
};
569
570 /* Structure for a hash table entry for a register.
571 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
572 information which states whether a vector type or index is specified (for a
573 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
struct reg_entry
{
  const char *              name;     /* Register name as written in assembly.  */
  unsigned int              number;   /* Register number / encoding value.  */
  unsigned char             type;     /* An enum arm_reg_type value.  */
  unsigned char             builtin;  /* NOTE(review): presumably nonzero for
					 built-in (non-alias) entries — confirm.  */
  struct neon_typed_alias * neon;     /* Extra type/index info for .dn/.qn
					 aliases; NULL otherwise (see comment
					 above this struct).  */
};
582
583 /* Diagnostics used when we don't get a register of the expected type. */
584 const char * const reg_expected_msgs[] =
585 {
586 N_("ARM register expected"),
587 N_("bad or missing co-processor number"),
588 N_("co-processor register expected"),
589 N_("FPA register expected"),
590 N_("VFP single precision register expected"),
591 N_("VFP/Neon double precision register expected"),
592 N_("Neon quad precision register expected"),
593 N_("VFP single or double precision register expected"),
594 N_("Neon double or quad precision register expected"),
595 N_("VFP single, double or Neon quad precision register expected"),
596 N_("VFP system register expected"),
597 N_("Maverick MVF register expected"),
598 N_("Maverick MVD register expected"),
599 N_("Maverick MVFX register expected"),
600 N_("Maverick MVDX register expected"),
601 N_("Maverick MVAX register expected"),
602 N_("Maverick DSPSC register expected"),
603 N_("iWMMXt data register expected"),
604 N_("iWMMXt control register expected"),
605 N_("iWMMXt scalar register expected"),
606 N_("XScale accumulator register expected"),
607 };
608
609 /* Some well known registers that we refer to directly elsewhere. */
610 #define REG_R12 12
611 #define REG_SP 13
612 #define REG_LR 14
613 #define REG_PC 15
614
615 /* ARM instructions take 4bytes in the object file, Thumb instructions
616 take 2: */
617 #define INSN_SIZE 4
618
619 struct asm_opcode
620 {
621 /* Basic string to match. */
622 const char * template_name;
623
624 /* Parameters to instruction. */
625 unsigned int operands[8];
626
627 /* Conditional tag - see opcode_lookup. */
628 unsigned int tag : 4;
629
630 /* Basic instruction code. */
631 unsigned int avalue : 28;
632
633 /* Thumb-format instruction code. */
634 unsigned int tvalue;
635
636 /* Which architecture variant provides this instruction. */
637 const arm_feature_set * avariant;
638 const arm_feature_set * tvariant;
639
640 /* Function to call to encode instruction in ARM format. */
641 void (* aencode) (void);
642
643 /* Function to call to encode instruction in Thumb format. */
644 void (* tencode) (void);
645 };
646
647 /* Defines for various bits that we will want to toggle. */
648 #define INST_IMMEDIATE 0x02000000
649 #define OFFSET_REG 0x02000000
650 #define HWOFFSET_IMM 0x00400000
651 #define SHIFT_BY_REG 0x00000010
652 #define PRE_INDEX 0x01000000
653 #define INDEX_UP 0x00800000
654 #define WRITE_BACK 0x00200000
655 #define LDM_TYPE_2_OR_3 0x00400000
656 #define CPSI_MMOD 0x00020000
657
658 #define LITERAL_MASK 0xf000f000
659 #define OPCODE_MASK 0xfe1fffff
660 #define V4_STR_BIT 0x00000020
661 #define VLDR_VMOV_SAME 0x0040f000
662
663 #define T2_SUBS_PC_LR 0xf3de8f00
664
665 #define DATA_OP_SHIFT 21
666
667 #define T2_OPCODE_MASK 0xfe1fffff
668 #define T2_DATA_OP_SHIFT 21
669
670 #define A_COND_MASK 0xf0000000
671 #define A_PUSH_POP_OP_MASK 0x0fff0000
672
673 /* Opcodes for pushing/poping registers to/from the stack. */
674 #define A1_OPCODE_PUSH 0x092d0000
675 #define A2_OPCODE_PUSH 0x052d0004
676 #define A2_OPCODE_POP 0x049d0004
677
678 /* Codes to distinguish the arithmetic instructions. */
679 #define OPCODE_AND 0
680 #define OPCODE_EOR 1
681 #define OPCODE_SUB 2
682 #define OPCODE_RSB 3
683 #define OPCODE_ADD 4
684 #define OPCODE_ADC 5
685 #define OPCODE_SBC 6
686 #define OPCODE_RSC 7
687 #define OPCODE_TST 8
688 #define OPCODE_TEQ 9
689 #define OPCODE_CMP 10
690 #define OPCODE_CMN 11
691 #define OPCODE_ORR 12
692 #define OPCODE_MOV 13
693 #define OPCODE_BIC 14
694 #define OPCODE_MVN 15
695
696 #define T2_OPCODE_AND 0
697 #define T2_OPCODE_BIC 1
698 #define T2_OPCODE_ORR 2
699 #define T2_OPCODE_ORN 3
700 #define T2_OPCODE_EOR 4
701 #define T2_OPCODE_ADD 8
702 #define T2_OPCODE_ADC 10
703 #define T2_OPCODE_SBC 11
704 #define T2_OPCODE_SUB 13
705 #define T2_OPCODE_RSB 14
706
707 #define T_OPCODE_MUL 0x4340
708 #define T_OPCODE_TST 0x4200
709 #define T_OPCODE_CMN 0x42c0
710 #define T_OPCODE_NEG 0x4240
711 #define T_OPCODE_MVN 0x43c0
712
713 #define T_OPCODE_ADD_R3 0x1800
714 #define T_OPCODE_SUB_R3 0x1a00
715 #define T_OPCODE_ADD_HI 0x4400
716 #define T_OPCODE_ADD_ST 0xb000
717 #define T_OPCODE_SUB_ST 0xb080
718 #define T_OPCODE_ADD_SP 0xa800
719 #define T_OPCODE_ADD_PC 0xa000
720 #define T_OPCODE_ADD_I8 0x3000
721 #define T_OPCODE_SUB_I8 0x3800
722 #define T_OPCODE_ADD_I3 0x1c00
723 #define T_OPCODE_SUB_I3 0x1e00
724
725 #define T_OPCODE_ASR_R 0x4100
726 #define T_OPCODE_LSL_R 0x4080
727 #define T_OPCODE_LSR_R 0x40c0
728 #define T_OPCODE_ROR_R 0x41c0
729 #define T_OPCODE_ASR_I 0x1000
730 #define T_OPCODE_LSL_I 0x0000
731 #define T_OPCODE_LSR_I 0x0800
732
733 #define T_OPCODE_MOV_I8 0x2000
734 #define T_OPCODE_CMP_I8 0x2800
735 #define T_OPCODE_CMP_LR 0x4280
736 #define T_OPCODE_MOV_HR 0x4600
737 #define T_OPCODE_CMP_HR 0x4500
738
739 #define T_OPCODE_LDR_PC 0x4800
740 #define T_OPCODE_LDR_SP 0x9800
741 #define T_OPCODE_STR_SP 0x9000
742 #define T_OPCODE_LDR_IW 0x6800
743 #define T_OPCODE_STR_IW 0x6000
744 #define T_OPCODE_LDR_IH 0x8800
745 #define T_OPCODE_STR_IH 0x8000
746 #define T_OPCODE_LDR_IB 0x7800
747 #define T_OPCODE_STR_IB 0x7000
748 #define T_OPCODE_LDR_RW 0x5800
749 #define T_OPCODE_STR_RW 0x5000
750 #define T_OPCODE_LDR_RH 0x5a00
751 #define T_OPCODE_STR_RH 0x5200
752 #define T_OPCODE_LDR_RB 0x5c00
753 #define T_OPCODE_STR_RB 0x5400
754
755 #define T_OPCODE_PUSH 0xb400
756 #define T_OPCODE_POP 0xbc00
757
758 #define T_OPCODE_BRANCH 0xe000
759
760 #define THUMB_SIZE 2 /* Size of thumb instruction. */
761 #define THUMB_PP_PC_LR 0x0100
762 #define THUMB_LOAD_BIT 0x0800
763 #define THUMB2_LOAD_BIT 0x00100000
764
765 #define BAD_ARGS _("bad arguments to instruction")
766 #define BAD_SP _("r13 not allowed here")
767 #define BAD_PC _("r15 not allowed here")
768 #define BAD_COND _("instruction cannot be conditional")
769 #define BAD_OVERLAP _("registers may not be the same")
770 #define BAD_HIREG _("lo register required")
771 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
772 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode");
773 #define BAD_BRANCH _("branch must be last instruction in IT block")
774 #define BAD_NOT_IT _("instruction not allowed in IT block")
775 #define BAD_FPU _("selected FPU does not support instruction")
776 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
777 #define BAD_IT_COND _("incorrect condition in IT block")
778 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
779 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
780 #define BAD_PC_ADDRESSING \
781 _("cannot use register index with PC-relative addressing")
782 #define BAD_PC_WRITEBACK \
783 _("cannot use writeback with PC-relative addressing")
784 #define BAD_RANGE _("branch out of range")
785 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
786
787 static struct hash_control * arm_ops_hsh;
788 static struct hash_control * arm_cond_hsh;
789 static struct hash_control * arm_shift_hsh;
790 static struct hash_control * arm_psr_hsh;
791 static struct hash_control * arm_v7m_psr_hsh;
792 static struct hash_control * arm_reg_hsh;
793 static struct hash_control * arm_reloc_hsh;
794 static struct hash_control * arm_barrier_opt_hsh;
795
796 /* Stuff needed to resolve the label ambiguity
797 As:
798 ...
799 label: <insn>
800 may differ from:
801 ...
802 label:
803 <insn> */
804
805 symbolS * last_label_seen;
806 static int label_is_thumb_function_name = FALSE;
807
808 /* Literal pool structure. Held on a per-section
809 and per-sub-section basis. */
810
811 #define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  /* The constants collected into this pool; NEXT_FREE_ENTRY is the
     number of slots in use.  */
  expressionS	         literals [MAX_LITERAL_POOL_SIZE];
  unsigned int	         next_free_entry;
  /* Pool identifier, and the symbol that labels the pool when it is
     dumped into the output.  */
  unsigned int	         id;
  symbolS *	         symbol;
  /* The section and sub-section this pool belongs to.  */
  segT		         section;
  subsegT	         sub_section;
#ifdef OBJ_ELF
  /* One source location per literal.  NOTE(review): presumably used to
     emit debug line info for the pool — confirm at fill site.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  /* Next pool in the list_of_pools chain.  */
  struct literal_pool *  next;
  unsigned int		 alignment;
} literal_pool;
826
827 /* Pointer to a linked list of literal pools. */
828 literal_pool * list_of_pools = NULL;
829
/* State machine for .asmfunc/.endasmfunc processing.
   NOTE(review): presumably part of the CodeComposer Studio syntax
   support (see codecomposer_syntax above) — confirm.  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,		/* Not between .asmfunc and .endasmfunc.  */
  WAITING_ASMFUNC_NAME,		/* .asmfunc seen; expecting the name.  */
  WAITING_ENDASMFUNC		/* Inside the body; expecting .endasmfunc.  */
} asmfunc_states;

/* Current .asmfunc parsing state.  */
static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
838
839 #ifdef OBJ_ELF
840 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
841 #else
842 static struct current_it now_it;
843 #endif
844
845 static inline int
846 now_it_compatible (int cond)
847 {
848 return (cond & ~1) == (now_it.cc & ~1);
849 }
850
851 static inline int
852 conditional_insn (void)
853 {
854 return inst.cond != COND_ALWAYS;
855 }
856
857 static int in_it_block (void);
858
859 static int handle_it_state (void);
860
861 static void force_automatic_it_block_close (void);
862
863 static void it_fsm_post_encode (void);
864
865 #define set_it_insn_type(type) \
866 do \
867 { \
868 inst.it_insn_type = type; \
869 if (handle_it_state () == FAIL) \
870 return; \
871 } \
872 while (0)
873
874 #define set_it_insn_type_nonvoid(type, failret) \
875 do \
876 { \
877 inst.it_insn_type = type; \
878 if (handle_it_state () == FAIL) \
879 return failret; \
880 } \
881 while(0)
882
883 #define set_it_insn_type_last() \
884 do \
885 { \
886 if (inst.cond == COND_ALWAYS) \
887 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
888 else \
889 set_it_insn_type (INSIDE_IT_LAST_INSN); \
890 } \
891 while (0)
892
893 /* Pure syntax. */
894
895 /* This array holds the chars that always start a comment. If the
896 pre-processor is disabled, these aren't very useful. */
897 char arm_comment_chars[] = "@";
898
899 /* This array holds the chars that only start a comment at the beginning of
900 a line. If the line seems to have the form '# 123 filename'
901 .line and .file directives will appear in the pre-processed output. */
902 /* Note that input_file.c hand checks for '#' at the beginning of the
903 first line of the input file. This is because the compiler outputs
904 #NO_APP at the beginning of its output. */
905 /* Also note that comments like this one will always work. */
906 const char line_comment_chars[] = "#";
907
908 char arm_line_separator_chars[] = ";";
909
910 /* Chars that can be used to separate mant
911 from exp in floating point numbers. */
912 const char EXP_CHARS[] = "eE";
913
914 /* Chars that mean this number is a floating point constant. */
915 /* As in 0f12.456 */
916 /* or 0d1.2345e12 */
917
918 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
919
920 /* Prefix characters that indicate the start of an immediate
921 value. */
922 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
923
924 /* Separator character handling. */
925
926 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
927
928 static inline int
929 skip_past_char (char ** str, char c)
930 {
931 /* PR gas/14987: Allow for whitespace before the expected character. */
932 skip_whitespace (*str);
933
934 if (**str == c)
935 {
936 (*str)++;
937 return SUCCESS;
938 }
939 else
940 return FAIL;
941 }
942
943 #define skip_past_comma(str) skip_past_char (str, ',')
944
945 /* Arithmetic expressions (possibly involving symbols). */
946
947 /* Return TRUE if anything in the expression is a bignum. */
948
949 static int
950 walk_no_bignums (symbolS * sp)
951 {
952 if (symbol_get_value_expression (sp)->X_op == O_big)
953 return 1;
954
955 if (symbol_get_value_expression (sp)->X_add_symbol)
956 {
957 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
958 || (symbol_get_value_expression (sp)->X_op_symbol
959 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
960 }
961
962 return 0;
963 }
964
/* Nonzero while my_get_expression is active; md_operand uses this to
   mark unparsable operands as O_illegal.  */
static int in_my_get_expression = 0;

/* Third argument to my_get_expression. */
#define GE_NO_PREFIX 0   /* No '#'/'$' prefix allowed.  */
#define GE_IMM_PREFIX 1  /* A '#' or '$' prefix is required.  */
#define GE_OPT_PREFIX 2  /* The prefix is optional.  */
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
#define GE_OPT_PREFIX_BIG 3
974
/* Parse an expression at *STR according to PREFIX_MODE (one of the GE_*
   values above).  On success, store the result in *EP, advance *STR past
   the consumed text and return 0.  On failure, set inst.error (unless it
   is already set), leave *STR at the failure point and return nonzero.
   NOTE(review): the GE_IMM_PREFIX failure path returns FAIL (-1) while
   the others return 1; callers appear to test only for nonzero.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional. */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
                  : GE_OPT_PREFIX;

  /* Enforce (or skip) the immediate prefix as requested.  */
  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
        {
          inst.error = _("immediate expression requires a # prefix");
          return FAIL;
        }
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
        (*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression parser reads from input_line_pointer, so
     temporarily point it at our string.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand(). */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
        inst.error = (ep->X_op == O_absent
                      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on. Big numbers are never valid
     in instructions, which is where this routine is always called. */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
          || (ep->X_add_symbol
              && (walk_no_bignums (ep->X_add_symbol)
                  || (ep->X_op_symbol
                      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
1060
1061 /* Turn a string in input_line_pointer into a floating point constant
1062 of type TYPE, and store the appropriate bytes in *LITP. The number
1063 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1064 returned, or NULL on OK.
1065
   Note that fp constants aren't represented in the normal way on the ARM.
1067 In big endian mode, things are as expected. However, in little endian
1068 mode fp constants are big-endian word-wise, and little-endian byte-wise
1069 within the words. For example, (double) 1.1 in big endian mode is
1070 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1071 the byte sequence 99 99 f1 3f 9a 99 99 99.
1072
1073 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1074
1075 char *
1076 md_atof (int type, char * litP, int * sizeP)
1077 {
1078 int prec;
1079 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1080 char *t;
1081 int i;
1082
1083 switch (type)
1084 {
1085 case 'f':
1086 case 'F':
1087 case 's':
1088 case 'S':
1089 prec = 2;
1090 break;
1091
1092 case 'd':
1093 case 'D':
1094 case 'r':
1095 case 'R':
1096 prec = 4;
1097 break;
1098
1099 case 'x':
1100 case 'X':
1101 prec = 5;
1102 break;
1103
1104 case 'p':
1105 case 'P':
1106 prec = 5;
1107 break;
1108
1109 default:
1110 *sizeP = 0;
1111 return _("Unrecognized or unsupported floating point constant");
1112 }
1113
1114 t = atof_ieee (input_line_pointer, type, words);
1115 if (t)
1116 input_line_pointer = t;
1117 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1118
1119 if (target_big_endian)
1120 {
1121 for (i = 0; i < prec; i++)
1122 {
1123 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1124 litP += sizeof (LITTLENUM_TYPE);
1125 }
1126 }
1127 else
1128 {
1129 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1130 for (i = prec - 1; i >= 0; i--)
1131 {
1132 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1133 litP += sizeof (LITTLENUM_TYPE);
1134 }
1135 else
1136 /* For a 4 byte float the order of elements in `words' is 1 0.
1137 For an 8 byte float the order is 1 0 3 2. */
1138 for (i = 0; i < prec; i += 2)
1139 {
1140 md_number_to_chars (litP, (valueT) words[i + 1],
1141 sizeof (LITTLENUM_TYPE));
1142 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1143 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1144 litP += 2 * sizeof (LITTLENUM_TYPE);
1145 }
1146 }
1147
1148 return NULL;
1149 }
1150
1151 /* We handle all bad expressions here, so that we can report the faulty
1152 instruction in the error message. */
1153 void
1154 md_operand (expressionS * exp)
1155 {
1156 if (in_my_get_expression)
1157 exp->X_op = O_illegal;
1158 }
1159
1160 /* Immediate values. */
1161
1162 /* Generic immediate-value read function for use in directives.
1163 Accepts anything that 'expression' can fold to a constant.
1164 *val receives the number. */
1165 #ifdef OBJ_ELF
1166 static int
1167 immediate_for_directive (int *val)
1168 {
1169 expressionS exp;
1170 exp.X_op = O_illegal;
1171
1172 if (is_immediate_prefix (*input_line_pointer))
1173 {
1174 input_line_pointer++;
1175 expression (&exp);
1176 }
1177
1178 if (exp.X_op != O_constant)
1179 {
1180 as_bad (_("expected #constant"));
1181 ignore_rest_of_line ();
1182 return FAIL;
1183 }
1184 *val = exp.X_add_number;
1185 return SUCCESS;
1186 }
1187 #endif
1188
1189 /* Register parsing. */
1190
1191 /* Generic register parser. CCP points to what should be the
1192 beginning of a register name. If it is indeed a valid register
1193 name, advance CCP over it and return the reg_entry structure;
1194 otherwise return NULL. Does not issue diagnostics. */
1195
1196 static struct reg_entry *
1197 arm_reg_parse_multi (char **ccp)
1198 {
1199 char *start = *ccp;
1200 char *p;
1201 struct reg_entry *reg;
1202
1203 skip_whitespace (start);
1204
1205 #ifdef REGISTER_PREFIX
1206 if (*start != REGISTER_PREFIX)
1207 return NULL;
1208 start++;
1209 #endif
1210 #ifdef OPTIONAL_REGISTER_PREFIX
1211 if (*start == OPTIONAL_REGISTER_PREFIX)
1212 start++;
1213 #endif
1214
1215 p = start;
1216 if (!ISALPHA (*p) || !is_name_beginner (*p))
1217 return NULL;
1218
1219 do
1220 p++;
1221 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1222
1223 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1224
1225 if (!reg)
1226 return NULL;
1227
1228 *ccp = p;
1229 return reg;
1230 }
1231
/* Try the alternative syntaxes accepted for some register classes when
   the canonical lookup (REG, possibly NULL) did not yield type TYPE.
   START points at the unconsumed input; *CCP is advanced on a match.
   Returns the register number, or FAIL.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
                    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes. */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these. */
      if (reg && reg->type == REG_TYPE_CN)
        return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here. */
      {
        unsigned long processor = strtoul (start, ccp, 10);
        if (*ccp != start && processor <= 15)
          return processor;
      }
      /* Fall through.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG. ??? I'm not sure this is true for all
         instructions that take WC registers. */
      if (reg && reg->type == REG_TYPE_MMXWCG)
        return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1269
1270 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1271 return value is the register number or FAIL. */
1272
1273 static int
1274 arm_reg_parse (char **ccp, enum arm_reg_type type)
1275 {
1276 char *start = *ccp;
1277 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1278 int ret;
1279
1280 /* Do not allow a scalar (reg+index) to parse as a register. */
1281 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1282 return FAIL;
1283
1284 if (reg && reg->type == type)
1285 return reg->number;
1286
1287 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1288 return ret;
1289
1290 *ccp = start;
1291 return FAIL;
1292 }
1293
1294 /* Parse a Neon type specifier. *STR should point at the leading '.'
1295 character. Does no verification at this stage that the type fits the opcode
1296 properly. E.g.,
1297
1298 .i32.i32.s16
1299 .s32.f32
1300 .u16
1301
1302 Can all be legally parsed by this function.
1303
1304 Fills in neon_type struct pointer with parsed information, and updates STR
1305 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1306 type, FAIL if not. */
1307
1308 static int
1309 parse_neon_type (struct neon_type *type, char **str)
1310 {
1311 char *ptr = *str;
1312
1313 if (type)
1314 type->elems = 0;
1315
1316 while (type->elems < NEON_MAX_TYPE_ELS)
1317 {
1318 enum neon_el_type thistype = NT_untyped;
1319 unsigned thissize = -1u;
1320
1321 if (*ptr != '.')
1322 break;
1323
1324 ptr++;
1325
1326 /* Just a size without an explicit type. */
1327 if (ISDIGIT (*ptr))
1328 goto parsesize;
1329
1330 switch (TOLOWER (*ptr))
1331 {
1332 case 'i': thistype = NT_integer; break;
1333 case 'f': thistype = NT_float; break;
1334 case 'p': thistype = NT_poly; break;
1335 case 's': thistype = NT_signed; break;
1336 case 'u': thistype = NT_unsigned; break;
1337 case 'd':
1338 thistype = NT_float;
1339 thissize = 64;
1340 ptr++;
1341 goto done;
1342 default:
1343 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1344 return FAIL;
1345 }
1346
1347 ptr++;
1348
1349 /* .f is an abbreviation for .f32. */
1350 if (thistype == NT_float && !ISDIGIT (*ptr))
1351 thissize = 32;
1352 else
1353 {
1354 parsesize:
1355 thissize = strtoul (ptr, &ptr, 10);
1356
1357 if (thissize != 8 && thissize != 16 && thissize != 32
1358 && thissize != 64)
1359 {
1360 as_bad (_("bad size %d in type specifier"), thissize);
1361 return FAIL;
1362 }
1363 }
1364
1365 done:
1366 if (type)
1367 {
1368 type->el[type->elems].type = thistype;
1369 type->el[type->elems].size = thissize;
1370 type->elems++;
1371 }
1372 }
1373
1374 /* Empty/missing type is not a successful parse. */
1375 if (type->elems == 0)
1376 return FAIL;
1377
1378 *str = ptr;
1379
1380 return SUCCESS;
1381 }
1382
1383 /* Errors may be set multiple times during parsing or bit encoding
1384 (particularly in the Neon bits), but usually the earliest error which is set
1385 will be the most meaningful. Avoid overwriting it with later (cascading)
1386 errors by calling this function. */
1387
1388 static void
1389 first_error (const char *err)
1390 {
1391 if (!inst.error)
1392 inst.error = err;
1393 }
1394
1395 /* Parse a single type, e.g. ".s32", leading period included. */
1396 static int
1397 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1398 {
1399 char *str = *ccp;
1400 struct neon_type optype;
1401
1402 if (*str == '.')
1403 {
1404 if (parse_neon_type (&optype, &str) == SUCCESS)
1405 {
1406 if (optype.elems == 1)
1407 *vectype = optype.el[0];
1408 else
1409 {
1410 first_error (_("only one type should be specified for operand"));
1411 return FAIL;
1412 }
1413 }
1414 else
1415 {
1416 first_error (_("vector type expected"));
1417 return FAIL;
1418 }
1419 }
1420 else
1421 return FAIL;
1422
1423 *ccp = str;
1424
1425 return SUCCESS;
1426 }
1427
1428 /* Special meanings for indices (which have a range of 0-7), which will fit into
1429 a 4-bit integer. */
1430
1431 #define NEON_ALL_LANES 15
1432 #define NEON_INTERLEAVE_LANES 14
1433
1434 /* Parse either a register or a scalar, with an optional type. Return the
1435 register number, and optionally fill in the actual type of the register
1436 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1437 type/index information in *TYPEINFO. */
1438
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
                           enum arm_reg_type *rtype,
                           struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with no type or index information.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions. */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
        *ccp = str;
      if (typeinfo)
        *typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted. */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
          && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
          && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
              || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
          && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* Pick up any type/index attached to the name by a typed alias.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix must not conflict with one supplied by
     an alias.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
        {
          first_error (_("can't redefine type for operand"));
          return FAIL;
        }
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional "[index]" or "[]" scalar suffix.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
        {
          first_error (_("only D registers may be indexed"));
          return FAIL;
        }

      if ((atype.defined & NTA_HASINDEX) != 0)
        {
          first_error (_("can't change index for operand"));
          return FAIL;
        }

      atype.defined |= NTA_HASINDEX;

      /* "[]" selects all lanes.  */
      if (skip_past_char (&str, ']') == SUCCESS)
        atype.index = NEON_ALL_LANES;
      else
        {
          expressionS exp;

          my_get_expression (&exp, &str, GE_NO_PREFIX);

          if (exp.X_op != O_constant)
            {
              first_error (_("constant expression required"));
              return FAIL;
            }

          if (skip_past_char (&str, ']') == FAIL)
            return FAIL;

          atype.index = exp.X_add_number;
        }
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1542
/* Like arm_reg_parse, but allow the following extra features:
1544 - If RTYPE is non-zero, return the (possibly restricted) type of the
1545 register (e.g. Neon double or quad reg when either has been requested).
1546 - If this is a Neon vector type with additional type information, fill
1547 in the struct pointed to by VECTYPE (if non-NULL).
1548 This function will fault on encountering a scalar. */
1549
1550 static int
1551 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1552 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1553 {
1554 struct neon_typed_alias atype;
1555 char *str = *ccp;
1556 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1557
1558 if (reg == FAIL)
1559 return FAIL;
1560
1561 /* Do not allow regname(... to parse as a register. */
1562 if (*str == '(')
1563 return FAIL;
1564
1565 /* Do not allow a scalar (reg+index) to parse as a register. */
1566 if ((atype.defined & NTA_HASINDEX) != 0)
1567 {
1568 first_error (_("register operand expected, but got scalar"));
1569 return FAIL;
1570 }
1571
1572 if (vectype)
1573 *vectype = atype.eltype;
1574
1575 *ccp = str;
1576
1577 return reg;
1578 }
1579
1580 #define NEON_SCALAR_REG(X) ((X) >> 4)
1581 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1582
1583 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1584 have enough information to be able to do a good job bounds-checking. So, we
1585 just do easy checks here, and do further checks later. */
1586
1587 static int
1588 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1589 {
1590 int reg;
1591 char *str = *ccp;
1592 struct neon_typed_alias atype;
1593
1594 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1595
1596 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1597 return FAIL;
1598
1599 if (atype.index == NEON_ALL_LANES)
1600 {
1601 first_error (_("scalar must have an index"));
1602 return FAIL;
1603 }
1604 else if (atype.index >= 64 / elsize)
1605 {
1606 first_error (_("scalar index out of range"));
1607 return FAIL;
1608 }
1609
1610 if (type)
1611 *type = atype.eltype;
1612
1613 *ccp = str;
1614
1615 return reg * 16 + atype.index;
1616 }
1617
1618 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1619
1620 static long
1621 parse_reg_list (char ** strp)
1622 {
1623 char * str = * strp;
1624 long range = 0;
1625 int another_range;
1626
1627 /* We come back here if we get ranges concatenated by '+' or '|'. */
1628 do
1629 {
1630 skip_whitespace (str);
1631
1632 another_range = 0;
1633
1634 if (*str == '{')
1635 {
1636 int in_range = 0;
1637 int cur_reg = -1;
1638
1639 str++;
1640 do
1641 {
1642 int reg;
1643
1644 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1645 {
1646 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1647 return FAIL;
1648 }
1649
1650 if (in_range)
1651 {
1652 int i;
1653
1654 if (reg <= cur_reg)
1655 {
1656 first_error (_("bad range in register list"));
1657 return FAIL;
1658 }
1659
1660 for (i = cur_reg + 1; i < reg; i++)
1661 {
1662 if (range & (1 << i))
1663 as_tsktsk
1664 (_("Warning: duplicated register (r%d) in register list"),
1665 i);
1666 else
1667 range |= 1 << i;
1668 }
1669 in_range = 0;
1670 }
1671
1672 if (range & (1 << reg))
1673 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1674 reg);
1675 else if (reg <= cur_reg)
1676 as_tsktsk (_("Warning: register range not in ascending order"));
1677
1678 range |= 1 << reg;
1679 cur_reg = reg;
1680 }
1681 while (skip_past_comma (&str) != FAIL
1682 || (in_range = 1, *str++ == '-'));
1683 str--;
1684
1685 if (skip_past_char (&str, '}') == FAIL)
1686 {
1687 first_error (_("missing `}'"));
1688 return FAIL;
1689 }
1690 }
1691 else
1692 {
1693 expressionS exp;
1694
1695 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1696 return FAIL;
1697
1698 if (exp.X_op == O_constant)
1699 {
1700 if (exp.X_add_number
1701 != (exp.X_add_number & 0x0000ffff))
1702 {
1703 inst.error = _("invalid register mask");
1704 return FAIL;
1705 }
1706
1707 if ((range & exp.X_add_number) != 0)
1708 {
1709 int regno = range & exp.X_add_number;
1710
1711 regno &= -regno;
1712 regno = (1 << regno) - 1;
1713 as_tsktsk
1714 (_("Warning: duplicated register (r%d) in register list"),
1715 regno);
1716 }
1717
1718 range |= exp.X_add_number;
1719 }
1720 else
1721 {
1722 if (inst.reloc.type != 0)
1723 {
1724 inst.error = _("expression too complex");
1725 return FAIL;
1726 }
1727
1728 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1729 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1730 inst.reloc.pc_rel = 0;
1731 }
1732 }
1733
1734 if (*str == '|' || *str == '+')
1735 {
1736 str++;
1737 another_range = 1;
1738 }
1739 }
1740 while (another_range);
1741
1742 *strp = str;
1743 return range;
1744 }
1745
1746 /* Types of registers in a list. */
1747
enum reg_list_els
{
  REGLIST_VFP_S,  /* VFP single-precision (S) registers, max 32.  */
  REGLIST_VFP_D,  /* VFP double-precision (D) registers.  */
  REGLIST_NEON_D  /* Neon D registers; Q names accepted as D pairs.  */
};
1754
1755 /* Parse a VFP register list. If the string is invalid return FAIL.
1756 Otherwise return the number of registers, and set PBASE to the first
1757 register. Parses registers of type ETYPE.
1758 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1759 - Q registers can be used to specify pairs of D registers
1760 - { } can be omitted from around a singleton register list
1761 FIXME: This is not implemented, as it would require backtracking in
1762 some cases, e.g.:
1763 vtbl.8 d3,d4,d5
1764 This could be done (the meaning isn't really ambiguous), but doesn't
1765 fit in well with the current parsing framework.
1766 - 32 D registers may be used (also true for VFPv3).
1767 FIXME: Types are ignored in these register lists, which is probably a
1768 bug. */
1769
1770 static int
1771 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1772 {
1773 char *str = *ccp;
1774 int base_reg;
1775 int new_base;
1776 enum arm_reg_type regtype = (enum arm_reg_type) 0;
1777 int max_regs = 0;
1778 int count = 0;
1779 int warned = 0;
1780 unsigned long mask = 0;
1781 int i;
1782
1783 if (skip_past_char (&str, '{') == FAIL)
1784 {
1785 inst.error = _("expecting {");
1786 return FAIL;
1787 }
1788
1789 switch (etype)
1790 {
1791 case REGLIST_VFP_S:
1792 regtype = REG_TYPE_VFS;
1793 max_regs = 32;
1794 break;
1795
1796 case REGLIST_VFP_D:
1797 regtype = REG_TYPE_VFD;
1798 break;
1799
1800 case REGLIST_NEON_D:
1801 regtype = REG_TYPE_NDQ;
1802 break;
1803 }
1804
1805 if (etype != REGLIST_VFP_S)
1806 {
1807 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1808 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1809 {
1810 max_regs = 32;
1811 if (thumb_mode)
1812 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1813 fpu_vfp_ext_d32);
1814 else
1815 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1816 fpu_vfp_ext_d32);
1817 }
1818 else
1819 max_regs = 16;
1820 }
1821
1822 base_reg = max_regs;
1823
1824 do
1825 {
1826 int setmask = 1, addregs = 1;
1827
1828 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1829
1830 if (new_base == FAIL)
1831 {
1832 first_error (_(reg_expected_msgs[regtype]));
1833 return FAIL;
1834 }
1835
1836 if (new_base >= max_regs)
1837 {
1838 first_error (_("register out of range in list"));
1839 return FAIL;
1840 }
1841
1842 /* Note: a value of 2 * n is returned for the register Q<n>. */
1843 if (regtype == REG_TYPE_NQ)
1844 {
1845 setmask = 3;
1846 addregs = 2;
1847 }
1848
1849 if (new_base < base_reg)
1850 base_reg = new_base;
1851
1852 if (mask & (setmask << new_base))
1853 {
1854 first_error (_("invalid register list"));
1855 return FAIL;
1856 }
1857
1858 if ((mask >> new_base) != 0 && ! warned)
1859 {
1860 as_tsktsk (_("register list not in ascending order"));
1861 warned = 1;
1862 }
1863
1864 mask |= setmask << new_base;
1865 count += addregs;
1866
1867 if (*str == '-') /* We have the start of a range expression */
1868 {
1869 int high_range;
1870
1871 str++;
1872
1873 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1874 == FAIL)
1875 {
1876 inst.error = gettext (reg_expected_msgs[regtype]);
1877 return FAIL;
1878 }
1879
1880 if (high_range >= max_regs)
1881 {
1882 first_error (_("register out of range in list"));
1883 return FAIL;
1884 }
1885
1886 if (regtype == REG_TYPE_NQ)
1887 high_range = high_range + 1;
1888
1889 if (high_range <= new_base)
1890 {
1891 inst.error = _("register range not in ascending order");
1892 return FAIL;
1893 }
1894
1895 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1896 {
1897 if (mask & (setmask << new_base))
1898 {
1899 inst.error = _("invalid register list");
1900 return FAIL;
1901 }
1902
1903 mask |= setmask << new_base;
1904 count += addregs;
1905 }
1906 }
1907 }
1908 while (skip_past_comma (&str) != FAIL);
1909
1910 str++;
1911
1912 /* Sanity check -- should have raised a parse error above. */
1913 if (count == 0 || count > max_regs)
1914 abort ();
1915
1916 *pbase = base_reg;
1917
1918 /* Final test -- the registers must be consecutive. */
1919 mask >>= base_reg;
1920 for (i = 0; i < count; i++)
1921 {
1922 if ((mask & (1u << i)) == 0)
1923 {
1924 inst.error = _("non-contiguous register range");
1925 return FAIL;
1926 }
1927 }
1928
1929 *ccp = str;
1930
1931 return count;
1932 }
1933
1934 /* True if two alias types are the same. */
1935
1936 static bfd_boolean
1937 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1938 {
1939 if (!a && !b)
1940 return TRUE;
1941
1942 if (!a || !b)
1943 return FALSE;
1944
1945 if (a->defined != b->defined)
1946 return FALSE;
1947
1948 if ((a->defined & NTA_HASTYPE) != 0
1949 && (a->eltype.type != b->eltype.type
1950 || a->eltype.size != b->eltype.size))
1951 return FALSE;
1952
1953 if ((a->defined & NTA_HASINDEX) != 0
1954 && (a->index != b->index))
1955 return FALSE;
1956
1957 return TRUE;
1958 }
1959
1960 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1961 The base register is put in *PBASE.
1962 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1963 the return value.
1964 The register stride (minus one) is put in bit 4 of the return value.
1965 Bits [6:5] encode the list length (minus one).
1966 The type of the list elements is put in *ELTYPE, if non-NULL. */
1967
1968 #define NEON_LANE(X) ((X) & 0xf)
1969 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1970 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1971
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
                           struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;
  int count = 0;
  int lane = -1;
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
        {
          first_error (_(reg_expected_msgs[rtype]));
          return FAIL;
        }

      if (base_reg == -1)
        {
          /* First register of the list; its type/index is the reference
             all later entries must match.  */
          base_reg = getreg;
          if (rtype == REG_TYPE_NQ)
            {
              reg_incr = 1;
            }
          firsttype = atype;
        }
      else if (reg_incr == -1)
        {
          /* Second register establishes the stride.  */
          reg_incr = getreg - base_reg;
          if (reg_incr < 1 || reg_incr > 2)
            {
              first_error (_(incr_error));
              return FAIL;
            }
        }
      else if (getreg != base_reg + reg_incr * count)
        {
          first_error (_(incr_error));
          return FAIL;
        }

      if (! neon_alias_types_same (&atype, &firsttype))
        {
          first_error (_(type_error));
          return FAIL;
        }

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
         modes. */
      if (ptr[0] == '-')
        {
          struct neon_typed_alias htype;
          int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
          if (lane == -1)
            lane = NEON_INTERLEAVE_LANES;
          else if (lane != NEON_INTERLEAVE_LANES)
            {
              first_error (_(type_error));
              return FAIL;
            }
          if (reg_incr == -1)
            reg_incr = 1;
          else if (reg_incr != 1)
            {
              first_error (_("don't use Rn-Rm syntax with non-unit stride"));
              return FAIL;
            }
          ptr++;
          hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
          if (hireg == FAIL)
            {
              first_error (_(reg_expected_msgs[rtype]));
              return FAIL;
            }
          if (! neon_alias_types_same (&htype, &firsttype))
            {
              first_error (_(type_error));
              return FAIL;
            }
          count += hireg + dregs - getreg;
          continue;
        }

      /* If we're using Q registers, we can't use [] or [n] syntax. */
      if (rtype == REG_TYPE_NQ)
        {
          count += 2;
          continue;
        }

      if ((atype.defined & NTA_HASINDEX) != 0)
        {
          /* All indexed entries must select the same lane.  */
          if (lane == -1)
            lane = atype.index;
          else if (lane != atype.index)
            {
              first_error (_(type_error));
              return FAIL;
            }
        }
      else if (lane == -1)
        lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
        {
          first_error (_(type_error));
          return FAIL;
        }
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures. */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check. */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Lane in bits [3:0], stride-1 in bit 4, length-1 in bits [6:5].  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2124
2125 /* Parse an explicit relocation suffix on an expression. This is
2126 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2127 arm_reloc_hsh contains no entries, so this function can only
2128 succeed if there is no () after the word. Returns -1 on error,
2129 BFD_RELOC_UNUSED if there wasn't any suffix. */
2130
2131 static int
2132 parse_reloc (char **str)
2133 {
2134 struct reloc_entry *r;
2135 char *p, *q;
2136
2137 if (**str != '(')
2138 return BFD_RELOC_UNUSED;
2139
2140 p = *str + 1;
2141 q = p;
2142
2143 while (*q && *q != ')' && *q != ',')
2144 q++;
2145 if (*q != ')')
2146 return -1;
2147
2148 if ((r = (struct reloc_entry *)
2149 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2150 return -1;
2151
2152 *str = q + 1;
2153 return r->reloc;
2154 }
2155
2156 /* Directives: register aliases. */
2157
2158 static struct reg_entry *
2159 insert_reg_alias (char *str, unsigned number, int type)
2160 {
2161 struct reg_entry *new_reg;
2162 const char *name;
2163
2164 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2165 {
2166 if (new_reg->builtin)
2167 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2168
2169 /* Only warn about a redefinition if it's not defined as the
2170 same register. */
2171 else if (new_reg->number != number || new_reg->type != type)
2172 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2173
2174 return NULL;
2175 }
2176
2177 name = xstrdup (str);
2178 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2179
2180 new_reg->name = name;
2181 new_reg->number = number;
2182 new_reg->type = type;
2183 new_reg->builtin = FALSE;
2184 new_reg->neon = NULL;
2185
2186 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2187 abort ();
2188
2189 return new_reg;
2190 }
2191
2192 static void
2193 insert_neon_reg_alias (char *str, int number, int type,
2194 struct neon_typed_alias *atype)
2195 {
2196 struct reg_entry *reg = insert_reg_alias (str, number, type);
2197
2198 if (!reg)
2199 {
2200 first_error (_("attempt to redefine typed alias"));
2201 return;
2202 }
2203
2204 if (atype)
2205 {
2206 reg->neon = (struct neon_typed_alias *)
2207 xmalloc (sizeof (struct neon_typed_alias));
2208 *reg->neon = *atype;
2209 }
2210 }
2211
2212 /* Look for the .req directive. This is of the form:
2213
2214 new_register_name .req existing_register_name
2215
2216 If we find one, or if it looks sufficiently like one that we want to
2217 handle any error here, return TRUE. Otherwise return FALSE. */
2218
2219 static bfd_boolean
2220 create_register_alias (char * newname, char *p)
2221 {
2222 struct reg_entry *old;
2223 char *oldname, *nbuf;
2224 size_t nlen;
2225
2226 /* The input scrubber ensures that whitespace after the mnemonic is
2227 collapsed to single spaces. */
2228 oldname = p;
2229 if (strncmp (oldname, " .req ", 6) != 0)
2230 return FALSE;
2231
2232 oldname += 6;
2233 if (*oldname == '\0')
2234 return FALSE;
2235
2236 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2237 if (!old)
2238 {
2239 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2240 return TRUE;
2241 }
2242
2243 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2244 the desired alias name, and p points to its end. If not, then
2245 the desired alias name is in the global original_case_string. */
2246 #ifdef TC_CASE_SENSITIVE
2247 nlen = p - newname;
2248 #else
2249 newname = original_case_string;
2250 nlen = strlen (newname);
2251 #endif
2252
2253 nbuf = (char *) alloca (nlen + 1);
2254 memcpy (nbuf, newname, nlen);
2255 nbuf[nlen] = '\0';
2256
2257 /* Create aliases under the new name as stated; an all-lowercase
2258 version of the new name; and an all-uppercase version of the new
2259 name. */
2260 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2261 {
2262 for (p = nbuf; *p; p++)
2263 *p = TOUPPER (*p);
2264
2265 if (strncmp (nbuf, newname, nlen))
2266 {
2267 /* If this attempt to create an additional alias fails, do not bother
2268 trying to create the all-lower case alias. We will fail and issue
2269 a second, duplicate error message. This situation arises when the
2270 programmer does something like:
2271 foo .req r0
2272 Foo .req r1
2273 The second .req creates the "Foo" alias but then fails to create
2274 the artificial FOO alias because it has already been created by the
2275 first .req. */
2276 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2277 return TRUE;
2278 }
2279
2280 for (p = nbuf; *p; p++)
2281 *p = TOLOWER (*p);
2282
2283 if (strncmp (nbuf, newname, nlen))
2284 insert_reg_alias (nbuf, old->number, old->type);
2285 }
2286
2287 return TRUE;
2288 }
2289
2290 /* Create a Neon typed/indexed register alias using directives, e.g.:
2291 X .dn d5.s32[1]
2292 Y .qn 6.s16
2293 Z .dn d7
2294 T .dn Z[0]
2295 These typed registers can be used instead of the types specified after the
2296 Neon mnemonic, so long as all operands given have types. Types can also be
2297 specified directly, e.g.:
2298 vadd d0.s32, d1.s32, d2.s32 */
2299
2300 static bfd_boolean
2301 create_neon_reg_alias (char *newname, char *p)
2302 {
2303 enum arm_reg_type basetype;
2304 struct reg_entry *basereg;
2305 struct reg_entry mybasereg;
2306 struct neon_type ntype;
2307 struct neon_typed_alias typeinfo;
2308 char *namebuf, *nameend ATTRIBUTE_UNUSED;
2309 int namelen;
2310
2311 typeinfo.defined = 0;
2312 typeinfo.eltype.type = NT_invtype;
2313 typeinfo.eltype.size = -1;
2314 typeinfo.index = -1;
2315
2316 nameend = p;
2317
2318 if (strncmp (p, " .dn ", 5) == 0)
2319 basetype = REG_TYPE_VFD;
2320 else if (strncmp (p, " .qn ", 5) == 0)
2321 basetype = REG_TYPE_NQ;
2322 else
2323 return FALSE;
2324
2325 p += 5;
2326
2327 if (*p == '\0')
2328 return FALSE;
2329
2330 basereg = arm_reg_parse_multi (&p);
2331
2332 if (basereg && basereg->type != basetype)
2333 {
2334 as_bad (_("bad type for register"));
2335 return FALSE;
2336 }
2337
2338 if (basereg == NULL)
2339 {
2340 expressionS exp;
2341 /* Try parsing as an integer. */
2342 my_get_expression (&exp, &p, GE_NO_PREFIX);
2343 if (exp.X_op != O_constant)
2344 {
2345 as_bad (_("expression must be constant"));
2346 return FALSE;
2347 }
2348 basereg = &mybasereg;
2349 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2350 : exp.X_add_number;
2351 basereg->neon = 0;
2352 }
2353
2354 if (basereg->neon)
2355 typeinfo = *basereg->neon;
2356
2357 if (parse_neon_type (&ntype, &p) == SUCCESS)
2358 {
2359 /* We got a type. */
2360 if (typeinfo.defined & NTA_HASTYPE)
2361 {
2362 as_bad (_("can't redefine the type of a register alias"));
2363 return FALSE;
2364 }
2365
2366 typeinfo.defined |= NTA_HASTYPE;
2367 if (ntype.elems != 1)
2368 {
2369 as_bad (_("you must specify a single type only"));
2370 return FALSE;
2371 }
2372 typeinfo.eltype = ntype.el[0];
2373 }
2374
2375 if (skip_past_char (&p, '[') == SUCCESS)
2376 {
2377 expressionS exp;
2378 /* We got a scalar index. */
2379
2380 if (typeinfo.defined & NTA_HASINDEX)
2381 {
2382 as_bad (_("can't redefine the index of a scalar alias"));
2383 return FALSE;
2384 }
2385
2386 my_get_expression (&exp, &p, GE_NO_PREFIX);
2387
2388 if (exp.X_op != O_constant)
2389 {
2390 as_bad (_("scalar index must be constant"));
2391 return FALSE;
2392 }
2393
2394 typeinfo.defined |= NTA_HASINDEX;
2395 typeinfo.index = exp.X_add_number;
2396
2397 if (skip_past_char (&p, ']') == FAIL)
2398 {
2399 as_bad (_("expecting ]"));
2400 return FALSE;
2401 }
2402 }
2403
2404 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2405 the desired alias name, and p points to its end. If not, then
2406 the desired alias name is in the global original_case_string. */
2407 #ifdef TC_CASE_SENSITIVE
2408 namelen = nameend - newname;
2409 #else
2410 newname = original_case_string;
2411 namelen = strlen (newname);
2412 #endif
2413
2414 namebuf = (char *) alloca (namelen + 1);
2415 strncpy (namebuf, newname, namelen);
2416 namebuf[namelen] = '\0';
2417
2418 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2419 typeinfo.defined != 0 ? &typeinfo : NULL);
2420
2421 /* Insert name in all uppercase. */
2422 for (p = namebuf; *p; p++)
2423 *p = TOUPPER (*p);
2424
2425 if (strncmp (namebuf, newname, namelen))
2426 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2427 typeinfo.defined != 0 ? &typeinfo : NULL);
2428
2429 /* Insert name in all lowercase. */
2430 for (p = namebuf; *p; p++)
2431 *p = TOLOWER (*p);
2432
2433 if (strncmp (namebuf, newname, namelen))
2434 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2435 typeinfo.defined != 0 ? &typeinfo : NULL);
2436
2437 return TRUE;
2438 }
2439
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reaching here means .req appeared in statement position, which is
     always a syntax error; the valid form is handled by
     create_register_alias when the label is parsed.  */
  as_bad (_("invalid syntax for .req directive"));
}
2448
/* Like s_req: .dn in statement position is always a syntax error; the
   valid form (ALIAS .dn REG...) is handled by create_neon_reg_alias.  */
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2454
/* Like s_req: .qn in statement position is always a syntax error; the
   valid form (ALIAS .qn REG...) is handled by create_neon_reg_alias.  */
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2460
2461 /* The .unreq directive deletes an alias which was previously defined
2462 by .req. For example:
2463
2464 my_alias .req r11
2465 .unreq my_alias */
2466
2467 static void
2468 s_unreq (int a ATTRIBUTE_UNUSED)
2469 {
2470 char * name;
2471 char saved_char;
2472
2473 name = input_line_pointer;
2474
2475 while (*input_line_pointer != 0
2476 && *input_line_pointer != ' '
2477 && *input_line_pointer != '\n')
2478 ++input_line_pointer;
2479
2480 saved_char = *input_line_pointer;
2481 *input_line_pointer = 0;
2482
2483 if (!*name)
2484 as_bad (_("invalid syntax for .unreq directive"));
2485 else
2486 {
2487 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2488 name);
2489
2490 if (!reg)
2491 as_bad (_("unknown register alias '%s'"), name);
2492 else if (reg->builtin)
2493 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2494 name);
2495 else
2496 {
2497 char * p;
2498 char * nbuf;
2499
2500 hash_delete (arm_reg_hsh, name, FALSE);
2501 free ((char *) reg->name);
2502 if (reg->neon)
2503 free (reg->neon);
2504 free (reg);
2505
2506 /* Also locate the all upper case and all lower case versions.
2507 Do not complain if we cannot find one or the other as it
2508 was probably deleted above. */
2509
2510 nbuf = strdup (name);
2511 for (p = nbuf; *p; p++)
2512 *p = TOUPPER (*p);
2513 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2514 if (reg)
2515 {
2516 hash_delete (arm_reg_hsh, nbuf, FALSE);
2517 free ((char *) reg->name);
2518 if (reg->neon)
2519 free (reg->neon);
2520 free (reg);
2521 }
2522
2523 for (p = nbuf; *p; p++)
2524 *p = TOLOWER (*p);
2525 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2526 if (reg)
2527 {
2528 hash_delete (arm_reg_hsh, nbuf, FALSE);
2529 free ((char *) reg->name);
2530 if (reg->neon)
2531 free (reg->neon);
2532 free (reg);
2533 }
2534
2535 free (nbuf);
2536 }
2537 }
2538
2539 *input_line_pointer = saved_char;
2540 demand_empty_rest_of_line ();
2541 }
2542
2543 /* Directives: Instruction set selection. */
2544
2545 #ifdef OBJ_ELF
2546 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2547 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2548 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2549 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2550
2551 /* Create a new mapping symbol for the transition to STATE. */
2552
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Choose the AAELF mapping symbol name for the new state.  All
     three are untyped local symbols.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark the symbol with the ARM/Thumb/interwork attributes matching
     the state it introduces; $d carries no such attributes.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Discard the superseded symbol from the global symbol chain.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Same-address replacement as described above, but for the most
	 recent mapping symbol of the frag.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2626
2627 /* We must sometimes convert a region marked as code to data during
2628 code alignment, if an odd number of bytes have to be padded. The
2629 code mapping symbol is pushed to an aligned address. */
2630
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* At offset zero the symbol being removed must also be the
	 frag's first mapping symbol; clear that reference too.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* The padding is data ($d); the code mapping symbol for STATE is
     re-established at the aligned address after the padding.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2653
2654 static void mapping_state_2 (enum mstate state, int max_chars);
2655
2656 /* Set the mapping state to STATE. Only call this when about to
2657 emit some STATE bytes to the file. */
2658
2659 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to be marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  mapping_state_2 (state, 0);
}
2692
2693 /* Same as mapping_state, but MAX_CHARS bytes have already been
2694 allocated. Put the mapping symbol that far back. */
2695
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Mapping symbols are only meaningful in normal (loaded) sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      /* If bytes were already emitted before the first code mapping
	 symbol, those bytes must be covered by a $d at the start of
	 the section (see make_mapping_symbol / AAELF).  */
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* MAX_CHARS bytes were already reserved by the caller; place the
     symbol at the start of that reservation, not at the current fix.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2722 #undef TRANSITION
2723 #else
2724 #define mapping_state(x) ((void)0)
2725 #define mapping_state_2(x, y) ((void)0)
2726 #endif
2727
2728 /* Find the real, Thumb encoded start of a Thumb function. */
2729
2730 #ifdef OBJ_COFF
static symbolS *
find_real_start (symbolS * symbolP)
{
  char * real_start;
  const char * name = S_GET_NAME (symbolP);
  symbolS * new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  if (name == NULL)
    abort ();

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location.  These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  /* Look up ".real_start_of<name>"; fall back to the original symbol
     (with a warning) when the stub label does not exist.  */
  real_start = ACONCAT ((STUB_NAME, name, NULL));
  new_target = symbol_find (real_start);

  if (new_target == NULL)
    {
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}
2763 #endif
2764
2765 static void
2766 opcode_select (int width)
2767 {
2768 switch (width)
2769 {
2770 case 16:
2771 if (! thumb_mode)
2772 {
2773 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2774 as_bad (_("selected processor does not support THUMB opcodes"));
2775
2776 thumb_mode = 1;
2777 /* No need to force the alignment, since we will have been
2778 coming from ARM mode, which is word-aligned. */
2779 record_alignment (now_seg, 1);
2780 }
2781 break;
2782
2783 case 32:
2784 if (thumb_mode)
2785 {
2786 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2787 as_bad (_("selected processor does not support ARM opcodes"));
2788
2789 thumb_mode = 0;
2790
2791 if (!need_pass_2)
2792 frag_align (2, 0, 0);
2793
2794 record_alignment (now_seg, 1);
2795 }
2796 break;
2797
2798 default:
2799 as_bad (_("invalid instruction size selected (%d)"), width);
2800 }
2801 }
2802
/* Handle the .arm directive: switch to 32-bit ARM encoding.  */
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2809
/* Handle the .thumb directive: switch to 16-bit Thumb encoding.  */
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2816
2817 static void
2818 s_code (int unused ATTRIBUTE_UNUSED)
2819 {
2820 int temp;
2821
2822 temp = get_absolute_expression ();
2823 switch (temp)
2824 {
2825 case 16:
2826 case 32:
2827 opcode_select (temp);
2828 break;
2829
2830 default:
2831 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2832 }
2833 }
2834
/* Handle the .force_thumb directive.  */
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* 2 (not 1) marks "forced" Thumb mode, bypassing the feature
	 check that opcode_select (16) would perform.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2851
/* Handle the .thumb_func directive: switch to Thumb mode and mark the
   next label as the entry point of a Thumb function.  */
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2861
/* Perform a .set directive, but also mark the alias as
   being a thumb function.  EQUIV is non-zero for .thumb_equiv-style
   behaviour (complain if the symbol is already defined).  */

static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily NUL-terminate the name for the diagnostic, then
	 put the delimiter back so error recovery sees intact input.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Restore the delimiter overwritten above; pseudo_set parses the
     value from input_line_pointer.  */
  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2950
2951 /* Directives: Mode selection. */
2952
2953 /* .syntax [unified|divided] - choose the new unified syntax
2954 (same for Arm and Thumb encoding, modulo slight differences in what
2955 can be represented) or the old divergent syntax for each mode. */
2956 static void
2957 s_syntax (int unused ATTRIBUTE_UNUSED)
2958 {
2959 char *name, delim;
2960
2961 delim = get_symbol_name (& name);
2962
2963 if (!strcasecmp (name, "unified"))
2964 unified_syntax = TRUE;
2965 else if (!strcasecmp (name, "divided"))
2966 unified_syntax = FALSE;
2967 else
2968 {
2969 as_bad (_("unrecognized syntax mode \"%s\""), name);
2970 return;
2971 }
2972 (void) restore_line_pointer (delim);
2973 demand_empty_rest_of_line ();
2974 }
2975
2976 /* Directives: sectioning and alignment. */
2977
/* Handle the .bss directive: switch the current output location to the
   BSS section.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
2990
/* Handle the .even directive: align the current location to a 2-byte
   boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3002
3003 /* Directives: CodeComposer Studio. */
3004
3005 /* .ref (for CodeComposer Studio syntax only). */
3006 static void
3007 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3008 {
3009 if (codecomposer_syntax)
3010 ignore_rest_of_line ();
3011 else
3012 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3013 }
3014
3015 /* If name is not NULL, then it is used for marking the beginning of a
3016 function, wherease if it is NULL then it means the function end. */
3017 static void
3018 asmfunc_debug (const char * name)
3019 {
3020 static const char * last_name = NULL;
3021
3022 if (name != NULL)
3023 {
3024 gas_assert (last_name == NULL);
3025 last_name = name;
3026
3027 if (debug_type == DEBUG_STABS)
3028 stabs_generate_asm_func (name, name);
3029 }
3030 else
3031 {
3032 gas_assert (last_name != NULL);
3033
3034 if (debug_type == DEBUG_STABS)
3035 stabs_generate_asm_endfunc (last_name, last_name);
3036
3037 last_name = NULL;
3038 }
3039 }
3040
3041 static void
3042 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3043 {
3044 if (codecomposer_syntax)
3045 {
3046 switch (asmfunc_state)
3047 {
3048 case OUTSIDE_ASMFUNC:
3049 asmfunc_state = WAITING_ASMFUNC_NAME;
3050 break;
3051
3052 case WAITING_ASMFUNC_NAME:
3053 as_bad (_(".asmfunc repeated."));
3054 break;
3055
3056 case WAITING_ENDASMFUNC:
3057 as_bad (_(".asmfunc without function."));
3058 break;
3059 }
3060 demand_empty_rest_of_line ();
3061 }
3062 else
3063 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3064 }
3065
3066 static void
3067 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3068 {
3069 if (codecomposer_syntax)
3070 {
3071 switch (asmfunc_state)
3072 {
3073 case OUTSIDE_ASMFUNC:
3074 as_bad (_(".endasmfunc without a .asmfunc."));
3075 break;
3076
3077 case WAITING_ASMFUNC_NAME:
3078 as_bad (_(".endasmfunc without function."));
3079 break;
3080
3081 case WAITING_ENDASMFUNC:
3082 asmfunc_state = OUTSIDE_ASMFUNC;
3083 asmfunc_debug (NULL);
3084 break;
3085 }
3086 demand_empty_rest_of_line ();
3087 }
3088 else
3089 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3090 }
3091
3092 static void
3093 s_ccs_def (int name)
3094 {
3095 if (codecomposer_syntax)
3096 s_globl (name);
3097 else
3098 as_bad (_(".def pseudo-op only available with -mccs flag."));
3099 }
3100
3101 /* Directives: Literal pools. */
3102
3103 static literal_pool *
3104 find_literal_pool (void)
3105 {
3106 literal_pool * pool;
3107
3108 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3109 {
3110 if (pool->section == now_seg
3111 && pool->sub_section == now_subseg)
3112 break;
3113 }
3114
3115 return pool;
3116 }
3117
3118 static literal_pool *
3119 find_or_make_literal_pool (void)
3120 {
3121 /* Next literal pool ID number. */
3122 static unsigned int latest_pool_num = 1;
3123 literal_pool * pool;
3124
3125 pool = find_literal_pool ();
3126
3127 if (pool == NULL)
3128 {
3129 /* Create a new pool. */
3130 pool = (literal_pool *) xmalloc (sizeof (* pool));
3131 if (! pool)
3132 return NULL;
3133
3134 pool->next_free_entry = 0;
3135 pool->section = now_seg;
3136 pool->sub_section = now_subseg;
3137 pool->next = list_of_pools;
3138 pool->symbol = NULL;
3139 pool->alignment = 2;
3140
3141 /* Add it to the list. */
3142 list_of_pools = pool;
3143 }
3144
3145 /* New pools, and emptied pools, will have a NULL symbol. */
3146 if (pool->symbol == NULL)
3147 {
3148 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3149 (valueT) 0, &zero_address_frag);
3150 pool->id = latest_pool_num ++;
3151 }
3152
3153 /* Done. */
3154 return pool;
3155 }
3156
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.  NBYTES is the size of the
   literal (4 or 8).  Returns SUCCESS and rewrites inst.reloc.exp to
   reference the pool symbol plus the entry's byte offset, or FAIL
   (with inst.error set) on overflow or an unsuitable operand.  */

static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  if (nbytes == 8)
    {
      /* Split a 64-bit literal into two 32-bit halves, swapped for
	 big-endian targets.  */
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.reloc.exp.X_unsigned ? 0
	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Constant match: same op, value, size and signedness.  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.reloc.exp.X_unsigned))
	    break;

	  /* Symbolic match: same symbol expression and size.  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.reloc.exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.reloc.exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* 8-byte literals occupy two consecutive 4-byte slots starting
	 on an 8-byte boundary; match both halves.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.reloc.exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.reloc.exp.X_unsigned))
	break;

      /* A padding slot inserted for 8-byte alignment can be reused by
	 a later 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.reloc.exp.X_op == O_constant
	        || inst.reloc.exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Misaligned: emit a 4-byte padding slot first.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.reloc.exp;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 32-bit halves as separate constant entries.  */
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Reuse an existing padding slot for this literal.  */
      pool->literals[entry] = inst.reloc.exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's relocation expression to point at the
     pool symbol plus the offset of the (possibly pre-existing) entry.  */
  inst.reloc.exp.X_op	     = O_symbol;
  inst.reloc.exp.X_add_number = pool_size;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3320
3321 bfd_boolean
3322 tc_start_label_without_colon (void)
3323 {
3324 bfd_boolean ret = TRUE;
3325
3326 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3327 {
3328 const char *label = input_line_pointer;
3329
3330 while (!is_end_of_line[(int) label[-1]])
3331 --label;
3332
3333 if (*label == '.')
3334 {
3335 as_bad (_("Invalid label '%s'"), label);
3336 ret = FALSE;
3337 }
3338
3339 asmfunc_debug (label);
3340
3341 asmfunc_state = WAITING_ENDASMFUNC;
3342 }
3343
3344 return ret;
3345 }
3346
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  Thats what these functions do.
   Fills in the pre-allocated SYMBOLP in place and appends it to the
   global symbol chain.  */

static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.	 */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.	 */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy the name into the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table has been frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
3397
/* Dump the pending literal pool, if any, at the current location.
   Implements the .ltorg and .pool directives; also used when the pool
   must be emitted before its references go out of range.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;   /* Nothing to dump.  */

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Literal pools are data: switch the mapping state and emit a $d
     mapping symbol at the pool's start.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* NOTE(review): the embedded \002 control character presumably keeps
     this generated name from clashing with user-written symbols.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3449
3450 #ifdef OBJ_ELF
3451 /* Forward declarations for functions below, in the MD interface
3452 section. */
3453 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3454 static valueT create_unwind_entry (int);
3455 static void start_unwind_section (const segT, int);
3456 static void add_unwind_opcode (valueT, int);
3457 static void flush_pending_unwind (void);
3458
3459 /* Directives: Data. */
3460
3461 static void
3462 s_arm_elf_cons (int nbytes)
3463 {
3464 expressionS exp;
3465
3466 #ifdef md_flush_pending_output
3467 md_flush_pending_output ();
3468 #endif
3469
3470 if (is_it_end_of_statement ())
3471 {
3472 demand_empty_rest_of_line ();
3473 return;
3474 }
3475
3476 #ifdef md_cons_align
3477 md_cons_align (nbytes);
3478 #endif
3479
3480 mapping_state (MAP_DATA);
3481 do
3482 {
3483 int reloc;
3484 char *base = input_line_pointer;
3485
3486 expression (& exp);
3487
3488 if (exp.X_op != O_symbol)
3489 emit_expr (&exp, (unsigned int) nbytes);
3490 else
3491 {
3492 char *before_reloc = input_line_pointer;
3493 reloc = parse_reloc (&input_line_pointer);
3494 if (reloc == -1)
3495 {
3496 as_bad (_("unrecognized relocation suffix"));
3497 ignore_rest_of_line ();
3498 return;
3499 }
3500 else if (reloc == BFD_RELOC_UNUSED)
3501 emit_expr (&exp, (unsigned int) nbytes);
3502 else
3503 {
3504 reloc_howto_type *howto = (reloc_howto_type *)
3505 bfd_reloc_type_lookup (stdoutput,
3506 (bfd_reloc_code_real_type) reloc);
3507 int size = bfd_get_reloc_size (howto);
3508
3509 if (reloc == BFD_RELOC_ARM_PLT32)
3510 {
3511 as_bad (_("(plt) is only valid on branch targets"));
3512 reloc = BFD_RELOC_UNUSED;
3513 size = 0;
3514 }
3515
3516 if (size > nbytes)
3517 as_bad (_("%s relocations do not fit in %d bytes"),
3518 howto->name, nbytes);
3519 else
3520 {
3521 /* We've parsed an expression stopping at O_symbol.
3522 But there may be more expression left now that we
3523 have parsed the relocation marker. Parse it again.
3524 XXX Surely there is a cleaner way to do this. */
3525 char *p = input_line_pointer;
3526 int offset;
3527 char *save_buf = (char *) alloca (input_line_pointer - base);
3528 memcpy (save_buf, base, input_line_pointer - base);
3529 memmove (base + (input_line_pointer - before_reloc),
3530 base, before_reloc - base);
3531
3532 input_line_pointer = base + (input_line_pointer-before_reloc);
3533 expression (&exp);
3534 memcpy (base, save_buf, p - base);
3535
3536 offset = nbytes - size;
3537 p = frag_more (nbytes);
3538 memset (p, 0, nbytes);
3539 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3540 size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3541 }
3542 }
3543 }
3544 }
3545 while (*input_line_pointer++ == ',');
3546
3547 /* Put terminator back into stream. */
3548 input_line_pointer --;
3549 demand_empty_rest_of_line ();
3550 }
3551
3552 /* Emit an expression containing a 32-bit thumb instruction.
3553 Implementation based on put_thumb32_insn. */
3554
3555 static void
3556 emit_thumb32_expr (expressionS * exp)
3557 {
3558 expressionS exp_high = *exp;
3559
3560 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3561 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3562 exp->X_add_number &= 0xffff;
3563 emit_expr (exp, (unsigned int) THUMB_SIZE);
3564 }
3565
/* Guess the size in bytes of a Thumb instruction from its encoding:
   values below 0xe800 are 16-bit instructions (2), values carrying a
   full 32-bit prefix (>= 0xe8000000) are 4 bytes, and anything in
   between is ambiguous (0).  */

static int
thumb_insn_size (int opcode)
{
  unsigned int bits = (unsigned int) opcode;

  if (bits < 0xe800u)
    return 2;
  return bits >= 0xe8000000u ? 4 : 0;
}
3578
/* Emit EXP, which must be a constant, as an instruction of NBYTES
   bytes.  NBYTES of 0 means the width is inferred from the opcode
   value via thumb_insn_size.  Returns TRUE if anything was emitted.
   Used by the .inst family of directives.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width suffix: guess from the encoding.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  /* A 16-bit opcode must fit in 16 bits.  */
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block state machine consistent with the
		 instruction we are about to emit.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* A 32-bit Thumb instruction is emitted as two
		 halfwords on little-endian targets; see
		 emit_thumb32_expr.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3623
/* Like s_arm_elf_cons but do not use md_cons_align and
   set the mapping state to MAP_ARM/MAP_THUMB.

   Implements the .inst, .inst.n and .inst.w directives: emit raw
   instruction encodings.  NBYTES is 2 for .inst.n, 4 for .inst.w and
   0 for plain .inst (width inferred per operand).  */

static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      /* In ARM mode every instruction is 4 bytes, so the .n/.w width
	 suffixes are meaningless.  */
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  /* Emit each comma-separated operand as one instruction.  */
  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3673
/* Parse a .rel31 directive: "<bit>, <expression>".  Emits a 32-bit
   word carrying a BFD_RELOC_ARM_PREL31 reference to <expression>,
   with bit 31 preset from the leading literal 0 or 1.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  /* The first operand selects the value of bit 31.  */
  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* The emitted word is data, not code.  */
  mapping_state (MAP_DATA);

  expression (&exp);

  /* Emit the word with only the high bit filled in; the fixup
     supplies the low 31 bits at link time.  */
  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3713
3714 /* Directives: AEABI stack-unwind tables. */
3715
/* Parse an unwind_fnstart directive.  Simply records the current location
   and resets all per-function unwind state.  */

static void
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (unwind.proc_start)
    {
      as_bad (_("duplicate .fnstart directive"));
      return;
    }

  /* Mark the start of the function.  */
  unwind.proc_start = expr_build_dot ();

  /* Reset the rest of the unwind info.	 */
  unwind.opcode_count = 0;
  unwind.table_entry = NULL;
  unwind.personality_routine = NULL;
  unwind.personality_index = -1;	/* -1: no personality chosen yet.  */
  unwind.frame_size = 0;
  unwind.fp_offset = 0;
  unwind.fp_reg = REG_SP;		/* sp is the frame base until .setfp/.movsp.  */
  unwind.fp_used = 0;
  unwind.sp_restored = 0;
}
3742
3743
3744 /* Parse a handlerdata directive. Creates the exception handling table entry
3745 for the function. */
3746
3747 static void
3748 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3749 {
3750 demand_empty_rest_of_line ();
3751 if (!unwind.proc_start)
3752 as_bad (MISSING_FNSTART);
3753
3754 if (unwind.table_entry)
3755 as_bad (_("duplicate .handlerdata directive"));
3756
3757 create_unwind_entry (1);
3758 }
3759
/* Parse an unwind_fnend directive.  Generates the index table entry:
   two words in the exception index section, the first a PREL31
   reference to the function start, the second either an inline unwind
   entry or a PREL31 reference to the table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.	 */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  /* WHERE is the offset of the two-word entry just reserved.  */
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size BFD_RELOC_NONE fixup records the reference without
	 modifying any bytes.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.	 */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3829
3830
3831 /* Parse an unwind_cantunwind directive. */
3832
3833 static void
3834 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3835 {
3836 demand_empty_rest_of_line ();
3837 if (!unwind.proc_start)
3838 as_bad (MISSING_FNSTART);
3839
3840 if (unwind.personality_routine || unwind.personality_index != -1)
3841 as_bad (_("personality routine specified for cantunwind frame"));
3842
3843 unwind.personality_index = -2;
3844 }
3845
3846
3847 /* Parse a personalityindex directive. */
3848
3849 static void
3850 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3851 {
3852 expressionS exp;
3853
3854 if (!unwind.proc_start)
3855 as_bad (MISSING_FNSTART);
3856
3857 if (unwind.personality_routine || unwind.personality_index != -1)
3858 as_bad (_("duplicate .personalityindex directive"));
3859
3860 expression (&exp);
3861
3862 if (exp.X_op != O_constant
3863 || exp.X_add_number < 0 || exp.X_add_number > 15)
3864 {
3865 as_bad (_("bad personality routine number"));
3866 ignore_rest_of_line ();
3867 return;
3868 }
3869
3870 unwind.personality_index = exp.X_add_number;
3871
3872 demand_empty_rest_of_line ();
3873 }
3874
3875
/* Parse a personality directive: ".personality <symbol>".  Records
   the named symbol as this function's personality routine.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one personality may be given per function.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  /* Put back the character that get_symbol_name replaced with a NUL.  */
  *p = c;
  demand_empty_rest_of_line ();
}
3897
3898
/* Parse a directive saving core registers: ".save {reglist}" with
   r0-r15.  Emits the corresponding pop unwind opcodes and grows the
   recorded frame size by 4 bytes per saved register.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;	/* Bitmask of saved registers, bit N = rN.  */
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the movsp opcode and save sp (bit 13) instead of
	 ip (bit 12).  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.	 */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
3974
3975
/* Parse a directive saving FPA registers: ".save fN, <count>".
   REG is the first FPA register (already parsed by the caller); the
   register count follows a comma.  */

static void
s_arm_unwind_save_fpa (int reg)
{
  expressionS exp;
  int num_regs;
  valueT op;

  /* Get Number of registers to transfer.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    expression (&exp);
  else
    exp.X_op = O_illegal;

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected , <constant>"));
      ignore_rest_of_line ();
      return;
    }

  num_regs = exp.X_add_number;

  if (num_regs < 1 || num_regs > 4)
    {
      as_bad (_("number of registers must be in the range [1:4]"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 4)
    {
      /* Short form.  */
      op = 0xb4 | (num_regs - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xc800 | (reg << 4) | (num_regs - 1);
      add_unwind_opcode (op, 2);
    }
  /* FPA registers occupy 12 bytes each.  */
  unwind.frame_size += num_regs * 12;
}
4023
4024
/* Parse a directive saving VFP registers for ARMv6 and above:
   ".vsave {dreglist}".  Registers d16-d31 (VFPv3) need a different
   unwind opcode from d0-d15, so a list straddling d16 is emitted as
   two opcodes.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes.  */
  unwind.frame_size += count * 8;
}
4073
4074
/* Parse a directive saving VFP registers for pre-ARMv6:
   ".save {dreglist}".  NOTE(review): the extra 4 bytes added to the
   frame size below presumably account for the FSTMX format's padding
   word (contrast the ARMv6 variant above) — confirm against the
   EHABI specification.  */

static void
s_arm_unwind_save_vfp (void)
{
  int count;
  unsigned int reg;
  valueT op;

  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 8)
    {
      /* Short form.  */
      op = 0xb8 | (count - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xb300 | (reg << 4) | (count - 1);
      add_unwind_opcode (op, 2);
    }
  unwind.frame_size += count * 8 + 4;
}
4108
4109
/* Parse a directive saving iWMMXt data registers: ".save {wrlist}".
   Builds a 16-bit mask of saved wr registers, tries to merge with
   previously emitted wr-save opcodes, then emits save opcodes in
   descending register order.  */

static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;	/* Bit N set = wrN saved.  */
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  /* Register range, e.g. wr4-wr10.  */
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.	*/
  flush_pending_unwind ();

  /* Each wr register occupies 8 bytes.  */
  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.	 We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.	 */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	{
	  /* Previous opcode was a short-form wr save (0xc0..0xc7,
	     saving wr10..wr(10+i); see the emission loop below).  */
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      /* Long-form save: the preceding opcode byte carries the
		 start register (high nibble) and count (low nibble).  */
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      op = 0xffff << (reg - 1);
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.	Generate opcodes to save the
	     preceding block.  */
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form.	 */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
error:
  ignore_rest_of_line ();
}
4243
4244 static void
4245 s_arm_unwind_save_mmxwcg (void)
4246 {
4247 int reg;
4248 int hi_reg;
4249 unsigned mask = 0;
4250 valueT op;
4251
4252 if (*input_line_pointer == '{')
4253 input_line_pointer++;
4254
4255 skip_whitespace (input_line_pointer);
4256
4257 do
4258 {
4259 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4260
4261 if (reg == FAIL)
4262 {
4263 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4264 goto error;
4265 }
4266
4267 reg -= 8;
4268 if (mask >> reg)
4269 as_tsktsk (_("register list not in ascending order"));
4270 mask |= 1 << reg;
4271
4272 if (*input_line_pointer == '-')
4273 {
4274 input_line_pointer++;
4275 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4276 if (hi_reg == FAIL)
4277 {
4278 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4279 goto error;
4280 }
4281 else if (reg >= hi_reg)
4282 {
4283 as_bad (_("bad register range"));
4284 goto error;
4285 }
4286 for (; reg < hi_reg; reg++)
4287 mask |= 1 << reg;
4288 }
4289 }
4290 while (skip_past_comma (&input_line_pointer) != FAIL);
4291
4292 skip_past_char (&input_line_pointer, '}');
4293
4294 demand_empty_rest_of_line ();
4295
4296 /* Generate any deferred opcodes because we're going to be looking at
4297 the list. */
4298 flush_pending_unwind ();
4299
4300 for (reg = 0; reg < 16; reg++)
4301 {
4302 if (mask & (1 << reg))
4303 unwind.frame_size += 4;
4304 }
4305 op = 0xc700 | mask;
4306 add_unwind_opcode (op, 2);
4307 return;
4308 error:
4309 ignore_rest_of_line ();
4310 }
4311
4312
/* Parse an unwind_save directive.
   If the argument is non-zero, this is a .vsave directive.

   Peeks ahead at the first register name to determine which register
   class is being saved, then dispatches to the matching helper.  */

static void
s_arm_unwind_save (int arch_v6)
{
  char *peek;
  struct reg_entry *reg;
  bfd_boolean had_brace = FALSE;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Figure out what sort of save we have.  */
  peek = input_line_pointer;

  if (*peek == '{')
    {
      had_brace = TRUE;
      peek++;
    }

  reg = arm_reg_parse_multi (&peek);

  if (!reg)
    {
      as_bad (_("register expected"));
      ignore_rest_of_line ();
      return;
    }

  switch (reg->type)
    {
    case REG_TYPE_FN:
      /* FPA: the helper consumes the count after the register name,
	 so advance past what we peeked.  */
      if (had_brace)
	{
	  as_bad (_("FPA .unwind_save does not take a register list"));
	  ignore_rest_of_line ();
	  return;
	}
      input_line_pointer = peek;
      s_arm_unwind_save_fpa (reg->number);
      return;

    case REG_TYPE_RN:
      s_arm_unwind_save_core ();
      return;

    case REG_TYPE_VFD:
      if (arch_v6)
	s_arm_unwind_save_vfp_armv6 ();
      else
	s_arm_unwind_save_vfp ();
      return;

    case REG_TYPE_MMXWR:
      s_arm_unwind_save_mmxwr ();
      return;

    case REG_TYPE_MMXWCG:
      s_arm_unwind_save_mmxwcg ();
      return;

    default:
      as_bad (_(".unwind_save does not support this kind of register"));
      ignore_rest_of_line ();
    }
}
4381
4382
/* Parse an unwind_movsp directive: ".movsp <reg> [, #offset]".
   Records that REG now holds the stack pointer value (offset by
   OFFSET bytes) and emits the opcode to restore sp from it.  */

static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.	 */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  /* .movsp only makes sense while sp is still the frame base.  */
  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value.  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later.	*/
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
4432
4433 /* Parse an unwind_pad directive. */
4434
4435 static void
4436 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4437 {
4438 int offset;
4439
4440 if (!unwind.proc_start)
4441 as_bad (MISSING_FNSTART);
4442
4443 if (immediate_for_directive (&offset) == FAIL)
4444 return;
4445
4446 if (offset & 3)
4447 {
4448 as_bad (_("stack increment must be multiple of 4"));
4449 ignore_rest_of_line ();
4450 return;
4451 }
4452
4453 /* Don't generate any opcodes, just record the details for later. */
4454 unwind.frame_size += offset;
4455 unwind.pending_offset += offset;
4456
4457 demand_empty_rest_of_line ();
4458 }
4459
4460 /* Parse an unwind_setfp directive. */
4461
4462 static void
4463 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4464 {
4465 int sp_reg;
4466 int fp_reg;
4467 int offset;
4468
4469 if (!unwind.proc_start)
4470 as_bad (MISSING_FNSTART);
4471
4472 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4473 if (skip_past_comma (&input_line_pointer) == FAIL)
4474 sp_reg = FAIL;
4475 else
4476 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4477
4478 if (fp_reg == FAIL || sp_reg == FAIL)
4479 {
4480 as_bad (_("expected <reg>, <reg>"));
4481 ignore_rest_of_line ();
4482 return;
4483 }
4484
4485 /* Optional constant. */
4486 if (skip_past_comma (&input_line_pointer) != FAIL)
4487 {
4488 if (immediate_for_directive (&offset) == FAIL)
4489 return;
4490 }
4491 else
4492 offset = 0;
4493
4494 demand_empty_rest_of_line ();
4495
4496 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4497 {
4498 as_bad (_("register must be either sp or set by a previous"
4499 "unwind_movsp directive"));
4500 return;
4501 }
4502
4503 /* Don't generate any opcodes, just record the information for later. */
4504 unwind.fp_reg = fp_reg;
4505 unwind.fp_used = 1;
4506 if (sp_reg == REG_SP)
4507 unwind.fp_offset = unwind.frame_size - offset;
4508 else
4509 unwind.fp_offset -= offset;
4510 }
4511
4512 /* Parse an unwind_raw directive. */
4513
4514 static void
4515 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4516 {
4517 expressionS exp;
4518 /* This is an arbitrary limit. */
4519 unsigned char op[16];
4520 int count;
4521
4522 if (!unwind.proc_start)
4523 as_bad (MISSING_FNSTART);
4524
4525 expression (&exp);
4526 if (exp.X_op == O_constant
4527 && skip_past_comma (&input_line_pointer) != FAIL)
4528 {
4529 unwind.frame_size += exp.X_add_number;
4530 expression (&exp);
4531 }
4532 else
4533 exp.X_op = O_illegal;
4534
4535 if (exp.X_op != O_constant)
4536 {
4537 as_bad (_("expected <offset>, <opcode>"));
4538 ignore_rest_of_line ();
4539 return;
4540 }
4541
4542 count = 0;
4543
4544 /* Parse the opcode. */
4545 for (;;)
4546 {
4547 if (count >= 16)
4548 {
4549 as_bad (_("unwind opcode too long"));
4550 ignore_rest_of_line ();
4551 }
4552 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4553 {
4554 as_bad (_("invalid unwind opcode"));
4555 ignore_rest_of_line ();
4556 return;
4557 }
4558 op[count++] = exp.X_add_number;
4559
4560 /* Parse the next byte. */
4561 if (skip_past_comma (&input_line_pointer) == FAIL)
4562 break;
4563
4564 expression (&exp);
4565 }
4566
4567 /* Add the opcode bytes in reverse order. */
4568 while (count--)
4569 add_unwind_opcode (op[count], 1);
4570
4571 demand_empty_rest_of_line ();
4572 }
4573
4574
/* Parse a .eabi_attribute directive.  Delegates the parsing to the
   generic ELF vendor-attribute code, then records in
   attributes_set_explicitly[] which known attributes the user set by
   hand.  */

static void
s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
{
  int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);

  /* Remember that this attribute was given explicitly.  */
  if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
    attributes_set_explicitly[tag] = 1;
}
4585
/* Emit a tls fix for the symbol.  Implements .tlsdescseq: attaches a
   TLS_DESCSEQ relocation (ARM or Thumb flavour depending on the
   current mode) at the current location.  No data bytes are emitted
   here.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* P is the next free address in the current frag, i.e. the location
     the relocation labels.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4609 #endif /* OBJ_ELF */
4610
4611 static void s_arm_arch (int);
4612 static void s_arm_object_arch (int);
4613 static void s_arm_cpu (int);
4614 static void s_arm_fpu (int);
4615 static void s_arm_arch_extension (int);
4616
4617 #ifdef TE_PE
4618
4619 static void
4620 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4621 {
4622 expressionS exp;
4623
4624 do
4625 {
4626 expression (&exp);
4627 if (exp.X_op == O_symbol)
4628 exp.X_op = O_secrel;
4629
4630 emit_expr (&exp, 4);
4631 }
4632 while (*input_line_pointer++ == ',');
4633
4634 input_line_pointer--;
4635 demand_empty_rest_of_line ();
4636 }
4637 #endif /* TE_PE */
4638
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align_ptwo,  2 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  /* ELF-only directives: ARM relocatable data, raw instruction
     emission, EHABI unwind-table generation, EABI attributes and
     TLS descriptor sequences.  */
  { "word",	        s_arm_elf_cons, 4 },
  { "long",	        s_arm_elf_cons, 4 },
  { "inst.n",           s_arm_elf_inst, 2 },
  { "inst.w",           s_arm_elf_inst, 4 },
  { "inst",             s_arm_elf_inst, 0 },
  { "rel31",	        s_arm_rel31,	0 },
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
  { "tlsdescseq",	s_arm_tls_descseq,      0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4718 \f
4719 /* Parser functions used exclusively in instruction operands. */
4720
4721 /* Generic immediate-value read function for use in insn parsing.
4722 STR points to the beginning of the immediate (the leading #);
4723 VAL receives the value; if the value is outside [MIN, MAX]
4724 issue an error. PREFIX_OPT is true if the immediate prefix is
4725 optional. */
4726
4727 static int
4728 parse_immediate (char **str, int *val, int min, int max,
4729 bfd_boolean prefix_opt)
4730 {
4731 expressionS exp;
4732 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4733 if (exp.X_op != O_constant)
4734 {
4735 inst.error = _("constant expression required");
4736 return FAIL;
4737 }
4738
4739 if (exp.X_add_number < min || exp.X_add_number > max)
4740 {
4741 inst.error = _("immediate value out of range");
4742 return FAIL;
4743 }
4744
4745 *val = exp.X_add_number;
4746 return SUCCESS;
4747 }
4748
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]:
   .imm receives the low 32 bits; when a full 64-bit value was parsed,
   .reg receives the high 32 bits and .regisimm is set.
   STR is advanced past the parsed text on success.  If IN_EXP is non-NULL
   the parsed expression is stored there instead of a local.  When
   ALLOW_SYMBOL_P is TRUE a bare symbol reference is accepted (to be fixed
   up later).  Returns SUCCESS or FAIL.  */

static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number is the count of littlenums in
	 generic_bignum.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Low 32 bits into .imm, next 32 bits into .reg.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4821
/* Returns the pseudo-register number of an FPA immediate constant,
   or FAIL if there isn't a valid constant here.  Only the eight
   well-known FPA constants (fp_values) are representable; the value
   returned is the table index plus 8.  Three strategies are tried in
   turn: exact string match, raw IEEE float parse, and a full gas
   expression parse (for "0f"-style prefixed floats).  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Trailing garbage: undo the pointer advance and keep
	     looking.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Accept only if the littlenums exactly match one of the known
	 FPA constants.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* Restore the saved input_line_pointer before reporting failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4914
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000, i.e. the low 19 bits are
   clear and the exponent bits (30..25) are either 0b100000 (bit 29
   clear) or 0b011111 (bit 29 set).  */

static int
is_quarter_float (unsigned imm)
{
  unsigned expected_exp = (imm & 0x20000000) != 0 ? 0x3e000000u : 0x40000000u;

  if ((imm & 0x7ffff) != 0)
    return 0;

  return (imm & 0x7e000000) == expected_exp;
}
4924
4925
/* Detect the presence of a floating point or integer zero constant,
   i.e. #0.0 or #0.  Returns TRUE only for a positive zero (or #0x0);
   IN is advanced past the constant by the called parsers.  */

static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    return FALSE;

  ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      /* Range [0, 0]: only the value zero is accepted.  */
      int val;
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
	return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
			     &generic_floating_point_number);

  /* low > leader means atof_generic found no significant littlenums,
     i.e. the parsed value is zero; only a '+' sign qualifies (so -0.0
     is rejected here).  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
	  > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
4959
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.
   On success the 32-bit single-precision bit pattern is stored in *IMMED,
   *CCP is advanced, and SUCCESS is returned; otherwise FAIL.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan up to the next whitespace/newline for a '.', 'e' or 'E'
	 marking a floating-point literal.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept a representable quarter-precision value, or (minus) zero
	 (sign bit ignored by the 0x7fffffff mask).  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5023
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a textual shift name to its kind; entries are looked up via
   hash_find_n on arm_shift_hsh in parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
};
5045
5046 /* Parse a <shift> specifier on an ARM data processing instruction.
5047 This has three forms:
5048
5049 (LSL|LSR|ASL|ASR|ROR) Rs
5050 (LSL|LSR|ASL|ASR|ROR) #imm
5051 RRX
5052
5053 Note that ASL is assimilated to LSL in the instruction encoding, and
5054 RRX to ROR #0 (which cannot be written as such). */
5055
5056 static int
5057 parse_shift (char **str, int i, enum parse_shift_mode mode)
5058 {
5059 const struct asm_shift_name *shift_name;
5060 enum shift_kind shift;
5061 char *s = *str;
5062 char *p = s;
5063 int reg;
5064
5065 for (p = *str; ISALPHA (*p); p++)
5066 ;
5067
5068 if (p == *str)
5069 {
5070 inst.error = _("shift expression expected");
5071 return FAIL;
5072 }
5073
5074 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5075 p - *str);
5076
5077 if (shift_name == NULL)
5078 {
5079 inst.error = _("shift expression expected");
5080 return FAIL;
5081 }
5082
5083 shift = shift_name->kind;
5084
5085 switch (mode)
5086 {
5087 case NO_SHIFT_RESTRICT:
5088 case SHIFT_IMMEDIATE: break;
5089
5090 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5091 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5092 {
5093 inst.error = _("'LSL' or 'ASR' required");
5094 return FAIL;
5095 }
5096 break;
5097
5098 case SHIFT_LSL_IMMEDIATE:
5099 if (shift != SHIFT_LSL)
5100 {
5101 inst.error = _("'LSL' required");
5102 return FAIL;
5103 }
5104 break;
5105
5106 case SHIFT_ASR_IMMEDIATE:
5107 if (shift != SHIFT_ASR)
5108 {
5109 inst.error = _("'ASR' required");
5110 return FAIL;
5111 }
5112 break;
5113
5114 default: abort ();
5115 }
5116
5117 if (shift != SHIFT_RRX)
5118 {
5119 /* Whitespace can appear here if the next thing is a bare digit. */
5120 skip_whitespace (p);
5121
5122 if (mode == NO_SHIFT_RESTRICT
5123 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5124 {
5125 inst.operands[i].imm = reg;
5126 inst.operands[i].immisreg = 1;
5127 }
5128 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5129 return FAIL;
5130 }
5131 inst.operands[i].shift_kind = shift;
5132 inst.operands[i].shifted = 1;
5133 *str = p;
5134 return SUCCESS;
5135 }
5136
/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.  */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  The rotation count is stored halved in
	 bits 8..11 of the immediate field, hence the shift by 7.  */
      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain #<immediate>: leave the value in inst.reloc.exp for
     md_apply_fix to validate and encode.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
5206
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  A code of 0 means the group relocation
   is not available for that instruction class.  */

struct group_reloc_table_entry
{
  const char *name;
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

/* Selects which column of group_reloc_table to use when recording the
   relocation type in parse_address_main.  */
typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
5231
/* The table of recognized group relocations, searched linearly by
   find_group_reloc_table_entry.  A 0 in a column means the relocation
   cannot be used with that instruction class (reported as an error by
   the address parser).  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5306
5307 /* Given the address of a pointer pointing to the textual name of a group
5308 relocation as may appear in assembler source, attempt to find its details
5309 in group_reloc_table. The pointer will be updated to the character after
5310 the trailing colon. On failure, FAIL will be returned; SUCCESS
5311 otherwise. On success, *entry will be updated to point at the relevant
5312 group_reloc_table entry. */
5313
5314 static int
5315 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5316 {
5317 unsigned int i;
5318 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5319 {
5320 int length = strlen (group_reloc_table[i].name);
5321
5322 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5323 && (*str)[length] == ':')
5324 {
5325 *out = &group_reloc_table[i];
5326 *str += (length + 1);
5327 return SUCCESS;
5328 }
5329 }
5330
5331 return FAIL;
5332 }
5333
/* Parse a <shifter_operand> for an ARM data processing instruction
   (as for parse_shifter_operand) where group relocations are allowed:

      #<immediate>
      #<immediate>, <rotate>
      #:<group_reloc>:<expression>
      <Rm>
      <Rm>, <shift>

   where <group_reloc> is one of the strings defined in group_reloc_table.
   The hashes are optional.

   Everything else is as for parse_shifter_operand.  */

static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
{
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.  */

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
    {
      struct group_reloc_table_entry *entry;

      /* Skip over the leading '#:' or ':'.  */
      if ((*str)[0] == '#')
	(*str) += 2;
      else
	(*str)++;

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
	{
	  inst.error = _("unknown group relocation");
	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
	}

      /* We now have the group relocation table entry corresponding to
	 the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).  */
      inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
      gas_assert (inst.reloc.type != 0);

      return PARSE_OPERAND_SUCCESS;
    }
  else
    return parse_shifter_operand (str, i) == SUCCESS
	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
}
5389
5390 /* Parse a Neon alignment expression. Information is written to
5391 inst.operands[i]. We assume the initial ':' has been skipped.
5392
5393 align .imm = align << 8, .immisalign=1, .preind=0 */
5394 static parse_operand_result
5395 parse_neon_alignment (char **str, int i)
5396 {
5397 char *p = *str;
5398 expressionS exp;
5399
5400 my_get_expression (&exp, &p, GE_NO_PREFIX);
5401
5402 if (exp.X_op != O_constant)
5403 {
5404 inst.error = _("alignment must be constant");
5405 return PARSE_OPERAND_FAIL;
5406 }
5407
5408 inst.operands[i].imm = exp.X_add_number << 8;
5409 inst.operands[i].immisalign = 1;
5410 /* Alignments are not pre-indexes. */
5411 inst.operands[i].preind = 0;
5412
5413 *str = p;
5414 return PARSE_OPERAND_SUCCESS;
5415 }
5416
5417 /* Parse all forms of an ARM address expression. Information is written
5418 to inst.operands[i] and/or inst.reloc.
5419
5420 Preindexed addressing (.preind=1):
5421
5422 [Rn, #offset] .reg=Rn .reloc.exp=offset
5423 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5424 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5425 .shift_kind=shift .reloc.exp=shift_imm
5426
5427 These three may have a trailing ! which causes .writeback to be set also.
5428
5429 Postindexed addressing (.postind=1, .writeback=1):
5430
5431 [Rn], #offset .reg=Rn .reloc.exp=offset
5432 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5433 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5434 .shift_kind=shift .reloc.exp=shift_imm
5435
5436 Unindexed addressing (.preind=0, .postind=0):
5437
5438 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5439
5440 Other:
5441
5442 [Rn]{!} shorthand for [Rn,#0]{!}
5443 =immediate .isreg=0 .reloc.exp=immediate
5444 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5445
5446 It is the caller's responsibility to check for addressing modes not
5447 supported by the instruction, and to set inst.reloc.type. */
5448
/* Worker for parse_address/parse_address_group_reloc; see the block
   comment above for the full list of recognized addressing forms and
   the inst.operands[i]/inst.reloc fields they set.  GROUP_RELOCATIONS
   enables the #:<group_reloc>: syntax; GROUP_TYPE selects which
   relocation column of group_reloc_table to record.  */
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[' means either a bare label or an '='-prefixed literal.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* Anything after a comma inside the brackets is a pre-indexed
     offset: register, Neon alignment, group relocation or immediate.  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Not a register: undo any '-' consumed above before
	     re-parsing as an expression.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero code in the table means this relocation class is
		 not available for this instruction kind.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  /* After the closing bracket a comma introduces either an unindexed
     {option} or a post-indexed offset.  */
  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5700
5701 static int
5702 parse_address (char **str, int i)
5703 {
5704 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5705 ? SUCCESS : FAIL;
5706 }
5707
5708 static parse_operand_result
5709 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5710 {
5711 return parse_address_main (str, i, 1, type);
5712 }
5713
/* Parse an operand for a MOVW or MOVT instruction.  Accepts an optional
   ':lower16:'/':upper16:' prefix (which selects BFD_RELOC_ARM_MOVW or
   BFD_RELOC_ARM_MOVT) followed by an expression; without a prefix the
   expression must be a constant in [0, 0xffff].  The expression is left
   in inst.reloc.exp.  Returns SUCCESS or FAIL.  */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  /* A reloc type was set above iff one of the 9-character prefixes
     matched; skip past it.  */
  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  /* Without a :lower16:/:upper16: prefix the value must be an
     immediate constant that fits in 16 bits.  */
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
5753
5754 /* Miscellaneous. */
5755
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when parsing the destination of an MSR (a write to the
   PSR) and FALSE when parsing the source of an MRS (a read).  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      /* SPSR/CPSR are A/R-profile registers; reject them on M-profile.  */
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: look the whole name up in the v7m special-register
	 hash table.  First scan to the end of the identifier.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For names ending in "psr", truncate the lookup key just after
	 the 'r' so any suffix (handled below) is excluded.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the four-character register name matched above.  */
  p += 4;
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  Each
	     letter may appear at most once; a repeat sets the 0x20
	     (respectively 0x2 for 'g') marker bit, which is diagnosed
	     as an error below.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v, q present maps to the PSR_f mask.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      /* The 'g' (GE bits) mask requires the DSP extension.  */
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject repeated bits, and any nzcvq subset other than all
	     five or none.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* CPSR/SPSR: look the suffix up as a field name (e.g. _cxsf).  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;		/* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5952
5953 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5954 value suitable for splatting into the AIF field of the instruction. */
5955
5956 static int
5957 parse_cps_flags (char **str)
5958 {
5959 int val = 0;
5960 int saw_a_flag = 0;
5961 char *s = *str;
5962
5963 for (;;)
5964 switch (*s++)
5965 {
5966 case '\0': case ',':
5967 goto done;
5968
5969 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5970 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5971 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5972
5973 default:
5974 inst.error = _("unrecognized CPS flag");
5975 return FAIL;
5976 }
5977
5978 done:
5979 if (saw_a_flag == 0)
5980 {
5981 inst.error = _("missing CPS flags");
5982 return FAIL;
5983 }
5984
5985 *str = s - 1;
5986 return val;
5987 }
5988
5989 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5990 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5991
5992 static int
5993 parse_endian_specifier (char **str)
5994 {
5995 int little_endian;
5996 char *s = *str;
5997
5998 if (strncasecmp (s, "BE", 2))
5999 little_endian = 0;
6000 else if (strncasecmp (s, "LE", 2))
6001 little_endian = 1;
6002 else
6003 {
6004 inst.error = _("valid endian specifiers are be or le");
6005 return FAIL;
6006 }
6007
6008 if (ISALNUM (s[2]) || s[2] == '_')
6009 {
6010 inst.error = _("valid endian specifiers are be or le");
6011 return FAIL;
6012 }
6013
6014 *str = s + 2;
6015 return little_endian;
6016 }
6017
6018 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6019 value suitable for poking into the rotate field of an sxt or sxta
6020 instruction, or FAIL on error. */
6021
6022 static int
6023 parse_ror (char **str)
6024 {
6025 int rot;
6026 char *s = *str;
6027
6028 if (strncasecmp (s, "ROR", 3) == 0)
6029 s += 3;
6030 else
6031 {
6032 inst.error = _("missing rotation field after comma");
6033 return FAIL;
6034 }
6035
6036 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6037 return FAIL;
6038
6039 switch (rot)
6040 {
6041 case 0: *str = s; return 0x0;
6042 case 8: *str = s; return 0x1;
6043 case 16: *str = s; return 0x2;
6044 case 24: *str = s; return 0x3;
6045
6046 default:
6047 inst.error = _("rotation can only be 0, 8, 16, or 24");
6048 return FAIL;
6049 }
6050 }
6051
6052 /* Parse a conditional code (from conds[] below). The value returned is in the
6053 range 0 .. 14, or FAIL. */
6054 static int
6055 parse_cond (char **str)
6056 {
6057 char *q;
6058 const struct asm_cond *c;
6059 int n;
6060 /* Condition codes are always 2 characters, so matching up to
6061 3 characters is sufficient. */
6062 char cond[3];
6063
6064 q = *str;
6065 n = 0;
6066 while (ISALPHA (*q) && n < 3)
6067 {
6068 cond[n] = TOLOWER (*q);
6069 q++;
6070 n++;
6071 }
6072
6073 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6074 if (!c)
6075 {
6076 inst.error = _("condition required");
6077 return FAIL;
6078 }
6079
6080 *str = q;
6081 return c->value;
6082 }
6083
/* If the given feature available in the selected CPU, mark it as used.
   Returns TRUE iff feature is available.  */
static bfd_boolean
mark_feature_used (const arm_feature_set *feature)
{
  /* Ensure the option is valid on the current architecture.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
    return FALSE;

  /* Add the appropriate architecture feature for the barrier option used.
     The feature is recorded against the instruction set currently being
     assembled (Thumb or ARM), so the build attributes reflect actual use.  */
  if (thumb_mode)
    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
  else
    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);

  return TRUE;
}
6102
6103 /* Parse an option for a barrier instruction. Returns the encoding for the
6104 option, or FAIL. */
6105 static int
6106 parse_barrier (char **str)
6107 {
6108 char *p, *q;
6109 const struct asm_barrier_opt *o;
6110
6111 p = q = *str;
6112 while (ISALPHA (*q))
6113 q++;
6114
6115 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6116 q - p);
6117 if (!o)
6118 return FAIL;
6119
6120 if (!mark_feature_used (&o->arch))
6121 return FAIL;
6122
6123 *str = q;
6124 return o->value;
6125 }
6126
/* Parse the operands of a table branch instruction.  Similar to a memory
   operand.  Accepts "[Rn, Rm]" or "[Rn, Rm, LSL #1]"; the base register
   is stored in inst.operands[0].reg, the index register in
   inst.operands[0].imm, and a shift sets inst.operands[0].shifted.
   Returns SUCCESS or FAIL (with inst.error set).  */
static int
parse_tb (char **str)
{
  char * p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      inst.error = _("'[' expected");
      return FAIL;
    }

  /* Base register.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  inst.operands[0].reg = reg;

  if (skip_past_comma (&p) == FAIL)
    {
      inst.error = _("',' expected");
      return FAIL;
    }

  /* Index register, stashed in the imm field.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  inst.operands[0].imm = reg;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* Optional shift: only an LSL immediate of exactly 1 is valid.  */
      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
	return FAIL;
      if (inst.reloc.exp.X_add_number != 1)
	{
	  inst.error = _("invalid shift");
	  return FAIL;
	}
      inst.operands[0].shifted = 1;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return FAIL;
    }
  *str = p;
  return SUCCESS;
}
6181
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* First operand is a vector register (S, D or Q).
	 Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* Case 5: a D register takes a second ARM register.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two further ARM registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>
	     (parse_big_immediate has already filled in the operand).  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* First operand is an ARM core register.  Cases 6, 7.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second S register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6404
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM matcher code occupies the
   low 16 bits and the Thumb code the high 16 bits; parse_operands
   selects the appropriate half according to its THUMB argument.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
6409
/* Matcher codes for parse_operands.  The order matters: optional
   operands must all come after OP_FIRST_OPTIONAL (see the alias at the
   bottom), since parse_operands uses that threshold to decide whether
   backtracking is allowed.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oI32z,      /*                             0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  OP_FIRST_OPTIONAL = OP_oI7b
};
6540
6541 /* Generic instruction operand parser. This does no encoding and no
6542 semantic validation; it merely squirrels values away in the inst
6543 structure. Returns SUCCESS or FAIL depending on whether the
6544 specified grammar matched. */
6545 static int
6546 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6547 {
6548 unsigned const int *upat = pattern;
6549 char *backtrack_pos = 0;
6550 const char *backtrack_error = 0;
6551 int i, val = 0, backtrack_index = 0;
6552 enum arm_reg_type rtype;
6553 parse_operand_result result;
6554 unsigned int op_parse_code;
6555
6556 #define po_char_or_fail(chr) \
6557 do \
6558 { \
6559 if (skip_past_char (&str, chr) == FAIL) \
6560 goto bad_args; \
6561 } \
6562 while (0)
6563
6564 #define po_reg_or_fail(regtype) \
6565 do \
6566 { \
6567 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6568 & inst.operands[i].vectype); \
6569 if (val == FAIL) \
6570 { \
6571 first_error (_(reg_expected_msgs[regtype])); \
6572 goto failure; \
6573 } \
6574 inst.operands[i].reg = val; \
6575 inst.operands[i].isreg = 1; \
6576 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6577 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6578 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6579 || rtype == REG_TYPE_VFD \
6580 || rtype == REG_TYPE_NQ); \
6581 } \
6582 while (0)
6583
6584 #define po_reg_or_goto(regtype, label) \
6585 do \
6586 { \
6587 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6588 & inst.operands[i].vectype); \
6589 if (val == FAIL) \
6590 goto label; \
6591 \
6592 inst.operands[i].reg = val; \
6593 inst.operands[i].isreg = 1; \
6594 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6595 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6596 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6597 || rtype == REG_TYPE_VFD \
6598 || rtype == REG_TYPE_NQ); \
6599 } \
6600 while (0)
6601
6602 #define po_imm_or_fail(min, max, popt) \
6603 do \
6604 { \
6605 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6606 goto failure; \
6607 inst.operands[i].imm = val; \
6608 } \
6609 while (0)
6610
6611 #define po_scalar_or_goto(elsz, label) \
6612 do \
6613 { \
6614 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6615 if (val == FAIL) \
6616 goto label; \
6617 inst.operands[i].reg = val; \
6618 inst.operands[i].isscalar = 1; \
6619 } \
6620 while (0)
6621
6622 #define po_misc_or_fail(expr) \
6623 do \
6624 { \
6625 if (expr) \
6626 goto failure; \
6627 } \
6628 while (0)
6629
6630 #define po_misc_or_fail_no_backtrack(expr) \
6631 do \
6632 { \
6633 result = expr; \
6634 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6635 backtrack_pos = 0; \
6636 if (result != PARSE_OPERAND_SUCCESS) \
6637 goto failure; \
6638 } \
6639 while (0)
6640
6641 #define po_barrier_or_imm(str) \
6642 do \
6643 { \
6644 val = parse_barrier (&str); \
6645 if (val == FAIL && ! ISALPHA (*str)) \
6646 goto immediate; \
6647 if (val == FAIL \
6648 /* ISB can only take SY as an option. */ \
6649 || ((inst.instruction & 0xf0) == 0x60 \
6650 && val != 0xf)) \
6651 { \
6652 inst.error = _("invalid barrier type"); \
6653 backtrack_pos = 0; \
6654 goto failure; \
6655 } \
6656 } \
6657 while (0)
6658
6659 skip_whitespace (str);
6660
6661 for (i = 0; upat[i] != OP_stop; i++)
6662 {
6663 op_parse_code = upat[i];
6664 if (op_parse_code >= 1<<16)
6665 op_parse_code = thumb ? (op_parse_code >> 16)
6666 : (op_parse_code & ((1<<16)-1));
6667
6668 if (op_parse_code >= OP_FIRST_OPTIONAL)
6669 {
6670 /* Remember where we are in case we need to backtrack. */
6671 gas_assert (!backtrack_pos);
6672 backtrack_pos = str;
6673 backtrack_error = inst.error;
6674 backtrack_index = i;
6675 }
6676
6677 if (i > 0 && (i > 1 || inst.operands[0].present))
6678 po_char_or_fail (',');
6679
6680 switch (op_parse_code)
6681 {
6682 /* Registers */
6683 case OP_oRRnpc:
6684 case OP_oRRnpcsp:
6685 case OP_RRnpc:
6686 case OP_RRnpcsp:
6687 case OP_oRR:
6688 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6689 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6690 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6691 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6692 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6693 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6694 case OP_oRND:
6695 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6696 case OP_RVC:
6697 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6698 break;
6699 /* Also accept generic coprocessor regs for unknown registers. */
6700 coproc_reg:
6701 po_reg_or_fail (REG_TYPE_CN);
6702 break;
6703 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6704 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6705 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6706 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6707 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6708 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6709 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6710 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6711 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6712 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6713 case OP_oRNQ:
6714 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6715 case OP_oRNDQ:
6716 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6717 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6718 case OP_oRNSDQ:
6719 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6720
6721 /* Neon scalar. Using an element size of 8 means that some invalid
6722 scalars are accepted here, so deal with those in later code. */
6723 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6724
6725 case OP_RNDQ_I0:
6726 {
6727 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6728 break;
6729 try_imm0:
6730 po_imm_or_fail (0, 0, TRUE);
6731 }
6732 break;
6733
6734 case OP_RVSD_I0:
6735 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6736 break;
6737
6738 case OP_RSVD_FI0:
6739 {
6740 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6741 break;
6742 try_ifimm0:
6743 if (parse_ifimm_zero (&str))
6744 inst.operands[i].imm = 0;
6745 else
6746 {
6747 inst.error
6748 = _("only floating point zero is allowed as immediate value");
6749 goto failure;
6750 }
6751 }
6752 break;
6753
6754 case OP_RR_RNSC:
6755 {
6756 po_scalar_or_goto (8, try_rr);
6757 break;
6758 try_rr:
6759 po_reg_or_fail (REG_TYPE_RN);
6760 }
6761 break;
6762
6763 case OP_RNSDQ_RNSC:
6764 {
6765 po_scalar_or_goto (8, try_nsdq);
6766 break;
6767 try_nsdq:
6768 po_reg_or_fail (REG_TYPE_NSDQ);
6769 }
6770 break;
6771
6772 case OP_RNDQ_RNSC:
6773 {
6774 po_scalar_or_goto (8, try_ndq);
6775 break;
6776 try_ndq:
6777 po_reg_or_fail (REG_TYPE_NDQ);
6778 }
6779 break;
6780
6781 case OP_RND_RNSC:
6782 {
6783 po_scalar_or_goto (8, try_vfd);
6784 break;
6785 try_vfd:
6786 po_reg_or_fail (REG_TYPE_VFD);
6787 }
6788 break;
6789
6790 case OP_VMOV:
6791 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6792 not careful then bad things might happen. */
6793 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6794 break;
6795
6796 case OP_RNDQ_Ibig:
6797 {
6798 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6799 break;
6800 try_immbig:
6801 /* There's a possibility of getting a 64-bit immediate here, so
6802 we need special handling. */
6803 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6804 == FAIL)
6805 {
6806 inst.error = _("immediate value is out of range");
6807 goto failure;
6808 }
6809 }
6810 break;
6811
6812 case OP_RNDQ_I63b:
6813 {
6814 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6815 break;
6816 try_shimm:
6817 po_imm_or_fail (0, 63, TRUE);
6818 }
6819 break;
6820
6821 case OP_RRnpcb:
6822 po_char_or_fail ('[');
6823 po_reg_or_fail (REG_TYPE_RN);
6824 po_char_or_fail (']');
6825 break;
6826
6827 case OP_RRnpctw:
6828 case OP_RRw:
6829 case OP_oRRw:
6830 po_reg_or_fail (REG_TYPE_RN);
6831 if (skip_past_char (&str, '!') == SUCCESS)
6832 inst.operands[i].writeback = 1;
6833 break;
6834
6835 /* Immediates */
6836 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6837 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6838 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6839 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6840 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6841 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6842 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6843 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6844 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6845 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6846 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6847 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6848
6849 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6850 case OP_oI7b:
6851 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6852 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6853 case OP_oI31b:
6854 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6855 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6856 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6857 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6858
6859 /* Immediate variants */
6860 case OP_oI255c:
6861 po_char_or_fail ('{');
6862 po_imm_or_fail (0, 255, TRUE);
6863 po_char_or_fail ('}');
6864 break;
6865
6866 case OP_I31w:
6867 /* The expression parser chokes on a trailing !, so we have
6868 to find it first and zap it. */
6869 {
6870 char *s = str;
6871 while (*s && *s != ',')
6872 s++;
6873 if (s[-1] == '!')
6874 {
6875 s[-1] = '\0';
6876 inst.operands[i].writeback = 1;
6877 }
6878 po_imm_or_fail (0, 31, TRUE);
6879 if (str == s - 1)
6880 str = s;
6881 }
6882 break;
6883
6884 /* Expressions */
6885 case OP_EXPi: EXPi:
6886 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6887 GE_OPT_PREFIX));
6888 break;
6889
6890 case OP_EXP:
6891 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6892 GE_NO_PREFIX));
6893 break;
6894
6895 case OP_EXPr: EXPr:
6896 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6897 GE_NO_PREFIX));
6898 if (inst.reloc.exp.X_op == O_symbol)
6899 {
6900 val = parse_reloc (&str);
6901 if (val == -1)
6902 {
6903 inst.error = _("unrecognized relocation suffix");
6904 goto failure;
6905 }
6906 else if (val != BFD_RELOC_UNUSED)
6907 {
6908 inst.operands[i].imm = val;
6909 inst.operands[i].hasreloc = 1;
6910 }
6911 }
6912 break;
6913
6914 /* Operand for MOVW or MOVT. */
6915 case OP_HALF:
6916 po_misc_or_fail (parse_half (&str));
6917 break;
6918
6919 /* Register or expression. */
6920 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6921 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6922
6923 /* Register or immediate. */
6924 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6925 I0: po_imm_or_fail (0, 0, FALSE); break;
6926
6927 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6928 IF:
6929 if (!is_immediate_prefix (*str))
6930 goto bad_args;
6931 str++;
6932 val = parse_fpa_immediate (&str);
6933 if (val == FAIL)
6934 goto failure;
6935 /* FPA immediates are encoded as registers 8-15.
6936 parse_fpa_immediate has already applied the offset. */
6937 inst.operands[i].reg = val;
6938 inst.operands[i].isreg = 1;
6939 break;
6940
6941 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6942 I32z: po_imm_or_fail (0, 32, FALSE); break;
6943
6944 /* Two kinds of register. */
6945 case OP_RIWR_RIWC:
6946 {
6947 struct reg_entry *rege = arm_reg_parse_multi (&str);
6948 if (!rege
6949 || (rege->type != REG_TYPE_MMXWR
6950 && rege->type != REG_TYPE_MMXWC
6951 && rege->type != REG_TYPE_MMXWCG))
6952 {
6953 inst.error = _("iWMMXt data or control register expected");
6954 goto failure;
6955 }
6956 inst.operands[i].reg = rege->number;
6957 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6958 }
6959 break;
6960
6961 case OP_RIWC_RIWG:
6962 {
6963 struct reg_entry *rege = arm_reg_parse_multi (&str);
6964 if (!rege
6965 || (rege->type != REG_TYPE_MMXWC
6966 && rege->type != REG_TYPE_MMXWCG))
6967 {
6968 inst.error = _("iWMMXt control register expected");
6969 goto failure;
6970 }
6971 inst.operands[i].reg = rege->number;
6972 inst.operands[i].isreg = 1;
6973 }
6974 break;
6975
6976 /* Misc */
6977 case OP_CPSF: val = parse_cps_flags (&str); break;
6978 case OP_ENDI: val = parse_endian_specifier (&str); break;
6979 case OP_oROR: val = parse_ror (&str); break;
6980 case OP_COND: val = parse_cond (&str); break;
6981 case OP_oBARRIER_I15:
6982 po_barrier_or_imm (str); break;
6983 immediate:
6984 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6985 goto failure;
6986 break;
6987
6988 case OP_wPSR:
6989 case OP_rPSR:
6990 po_reg_or_goto (REG_TYPE_RNB, try_psr);
6991 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
6992 {
6993 inst.error = _("Banked registers are not available with this "
6994 "architecture.");
6995 goto failure;
6996 }
6997 break;
6998 try_psr:
6999 val = parse_psr (&str, op_parse_code == OP_wPSR);
7000 break;
7001
7002 case OP_APSR_RR:
7003 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7004 break;
7005 try_apsr:
7006 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7007 instruction). */
7008 if (strncasecmp (str, "APSR_", 5) == 0)
7009 {
7010 unsigned found = 0;
7011 str += 5;
7012 while (found < 15)
7013 switch (*str++)
7014 {
7015 case 'c': found = (found & 1) ? 16 : found | 1; break;
7016 case 'n': found = (found & 2) ? 16 : found | 2; break;
7017 case 'z': found = (found & 4) ? 16 : found | 4; break;
7018 case 'v': found = (found & 8) ? 16 : found | 8; break;
7019 default: found = 16;
7020 }
7021 if (found != 15)
7022 goto failure;
7023 inst.operands[i].isvec = 1;
7024 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7025 inst.operands[i].reg = REG_PC;
7026 }
7027 else
7028 goto failure;
7029 break;
7030
7031 case OP_TB:
7032 po_misc_or_fail (parse_tb (&str));
7033 break;
7034
7035 /* Register lists. */
7036 case OP_REGLST:
7037 val = parse_reg_list (&str);
7038 if (*str == '^')
7039 {
7040 inst.operands[i].writeback = 1;
7041 str++;
7042 }
7043 break;
7044
7045 case OP_VRSLST:
7046 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7047 break;
7048
7049 case OP_VRDLST:
7050 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7051 break;
7052
7053 case OP_VRSDLST:
7054 /* Allow Q registers too. */
7055 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7056 REGLIST_NEON_D);
7057 if (val == FAIL)
7058 {
7059 inst.error = NULL;
7060 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7061 REGLIST_VFP_S);
7062 inst.operands[i].issingle = 1;
7063 }
7064 break;
7065
7066 case OP_NRDLST:
7067 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7068 REGLIST_NEON_D);
7069 break;
7070
7071 case OP_NSTRLST:
7072 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7073 &inst.operands[i].vectype);
7074 break;
7075
7076 /* Addressing modes */
7077 case OP_ADDR:
7078 po_misc_or_fail (parse_address (&str, i));
7079 break;
7080
7081 case OP_ADDRGLDR:
7082 po_misc_or_fail_no_backtrack (
7083 parse_address_group_reloc (&str, i, GROUP_LDR));
7084 break;
7085
7086 case OP_ADDRGLDRS:
7087 po_misc_or_fail_no_backtrack (
7088 parse_address_group_reloc (&str, i, GROUP_LDRS));
7089 break;
7090
7091 case OP_ADDRGLDC:
7092 po_misc_or_fail_no_backtrack (
7093 parse_address_group_reloc (&str, i, GROUP_LDC));
7094 break;
7095
7096 case OP_SH:
7097 po_misc_or_fail (parse_shifter_operand (&str, i));
7098 break;
7099
7100 case OP_SHG:
7101 po_misc_or_fail_no_backtrack (
7102 parse_shifter_operand_group_reloc (&str, i));
7103 break;
7104
7105 case OP_oSHll:
7106 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7107 break;
7108
7109 case OP_oSHar:
7110 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7111 break;
7112
7113 case OP_oSHllar:
7114 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7115 break;
7116
7117 default:
7118 as_fatal (_("unhandled operand code %d"), op_parse_code);
7119 }
7120
7121 /* Various value-based sanity checks and shared operations. We
7122 do not signal immediate failures for the register constraints;
7123 this allows a syntax error to take precedence. */
7124 switch (op_parse_code)
7125 {
7126 case OP_oRRnpc:
7127 case OP_RRnpc:
7128 case OP_RRnpcb:
7129 case OP_RRw:
7130 case OP_oRRw:
7131 case OP_RRnpc_I0:
7132 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7133 inst.error = BAD_PC;
7134 break;
7135
7136 case OP_oRRnpcsp:
7137 case OP_RRnpcsp:
7138 if (inst.operands[i].isreg)
7139 {
7140 if (inst.operands[i].reg == REG_PC)
7141 inst.error = BAD_PC;
7142 else if (inst.operands[i].reg == REG_SP)
7143 inst.error = BAD_SP;
7144 }
7145 break;
7146
7147 case OP_RRnpctw:
7148 if (inst.operands[i].isreg
7149 && inst.operands[i].reg == REG_PC
7150 && (inst.operands[i].writeback || thumb))
7151 inst.error = BAD_PC;
7152 break;
7153
7154 case OP_CPSF:
7155 case OP_ENDI:
7156 case OP_oROR:
7157 case OP_wPSR:
7158 case OP_rPSR:
7159 case OP_COND:
7160 case OP_oBARRIER_I15:
7161 case OP_REGLST:
7162 case OP_VRSLST:
7163 case OP_VRDLST:
7164 case OP_VRSDLST:
7165 case OP_NRDLST:
7166 case OP_NSTRLST:
7167 if (val == FAIL)
7168 goto failure;
7169 inst.operands[i].imm = val;
7170 break;
7171
7172 default:
7173 break;
7174 }
7175
7176 /* If we get here, this operand was successfully parsed. */
7177 inst.operands[i].present = 1;
7178 continue;
7179
7180 bad_args:
7181 inst.error = BAD_ARGS;
7182
7183 failure:
7184 if (!backtrack_pos)
7185 {
7186 /* The parse routine should already have set inst.error, but set a
7187 default here just in case. */
7188 if (!inst.error)
7189 inst.error = _("syntax error");
7190 return FAIL;
7191 }
7192
7193 /* Do not backtrack over a trailing optional argument that
7194 absorbed some text. We will only fail again, with the
7195 'garbage following instruction' error message, which is
7196 probably less helpful than the current one. */
7197 if (backtrack_index == i && backtrack_pos != str
7198 && upat[i+1] == OP_stop)
7199 {
7200 if (!inst.error)
7201 inst.error = _("syntax error");
7202 return FAIL;
7203 }
7204
7205 /* Try again, skipping the optional argument at backtrack_pos. */
7206 str = backtrack_pos;
7207 inst.error = backtrack_error;
7208 inst.operands[backtrack_index].present = 0;
7209 i = backtrack_index;
7210 backtrack_pos = 0;
7211 }
7212
7213 /* Check that we have parsed all the arguments. */
7214 if (*str != '\0' && !inst.error)
7215 inst.error = _("garbage following instruction");
7216
7217 return inst.error ? FAIL : SUCCESS;
7218 }
7219
7220 #undef po_char_or_fail
7221 #undef po_reg_or_fail
7222 #undef po_reg_or_goto
7223 #undef po_imm_or_fail
7224 #undef po_scalar_or_fail
7225 #undef po_barrier_or_imm
7226
/* Shorthand macro for instruction encoding functions issuing errors.
   Sets inst.error and executes a bare `return', so it is only usable
   inside void encoding functions.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7238
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   Sets inst.error (BAD_SP or BAD_PC as appropriate) and returns from
   the enclosing void encoding function.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_SP || reg == REG_PC)			\
     {							\
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
       return;						\
     }							\
  while (0)
7250
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  Only a diagnostic; the encoding proceeds unchanged,
   and nothing is printed unless -mwarn-deprecated is in effect.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
      as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7258
7259 /* Functions for operand encoding. ARM, then Thumb. */
7260
7261 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7262
7263 /* If VAL can be encoded in the immediate field of an ARM instruction,
7264 return the encoded form. Otherwise, return FAIL. */
7265
7266 static unsigned int
7267 encode_arm_immediate (unsigned int val)
7268 {
7269 unsigned int a, i;
7270
7271 if (val <= 0xff)
7272 return val;
7273
7274 for (i = 2; i < 32; i += 2)
7275 if ((a = rotate_left (val, i)) <= 0xff)
7276 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7277
7278 return FAIL;
7279 }
7280
7281 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7282 return the encoded form. Otherwise, return FAIL. */
7283 static unsigned int
7284 encode_thumb32_immediate (unsigned int val)
7285 {
7286 unsigned int a, i;
7287
7288 if (val <= 0xff)
7289 return val;
7290
7291 for (i = 1; i <= 24; i++)
7292 {
7293 a = val >> i;
7294 if ((val & ~(0xff << i)) == 0)
7295 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7296 }
7297
7298 a = val & 0xff;
7299 if (val == ((a << 16) | a))
7300 return 0x100 | a;
7301 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7302 return 0x300 | a;
7303
7304 a = val & 0xff00;
7305 if (val == ((a << 16) | a))
7306 return 0x200 | (a >> 8);
7307
7308 return FAIL;
7309 }
/* Encode a VFP SP or DP register number into inst.instruction.
   REG is the register number; POS says which operand slot (d/n/m)
   and register class (S/D) the number belongs to.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D registers 16-31 require the D32 extension; record its use in
     the per-mode feature set, or report an error if the selected
     FPU does not provide it.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* The 5-bit register number is split into a 4-bit field and a
     single bit.  S registers place the low bit separately; D
     registers place the high bit separately.  The field positions
     depend on the operand slot.  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
7364
7365 /* Encode a <shift> in an ARM-format instruction. The immediate,
7366 if any, is handled by md_apply_fix. */
7367 static void
7368 encode_arm_shift (int i)
7369 {
7370 if (inst.operands[i].shift_kind == SHIFT_RRX)
7371 inst.instruction |= SHIFT_ROR << 5;
7372 else
7373 {
7374 inst.instruction |= inst.operands[i].shift_kind << 5;
7375 if (inst.operands[i].immisreg)
7376 {
7377 inst.instruction |= SHIFT_BY_REG;
7378 inst.instruction |= inst.operands[i].imm << 8;
7379 }
7380 else
7381 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7382 }
7383 }
7384
7385 static void
7386 encode_arm_shifter_operand (int i)
7387 {
7388 if (inst.operands[i].isreg)
7389 {
7390 inst.instruction |= inst.operands[i].reg;
7391 encode_arm_shift (i);
7392 }
7393 else
7394 {
7395 inst.instruction |= INST_IMMEDIATE;
7396 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7397 inst.instruction |= inst.operands[i].imm;
7398 }
7399 }
7400
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register and the pre/post-index and write-back
   bits of operand I.  IS_T means the caller is encoding a T-variant
   (user-mode) load/store, which only allows post-indexing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register goes in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      /* parse_address only produces postind together with writeback.  */
      gas_assert (inst.operands[i].writeback);
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the base (bits 16-19) equals Rd/Rt (bits 12-15) and the
     base will be updated (explicit write-back, or post-indexed).  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7443
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form.  PC may be neither the offset register
	 nor a written-back base.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;	/* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7503
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifted-register offset form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: PC restrictions as for mode 2.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7547
7548 /* Write immediate bits [7:0] to the following locations:
7549
7550 |28/24|23 19|18 16|15 4|3 0|
7551 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7552
7553 This function is used by VMOV/VMVN/VORR/VBIC. */
7554
7555 static void
7556 neon_write_immbits (unsigned immbits)
7557 {
7558 inst.instruction |= immbits & 0xf;
7559 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7560 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7561 }
7562
/* Invert the low-order SIZE bits of XHI:XLO.  Either pointer may be
   NULL, in which case that half is left untouched.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo_mask;

  switch (size)
    {
    case 8:
      lo_mask = 0xff;
      break;

    case 16:
      lo_mask = 0xffff;
      break;

    case 32:
      lo_mask = 0xffffffff;
      break;

    case 64:
      /* A 64-bit invert also flips the whole high word.  */
      if (xhi)
	*xhi = (~*xhi) & 0xffffffff;
      lo_mask = 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = (~*xlo) & lo_mask;
}
7599
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D; i.e. every byte is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int i;

  for (i = 0; i < 4; i++)
    {
      unsigned byte = (imm >> (i * 8)) & 0xff;

      if (byte != 0 && byte != 0xff)
	return 0;
    }
  return 1;
}
7611
/* For an immediate of the above form, return 0bABCD: the low bit of
   each byte, packed into a nibble (byte 0 -> bit 0, ... byte 3 -> bit 3).  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1) << byte;

  return result;
}
7620
/* Compress a quarter-float representation to 0b...000 abcdefgh:
   the sign bit plus the seven bits below the repeated exponent bit.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;	/* Exponent low bit + top mantissa.  */
  unsigned sign = (imm >> 24) & 0x80;	/* Sign bit moved down to bit 7.  */

  return low7 | sign;
}
7628
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  Returns FAIL when the constant cannot be
   encoded at any element size.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      /* Quarter floats only encode for 32-bit elements with op == 0.  */
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  /* Every byte is all-zeros or all-ones: squash each byte of the
	     pair down to a single bit and force op to 1.  */
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit constant only encodes when both halves are
	 identical, in which case fall through to the 32-bit cases.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* A single byte placed in any of the four byte positions...  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* ...or a byte above a run of ones ("shifted ones" forms).  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Try 16-bit elements if the two halfwords match.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* A single byte in either halfword position.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Try 8-bit elements if the two bytes match.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7738
#if defined BFD_HOST_64_BIT
/* Returns TRUE if double precision value V may be cast
   to single precision without loss of accuracy.  */

static bfd_boolean
is_double_a_single (bfd_int64_t v)
{
  /* IEEE-754 double layout: bit 63 sign, bits 62-52 exponent,
     bits 51-0 mantissa.  */
  int exp = (int)((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  /* The exponent must be zero, all-ones, or inside the single-precision
     range, and the 29 low mantissa bits (52 - 23, which double_to_single
     discards) must all be zero.  */
  return (exp == 0 || exp == 0x7FF
	  || (exp >= 1023 - 126 && exp <= 1023 + 127))
	 && (mantissa & 0x1FFFFFFFl) == 0;
}
7753
/* Returns a double precision value casted to single precision
   (ignoring the least significant bits in exponent and mantissa).  */

static int
double_to_single (bfd_int64_t v)
{
  int sign = (int) ((v >> 63) & 1l);
  int exp = (int) ((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  if (exp == 0x7FF)
    /* Infinity/NaN exponent maps to the single-precision all-ones value.  */
    exp = 0xFF;
  else
    {
      /* Re-bias from double (1023) to single (127).  */
      exp = exp - 1023 + 127;
      if (exp >= 0xFF)
	{
	  /* Infinity.  */
	  exp = 0x7F;
	  mantissa = 0;
	}
      else if (exp < 0)
	{
	  /* No denormalized numbers.  */
	  exp = 0;
	  mantissa = 0;
	}
    }
  /* Keep the top 23 of the 52 mantissa bits.  */
  mantissa >>= 29;
  return (sign << 31) | (exp << 23) | mantissa;
}
#endif /* BFD_HOST_64_BIT */
7786
/* Kinds of "=constant" literal load, used by move_or_literal_pool to
   choose which move-immediate encodings to attempt.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb-mode load into a core register.  */
  CONST_ARM,	/* ARM-mode load into a core register.  */
  CONST_VEC	/* Load into a VFP/Neon register.  */
};

static void do_vfp_nsyn_opcode (const char *);
7795
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);

  /* Pick the load bit appropriate to the instruction set and width.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* "=expr" only makes sense on a load.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.reloc.exp.X_op != O_constant
      && inst.reloc.exp.X_op != O_symbol
      && inst.reloc.exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.reloc.exp.X_op == O_constant
      || inst.reloc.exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.reloc.exp.X_op == O_big)
	{
	  /* Reassemble the bignum (or converted flonum) littlenums
	     into a host integer V.  */
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.reloc.exp.X_add_number == -1)
	    {
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	      | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.reloc.exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* This can be encoded only for a low register.  */
	      if ((v & ~0xFF) == 0 && (inst.operands[i].reg < 8))
		{
		  /* This can be done with a mov(1) instruction.  */
		  inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
		  inst.instruction |= v;
		  return TRUE;
		}

	      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  /* Try the value directly, then its complement.  */
		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000 /* MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC)
	    {
	      /* Try a Neon VMOV/VMVN immediate encoding, first as-is and
		 then with the value inverted.  */
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.reloc.exp.X_unsigned
		  ? 0
		  : ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move encoding worked: fall back to a literal-pool load.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  /* Rewrite the operand as a PC-relative, pre-indexed reference to the
     pool entry; the actual offset is resolved by a fixup later.  */
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8044
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      /* "=constant": try a vmov, otherwise set up a literal-pool load.  */
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  /* Base register goes in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Choose the relocation: an explicit override wins, group relocations
     set earlier are preserved, otherwise the default CP offset reloc.  */
  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8121
8122 /* Functions for instruction encoding, sorted by sub-architecture.
8123 First some generics; their names are taken from the conventional
8124 bit positions for register arguments in ARM format instructions. */
8125
/* Instructions whose encoding is fully determined by the opcode table:
   there are no operand fields to fill in.  */
static void
do_noargs (void)
{
}
8130
8131 static void
8132 do_rd (void)
8133 {
8134 inst.instruction |= inst.operands[0].reg << 12;
8135 }
8136
8137 static void
8138 do_rd_rm (void)
8139 {
8140 inst.instruction |= inst.operands[0].reg << 12;
8141 inst.instruction |= inst.operands[1].reg;
8142 }
8143
8144 static void
8145 do_rm_rn (void)
8146 {
8147 inst.instruction |= inst.operands[0].reg;
8148 inst.instruction |= inst.operands[1].reg << 16;
8149 }
8150
8151 static void
8152 do_rd_rn (void)
8153 {
8154 inst.instruction |= inst.operands[0].reg << 12;
8155 inst.instruction |= inst.operands[1].reg << 16;
8156 }
8157
8158 static void
8159 do_rn_rd (void)
8160 {
8161 inst.instruction |= inst.operands[0].reg << 16;
8162 inst.instruction |= inst.operands[1].reg << 12;
8163 }
8164
8165 static void
8166 do_tt (void)
8167 {
8168 inst.instruction |= inst.operands[0].reg << 8;
8169 inst.instruction |= inst.operands[1].reg << 16;
8170 }
8171
8172 static bfd_boolean
8173 check_obsolete (const arm_feature_set *feature, const char *msg)
8174 {
8175 if (ARM_CPU_IS_ANY (cpu_variant))
8176 {
8177 as_tsktsk ("%s", msg);
8178 return TRUE;
8179 }
8180 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8181 {
8182 as_bad ("%s", msg);
8183 return TRUE;
8184 }
8185
8186 return FALSE;
8187 }
8188
8189 static void
8190 do_rd_rm_rn (void)
8191 {
8192 unsigned Rn = inst.operands[2].reg;
8193 /* Enforce restrictions on SWP instruction. */
8194 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
8195 {
8196 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
8197 _("Rn must not overlap other operands"));
8198
8199 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8200 */
8201 if (!check_obsolete (&arm_ext_v8,
8202 _("swp{b} use is obsoleted for ARMv8 and later"))
8203 && warn_on_deprecated
8204 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
8205 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8206 }
8207
8208 inst.instruction |= inst.operands[0].reg << 12;
8209 inst.instruction |= inst.operands[1].reg;
8210 inst.instruction |= Rn << 16;
8211 }
8212
8213 static void
8214 do_rd_rn_rm (void)
8215 {
8216 inst.instruction |= inst.operands[0].reg << 12;
8217 inst.instruction |= inst.operands[1].reg << 16;
8218 inst.instruction |= inst.operands[2].reg;
8219 }
8220
/* Encode Rm (bits 0-3), Rd (bits 12-15) and Rn (bits 16-19); the
   address operand must be a plain [Rn] with no offset.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* Reject any non-zero immediate offset parsed with the address.  */
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8233
8234 static void
8235 do_imm0 (void)
8236 {
8237 inst.instruction |= inst.operands[0].imm;
8238 }
8239
8240 static void
8241 do_rd_cpaddr (void)
8242 {
8243 inst.instruction |= inst.operands[0].reg << 12;
8244 encode_arm_cp_address (1, TRUE, TRUE, 0);
8245 }
8246
8247 /* ARM instructions, in alphabetical order by function name (except
8248 that wrapper functions appear immediately after the function they
8249 wrap). */
8250
8251 /* This is a pseudo-op of the form "adr rd, label" to be converted
8252 into a relative address of the form "add rd, pc, #label-.-8". */
8253
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* In ARM state PC reads as the current instruction address plus 8,
     so bias the addend to compensate.  */
  inst.reloc.exp.X_add_number -= 8;
}
8265
8266 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8267 into a relative address of the form:
8268 add rd, pc, #low(label-.-8)"
8269 add rd, rd, #high(label-.-8)" */
8270
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* ADRL always expands to a two-instruction sequence.  */
  inst.size = INSN_SIZE * 2;
  /* In ARM state PC reads as the current instruction address plus 8.  */
  inst.reloc.exp.X_add_number -= 8;
}
8283
8284 static void
8285 do_arit (void)
8286 {
8287 if (!inst.operands[1].present)
8288 inst.operands[1].reg = inst.operands[0].reg;
8289 inst.instruction |= inst.operands[0].reg << 12;
8290 inst.instruction |= inst.operands[1].reg << 16;
8291 encode_arm_shifter_operand (2);
8292 }
8293
8294 static void
8295 do_barrier (void)
8296 {
8297 if (inst.operands[0].present)
8298 inst.instruction |= inst.operands[0].imm;
8299 else
8300 inst.instruction |= 0xf;
8301 }
8302
8303 static void
8304 do_bfc (void)
8305 {
8306 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8307 constraint (msb > 32, _("bit-field extends past end of register"));
8308 /* The instruction encoding stores the LSB and MSB,
8309 not the LSB and width. */
8310 inst.instruction |= inst.operands[0].reg << 12;
8311 inst.instruction |= inst.operands[1].imm << 7;
8312 inst.instruction |= (msb - 1) << 16;
8313 }
8314
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
8334
8335 static void
8336 do_bfx (void)
8337 {
8338 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8339 _("bit-field extends past end of register"));
8340 inst.instruction |= inst.operands[0].reg << 12;
8341 inst.instruction |= inst.operands[1].reg;
8342 inst.instruction |= inst.operands[2].imm << 7;
8343 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8344 }
8345
8346 /* ARM V5 breakpoint instruction (argument parse)
8347 BKPT <16 bit unsigned immediate>
8348 Instruction is not conditional.
8349 The bit pattern given in insns[] has the COND_ALWAYS condition,
8350 and it is an error if the caller tried to override that. */
8351
8352 static void
8353 do_bkpt (void)
8354 {
8355 /* Top 12 of 16 bits to bits 19:8. */
8356 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8357
8358 /* Bottom 4 of 16 bits to bits 3:0. */
8359 inst.instruction |= inst.operands[0].imm & 0xf;
8360 }
8361
/* Set up the relocation for a branch-class instruction.  A '(plt)' or
   '(tlscall)' suffix parsed into operand 0 overrides DEFAULT_RELOC.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
8378
static void
do_branch (void)
{
#ifdef OBJ_ELF
  /* EABI v4 and later uses a distinct jump relocation so the linker
     can handle interworking and range extension.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8389
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      /* A conditional BL cannot be converted to BLX by the linker,
	 so it uses the plain jump relocation instead of the call one.  */
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8405
8406 /* ARM V5 branch-link-exchange instruction (argument parse)
8407 BLX <target_addr> ie BLX(1)
8408 BLX{<condition>} <Rm> ie BLX(2)
8409 Unfortunately, there are two different opcodes for this mnemonic.
8410 So, the insns[].value is not used, and the code here zaps values
8411 into inst.instruction.
8412 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8413
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* Replace the table opcode with the unconditional BLX(1) form.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8437
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* The #ifdef guards only the condition: without OBJ_ELF the
     assignment below is unconditional and no reloc is ever wanted.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8461
8462
8463 /* ARM v5TEJ. Jump to Jazelle code. */
8464
8465 static void
8466 do_bxj (void)
8467 {
8468 if (inst.operands[0].reg == REG_PC)
8469 as_tsktsk (_("use of r15 in bxj is not really useful"));
8470
8471 inst.instruction |= inst.operands[0].reg;
8472 }
8473
8474 /* Co-processor data operation:
8475 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8476 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8477 static void
8478 do_cdp (void)
8479 {
8480 inst.instruction |= inst.operands[0].reg << 8;
8481 inst.instruction |= inst.operands[1].imm << 20;
8482 inst.instruction |= inst.operands[2].reg << 12;
8483 inst.instruction |= inst.operands[3].reg << 16;
8484 inst.instruction |= inst.operands[4].reg;
8485 inst.instruction |= inst.operands[5].imm << 5;
8486 }
8487
8488 static void
8489 do_cmp (void)
8490 {
8491 inst.instruction |= inst.operands[0].reg << 16;
8492 encode_arm_shifter_operand (1);
8493 }
8494
8495 /* Transfer between coprocessor and ARM registers.
8496 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8497 MRC2
8498 MCR{cond}
8499 MCR2
8500
8501 No special properties. */
8502
/* Describes one coprocessor register whose MRC/MCR access is deprecated
   or obsoleted by some architecture version.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opcode_1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opcode_2 field.  */
  arm_feature_set deprecated;	/* Architectures where access is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where access is obsoleted.  */
  const char *dep_msg;		/* Diagnostic for deprecated access.  */
  const char *obs_msg;		/* Diagnostic for obsoleted access.  */
};
8515
8516 #define DEPR_ACCESS_V8 \
8517 N_("This coprocessor register access is deprecated in ARMv8")
8518
8519 /* Table of all deprecated coprocessor registers. */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
  /* Every current entry is deprecated from ARMv8 onwards, obsoleted
     nowhere (ARM_ARCH_NONE) and carries no obsolete message.  */
  {15, 0, 7, 10, 5,					/* CP15DMB.  */
   ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   DEPR_ACCESS_V8, NULL},
  {15, 0, 7, 10, 4,					/* CP15DSB.  */
   ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   DEPR_ACCESS_V8, NULL},
  {15, 0, 7,  5, 4,					/* CP15ISB.  */
   ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   DEPR_ACCESS_V8, NULL},
  {14, 6, 1,  0, 0,					/* TEEHBR.  */
   ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   DEPR_ACCESS_V8, NULL},
  {14, 6, 0,  0, 0,					/* TEECR.  */
   ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   DEPR_ACCESS_V8, NULL},
};
8538
8539 #undef DEPR_ACCESS_V8
8540
/* Number of entries in deprecated_coproc_regs[].  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8543
/* Encode MRC/MRC2/MCR/MCR2, applying per-mode register restrictions and
   warning about deprecated coprocessor register accesses.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2	 */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2	 */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn if the register being accessed is in the deprecation table.  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8593
8594 /* Transfer between coprocessor register and pair of ARM registers.
8595 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8596 MCRR2
8597 MRRC{cond}
8598 MRRC2
8599
8600 Two XScale instructions are special cases of these:
8601
8602 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8603 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8604
8605 Result unpredictable if Rd or Rn is R15. */
8606
/* Encode MCRR/MCRR2/MRRC/MRRC2: coproc, opcode, two core registers and
   a coprocessor register.  Rd and Rn must not be PC (nor SP in Thumb).  */
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
8632
8633 static void
8634 do_cpsi (void)
8635 {
8636 inst.instruction |= inst.operands[0].imm << 6;
8637 if (inst.operands[1].present)
8638 {
8639 inst.instruction |= CPSI_MMOD;
8640 inst.instruction |= inst.operands[1].imm;
8641 }
8642 }
8643
8644 static void
8645 do_dbg (void)
8646 {
8647 inst.instruction |= inst.operands[0].imm;
8648 }
8649
8650 static void
8651 do_div (void)
8652 {
8653 unsigned Rd, Rn, Rm;
8654
8655 Rd = inst.operands[0].reg;
8656 Rn = (inst.operands[1].present
8657 ? inst.operands[1].reg : Rd);
8658 Rm = inst.operands[2].reg;
8659
8660 constraint ((Rd == REG_PC), BAD_PC);
8661 constraint ((Rn == REG_PC), BAD_PC);
8662 constraint ((Rm == REG_PC), BAD_PC);
8663
8664 inst.instruction |= Rd << 16;
8665 inst.instruction |= Rn << 0;
8666 inst.instruction |= Rm << 8;
8667 }
8668
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emits nothing: IT has no ARM encoding.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Low 4 bits of the opcode carry the then/else mask; 0x10 marks
	 the mask as explicitly specified for later validation.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8685
8686 /* If there is only one register in the register list,
8687 then return its register number. Otherwise return -1. */
/* If there is only one register in the register list RANGE, return its
   register number (0-15).  Otherwise return -1.

   An empty RANGE returns -1.  The explicit i < 0 check matters: for
   RANGE == 0, ffs() returns 0 so i is -1, and evaluating (1 << -1) in
   the comparison would be undefined behavior.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;
  return (i < 0 || i > 15 || range != (1 << i)) ? -1 : i;
}
8694
/* Encode an LDM/STM (or PUSH/POP) with base register, register list and
   writeback, warning about UNPREDICTABLE writeback combinations.  When
   FROM_PUSH_POP_MNEM is set and the list holds a single register, switch
   to the single-register A2 PUSH/POP encoding.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* '^' on the register list selects the user-bank / exception-return
     form.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      /* Keep only the condition field and rebuild as LDR/STR Rt,[SP...].  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8745
/* Plain LDM/STM mnemonics: the single-register PUSH/POP A2 rewrite
   does not apply.  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
8751
8752 /* ARMv5TE load-consecutive (argument parse)
8753 Mode is like LDRH.
8754
8755 LDRccD R, mode
8756 STRccD R, mode. */
8757
/* LDRD/STRD: transfer an even/odd register pair; operand 1 (the second
   register) is optional and defaults to Rt + 1.  */
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* r14 would make the pair r14/r15, and PC cannot be transferred.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8793
/* LDREX: only the plain [Rn] addressing form (no offset, index or
   writeback) is valid.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* NOTE(review): PC was already rejected by the BAD_ADDR_MODE
     constraint above, so this check looks unreachable — confirm.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* No fixup is needed; the offset was verified to be zero.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8825
/* LDREXD: load-exclusive of an even/odd register pair; the second
   register is optional and defaults to Rt + 1.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  /* Operand 2 is the base register; the second transfer register is
     implicit in the encoding.  */
  inst.instruction |= inst.operands[2].reg << 16;
}
8841
8842 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8843 which is not a multiple of four is UNPREDICTABLE. */
8844 static void
8845 check_ldr_r15_aligned (void)
8846 {
8847 constraint (!(inst.operands[1].immisreg)
8848 && (inst.operands[0].reg == REG_PC
8849 && inst.operands[1].reg == REG_PC
8850 && (inst.reloc.exp.X_add_number & 0x3)),
8851 _("ldr to register 15 must be 4-byte alligned"));
8852 }
8853
/* LDR/STR word/byte: encode Rt and an addr-mode-2 address; a non-register
   second operand (ldr rX, =imm) may be satisfied from the literal pool.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    /* A non-zero return means the constant was handled (e.g. as a MOV
       or literal-pool entry) and encoding is complete.  */
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
8864
/* LDRT/STRT (user-mode translation forms).  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      /* Rewrite the parsed [Rn] as a post-indexed, written-back form.  */
      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
8883
8884 /* Halfword and signed-byte load/store operations. */
8885
/* ARMv4 halfword / signed-byte load/store: addr-mode-3 encoding; Rt
   must not be PC.  */
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    /* A non-zero return means the constant operand was fully handled.  */
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
8896
/* LDRHT/LDRSBT/etc (user-mode translation forms, addr-mode-3).  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      /* Rewrite the parsed [Rn] as a post-indexed, written-back form.  */
      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
8915
8916 /* Co-processor register load/store.
8917 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8918 static void
8919 do_lstc (void)
8920 {
8921 inst.instruction |= inst.operands[0].reg << 8;
8922 inst.instruction |= inst.operands[1].reg << 12;
8923 encode_arm_cp_address (2, TRUE, TRUE, 0);
8924 }
8925
/* MLA/MLS: Rd in bits 16-19, Rm in bits 0-3, Rs in bits 8-11, Rn
   (accumulator) in bits 12-15.  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
8940
8941 static void
8942 do_mov (void)
8943 {
8944 inst.instruction |= inst.operands[0].reg << 12;
8945 encode_arm_shifter_operand (1);
8946 }
8947
8948 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8949 static void
8950 do_mov16 (void)
8951 {
8952 bfd_vma imm;
8953 bfd_boolean top;
8954
8955 top = (inst.instruction & 0x00400000) != 0;
8956 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
8957 _(":lower16: not allowed this instruction"));
8958 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
8959 _(":upper16: not allowed instruction"));
8960 inst.instruction |= inst.operands[0].reg << 12;
8961 if (inst.reloc.type == BFD_RELOC_UNUSED)
8962 {
8963 imm = inst.reloc.exp.X_add_number;
8964 /* The value is in two pieces: 0:11, 16:19. */
8965 inst.instruction |= (imm & 0x00000fff);
8966 inst.instruction |= (imm & 0x0000f000) << 4;
8967 }
8968 }
8969
/* Handle the VFP forms of MRS.  An APSR/FPSCR destination becomes
   FMSTAT; a VFP system register source becomes FMRX.  Returns FAIL when
   neither applies so the caller falls back to the core MRS encoding.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* FMSTAT takes no operands; clear them before re-dispatch.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
8988
8989 static int
8990 do_vfp_nsyn_msr (void)
8991 {
8992 if (inst.operands[0].isvec)
8993 do_vfp_nsyn_opcode ("fmxr");
8994 else
8995 return FAIL;
8996
8997 return SUCCESS;
8998 }
8999
/* VMRS: move from a VFP system register (operand 1) to core register Rt.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9023
/* VMSR: move core register Rt (operand 1) to a VFP system register.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9042
/* MRS: read CPSR/SPSR (or a banked register) into Rd.  The VFP-register
   variant is dispatched first.  */
static void
do_mrs (void)
{
  unsigned br;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* Banked-register form: the parser encodes the banked register
	 number into the reg field; sanity-check its marker bits.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      /* 15<<16 is the SYSm/mask field for the whole-PSR form.  */
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9071
9072 /* Two possible forms:
9073 "{C|S}PSR_<field>, Rm",
9074 "{C|S}PSR_f, #expression". */
9075
static void
do_msr (void)
{
  /* Dispatch the VFP system-register form first.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* operand 0 carries the already-encoded PSR field mask.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: defer encoding of the rotated immediate to the
	 fixup machinery.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
9092
/* MUL: Rd in bits 16-19, Rm in bits 0-3, Rs in bits 8-11.  The
   two-operand form defaults Rs to Rd.  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm is UNPREDICTABLE before ARMv6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
9108
9109 /* Long Multiply Parser
9110 UMULL RdLo, RdHi, Rm, Rs
9111 SMULL RdLo, RdHi, Rm, Rs
9112 UMLAL RdLo, RdHi, Rm, Rs
9113 SMLAL RdLo, RdHi, Rm, Rs. */
9114
static void
do_mull (void)
{
  /* RdLo in bits 12-15, RdHi in bits 16-19, Rm in bits 0-3,
     Rs in bits 8-11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
9133
/* NOP: on v6K and later (or when a hint operand is given), emit the
   architectural hint encoding; otherwise leave the table's mov r0,r0.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* Keep only the condition.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
9147
/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
   Condition defaults to COND_ALWAYS.
   Error if Rd, Rn or Rm are R15.  */

static void
do_pkhbt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  /* Optional LSL shift on Rm.  */
  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9162
/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note the swapped Rn/Rm encoding.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9185
/* ARMv5TE: Preload-Cache
   MP Extensions: Preload for write

   PLD(W) <addr_mode>

   Syntactically, like LDR with B=1, W=0, L=1.  */

static void
do_pld (void)
{
  /* Preloads accept only a plain pre-indexed address with no writeback.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9206
/* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* Same addressing restrictions as PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI is encoded with the P bit clear, unlike PLD.  */
  inst.instruction &= ~PRE_INDEX;
}
9222
/* Encode PUSH/POP {reglist} by rewriting the operands into the
   equivalent STMDB/LDMIA SP!, {reglist} form and delegating to the
   LDM/STM encoder.  */

static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list to operand 1 and synthesise SP! as the
     base register in operand 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9235
/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
   word at the specified address and the following word
   respectively.
   Unconditionally executed.
   Error if Rn is R15.  */

static void
do_rfe (void)
{
  inst.instruction |= inst.operands[0].reg << 16;	/* Base register Rn.  */
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
9249
/* ARM V6 ssat (argument parse).  */

static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* SSAT encodes the saturation position as sat_imm = #pos - 1.  */
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 usat (argument parse).  */

static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* USAT encodes the saturation position directly (no -1 bias).  */
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 ssat16 (argument parse).  */

static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Like SSAT, the position is biased by -1 in the encoding.  */
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}

/* ARM V6 usat16 (argument parse).  */

static void
do_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;
}
9293
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.

   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE.  */

static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* Non-zero operand means BE; set the E bit in the encoding.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9310
/* Encode the shift mnemonics (LSL/LSR/ASR/ROR):
   <op> Rd, {Rm,} Rs  (register shift) or  <op> Rd, {Rm,} #imm.  */

static void
do_shift (void)
{
  /* Two-operand form shifts Rd in place.  */
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)	/* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate shift: amount is resolved by the fixup pass.  */
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}
9331
/* Encode SMC (secure monitor call); the immediate is handled as a
   relocation so it can be range-checked later.  */

static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}

/* Encode HVC (hypervisor call); immediate handled via relocation.  */

static void
do_hvc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_HVC;
  inst.reloc.pc_rel = 0;
}

/* Encode SVC/SWI; comment field handled via relocation.  */

static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
9352
/* Encode ARM-state SETPAN #imm1 (Privileged Access Never, ARMv8.1).  */

static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  /* The single PAN state bit goes in bit 9 of the ARM encoding.  */
  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}

/* Encode Thumb-state SETPAN #imm1.  */

static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  /* In the Thumb encoding the state bit sits at bit 3.  */
  inst.instruction |= (inst.operands[0].imm << 3);
}
9370
9371 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9372 SMLAxy{cond} Rd,Rm,Rs,Rn
9373 SMLAWy{cond} Rd,Rm,Rs,Rn
9374 Error if any register is R15. */
9375
9376 static void
9377 do_smla (void)
9378 {
9379 inst.instruction |= inst.operands[0].reg << 16;
9380 inst.instruction |= inst.operands[1].reg;
9381 inst.instruction |= inst.operands[2].reg << 8;
9382 inst.instruction |= inst.operands[3].reg << 12;
9383 }
9384
/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   Error if any register is R15.
   Warning if Rdlo == Rdhi.  */

static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* RdLo.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* RdHi.  */
  inst.instruction |= inst.operands[2].reg;		/* Rm.  */
  inst.instruction |= inst.operands[3].reg << 8;	/* Rs.  */

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
9401
9402 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9403 SMULxy{cond} Rd,Rm,Rs
9404 Error if any register is R15. */
9405
9406 static void
9407 do_smul (void)
9408 {
9409 inst.instruction |= inst.operands[0].reg << 16;
9410 inst.instruction |= inst.operands[1].reg;
9411 inst.instruction |= inst.operands[2].reg << 8;
9412 }
9413
/* ARM V6 srs (argument parse).  The variable fields in the encoding are
   the same for both ARM and Thumb-2.  */

static void
do_srs (void)
{
  int reg;

  /* The base register is optional and defaults to SP; if given it
     must be SP.  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;	/* Target mode number.  */
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
9435
/* ARM V6 strex (argument parse).
   STREX{cond} Rd, Rt, [Rn] -- the address must be a plain register
   with a zero offset.  */

static void
do_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* Rd must not overlap Rt or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd (status).  */
  inst.instruction |= inst.operands[1].reg;		/* Rt.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rn (address).  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9461
/* Thumb STREXB/STREXH (argument parse): plain register address only,
   and the status register must not overlap the others.  */

static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9476
/* ARM STREXD (argument parse): stores an even/odd register pair.
   STREXD{cond} Rd, Rt, {Rt2,} [Rn].  */

static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* Rd must not overlap the pair or the base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd (status).  */
  inst.instruction |= inst.operands[1].reg;		/* Rt (pair base).  */
  inst.instruction |= inst.operands[3].reg << 16;	/* Rn (address).  */
}
9498
/* ARM V8 STRL.  */
static void
do_stlex (void)
{
  /* Status register must not overlap the data or address registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}

/* Thumb variant of STLEX; same overlap rule, different field order.  */

static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9517
/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
   extends it to 32-bits, and adds the result to a value in another
   register.  You can specify a rotation by 0, 8, 16, or 24 bits
   before extracting the 16-bit value.
   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxtah (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  /* Rotation is pre-scaled to 0-3 by the parser; field is bits 10-11.  */
  inst.instruction |= inst.operands[3].imm << 10;
}
9534
/* ARM V6 SXTH.

   SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxth (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  /* Rotation field at bits 10-11, as for SXTAH.  */
  inst.instruction |= inst.operands[2].imm << 10;
}
9548 \f
/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.  */

/* Single-precision one-operand op: Sd, Sm.  */

static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Single-precision two-operand op: Sd, Sn, Sm.  */

static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Single-precision compare against zero: only Sd is encoded.  */

static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}

/* Convert single to double: Dd, Sm.  */

static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Convert double to single: Sd, Dm.  */

static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

/* Move VFP single to core register: Rd, Sn.  */

static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

/* Move a consecutive SP register pair to two core registers.  */

static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Move core register to VFP single: Sn, Rd.  */

static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

/* Move two core registers to a consecutive SP register pair.  */

static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Single-precision load/store: Sd, <coprocessor address>.  */

static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

/* Double-precision load/store: Dd, <coprocessor address>.  */

static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9634
9635
/* Common encoder for VLDM/VSTM of single-precision registers.
   Operand 0 is the base register (optionally with writeback),
   operand 1 the register list (reg = first register, imm = count).  */

static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Only the IA form makes sense without writeback.  */
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;	/* Register count.  */
}
9648
/* Common encoder for VLDM/VSTM of double-precision registers,
   including the FLDMX/FSTMX ("unknown precision") variants.  */

static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* The offset field counts words: two per D register, plus one
     extra for the X (unknown-precision) forms.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
9669
/* Thin wrappers selecting the addressing variant for the common
   VFP load/store-multiple encoders above.  */

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

/* The X forms transfer an extra word of unknown precision.  */

static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9705
/* Double-precision register-pattern encoders; each encodes the listed
   operands into the named VFP register fields.  */

static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
9748
/* VFPv3 instructions.  */

/* VMOV Sd, #imm: the 8-bit encoded constant is split across the
   instruction -- high nibble at bits 16-19, low nibble at bits 0-3.  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

/* VMOV Dd, #imm: same immediate split as the SP form.  */

static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9765
/* Encode the fraction-bits field of the VFPv3 fixed-point conversion
   instructions.  SRCSIZE is the fixed-point operand width (16 or 32);
   operand 1 holds the requested number of fraction bits.  The field
   stores SRCSIZE - fbits, split across bit 5 and bits 0-3.  */

static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  /* Bit 5 holds the LSB of the value, bits 0-3 the remaining bits.  */
  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
9789
/* Fixed-point conversion wrappers: encode the destination register,
   then the fraction-bits field for the given operand width.  */

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
9817 \f
/* FPA instructions.  Also in a logical order.  */

/* FPA compare: first operand into bits 16-19, second into bits 0-3.  */

static void
do_fpa_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
9826
/* Encode FPA LFM/SFM (load/store multiple floating registers).
   Operand 0 is the first register, operand 1 the register count
   (1-4, encoded into the CP_T_X/CP_T_Y bits), operand 2 the address.
   Stack-style addressing modes are emulated with explicit offsets.  */

static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* The two-bit count is split across non-adjacent opcode bits;
     a count of 4 is encoded as 0.  */
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;	 break;
    case 2: inst.instruction |= CP_T_Y;	 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register occupies 12 bytes in the transfer.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9865 \f
/* iWMMXt instructions: strictly in alphabetical order.  */

/* TANDC/TORC/TEXTRC: only accept r15 as the destination.  */

static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}

static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}

static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}

static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}

static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}

static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
9931
/* Encode WLDRB/WLDRH/WSTRB/WSTRH.  The offset is scaled, so use the
   S2 (scale-by-2... see reloc definitions) coprocessor-offset reloc
   appropriate to the current instruction set.  */

static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}
9943
/* Encode WLDRW/WSTRW; the control-register form is unconditional.  */

static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* Force the unconditional (0xF) condition field.  */
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
9957
/* Encode WLDRD/WSTRD.  iWMMXt2 additionally allows a register-offset
   form, which uses a different (unconditional) encoding built here by
   hand instead of via encode_arm_cp_address.  */

static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Rewrite into the iWMMXt2 register-offset encoding.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
9980
/* Encode WSHUFH; the 8-bit shuffle immediate is split into two
   nibbles, high at bits 20-23 and low at bits 0-3.  */

static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}

static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}
9998
/* Encode the iWMMXt shift instructions, which take either a register
   (plain three-register encoding) or, on iWMMXt2, a 5-bit immediate.
   A #0 immediate has no direct encoding and is rewritten as an
   equivalent instruction.  */

static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 select the operation/size; rewrite #0 shifts as
	   full-width rotates (or a plain move for the 64-bit case).  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Immediate form is unconditional; imm bit 4 goes to bit 8.  */
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10048 \f
/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
   operations first, then control, shift, and load/store.  */

/* Insns like "foo X,Y,Z".  */

static void
do_mav_triple (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Insns like "foo W,X,Y,Z".
   where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */

static void
do_mav_quad (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}

/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
static void
do_mav_dspsc (void)
{
  /* Only the source register is variable; DSPSC is implicit.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
10080
/* Maverick shift immediate instructions.
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */

static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.	 */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
10100 \f
/* XScale instructions.	 Also sorted arithmetic before move.  */

/* Xscale multiply-accumulate (argument parse)
     MIAcc   acc0,Rm,Rs
     MIAPHcc acc0,Rm,Rs
     MIAxycc acc0,Rm,Rs.  */

static void
do_xsc_mia (void)
{
  /* The accumulator (operand 0) is implicit in the opcode.  */
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Xscale move-accumulator-register (argument parse)

     MARcc   acc0,RdLo,RdHi.  */

static void
do_xsc_mar (void)
{
  inst.instruction |= inst.operands[1].reg << 12;	/* RdLo.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* RdHi.  */
}

/* Xscale move-register-accumulator (argument parse)

     MRAcc   RdLo,RdHi,acc0.  */

static void
do_xsc_mra (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[0].reg << 12;	/* RdLo.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* RdHi.  */
}
10137 \f
10138 /* Encoding functions relevant only to Thumb. */
10139
/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      /* LSR/ASR allow a shift of 32 (encoded as 0); LSL/ROR do not.  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* The 5-bit amount is split: bits 4-2 at insn bits 14-12,
	 bits 1-0 at insn bits 7-6.  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10174
10175
/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.  If it is a store insn,
   reject PC in Rn.  */

static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #shift}] -- register offset form.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #imm]{!} -- immediate pre-indexed form.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      /* Offset value resolved by the fixup pass.  */
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #imm -- post-indexed form (always writes back).  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10254
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.
   Each X() entry is (mnemonic suffix, 16-bit opcode, 32-bit opcode);
   an all-ones 32-bit opcode marks a mnemonic with no wide encoding
   (see _muls below).  */
#define T16_32_TAB \
 X(_adc, 4140, eb400000), \
 X(_adcs, 4140, eb500000), \
 X(_add, 1c00, eb000000), \
 X(_adds, 1c00, eb100000), \
 X(_addi, 0000, f1000000), \
 X(_addis, 0000, f1100000), \
 X(_add_pc,000f, f20f0000), \
 X(_add_sp,000d, f10d0000), \
 X(_adr, 000f, f20f0000), \
 X(_and, 4000, ea000000), \
 X(_ands, 4000, ea100000), \
 X(_asr, 1000, fa40f000), \
 X(_asrs, 1000, fa50f000), \
 X(_b, e000, f000b000), \
 X(_bcond, d000, f0008000), \
 X(_bic, 4380, ea200000), \
 X(_bics, 4380, ea300000), \
 X(_cmn, 42c0, eb100f00), \
 X(_cmp, 2800, ebb00f00), \
 X(_cpsie, b660, f3af8400), \
 X(_cpsid, b670, f3af8600), \
 X(_cpy, 4600, ea4f0000), \
 X(_dec_sp,80dd, f1ad0d00), \
 X(_eor, 4040, ea800000), \
 X(_eors, 4040, ea900000), \
 X(_inc_sp,00dd, f10d0d00), \
 X(_ldmia, c800, e8900000), \
 X(_ldr, 6800, f8500000), \
 X(_ldrb, 7800, f8100000), \
 X(_ldrh, 8800, f8300000), \
 X(_ldrsb, 5600, f9100000), \
 X(_ldrsh, 5e00, f9300000), \
 X(_ldr_pc,4800, f85f0000), \
 X(_ldr_pc2,4800, f85f0000), \
 X(_ldr_sp,9800, f85d0000), \
 X(_lsl, 0000, fa00f000), \
 X(_lsls, 0000, fa10f000), \
 X(_lsr, 0800, fa20f000), \
 X(_lsrs, 0800, fa30f000), \
 X(_mov, 2000, ea4f0000), \
 X(_movs, 2000, ea5f0000), \
 X(_mul, 4340, fb00f000), \
 X(_muls, 4340, ffffffff), /* no 32b muls */ \
 X(_mvn, 43c0, ea6f0000), \
 X(_mvns, 43c0, ea7f0000), \
 X(_neg, 4240, f1c00000), /* rsb #0 */ \
 X(_negs, 4240, f1d00000), /* rsbs #0 */ \
 X(_orr, 4300, ea400000), \
 X(_orrs, 4300, ea500000), \
 X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
 X(_push, b400, e92d0000), /* stmdb sp!,... */ \
 X(_rev, ba00, fa90f080), \
 X(_rev16, ba40, fa90f090), \
 X(_revsh, bac0, fa90f0b0), \
 X(_ror, 41c0, fa60f000), \
 X(_rors, 41c0, fa70f000), \
 X(_sbc, 4180, eb600000), \
 X(_sbcs, 4180, eb700000), \
 X(_stmia, c000, e8800000), \
 X(_str, 6000, f8400000), \
 X(_strb, 7000, f8000000), \
 X(_strh, 8000, f8200000), \
 X(_str_sp,9000, f84d0000), \
 X(_sub, 1e00, eba00000), \
 X(_subs, 1e00, ebb00000), \
 X(_subi, 8000, f1a00000), \
 X(_subis, 8000, f1b00000), \
 X(_sxtb, b240, fa4ff080), \
 X(_sxth, b200, fa0ff080), \
 X(_tst, 4200, ea100f00), \
 X(_uxtb, b2c0, fa5ff080), \
 X(_uxth, b280, fa1ff080), \
 X(_nop, bf00, f3af8000), \
 X(_yield, bf10, f3af8001), \
 X(_wfe, bf20, f3af8002), \
 X(_wfi, bf30, f3af8003), \
 X(_sev, bf40, f3af8004), \
 X(_sevl, bf50, f3af8005), \
 X(_udf, de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* 16-bit opcode for a given T_MNEM_* code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* 32-bit opcode for a given T_MNEM_* code.  Bit 20 set in the wide
   opcode marks a flag-setting encoding (see THUMB_SETS_FLAGS).  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10360
10361 /* Thumb instruction encoders, in alphabetical order. */
10362
10363 /* ADDW or SUBW. */
10364
10365 static void
10366 do_t_add_sub_w (void)
10367 {
10368 int Rd, Rn;
10369
10370 Rd = inst.operands[0].reg;
10371 Rn = inst.operands[1].reg;
10372
10373 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10374 is the SP-{plus,minus}-immediate form of the instruction. */
10375 if (Rn == REG_SP)
10376 constraint (Rd == REG_PC, BAD_PC);
10377 else
10378 reject_bad_reg (Rd);
10379
10380 inst.instruction |= (Rn << 16) | (Rd << 8);
10381 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10382 }
10383
10384 /* Parse an add or subtract instruction. We get here with inst.instruction
10385 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
10386
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg /* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */

  /* Writing PC is a branch, so it must close any open IT block.  */
  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* In UAL, flag-setting narrow encodings are only available outside
	 an IT block, and non-flag-setting narrow encodings only inside
	 one.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate third operand.  */
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* Preserve any ALU group reloc the parser already chose;
		     otherwise use the generic Thumb ADD reloc.  */
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  /* Allow relaxation to the wide encoding unless the user
		     demanded a 16-bit instruction.  */
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* Must use a 32-bit encoding.  */
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return form SUBS PC, LR, #const
		     is permitted with PC as destination.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register third operand.  */
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* ADD Rd, Rd, Rn and ADD Rd, Rn, Rd are the same
			 instruction; canonicalise so Rn holds the other
			 operand.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Pre-UAL syntax: 16-bit encodings only.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10596
10597 static void
10598 do_t_adr (void)
10599 {
10600 unsigned Rd;
10601
10602 Rd = inst.operands[0].reg;
10603 reject_bad_reg (Rd);
10604
10605 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
10606 {
10607 /* Defer to section relaxation. */
10608 inst.relax = inst.instruction;
10609 inst.instruction = THUMB_OP16 (inst.instruction);
10610 inst.instruction |= Rd << 4;
10611 }
10612 else if (unified_syntax && inst.size_req != 2)
10613 {
10614 /* Generate a 32-bit opcode. */
10615 inst.instruction = THUMB_OP32 (inst.instruction);
10616 inst.instruction |= Rd << 8;
10617 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
10618 inst.reloc.pc_rel = 1;
10619 }
10620 else
10621 {
10622 /* Generate a 16-bit opcode. */
10623 inst.instruction = THUMB_OP16 (inst.instruction);
10624 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10625 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
10626 inst.reloc.pc_rel = 1;
10627
10628 inst.instruction |= Rd << 4;
10629 }
10630 }
10631
10632 /* Arithmetic instructions for which there is just one 16-bit
10633 instruction encoding, and it allows only two low registers.
10634 For maximal compatibility with ARM syntax, we allow three register
10635 operands even when Thumb-32 instructions are not available, as long
10636 as the first two are identical. For instance, both "sbc r0,r1" and
10637 "sbc r0,r0,r1" are allowed. */
10638 static void
10639 do_t_arit3 (void)
10640 {
10641 int Rd, Rs, Rn;
10642
10643 Rd = inst.operands[0].reg;
10644 Rs = (inst.operands[1].present
10645 ? inst.operands[1].reg /* Rd, Rs, foo */
10646 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10647 Rn = inst.operands[2].reg;
10648
10649 reject_bad_reg (Rd);
10650 reject_bad_reg (Rs);
10651 if (inst.operands[2].isreg)
10652 reject_bad_reg (Rn);
10653
10654 if (unified_syntax)
10655 {
10656 if (!inst.operands[2].isreg)
10657 {
10658 /* For an immediate, we always generate a 32-bit opcode;
10659 section relaxation will shrink it later if possible. */
10660 inst.instruction = THUMB_OP32 (inst.instruction);
10661 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10662 inst.instruction |= Rd << 8;
10663 inst.instruction |= Rs << 16;
10664 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10665 }
10666 else
10667 {
10668 bfd_boolean narrow;
10669
10670 /* See if we can do this with a 16-bit instruction. */
10671 if (THUMB_SETS_FLAGS (inst.instruction))
10672 narrow = !in_it_block ();
10673 else
10674 narrow = in_it_block ();
10675
10676 if (Rd > 7 || Rn > 7 || Rs > 7)
10677 narrow = FALSE;
10678 if (inst.operands[2].shifted)
10679 narrow = FALSE;
10680 if (inst.size_req == 4)
10681 narrow = FALSE;
10682
10683 if (narrow
10684 && Rd == Rs)
10685 {
10686 inst.instruction = THUMB_OP16 (inst.instruction);
10687 inst.instruction |= Rd;
10688 inst.instruction |= Rn << 3;
10689 return;
10690 }
10691
10692 /* If we get here, it can't be done in 16 bits. */
10693 constraint (inst.operands[2].shifted
10694 && inst.operands[2].immisreg,
10695 _("shift must be constant"));
10696 inst.instruction = THUMB_OP32 (inst.instruction);
10697 inst.instruction |= Rd << 8;
10698 inst.instruction |= Rs << 16;
10699 encode_thumb32_shifted_operand (2);
10700 }
10701 }
10702 else
10703 {
10704 /* On its face this is a lie - the instruction does set the
10705 flags. However, the only supported mnemonic in this mode
10706 says it doesn't. */
10707 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10708
10709 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10710 _("unshifted register required"));
10711 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10712 constraint (Rd != Rs,
10713 _("dest and source1 must be the same register"));
10714
10715 inst.instruction = THUMB_OP16 (inst.instruction);
10716 inst.instruction |= Rd;
10717 inst.instruction |= Rn << 3;
10718 }
10719 }
10720
10721 /* Similarly, but for instructions where the arithmetic operation is
10722 commutative, so we can allow either of them to be different from
10723 the destination operand in a 16-bit instruction. For instance, all
10724 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10725 accepted. */
10726 static void
10727 do_t_arit3c (void)
10728 {
10729 int Rd, Rs, Rn;
10730
10731 Rd = inst.operands[0].reg;
10732 Rs = (inst.operands[1].present
10733 ? inst.operands[1].reg /* Rd, Rs, foo */
10734 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10735 Rn = inst.operands[2].reg;
10736
10737 reject_bad_reg (Rd);
10738 reject_bad_reg (Rs);
10739 if (inst.operands[2].isreg)
10740 reject_bad_reg (Rn);
10741
10742 if (unified_syntax)
10743 {
10744 if (!inst.operands[2].isreg)
10745 {
10746 /* For an immediate, we always generate a 32-bit opcode;
10747 section relaxation will shrink it later if possible. */
10748 inst.instruction = THUMB_OP32 (inst.instruction);
10749 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10750 inst.instruction |= Rd << 8;
10751 inst.instruction |= Rs << 16;
10752 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10753 }
10754 else
10755 {
10756 bfd_boolean narrow;
10757
10758 /* See if we can do this with a 16-bit instruction. */
10759 if (THUMB_SETS_FLAGS (inst.instruction))
10760 narrow = !in_it_block ();
10761 else
10762 narrow = in_it_block ();
10763
10764 if (Rd > 7 || Rn > 7 || Rs > 7)
10765 narrow = FALSE;
10766 if (inst.operands[2].shifted)
10767 narrow = FALSE;
10768 if (inst.size_req == 4)
10769 narrow = FALSE;
10770
10771 if (narrow)
10772 {
10773 if (Rd == Rs)
10774 {
10775 inst.instruction = THUMB_OP16 (inst.instruction);
10776 inst.instruction |= Rd;
10777 inst.instruction |= Rn << 3;
10778 return;
10779 }
10780 if (Rd == Rn)
10781 {
10782 inst.instruction = THUMB_OP16 (inst.instruction);
10783 inst.instruction |= Rd;
10784 inst.instruction |= Rs << 3;
10785 return;
10786 }
10787 }
10788
10789 /* If we get here, it can't be done in 16 bits. */
10790 constraint (inst.operands[2].shifted
10791 && inst.operands[2].immisreg,
10792 _("shift must be constant"));
10793 inst.instruction = THUMB_OP32 (inst.instruction);
10794 inst.instruction |= Rd << 8;
10795 inst.instruction |= Rs << 16;
10796 encode_thumb32_shifted_operand (2);
10797 }
10798 }
10799 else
10800 {
10801 /* On its face this is a lie - the instruction does set the
10802 flags. However, the only supported mnemonic in this mode
10803 says it doesn't. */
10804 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10805
10806 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10807 _("unshifted register required"));
10808 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10809
10810 inst.instruction = THUMB_OP16 (inst.instruction);
10811 inst.instruction |= Rd;
10812
10813 if (Rd == Rs)
10814 inst.instruction |= Rn << 3;
10815 else if (Rd == Rn)
10816 inst.instruction |= Rs << 3;
10817 else
10818 constraint (1, _("dest must overlap one source register"));
10819 }
10820 }
10821
10822 static void
10823 do_t_bfc (void)
10824 {
10825 unsigned Rd;
10826 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10827 constraint (msb > 32, _("bit-field extends past end of register"));
10828 /* The instruction encoding stores the LSB and MSB,
10829 not the LSB and width. */
10830 Rd = inst.operands[0].reg;
10831 reject_bad_reg (Rd);
10832 inst.instruction |= Rd << 8;
10833 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10834 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10835 inst.instruction |= msb - 1;
10836 }
10837
10838 static void
10839 do_t_bfi (void)
10840 {
10841 int Rd, Rn;
10842 unsigned int msb;
10843
10844 Rd = inst.operands[0].reg;
10845 reject_bad_reg (Rd);
10846
10847 /* #0 in second position is alternative syntax for bfc, which is
10848 the same instruction but with REG_PC in the Rm field. */
10849 if (!inst.operands[1].isreg)
10850 Rn = REG_PC;
10851 else
10852 {
10853 Rn = inst.operands[1].reg;
10854 reject_bad_reg (Rn);
10855 }
10856
10857 msb = inst.operands[2].imm + inst.operands[3].imm;
10858 constraint (msb > 32, _("bit-field extends past end of register"));
10859 /* The instruction encoding stores the LSB and MSB,
10860 not the LSB and width. */
10861 inst.instruction |= Rd << 8;
10862 inst.instruction |= Rn << 16;
10863 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10864 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10865 inst.instruction |= msb - 1;
10866 }
10867
10868 static void
10869 do_t_bfx (void)
10870 {
10871 unsigned Rd, Rn;
10872
10873 Rd = inst.operands[0].reg;
10874 Rn = inst.operands[1].reg;
10875
10876 reject_bad_reg (Rd);
10877 reject_bad_reg (Rn);
10878
10879 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10880 _("bit-field extends past end of register"));
10881 inst.instruction |= Rd << 8;
10882 inst.instruction |= Rn << 16;
10883 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10884 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10885 inst.instruction |= inst.operands[3].imm - 1;
10886 }
10887
10888 /* ARM V5 Thumb BLX (argument parse)
10889 BLX <target_addr> which is BLX(1)
10890 BLX <Rm> which is BLX(2)
10891 Unfortunately, there are two different opcodes for this mnemonic.
10892 So, the insns[].value is not used, and the code here zaps values
10893 into inst.instruction.
10894
10895 ??? How to take advantage of the additional two bits of displacement
10896 available in Thumb32 mode? Need new relocation? */
10897
10898 static void
10899 do_t_blx (void)
10900 {
10901 set_it_insn_type_last ();
10902
10903 if (inst.operands[0].isreg)
10904 {
10905 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10906 /* We have a register, so this is BLX(2). */
10907 inst.instruction |= inst.operands[0].reg << 3;
10908 }
10909 else
10910 {
10911 /* No register. This must be BLX(1). */
10912 inst.instruction = 0xf000e800;
10913 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
10914 }
10915 }
10916
static void
do_t_branch (void)
{
  int opcode;
  int cond;
  int reloc;

  cond = inst.cond;
  set_it_insn_type (IF_INSIDE_IT_LAST_INSN);

  if (in_it_block ())
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
    }
  else
    cond = inst.cond;
    /* NOTE(review): inst.cond is re-read here rather than reusing the
       value captured before set_it_insn_type; presumably the IT state
       machinery can update it -- confirm before folding the two reads.  */

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  /* Prefer the 32-bit encoding when the size or the operand forces it.  */
  if (unified_syntax
      && (inst.size_req == 4
	  || (inst.size_req != 2
	      && (inst.operands[0].hasreloc
		  || inst.reloc.exp.X_op == O_constant))))
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
		      _("selected architecture does not support "
			"wide conditional branch instruction"));

	  gas_assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      /* 16-bit encoding; the conditional form carries the condition in
	 bits 11:8 and has a shorter branch range.  */
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }
  inst.reloc.type = reloc;
  inst.reloc.pc_rel = 1;
}
10978
10979 /* Actually do the work for Thumb state bkpt and hlt. The only difference
10980 between the two is the maximum immediate allowed - which is passed in
10981 RANGE. */
static void
do_t_bkpt_hlt1 (int range)
{
  /* RANGE is the largest immediate the encoding can hold (255 for BKPT,
     63 for HLT).  */
  constraint (inst.cond != COND_ALWAYS,
	      _("instruction is always unconditional"));
  if (inst.operands[0].present)
    {
      constraint (inst.operands[0].imm > range,
		  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }

  /* BKPT/HLT may appear anywhere relative to an IT block without
     affecting its state.  */
  set_it_insn_type (NEUTRAL_IT_INSN);
}
10996
static void
do_t_hlt (void)
{
  /* HLT takes an optional immediate in the range 0..63.  */
  do_t_bkpt_hlt1 (63);
}
11002
static void
do_t_bkpt (void)
{
  /* BKPT takes an optional immediate in the range 0..255.  */
  do_t_bkpt_hlt1 (255);
}
11008
static void
do_t_branch23 (void)
{
  /* BL/BLX with a BRANCH23 offset.  A branch must be the last
     instruction in an IT block.  */
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
11036
static void
do_t_bx (void)
{
  /* BX <Rm>.  A branch must be the last instruction in an IT block.  */
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
11046
11047 static void
11048 do_t_bxj (void)
11049 {
11050 int Rm;
11051
11052 set_it_insn_type_last ();
11053 Rm = inst.operands[0].reg;
11054 reject_bad_reg (Rm);
11055 inst.instruction |= Rm << 16;
11056 }
11057
11058 static void
11059 do_t_clz (void)
11060 {
11061 unsigned Rd;
11062 unsigned Rm;
11063
11064 Rd = inst.operands[0].reg;
11065 Rm = inst.operands[1].reg;
11066
11067 reject_bad_reg (Rd);
11068 reject_bad_reg (Rm);
11069
11070 inst.instruction |= Rd << 8;
11071 inst.instruction |= Rm << 16;
11072 inst.instruction |= Rm;
11073 }
11074
static void
do_t_cps (void)
{
  /* CPS is not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11081
static void
do_t_cpsi (void)
{
  /* CPSIE/CPSID: not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* 32-bit encoding: rebuild from scratch, carrying over the imod
	 (enable/disable) field from the 16-bit opcode template.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      /* NOTE(review): this feature test appears to reject the 'A' flag
	 on cores without the relevant extension -- confirm arm_ext_v1 is
	 the intended feature bit before changing it.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11109
11110 /* THUMB CPY instruction (argument parse). */
11111
11112 static void
11113 do_t_cpy (void)
11114 {
11115 if (inst.size_req == 4)
11116 {
11117 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11118 inst.instruction |= inst.operands[0].reg << 8;
11119 inst.instruction |= inst.operands[1].reg;
11120 }
11121 else
11122 {
11123 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11124 inst.instruction |= (inst.operands[0].reg & 0x7);
11125 inst.instruction |= inst.operands[1].reg << 3;
11126 }
11127 }
11128
11129 static void
11130 do_t_cbz (void)
11131 {
11132 set_it_insn_type (OUTSIDE_IT_INSN);
11133 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11134 inst.instruction |= inst.operands[0].reg;
11135 inst.reloc.pc_rel = 1;
11136 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11137 }
11138
static void
do_t_dbg (void)
{
  /* DBG <option>: the hint value goes straight into the low bits.  */
  inst.instruction |= inst.operands[0].imm;
}
11144
11145 static void
11146 do_t_div (void)
11147 {
11148 unsigned Rd, Rn, Rm;
11149
11150 Rd = inst.operands[0].reg;
11151 Rn = (inst.operands[1].present
11152 ? inst.operands[1].reg : Rd);
11153 Rm = inst.operands[2].reg;
11154
11155 reject_bad_reg (Rd);
11156 reject_bad_reg (Rn);
11157 reject_bad_reg (Rm);
11158
11159 inst.instruction |= Rd << 8;
11160 inst.instruction |= Rn << 16;
11161 inst.instruction |= Rm;
11162 }
11163
11164 static void
11165 do_t_hint (void)
11166 {
11167 if (unified_syntax && inst.size_req == 4)
11168 inst.instruction = THUMB_OP32 (inst.instruction);
11169 else
11170 inst.instruction = THUMB_OP16 (inst.instruction);
11171 }
11172
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  /* Record the IT state; bit 4 of the mask acts as a sentinel so the
     remaining block length can be tracked as it is shifted out.  */
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the lowest set bit gives the block length; the
	 then/else bits above it must be flipped when the condition's
	 parity bit is inverted.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
11215
11216 /* Helper function used for both push/pop and ldm/stm. */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 distinguishes load from store in the Thumb-2 encoding.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch, so it must close any IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Move the lone register's number into the ldr/str Rt field
	 (bits 15:12).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11279
/* Encode a Thumb load/store-multiple (LDMIA/STMIA/LDMDB/STMDB).
   operands[0] is the base register ('.writeback' set for '!');
   operands[1].imm is the register-list bitmask.  Selects a 16-bit
   encoding where the architecture permits one, otherwise defers to
   encode_thumb2_ldmstm for the 32-bit Thumb-2 form.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  /* operands[1].writeback is set when the reglist carried a '^' suffix.  */
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction: must be the IA form,
	 no explicit .w, and only low registers in the list.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  /* Bit for the base register within the register list.  */
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* The 16-bit encoding is only usable when writeback
		 matches the architectural rule: STMIA requires it;
		 LDMIA implies it exactly when Rn is not in the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  /* STMIA with Rn in the list but not lowest: the value
		     stored for Rn is UNKNOWN.  */
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  /* ffs gives the (1-based) index of the sole list bit.  */
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP base with writeback maps onto PUSH/POP.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      /* SP base, single register: use SP-relative STR/LDR.  */
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  /* No 16-bit form chosen: widen and use the generic encoder.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      /* Pre-UAL syntax: only 16-bit LDMIA/STMIA with low registers.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  /* 16-bit STMIA always writes back, whether requested or not.  */
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  /* 16-bit LDMIA writes back exactly when Rn is not loaded.  */
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11407
11408 static void
11409 do_t_ldrex (void)
11410 {
11411 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11412 || inst.operands[1].postind || inst.operands[1].writeback
11413 || inst.operands[1].immisreg || inst.operands[1].shifted
11414 || inst.operands[1].negative,
11415 BAD_ADDR_MODE);
11416
11417 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11418
11419 inst.instruction |= inst.operands[0].reg << 12;
11420 inst.instruction |= inst.operands[1].reg << 16;
11421 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11422 }
11423
11424 static void
11425 do_t_ldrexd (void)
11426 {
11427 if (!inst.operands[1].present)
11428 {
11429 constraint (inst.operands[0].reg == REG_LR,
11430 _("r14 not allowed as first register "
11431 "when second register is omitted"));
11432 inst.operands[1].reg = inst.operands[0].reg + 1;
11433 }
11434 constraint (inst.operands[0].reg == inst.operands[1].reg,
11435 BAD_OVERLAP);
11436
11437 inst.instruction |= inst.operands[0].reg << 12;
11438 inst.instruction |= inst.operands[1].reg << 8;
11439 inst.instruction |= inst.operands[2].reg << 16;
11440 }
11441
/* Encode a Thumb single-register load/store (LDR/STR and the
   byte/halfword/signed variants).  operands[0] is Rt, operands[1]
   the address (or an =literal).  Chooses between the many 16-bit
   encodings and the 32-bit Thumb-2 form, recording a relaxable
   opcode when the final size depends on the resolved offset.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load into PC ends any enclosing IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* =literal form: try to turn it into MOV/MVN or a literal
	     pool load; done if that succeeds.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      /* Candidate for a 16-bit encoding: simple pre-indexed address,
	 low Rt, a 16-bit base mnemonic and no explicit .w.  */
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- or SP-relative forms use dedicated opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* Size not forced: let the relaxation machinery widen
		   the insn later if the offset needs it.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Pre-UAL syntax from here on: only 16-bit encodings exist.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC/SP-relative: word-sized, immediate offset only.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Rewrite the immediate-offset opcode into the register-offset one.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11628
11629 static void
11630 do_t_ldstd (void)
11631 {
11632 if (!inst.operands[1].present)
11633 {
11634 inst.operands[1].reg = inst.operands[0].reg + 1;
11635 constraint (inst.operands[0].reg == REG_LR,
11636 _("r14 not allowed here"));
11637 constraint (inst.operands[0].reg == REG_R12,
11638 _("r12 not allowed here"));
11639 }
11640
11641 if (inst.operands[2].writeback
11642 && (inst.operands[0].reg == inst.operands[2].reg
11643 || inst.operands[1].reg == inst.operands[2].reg))
11644 as_warn (_("base register written back, and overlaps "
11645 "one of transfer registers"));
11646
11647 inst.instruction |= inst.operands[0].reg << 12;
11648 inst.instruction |= inst.operands[1].reg << 8;
11649 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11650 }
11651
11652 static void
11653 do_t_ldstt (void)
11654 {
11655 inst.instruction |= inst.operands[0].reg << 12;
11656 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
11657 }
11658
11659 static void
11660 do_t_mla (void)
11661 {
11662 unsigned Rd, Rn, Rm, Ra;
11663
11664 Rd = inst.operands[0].reg;
11665 Rn = inst.operands[1].reg;
11666 Rm = inst.operands[2].reg;
11667 Ra = inst.operands[3].reg;
11668
11669 reject_bad_reg (Rd);
11670 reject_bad_reg (Rn);
11671 reject_bad_reg (Rm);
11672 reject_bad_reg (Ra);
11673
11674 inst.instruction |= Rd << 8;
11675 inst.instruction |= Rn << 16;
11676 inst.instruction |= Rm;
11677 inst.instruction |= Ra << 12;
11678 }
11679
11680 static void
11681 do_t_mlal (void)
11682 {
11683 unsigned RdLo, RdHi, Rn, Rm;
11684
11685 RdLo = inst.operands[0].reg;
11686 RdHi = inst.operands[1].reg;
11687 Rn = inst.operands[2].reg;
11688 Rm = inst.operands[3].reg;
11689
11690 reject_bad_reg (RdLo);
11691 reject_bad_reg (RdHi);
11692 reject_bad_reg (Rn);
11693 reject_bad_reg (Rm);
11694
11695 inst.instruction |= RdLo << 12;
11696 inst.instruction |= RdHi << 8;
11697 inst.instruction |= Rn << 16;
11698 inst.instruction |= Rm;
11699 }
11700
11701 static void
11702 do_t_mov_cmp (void)
11703 {
11704 unsigned Rn, Rm;
11705
11706 Rn = inst.operands[0].reg;
11707 Rm = inst.operands[1].reg;
11708
11709 if (Rn == REG_PC)
11710 set_it_insn_type_last ();
11711
11712 if (unified_syntax)
11713 {
11714 int r0off = (inst.instruction == T_MNEM_mov
11715 || inst.instruction == T_MNEM_movs) ? 8 : 16;
11716 unsigned long opcode;
11717 bfd_boolean narrow;
11718 bfd_boolean low_regs;
11719
11720 low_regs = (Rn <= 7 && Rm <= 7);
11721 opcode = inst.instruction;
11722 if (in_it_block ())
11723 narrow = opcode != T_MNEM_movs;
11724 else
11725 narrow = opcode != T_MNEM_movs || low_regs;
11726 if (inst.size_req == 4
11727 || inst.operands[1].shifted)
11728 narrow = FALSE;
11729
11730 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
11731 if (opcode == T_MNEM_movs && inst.operands[1].isreg
11732 && !inst.operands[1].shifted
11733 && Rn == REG_PC
11734 && Rm == REG_LR)
11735 {
11736 inst.instruction = T2_SUBS_PC_LR;
11737 return;
11738 }
11739
11740 if (opcode == T_MNEM_cmp)
11741 {
11742 constraint (Rn == REG_PC, BAD_PC);
11743 if (narrow)
11744 {
11745 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11746 but valid. */
11747 warn_deprecated_sp (Rm);
11748 /* R15 was documented as a valid choice for Rm in ARMv6,
11749 but as UNPREDICTABLE in ARMv7. ARM's proprietary
11750 tools reject R15, so we do too. */
11751 constraint (Rm == REG_PC, BAD_PC);
11752 }
11753 else
11754 reject_bad_reg (Rm);
11755 }
11756 else if (opcode == T_MNEM_mov
11757 || opcode == T_MNEM_movs)
11758 {
11759 if (inst.operands[1].isreg)
11760 {
11761 if (opcode == T_MNEM_movs)
11762 {
11763 reject_bad_reg (Rn);
11764 reject_bad_reg (Rm);
11765 }
11766 else if (narrow)
11767 {
11768 /* This is mov.n. */
11769 if ((Rn == REG_SP || Rn == REG_PC)
11770 && (Rm == REG_SP || Rm == REG_PC))
11771 {
11772 as_tsktsk (_("Use of r%u as a source register is "
11773 "deprecated when r%u is the destination "
11774 "register."), Rm, Rn);
11775 }
11776 }
11777 else
11778 {
11779 /* This is mov.w. */
11780 constraint (Rn == REG_PC, BAD_PC);
11781 constraint (Rm == REG_PC, BAD_PC);
11782 constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
11783 }
11784 }
11785 else
11786 reject_bad_reg (Rn);
11787 }
11788
11789 if (!inst.operands[1].isreg)
11790 {
11791 /* Immediate operand. */
11792 if (!in_it_block () && opcode == T_MNEM_mov)
11793 narrow = 0;
11794 if (low_regs && narrow)
11795 {
11796 inst.instruction = THUMB_OP16 (opcode);
11797 inst.instruction |= Rn << 8;
11798 if (inst.size_req == 2)
11799 {
11800 if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11801 || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
11802 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
11803 }
11804 else
11805 inst.relax = opcode;
11806 }
11807 else
11808 {
11809 inst.instruction = THUMB_OP32 (inst.instruction);
11810 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11811 inst.instruction |= Rn << r0off;
11812 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11813 }
11814 }
11815 else if (inst.operands[1].shifted && inst.operands[1].immisreg
11816 && (inst.instruction == T_MNEM_mov
11817 || inst.instruction == T_MNEM_movs))
11818 {
11819 /* Register shifts are encoded as separate shift instructions. */
11820 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
11821
11822 if (in_it_block ())
11823 narrow = !flags;
11824 else
11825 narrow = flags;
11826
11827 if (inst.size_req == 4)
11828 narrow = FALSE;
11829
11830 if (!low_regs || inst.operands[1].imm > 7)
11831 narrow = FALSE;
11832
11833 if (Rn != Rm)
11834 narrow = FALSE;
11835
11836 switch (inst.operands[1].shift_kind)
11837 {
11838 case SHIFT_LSL:
11839 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
11840 break;
11841 case SHIFT_ASR:
11842 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
11843 break;
11844 case SHIFT_LSR:
11845 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
11846 break;
11847 case SHIFT_ROR:
11848 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
11849 break;
11850 default:
11851 abort ();
11852 }
11853
11854 inst.instruction = opcode;
11855 if (narrow)
11856 {
11857 inst.instruction |= Rn;
11858 inst.instruction |= inst.operands[1].imm << 3;
11859 }
11860 else
11861 {
11862 if (flags)
11863 inst.instruction |= CONDS_BIT;
11864
11865 inst.instruction |= Rn << 8;
11866 inst.instruction |= Rm << 16;
11867 inst.instruction |= inst.operands[1].imm;
11868 }
11869 }
11870 else if (!narrow)
11871 {
11872 /* Some mov with immediate shift have narrow variants.
11873 Register shifts are handled above. */
11874 if (low_regs && inst.operands[1].shifted
11875 && (inst.instruction == T_MNEM_mov
11876 || inst.instruction == T_MNEM_movs))
11877 {
11878 if (in_it_block ())
11879 narrow = (inst.instruction == T_MNEM_mov);
11880 else
11881 narrow = (inst.instruction == T_MNEM_movs);
11882 }
11883
11884 if (narrow)
11885 {
11886 switch (inst.operands[1].shift_kind)
11887 {
11888 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
11889 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
11890 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
11891 default: narrow = FALSE; break;
11892 }
11893 }
11894
11895 if (narrow)
11896 {
11897 inst.instruction |= Rn;
11898 inst.instruction |= Rm << 3;
11899 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11900 }
11901 else
11902 {
11903 inst.instruction = THUMB_OP32 (inst.instruction);
11904 inst.instruction |= Rn << r0off;
11905 encode_thumb32_shifted_operand (1);
11906 }
11907 }
11908 else
11909 switch (inst.instruction)
11910 {
11911 case T_MNEM_mov:
11912 /* In v4t or v5t a move of two lowregs produces unpredictable
11913 results. Don't allow this. */
11914 if (low_regs)
11915 {
11916 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
11917 "MOV Rd, Rs with two low registers is not "
11918 "permitted on this architecture");
11919 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
11920 arm_ext_v6);
11921 }
11922
11923 inst.instruction = T_OPCODE_MOV_HR;
11924 inst.instruction |= (Rn & 0x8) << 4;
11925 inst.instruction |= (Rn & 0x7);
11926 inst.instruction |= Rm << 3;
11927 break;
11928
11929 case T_MNEM_movs:
11930 /* We know we have low registers at this point.
11931 Generate LSLS Rd, Rs, #0. */
11932 inst.instruction = T_OPCODE_LSL_I;
11933 inst.instruction |= Rn;
11934 inst.instruction |= Rm << 3;
11935 break;
11936
11937 case T_MNEM_cmp:
11938 if (low_regs)
11939 {
11940 inst.instruction = T_OPCODE_CMP_LR;
11941 inst.instruction |= Rn;
11942 inst.instruction |= Rm << 3;
11943 }
11944 else
11945 {
11946 inst.instruction = T_OPCODE_CMP_HR;
11947 inst.instruction |= (Rn & 0x8) << 4;
11948 inst.instruction |= (Rn & 0x7);
11949 inst.instruction |= Rm << 3;
11950 }
11951 break;
11952 }
11953 return;
11954 }
11955
11956 inst.instruction = THUMB_OP16 (inst.instruction);
11957
11958 /* PR 10443: Do not silently ignore shifted operands. */
11959 constraint (inst.operands[1].shifted,
11960 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
11961
11962 if (inst.operands[1].isreg)
11963 {
11964 if (Rn < 8 && Rm < 8)
11965 {
11966 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
11967 since a MOV instruction produces unpredictable results. */
11968 if (inst.instruction == T_OPCODE_MOV_I8)
11969 inst.instruction = T_OPCODE_ADD_I3;
11970 else
11971 inst.instruction = T_OPCODE_CMP_LR;
11972
11973 inst.instruction |= Rn;
11974 inst.instruction |= Rm << 3;
11975 }
11976 else
11977 {
11978 if (inst.instruction == T_OPCODE_MOV_I8)
11979 inst.instruction = T_OPCODE_MOV_HR;
11980 else
11981 inst.instruction = T_OPCODE_CMP_HR;
11982 do_t_cpy ();
11983 }
11984 }
11985 else
11986 {
11987 constraint (Rn > 7,
11988 _("only lo regs allowed with immediate"));
11989 inst.instruction |= Rn << 8;
11990 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
11991 }
11992 }
11993
11994 static void
11995 do_t_mov16 (void)
11996 {
11997 unsigned Rd;
11998 bfd_vma imm;
11999 bfd_boolean top;
12000
12001 top = (inst.instruction & 0x00800000) != 0;
12002 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
12003 {
12004 constraint (top, _(":lower16: not allowed this instruction"));
12005 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
12006 }
12007 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
12008 {
12009 constraint (!top, _(":upper16: not allowed this instruction"));
12010 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
12011 }
12012
12013 Rd = inst.operands[0].reg;
12014 reject_bad_reg (Rd);
12015
12016 inst.instruction |= Rd << 8;
12017 if (inst.reloc.type == BFD_RELOC_UNUSED)
12018 {
12019 imm = inst.reloc.exp.X_add_number;
12020 inst.instruction |= (imm & 0xf000) << 4;
12021 inst.instruction |= (imm & 0x0800) << 15;
12022 inst.instruction |= (imm & 0x0700) << 4;
12023 inst.instruction |= (imm & 0x00ff);
12024 }
12025 }
12026
/* Encode Thumb MVN/MVNS/TST/CMN (and CMP reaches here too, per the
   REG_PC check below) with a register, shifted register or immediate
   second operand.  Picks the 16-bit encoding where available.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN variants put the destination at bit 8, the others put the
	 first operand (Rn) at bit 16.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	/* Flag-setting form: 16-bit only outside an IT block.  */
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-UAL: only the 16-bit, unshifted, low-register form.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12106
/* Encode Thumb MRS Rd, <spec_reg>.  operands[1] is either a banked
   register (isreg set, value pre-packed by the parser) or a PSR
   flags immediate.  VFP VMRS aliases are dispatched first.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register.  NOTE(review): the field layout of BR is
	 assumed from the masks below (bits 16-19 = SYSm high part,
	 0x300 = m/m1 bits, SPSR_BIT selects SPSR) — verify against
	 the parser that builds this value.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
						   "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12154
/* Encode Thumb MSR <spec_reg>, Rn.  operands[0] is the special
   register (banked register or PSR-flags form); operands[1] must be
   a register — the Thumb encoding has no immediate variant.  VFP
   VMSR aliases are dispatched first.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* Banked-register destinations arrive with isreg set; otherwise the
     parser supplies a PSR-flags value in .imm.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      /* M profile without DSP only allows the _f flags; with DSP the
	 _s flags are also permitted.  */
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12201
/* Encode Thumb MUL/MULS Rd, Rn {, Rm}.  When Rm is omitted it
   defaults to Rd (two-operand form).  The 16-bit encoding requires
   Rd to overlap one of the sources and all registers to be low.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	/* 16-bit MUL sets flags, so MULS is narrow only outside IT.  */
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      /* The 16-bit form has only two register fields; Rd must equal
	 one of the sources, and the other source goes in bits 3-5.  */
      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12264
12265 static void
12266 do_t_mull (void)
12267 {
12268 unsigned RdLo, RdHi, Rn, Rm;
12269
12270 RdLo = inst.operands[0].reg;
12271 RdHi = inst.operands[1].reg;
12272 Rn = inst.operands[2].reg;
12273 Rm = inst.operands[3].reg;
12274
12275 reject_bad_reg (RdLo);
12276 reject_bad_reg (RdHi);
12277 reject_bad_reg (Rn);
12278 reject_bad_reg (Rm);
12279
12280 inst.instruction |= RdLo << 12;
12281 inst.instruction |= RdHi << 8;
12282 inst.instruction |= Rn << 16;
12283 inst.instruction |= Rm;
12284
12285 if (RdLo == RdHi)
12286 as_tsktsk (_("rdhi and rdlo must be different"));
12287 }
12288
12289 static void
12290 do_t_nop (void)
12291 {
12292 set_it_insn_type (NEUTRAL_IT_INSN);
12293
12294 if (unified_syntax)
12295 {
12296 if (inst.size_req == 4 || inst.operands[0].imm > 15)
12297 {
12298 inst.instruction = THUMB_OP32 (inst.instruction);
12299 inst.instruction |= inst.operands[0].imm;
12300 }
12301 else
12302 {
12303 /* PR9722: Check for Thumb2 availability before
12304 generating a thumb2 nop instruction. */
12305 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
12306 {
12307 inst.instruction = THUMB_OP16 (inst.instruction);
12308 inst.instruction |= inst.operands[0].imm << 4;
12309 }
12310 else
12311 inst.instruction = 0x46c0;
12312 }
12313 }
12314 else
12315 {
12316 constraint (inst.operands[0].present,
12317 _("Thumb does not support NOP with hints"));
12318 inst.instruction = 0x46c0;
12319 }
12320 }
12321
12322 static void
12323 do_t_neg (void)
12324 {
12325 if (unified_syntax)
12326 {
12327 bfd_boolean narrow;
12328
12329 if (THUMB_SETS_FLAGS (inst.instruction))
12330 narrow = !in_it_block ();
12331 else
12332 narrow = in_it_block ();
12333 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12334 narrow = FALSE;
12335 if (inst.size_req == 4)
12336 narrow = FALSE;
12337
12338 if (!narrow)
12339 {
12340 inst.instruction = THUMB_OP32 (inst.instruction);
12341 inst.instruction |= inst.operands[0].reg << 8;
12342 inst.instruction |= inst.operands[1].reg << 16;
12343 }
12344 else
12345 {
12346 inst.instruction = THUMB_OP16 (inst.instruction);
12347 inst.instruction |= inst.operands[0].reg;
12348 inst.instruction |= inst.operands[1].reg << 3;
12349 }
12350 }
12351 else
12352 {
12353 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12354 BAD_HIREG);
12355 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12356
12357 inst.instruction = THUMB_OP16 (inst.instruction);
12358 inst.instruction |= inst.operands[0].reg;
12359 inst.instruction |= inst.operands[1].reg << 3;
12360 }
12361 }
12362
static void
do_t_orn (void)
{
  /* Encode Thumb-2 ORN (OR NOT), register or immediate form.
     The second source operand defaults to Rd when omitted.  */
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      /* Immediate form: switch the opcode to the modified-immediate
	 encoding and let the fixup machinery encode the constant.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      /* Register form only allows a constant shift amount.  */
      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
12396
static void
do_t_pkhbt (void)
{
  /* Encode Thumb-2 PKHBT (pack halfword, bottom/top).  The optional
     fourth operand is an LSL shift applied to Rm.  */
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      /* Shift amount is split: imm3 in bits 14-12, imm2 in bits 7-6.  */
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}
12422
static void
do_t_pkhtb (void)
{
  /* Encode Thumb-2 PKHTB.  With no shift this is the same operation
     as PKHBT with the source registers exchanged, so encode it that
     way; with a shift, clear the tb bit and reuse the PKHBT path.  */
  if (!inst.operands[3].present)
    {
      unsigned Rtmp;

      inst.instruction &= ~0x00000020;

      /* PR 10168.  Swap the Rm and Rn registers.  */
      Rtmp = inst.operands[1].reg;
      inst.operands[1].reg = inst.operands[2].reg;
      inst.operands[2].reg = Rtmp;
    }
  do_t_pkhbt ();
}
12439
static void
do_t_pld (void)
{
  /* Encode a Thumb-2 preload (PLD/PLDW/PLI).  A register index must
     not be SP or PC.  */
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12448
static void
do_t_push_pop (void)
{
  /* Encode Thumb PUSH/POP.  Use a 16-bit encoding when the register
     list allows it, otherwise fall back to the 32-bit LDM/STM form.  */
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  /* Low registers only: plain 16-bit encoding.  */
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1 << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus LR (push) or PC (pop): 16-bit encoding
	 with the extra-register bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else needs the 32-bit multiple-load/store with
	 SP (reg 13) as the base and writeback.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12481
12482 static void
12483 do_t_rbit (void)
12484 {
12485 unsigned Rd, Rm;
12486
12487 Rd = inst.operands[0].reg;
12488 Rm = inst.operands[1].reg;
12489
12490 reject_bad_reg (Rd);
12491 reject_bad_reg (Rm);
12492
12493 inst.instruction |= Rd << 8;
12494 inst.instruction |= Rm << 16;
12495 inst.instruction |= Rm;
12496 }
12497
static void
do_t_rev (void)
{
  /* Encode Thumb REV/REV16/REVSH.  Prefer the 16-bit encoding for
     low registers; otherwise use the 32-bit one (unified syntax
     only).  */
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      /* In the 32-bit encoding Rm occupies both bits 19-16 and 3-0.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
12526
12527 static void
12528 do_t_rrx (void)
12529 {
12530 unsigned Rd, Rm;
12531
12532 Rd = inst.operands[0].reg;
12533 Rm = inst.operands[1].reg;
12534
12535 reject_bad_reg (Rd);
12536 reject_bad_reg (Rm);
12537
12538 inst.instruction |= Rd << 8;
12539 inst.instruction |= Rm;
12540 }
12541
static void
do_t_rsb (void)
{
  /* Encode Thumb RSB (reverse subtract).  The middle source operand
     defaults to Rd when omitted.  */
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit; that determines which
	 IT-block context permits the 16-bit encoding.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Only RSB with a literal #0 has a 16-bit equivalent (NEG).  */
      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* 32-bit modified-immediate encoding.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12596
12597 static void
12598 do_t_setend (void)
12599 {
12600 if (warn_on_deprecated
12601 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12602 as_tsktsk (_("setend use is deprecated for ARMv8"));
12603
12604 set_it_insn_type (OUTSIDE_IT_INSN);
12605 if (inst.operands[0].imm)
12606 inst.instruction |= 0x8;
12607 }
12608
static void
do_t_shift (void)
{
  /* Encode a Thumb shift instruction (ASR, LSL, LSR, ROR, flag-setting
     or not).  Picks a narrow 16-bit encoding when operands and IT state
     allow; the wide immediate-shift form is really MOV{S} Rd, Rm,
     <shift> #n.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* 16-bit shifts set flags outside an IT block and leave them
	 alone inside one.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit ROR-by-immediate encoding.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* 16-bit register shift requires Rd == Rn and a low shift reg.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Immediate shift: encode as MOV{S} with a shifted
		 register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: only 16-bit encodings exist.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12756
12757 static void
12758 do_t_simd (void)
12759 {
12760 unsigned Rd, Rn, Rm;
12761
12762 Rd = inst.operands[0].reg;
12763 Rn = inst.operands[1].reg;
12764 Rm = inst.operands[2].reg;
12765
12766 reject_bad_reg (Rd);
12767 reject_bad_reg (Rn);
12768 reject_bad_reg (Rm);
12769
12770 inst.instruction |= Rd << 8;
12771 inst.instruction |= Rn << 16;
12772 inst.instruction |= Rm;
12773 }
12774
12775 static void
12776 do_t_simd2 (void)
12777 {
12778 unsigned Rd, Rn, Rm;
12779
12780 Rd = inst.operands[0].reg;
12781 Rm = inst.operands[1].reg;
12782 Rn = inst.operands[2].reg;
12783
12784 reject_bad_reg (Rd);
12785 reject_bad_reg (Rn);
12786 reject_bad_reg (Rm);
12787
12788 inst.instruction |= Rd << 8;
12789 inst.instruction |= Rn << 16;
12790 inst.instruction |= Rm;
12791 }
12792
static void
do_t_smc (void)
{
  /* Encode Thumb-2 SMC (secure monitor call).  The immediate is split
     across three fields of the 32-bit encoding.  */
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_it_insn_type_last ();
}
12808
12809 static void
12810 do_t_hvc (void)
12811 {
12812 unsigned int value = inst.reloc.exp.X_add_number;
12813
12814 inst.reloc.type = BFD_RELOC_UNUSED;
12815 inst.instruction |= (value & 0x0fff);
12816 inst.instruction |= (value & 0xf000) << 4;
12817 }
12818
static void
do_t_ssat_usat (int bias)
{
  /* Shared encoder for Thumb-2 SSAT/USAT.  BIAS is subtracted from
     the saturation position operand: 1 for SSAT (which encodes
     position - 1), 0 for USAT.  */
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  /* The sh bit selects ASR over LSL.  */
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000; /* sh bit.  */

	  /* Shift amount is split: imm3 in bits 14-12, imm2 in
	     bits 7-6.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
12856
static void
do_t_ssat (void)
{
  /* SSAT encodes the saturation position minus one, hence bias 1.  */
  do_t_ssat_usat (1);
}
12862
12863 static void
12864 do_t_ssat16 (void)
12865 {
12866 unsigned Rd, Rn;
12867
12868 Rd = inst.operands[0].reg;
12869 Rn = inst.operands[2].reg;
12870
12871 reject_bad_reg (Rd);
12872 reject_bad_reg (Rn);
12873
12874 inst.instruction |= Rd << 8;
12875 inst.instruction |= inst.operands[1].imm - 1;
12876 inst.instruction |= Rn << 16;
12877 }
12878
static void
do_t_strex (void)
{
  /* Encode Thumb-2 STREX.  The address operand must be a plain
     register with an optional immediate offset: no post-index,
     writeback, register offset, shift or negative offset.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset is resolved later as an unsigned 8-bit fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
12895
static void
do_t_strexd (void)
{
  /* Encode Thumb-2 STREXD.  If the second transfer register is
     omitted it defaults to the register after the first.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  /* The status register must not overlap any of the other three.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
12912
12913 static void
12914 do_t_sxtah (void)
12915 {
12916 unsigned Rd, Rn, Rm;
12917
12918 Rd = inst.operands[0].reg;
12919 Rn = inst.operands[1].reg;
12920 Rm = inst.operands[2].reg;
12921
12922 reject_bad_reg (Rd);
12923 reject_bad_reg (Rn);
12924 reject_bad_reg (Rm);
12925
12926 inst.instruction |= Rd << 8;
12927 inst.instruction |= Rn << 16;
12928 inst.instruction |= Rm;
12929 inst.instruction |= inst.operands[3].imm << 4;
12930 }
12931
static void
do_t_sxth (void)
{
  /* Encode Thumb SXTH-family extend.  The 16-bit encoding is only
     usable for low registers with no rotation.  */
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  /* inst.instruction <= 0xffff means we started from a 16-bit
     opcode template.  */
  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Rotation selector goes in bits 5-4.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
12967
static void
do_t_swi (void)
{
  /* Encode Thumb SVC/SWI.  The immediate is handled via a fixup.  */
  /* We have to do the following check manually as ARM_EXT_OS only applies
     to ARM_EXT_V6M.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
	  /* This only applies to the v6m, however, not later architectures.  */
	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
	as_bad (_("SVC is not permitted on this architecture"));
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
    }

  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
12984
static void
do_t_tb (void)
{
  /* Encode Thumb-2 TBB/TBH (table branch).  Bit 4 of the opcode
     distinguishes the halfword (TBH) variant.  */
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* Table branches must be the last instruction in an IT block.  */
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only TBH takes the LSL #1 shifted index.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13006
static void
do_t_udf (void)
{
  /* Encode Thumb UDF (permanently undefined).  The immediate defaults
     to zero; values above 255 force the 32-bit encoding.  */
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      /* 16-bit immediate split: high 4 bits into 19-16, low 12 into
	 11-0.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  /* UDF may appear anywhere relative to an IT block.  */
  set_it_insn_type (NEUTRAL_IT_INSN);
}
13029
13030
static void
do_t_usat (void)
{
  /* USAT encodes the saturation position directly, hence bias 0
     (SSAT uses position - 1).  */
  do_t_ssat_usat (0);
}
13036
13037 static void
13038 do_t_usat16 (void)
13039 {
13040 unsigned Rd, Rn;
13041
13042 Rd = inst.operands[0].reg;
13043 Rn = inst.operands[2].reg;
13044
13045 reject_bad_reg (Rd);
13046 reject_bad_reg (Rn);
13047
13048 inst.instruction |= Rd << 8;
13049 inst.instruction |= inst.operands[1].imm;
13050 inst.instruction |= Rn << 16;
13051 }
13052
13053 /* Neon instruction encoder helpers. */
13054
13055 /* Encodings for the different types for various Neon opcodes. */
13056
13057 /* An "invalid" code for the following tables. */
13058 #define N_INV -1u
13059
13060 struct neon_tab_entry
13061 {
13062 unsigned integer;
13063 unsigned float_or_poly;
13064 unsigned scalar_or_imm;
13065 };
13066
/* Map overloaded Neon opcodes to their respective encodings.  Each X()
   entry lists the base opcode bits for the integer, float-or-polynomial
   and scalar-or-immediate forms of one mnemonic; N_INV marks a form
   that does not exist.  The same table is expanded twice below, once
   to build the N_MNEM_* enum and once to build neon_enc_tab[].  */
#define NEON_ENC_TAB							\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)

/* Enumerate the mnemonics: N_MNEM_vabd, N_MNEM_vmax, ...  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The table itself, indexed by the enum above.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
13158
/* Do not use these macros; instead, use NEON_ENCODE defined below.
   Several logical accessors deliberately alias the same table column.
   The top nibble of the original opcode is preserved by the SINGLE /
   DOUBLE / FPV8 variants.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction with the encoding for the requested form
   of the current mnemonic and note that a Neon encoding was used.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)

/* Reject a Neon type suffix (e.g. ".s32") on an instruction that
   never went through NEON_ENCODE.  Expands inside encoder functions,
   hence the bare 'return'.  */
#define check_neon_suffixes						\
  do								\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
13194
/* Define shapes for instruction operands.  The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE)

/* Expand to names like NS_DDD, NS_QQI, ...  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4

enum neon_shape_class
{
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Class of each shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X

enum neon_shape_el
{
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  32,
  64,
  128,
  0,
  32,
  32,
  0
};

/* Per-shape operand count and element kinds, used to drive
   neon_select_shape.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13332
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit.  If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  Note that the low bits are reused: N_DBL etc. only
   have their modifier meaning when N_EQK is also set.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Common groupings of the element-type bits above.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13390
13391 /* Select a "shape" for the current instruction (describing register types or
13392 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13393 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13394 function of operand parsing, so this function doesn't need to be called.
13395 Shapes should be listed in order of decreasing length. */
13396
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  /* Select a "shape" for the current instruction from the NS_NULL-
     terminated candidate list.  Returns the first candidate whose
     element kinds match inst.operands exactly, or NS_NULL (after
     reporting an error) if none fits.  */
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  /* Each element kind is identified by a distinct combination
	     of the isreg/isvec/isquad/issingle/isscalar flags set by
	     operand parsing.  */
	  switch (neon_shape_tab[shape].el[j])
	    {
	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists are not checked here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13488
13489 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13490 means the Q bit should be set). */
13491
13492 static int
13493 neon_quad (enum neon_shape shape)
13494 {
13495 return neon_shape_class[shape] == SC_QUAD;
13496 }
13497
13498 static void
13499 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13500 unsigned *g_size)
13501 {
13502 /* Allow modification to be made to types which are constrained to be
13503 based on the key element, based on bits set alongside N_EQK. */
13504 if ((typebits & N_EQK) != 0)
13505 {
13506 if ((typebits & N_HLF) != 0)
13507 *g_size /= 2;
13508 else if ((typebits & N_DBL) != 0)
13509 *g_size *= 2;
13510 if ((typebits & N_SGN) != 0)
13511 *g_type = NT_signed;
13512 else if ((typebits & N_UNS) != 0)
13513 *g_type = NT_unsigned;
13514 else if ((typebits & N_INT) != 0)
13515 *g_type = NT_integer;
13516 else if ((typebits & N_FLT) != 0)
13517 *g_type = NT_float;
13518 else if ((typebits & N_SIZ) != 0)
13519 *g_type = NT_untyped;
13520 }
13521 }
13522
13523 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13524 operand type, i.e. the single type specified in a Neon instruction when it
13525 is the only one given. */
13526
13527 static struct neon_type_el
13528 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13529 {
13530 struct neon_type_el dest = *key;
13531
13532 gas_assert ((thisarg & N_EQK) != 0);
13533
13534 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13535
13536 return dest;
13537 }
13538
13539 /* Convert Neon type and size into compact bitmask representation. */
13540
13541 static enum neon_type_mask
13542 type_chk_of_el_type (enum neon_el_type type, unsigned size)
13543 {
13544 switch (type)
13545 {
13546 case NT_untyped:
13547 switch (size)
13548 {
13549 case 8: return N_8;
13550 case 16: return N_16;
13551 case 32: return N_32;
13552 case 64: return N_64;
13553 default: ;
13554 }
13555 break;
13556
13557 case NT_integer:
13558 switch (size)
13559 {
13560 case 8: return N_I8;
13561 case 16: return N_I16;
13562 case 32: return N_I32;
13563 case 64: return N_I64;
13564 default: ;
13565 }
13566 break;
13567
13568 case NT_float:
13569 switch (size)
13570 {
13571 case 16: return N_F16;
13572 case 32: return N_F32;
13573 case 64: return N_F64;
13574 default: ;
13575 }
13576 break;
13577
13578 case NT_poly:
13579 switch (size)
13580 {
13581 case 8: return N_P8;
13582 case 16: return N_P16;
13583 case 64: return N_P64;
13584 default: ;
13585 }
13586 break;
13587
13588 case NT_signed:
13589 switch (size)
13590 {
13591 case 8: return N_S8;
13592 case 16: return N_S16;
13593 case 32: return N_S32;
13594 case 64: return N_S64;
13595 default: ;
13596 }
13597 break;
13598
13599 case NT_unsigned:
13600 switch (size)
13601 {
13602 case 8: return N_U8;
13603 case 16: return N_U16;
13604 case 32: return N_U32;
13605 case 64: return N_U64;
13606 default: ;
13607 }
13608 break;
13609
13610 default: ;
13611 }
13612
13613 return N_UTYP;
13614 }
13615
13616 /* Convert compact Neon bitmask type representation to a type and size. Only
13617 handles the case where a single bit is set in the mask. */
13618
13619 static int
13620 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13621 enum neon_type_mask mask)
13622 {
13623 if ((mask & N_EQK) != 0)
13624 return FAIL;
13625
13626 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13627 *size = 8;
13628 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13629 *size = 16;
13630 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13631 *size = 32;
13632 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13633 *size = 64;
13634 else
13635 return FAIL;
13636
13637 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13638 *type = NT_signed;
13639 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13640 *type = NT_unsigned;
13641 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13642 *type = NT_integer;
13643 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13644 *type = NT_untyped;
13645 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13646 *type = NT_poly;
13647 else if ((mask & (N_F16 | N_F32 | N_F64)) != 0)
13648 *type = NT_float;
13649 else
13650 return FAIL;
13651
13652 return SUCCESS;
13653 }
13654
13655 /* Modify a bitmask of allowed types. This is only needed for type
13656 relaxation. */
13657
13658 static unsigned
13659 modify_types_allowed (unsigned allowed, unsigned mods)
13660 {
13661 unsigned size;
13662 enum neon_el_type type;
13663 unsigned destmask;
13664 int i;
13665
13666 destmask = 0;
13667
13668 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13669 {
13670 if (el_type_of_type_chk (&type, &size,
13671 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13672 {
13673 neon_modify_type_size (mods, &type, &size);
13674 destmask |= type_chk_of_el_type (type, size);
13675 }
13676 }
13677
13678 return destmask;
13679 }
13680
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE means the caller wants no type checking at all.  */
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type on the mnemonic and a type on any operand is an error.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      /* A single mnemonic type names the key element; derive the rest by
	 promotion through each operand's N_EQK modifier bits.  */
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 records the key operand's type, size and allowed-type mask;
     pass 1 validates every operand against the (possibly key-derived)
     constraints.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  /* On pass 1, an N_EQK operand's allowed mask is derived from the
	     key's mask with the modifier bits applied.  */
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* N_EQK operand: must match the key's type/size after the
		     modifier bits are applied.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
13869
13870 /* Neon-style VFP instruction forwarding. */
13871
13872 /* Thumb VFP instructions have 0xE in the condition field. */
13873
13874 static void
13875 do_vfp_cond_or_thumb (void)
13876 {
13877 inst.is_neon = 1;
13878
13879 if (thumb_mode)
13880 inst.instruction |= 0xe0000000;
13881 else
13882 inst.instruction |= inst.cond << 28;
13883 }
13884
13885 /* Look up and encode a simple mnemonic, for use as a helper function for the
13886 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13887 etc. It is assumed that operand parsing has already been done, and that the
13888 operands are in the form expected by the given opcode (this isn't necessarily
13889 the same as the form in which they were parsed, hence some massaging must
13890 take place before this function is called).
13891 Checks current arch version against that in the looked-up opcode. */
13892
13893 static void
13894 do_vfp_nsyn_opcode (const char *opname)
13895 {
13896 const struct asm_opcode *opcode;
13897
13898 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
13899
13900 if (!opcode)
13901 abort ();
13902
13903 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
13904 thumb_mode ? *opcode->tvariant : *opcode->avariant),
13905 _(BAD_FPU));
13906
13907 inst.is_neon = 1;
13908
13909 if (thumb_mode)
13910 {
13911 inst.instruction = opcode->tvalue;
13912 opcode->tencode ();
13913 }
13914 else
13915 {
13916 inst.instruction = (inst.cond << 28) | opcode->avalue;
13917 opcode->aencode ();
13918 }
13919 }
13920
13921 static void
13922 do_vfp_nsyn_add_sub (enum neon_shape rs)
13923 {
13924 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
13925
13926 if (rs == NS_FFF)
13927 {
13928 if (is_add)
13929 do_vfp_nsyn_opcode ("fadds");
13930 else
13931 do_vfp_nsyn_opcode ("fsubs");
13932 }
13933 else
13934 {
13935 if (is_add)
13936 do_vfp_nsyn_opcode ("faddd");
13937 else
13938 do_vfp_nsyn_opcode ("fsubd");
13939 }
13940 }
13941
13942 /* Check operand types to see if this is a VFP instruction, and if so call
13943 PFN (). */
13944
13945 static int
13946 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
13947 {
13948 enum neon_shape rs;
13949 struct neon_type_el et;
13950
13951 switch (args)
13952 {
13953 case 2:
13954 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13955 et = neon_check_type (2, rs,
13956 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13957 break;
13958
13959 case 3:
13960 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13961 et = neon_check_type (3, rs,
13962 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13963 break;
13964
13965 default:
13966 abort ();
13967 }
13968
13969 if (et.type != NT_invtype)
13970 {
13971 pfn (rs);
13972 return SUCCESS;
13973 }
13974
13975 inst.error = NULL;
13976 return FAIL;
13977 }
13978
13979 static void
13980 do_vfp_nsyn_mla_mls (enum neon_shape rs)
13981 {
13982 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
13983
13984 if (rs == NS_FFF)
13985 {
13986 if (is_mla)
13987 do_vfp_nsyn_opcode ("fmacs");
13988 else
13989 do_vfp_nsyn_opcode ("fnmacs");
13990 }
13991 else
13992 {
13993 if (is_mla)
13994 do_vfp_nsyn_opcode ("fmacd");
13995 else
13996 do_vfp_nsyn_opcode ("fnmacd");
13997 }
13998 }
13999
14000 static void
14001 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14002 {
14003 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14004
14005 if (rs == NS_FFF)
14006 {
14007 if (is_fma)
14008 do_vfp_nsyn_opcode ("ffmas");
14009 else
14010 do_vfp_nsyn_opcode ("ffnmas");
14011 }
14012 else
14013 {
14014 if (is_fma)
14015 do_vfp_nsyn_opcode ("ffmad");
14016 else
14017 do_vfp_nsyn_opcode ("ffnmad");
14018 }
14019 }
14020
14021 static void
14022 do_vfp_nsyn_mul (enum neon_shape rs)
14023 {
14024 if (rs == NS_FFF)
14025 do_vfp_nsyn_opcode ("fmuls");
14026 else
14027 do_vfp_nsyn_opcode ("fmuld");
14028 }
14029
14030 static void
14031 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14032 {
14033 int is_neg = (inst.instruction & 0x80) != 0;
14034 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
14035
14036 if (rs == NS_FF)
14037 {
14038 if (is_neg)
14039 do_vfp_nsyn_opcode ("fnegs");
14040 else
14041 do_vfp_nsyn_opcode ("fabss");
14042 }
14043 else
14044 {
14045 if (is_neg)
14046 do_vfp_nsyn_opcode ("fnegd");
14047 else
14048 do_vfp_nsyn_opcode ("fabsd");
14049 }
14050 }
14051
14052 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14053 insns belong to Neon, and are handled elsewhere. */
14054
14055 static void
14056 do_vfp_nsyn_ldm_stm (int is_dbmode)
14057 {
14058 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14059 if (is_ldm)
14060 {
14061 if (is_dbmode)
14062 do_vfp_nsyn_opcode ("fldmdbs");
14063 else
14064 do_vfp_nsyn_opcode ("fldmias");
14065 }
14066 else
14067 {
14068 if (is_dbmode)
14069 do_vfp_nsyn_opcode ("fstmdbs");
14070 else
14071 do_vfp_nsyn_opcode ("fstmias");
14072 }
14073 }
14074
14075 static void
14076 do_vfp_nsyn_sqrt (void)
14077 {
14078 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
14079 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
14080
14081 if (rs == NS_FF)
14082 do_vfp_nsyn_opcode ("fsqrts");
14083 else
14084 do_vfp_nsyn_opcode ("fsqrtd");
14085 }
14086
14087 static void
14088 do_vfp_nsyn_div (void)
14089 {
14090 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
14091 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14092 N_F32 | N_F64 | N_KEY | N_VFP);
14093
14094 if (rs == NS_FFF)
14095 do_vfp_nsyn_opcode ("fdivs");
14096 else
14097 do_vfp_nsyn_opcode ("fdivd");
14098 }
14099
14100 static void
14101 do_vfp_nsyn_nmul (void)
14102 {
14103 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
14104 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14105 N_F32 | N_F64 | N_KEY | N_VFP);
14106
14107 if (rs == NS_FFF)
14108 {
14109 NEON_ENCODE (SINGLE, inst);
14110 do_vfp_sp_dyadic ();
14111 }
14112 else
14113 {
14114 NEON_ENCODE (DOUBLE, inst);
14115 do_vfp_dp_rd_rn_rm ();
14116 }
14117 do_vfp_cond_or_thumb ();
14118 }
14119
/* Encode vcmp/vcmpe.  A register second operand gives the two-register
   compare; otherwise the immediate-zero form is used, rewritten to the
   internal vcmpz/vcmpez pseudo-mnemonics.  */

static void
do_vfp_nsyn_cmp (void)
{
  if (inst.operands[1].isreg)
    {
      /* Register-register compare (single or double precision).  */
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

      if (rs == NS_FF)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare with an immediate: switch to the "z" pseudo-mnemonic by
	 adjusting the opcode-enum value held in the instruction word.  */
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();
}
14169
/* Shift the parsed operand up to slot 1 and synthesise "sp!" (SP with
   writeback) as operand 0, as required by the fldm/fstm forms used to
   implement vpush/vpop.  */

static void
nsyn_insert_sp (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
  inst.operands[0].reg = REG_SP;
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].present = 1;
}
14180
14181 static void
14182 do_vfp_nsyn_push (void)
14183 {
14184 nsyn_insert_sp ();
14185 if (inst.operands[1].issingle)
14186 do_vfp_nsyn_opcode ("fstmdbs");
14187 else
14188 do_vfp_nsyn_opcode ("fstmdbd");
14189 }
14190
14191 static void
14192 do_vfp_nsyn_pop (void)
14193 {
14194 nsyn_insert_sp ();
14195 if (inst.operands[1].issingle)
14196 do_vfp_nsyn_opcode ("fldmias");
14197 else
14198 do_vfp_nsyn_opcode ("fldmiad");
14199 }
14200
14201 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14202 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14203
14204 static void
14205 neon_dp_fixup (struct arm_it* insn)
14206 {
14207 unsigned int i = insn->instruction;
14208 insn->is_neon = 1;
14209
14210 if (thumb_mode)
14211 {
14212 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14213 if (i & (1 << 24))
14214 i |= 1 << 28;
14215
14216 i &= ~(1 << 24);
14217
14218 i |= 0xef000000;
14219 }
14220 else
14221 i |= 0xf2000000;
14222
14223 insn->instruction = i;
14224 }
14225
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3), i.e. the two-bit encoding of the element size.  */

static unsigned
neon_logbits (unsigned x)
{
  unsigned bitpos = (unsigned) ffs (x);
  return bitpos - 4;
}
14234
14235 #define LOW4(R) ((R) & 0xf)
14236 #define HI1(R) (((R) >> 4) & 1)
14237
14238 /* Encode insns with bit pattern:
14239
14240 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14241 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14242
14243 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14244 different meaning for some instruction. */
14245
14246 static void
14247 neon_three_same (int isquad, int ubit, int size)
14248 {
14249 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14250 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14251 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14252 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14253 inst.instruction |= LOW4 (inst.operands[2].reg);
14254 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14255 inst.instruction |= (isquad != 0) << 6;
14256 inst.instruction |= (ubit != 0) << 24;
14257 if (size != -1)
14258 inst.instruction |= neon_logbits (size) << 20;
14259
14260 neon_dp_fixup (&inst);
14261 }
14262
14263 /* Encode instructions of the form:
14264
14265 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14266 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14267
14268 Don't write size if SIZE == -1. */
14269
14270 static void
14271 neon_two_same (int qbit, int ubit, int size)
14272 {
14273 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14274 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14275 inst.instruction |= LOW4 (inst.operands[1].reg);
14276 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14277 inst.instruction |= (qbit != 0) << 6;
14278 inst.instruction |= (ubit != 0) << 24;
14279
14280 if (size != -1)
14281 inst.instruction |= neon_logbits (size) << 18;
14282
14283 neon_dp_fixup (&inst);
14284 }
14285
14286 /* Neon instruction encoders, in approximate order of appearance. */
14287
14288 static void
14289 do_neon_dyadic_i_su (void)
14290 {
14291 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14292 struct neon_type_el et = neon_check_type (3, rs,
14293 N_EQK, N_EQK, N_SU_32 | N_KEY);
14294 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14295 }
14296
14297 static void
14298 do_neon_dyadic_i64_su (void)
14299 {
14300 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14301 struct neon_type_el et = neon_check_type (3, rs,
14302 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14303 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14304 }
14305
14306 static void
14307 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
14308 unsigned immbits)
14309 {
14310 unsigned size = et.size >> 3;
14311 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14312 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14313 inst.instruction |= LOW4 (inst.operands[1].reg);
14314 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14315 inst.instruction |= (isquad != 0) << 6;
14316 inst.instruction |= immbits << 16;
14317 inst.instruction |= (size >> 3) << 7;
14318 inst.instruction |= (size & 0x7) << 19;
14319 if (write_ubit)
14320 inst.instruction |= (uval != 0) << 24;
14321
14322 neon_dp_fixup (&inst);
14323 }
14324
14325 static void
14326 do_neon_shl_imm (void)
14327 {
14328 if (!inst.operands[2].isreg)
14329 {
14330 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14331 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
14332 int imm = inst.operands[2].imm;
14333
14334 constraint (imm < 0 || (unsigned)imm >= et.size,
14335 _("immediate out of range for shift"));
14336 NEON_ENCODE (IMMED, inst);
14337 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14338 }
14339 else
14340 {
14341 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14342 struct neon_type_el et = neon_check_type (3, rs,
14343 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14344 unsigned int tmp;
14345
14346 /* VSHL/VQSHL 3-register variants have syntax such as:
14347 vshl.xx Dd, Dm, Dn
14348 whereas other 3-register operations encoded by neon_three_same have
14349 syntax like:
14350 vadd.xx Dd, Dn, Dm
14351 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14352 here. */
14353 tmp = inst.operands[2].reg;
14354 inst.operands[2].reg = inst.operands[1].reg;
14355 inst.operands[1].reg = tmp;
14356 NEON_ENCODE (INTEGER, inst);
14357 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14358 }
14359 }
14360
14361 static void
14362 do_neon_qshl_imm (void)
14363 {
14364 if (!inst.operands[2].isreg)
14365 {
14366 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14367 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
14368 int imm = inst.operands[2].imm;
14369
14370 constraint (imm < 0 || (unsigned)imm >= et.size,
14371 _("immediate out of range for shift"));
14372 NEON_ENCODE (IMMED, inst);
14373 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
14374 }
14375 else
14376 {
14377 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14378 struct neon_type_el et = neon_check_type (3, rs,
14379 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14380 unsigned int tmp;
14381
14382 /* See note in do_neon_shl_imm. */
14383 tmp = inst.operands[2].reg;
14384 inst.operands[2].reg = inst.operands[1].reg;
14385 inst.operands[1].reg = tmp;
14386 NEON_ENCODE (INTEGER, inst);
14387 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14388 }
14389 }
14390
14391 static void
14392 do_neon_rshl (void)
14393 {
14394 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14395 struct neon_type_el et = neon_check_type (3, rs,
14396 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14397 unsigned int tmp;
14398
14399 tmp = inst.operands[2].reg;
14400 inst.operands[2].reg = inst.operands[1].reg;
14401 inst.operands[1].reg = tmp;
14402 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14403 }
14404
/* Choose the "cmode" field encoding for a bitwise-logic immediate of width
   SIZE bits, storing the reduced 8-bit immediate through *IMMBITS.  Returns
   the cmode value, or FAIL (via first_error) if the immediate cannot be
   encoded.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit immediates: a single non-zero byte in any of the four byte
	 positions, each with its own cmode.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Fall back to a 16-bit encoding if both halves are identical.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit immediates: a single non-zero byte in either half.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

 bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
14460
/* Encode Neon bitwise-logic instructions (register form, plus the immediate
   forms of VBIC/VORR and the VAND/VORN pseudo-instructions).  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form; element types are irrelevant for bitwise ops.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form, either two-operand (Dd, #imm) or the three-operand
	 variant where the destination repeats the first source.  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
14546
14547 static void
14548 do_neon_bitfield (void)
14549 {
14550 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14551 neon_check_type (3, rs, N_IGNORE_TYPE);
14552 neon_three_same (neon_quad (rs), 0, -1);
14553 }
14554
/* Encode a three-same dyadic operation whose allowed types may mix float and
   integer.  UBIT_MEANING names the element type that sets the U bit in the
   integer encoding; TYPES is the allowed-type mask for the key operand;
   DESTBITS adds modifier bits to the destination's N_EQK constraint.  */

static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
		  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
					    types | N_KEY);
  if (et.type == NT_float)
    {
      NEON_ENCODE (FLOAT, inst);
      /* Float variants don't use the size field here.  */
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}
14573
/* Dyadic op accepting signed, unsigned and float 32-bit-and-below types;
   U bit set for unsigned.  */

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14579
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14587
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14595
/* Flag bits for vfp_or_neon_is_neon, selecting which checks/fixups to
   perform.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	 /* Check/fix up the condition-code field.  */
  NEON_CHECK_ARCH = 2,	 /* Require the Neon v1 feature.  */
  NEON_CHECK_ARCH8 = 4	 /* Require the ARMv8 Neon feature.  */
};
14602
14603 /* Call this function if an instruction which may have belonged to the VFP or
14604 Neon instruction sets, but turned out to be a Neon instruction (due to the
14605 operand types involved, etc.). We have to check and/or fix-up a couple of
14606 things:
14607
14608 - Make sure the user hasn't attempted to make a Neon instruction
14609 conditional.
14610 - Alter the value in the condition code field if necessary.
14611 - Make sure that the arch supports Neon instructions.
14612
14613 Which of these operations take place depends on bits from enum
14614 vfp_or_neon_is_neon_bits.
14615
14616 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14617 current instruction's condition is COND_ALWAYS, the condition field is
14618 changed to inst.uncond_value. This is necessary because instructions shared
14619 between VFP and Neon may be conditional for the VFP variants only, and the
14620 unconditional Neon version must have, e.g., 0xF in the condition field. */
14621
14622 static int
14623 vfp_or_neon_is_neon (unsigned check)
14624 {
14625 /* Conditions are always legal in Thumb mode (IT blocks). */
14626 if (!thumb_mode && (check & NEON_CHECK_CC))
14627 {
14628 if (inst.cond != COND_ALWAYS)
14629 {
14630 first_error (_(BAD_COND));
14631 return FAIL;
14632 }
14633 if (inst.uncond_value != -1)
14634 inst.instruction |= inst.uncond_value << 28;
14635 }
14636
14637 if ((check & NEON_CHECK_ARCH)
14638 && !mark_feature_used (&fpu_neon_ext_v1))
14639 {
14640 first_error (_(BAD_FPU));
14641 return FAIL;
14642 }
14643
14644 if ((check & NEON_CHECK_ARCH8)
14645 && !mark_feature_used (&fpu_neon_ext_armv8))
14646 {
14647 first_error (_(BAD_FPU));
14648 return FAIL;
14649 }
14650
14651 return SUCCESS;
14652 }
14653
14654 static void
14655 do_neon_addsub_if_i (void)
14656 {
14657 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
14658 return;
14659
14660 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14661 return;
14662
14663 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14664 affected if we specify unsigned args. */
14665 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
14666 }
14667
14668 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14669 result to be:
14670 V<op> A,B (A is operand 0, B is operand 2)
14671 to mean:
14672 V<op> A,B,A
14673 not:
14674 V<op> A,B,B
14675 so handle that case specially. */
14676
14677 static void
14678 neon_exchange_operands (void)
14679 {
14680 void *scratch = alloca (sizeof (inst.operands[0]));
14681 if (inst.operands[1].present)
14682 {
14683 /* Swap operands[1] and operands[2]. */
14684 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14685 inst.operands[1] = inst.operands[2];
14686 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14687 }
14688 else
14689 {
14690 inst.operands[1] = inst.operands[2];
14691 inst.operands[2] = inst.operands[0];
14692 }
14693 }
14694
14695 static void
14696 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
14697 {
14698 if (inst.operands[2].isreg)
14699 {
14700 if (invert)
14701 neon_exchange_operands ();
14702 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
14703 }
14704 else
14705 {
14706 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14707 struct neon_type_el et = neon_check_type (2, rs,
14708 N_EQK | N_SIZ, immtypes | N_KEY);
14709
14710 NEON_ENCODE (IMMED, inst);
14711 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14712 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14713 inst.instruction |= LOW4 (inst.operands[1].reg);
14714 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14715 inst.instruction |= neon_quad (rs) << 6;
14716 inst.instruction |= (et.type == NT_float) << 10;
14717 inst.instruction |= neon_logbits (et.size) << 18;
14718
14719 neon_dp_fixup (&inst);
14720 }
14721 }
14722
14723 static void
14724 do_neon_cmp (void)
14725 {
14726 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
14727 }
14728
14729 static void
14730 do_neon_cmp_inv (void)
14731 {
14732 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
14733 }
14734
14735 static void
14736 do_neon_ceq (void)
14737 {
14738 neon_compare (N_IF_32, N_IF_32, FALSE);
14739 }
14740
14741 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14742 scalars, which are encoded in 5 bits, M : Rm.
14743 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14744 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14745 index in M. */
14746
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned rm = NEON_SCALAR_REG (scalar);
  unsigned index = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].
     32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 16 && rm <= 7 && index <= 3)
    return rm | (index << 3);

  if (elsize == 32 && rm <= 15 && index <= 1)
    return rm | (index << 4);

  /* Any other size, or an out-of-range register/index.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
14772
14773 /* Encode multiply / multiply-accumulate scalar instructions. */
14774
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type. */
  if (et.type == NT_invtype)
    return;

  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  /* Vd in bits 15:12 with D in 22; Vn in 19:16 with N in 7.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  /* The scalar is encoded as M:Rm (see neon_scalar_for_mul).  */
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  /* F bit (float type) and element size.  */
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  /* Bit 24: callers pass neon_quad (rs) here, i.e. the Q flag.  */
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
14797
14798 static void
14799 do_neon_mac_maybe_scalar (void)
14800 {
14801 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
14802 return;
14803
14804 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14805 return;
14806
14807 if (inst.operands[2].isscalar)
14808 {
14809 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14810 struct neon_type_el et = neon_check_type (3, rs,
14811 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
14812 NEON_ENCODE (SCALAR, inst);
14813 neon_mul_mac (et, neon_quad (rs));
14814 }
14815 else
14816 {
14817 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14818 affected if we specify unsigned args. */
14819 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14820 }
14821 }
14822
14823 static void
14824 do_neon_fmac (void)
14825 {
14826 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
14827 return;
14828
14829 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14830 return;
14831
14832 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14833 }
14834
14835 static void
14836 do_neon_tst (void)
14837 {
14838 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14839 struct neon_type_el et = neon_check_type (3, rs,
14840 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
14841 neon_three_same (neon_quad (rs), 0, et.size);
14842 }
14843
14844 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14845 same types as the MAC equivalents. The polynomial type for this instruction
14846 is encoded the same as the integer type. */
14847
14848 static void
14849 do_neon_mul (void)
14850 {
14851 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
14852 return;
14853
14854 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14855 return;
14856
14857 if (inst.operands[2].isscalar)
14858 do_neon_mac_maybe_scalar ();
14859 else
14860 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
14861 }
14862
14863 static void
14864 do_neon_qdmulh (void)
14865 {
14866 if (inst.operands[2].isscalar)
14867 {
14868 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14869 struct neon_type_el et = neon_check_type (3, rs,
14870 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14871 NEON_ENCODE (SCALAR, inst);
14872 neon_mul_mac (et, neon_quad (rs));
14873 }
14874 else
14875 {
14876 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14877 struct neon_type_el et = neon_check_type (3, rs,
14878 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14879 NEON_ENCODE (INTEGER, inst);
14880 /* The U bit (rounding) comes from bit mask. */
14881 neon_three_same (neon_quad (rs), 0, et.size);
14882 }
14883 }
14884
14885 static void
14886 do_neon_fcmp_absolute (void)
14887 {
14888 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14889 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14890 /* Size field comes from bit mask. */
14891 neon_three_same (neon_quad (rs), 1, -1);
14892 }
14893
static void
do_neon_fcmp_absolute_inv (void)
{
  /* Inverted absolute compare: same opcode with the sources swapped.  */
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
14900
14901 static void
14902 do_neon_step (void)
14903 {
14904 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14905 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14906 neon_three_same (neon_quad (rs), 0, -1);
14907 }
14908
static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  /* Prefer the VFP encoding when the operands allow it (see
     do_vfp_nsyn_abs_neg).  */
  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);

  /* Vd in 15:12 (D in 22), Vm in 3:0 (M in 5); Q, F and size fields.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
14934
14935 static void
14936 do_neon_sli (void)
14937 {
14938 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14939 struct neon_type_el et = neon_check_type (2, rs,
14940 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14941 int imm = inst.operands[2].imm;
14942 constraint (imm < 0 || (unsigned)imm >= et.size,
14943 _("immediate out of range for insert"));
14944 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14945 }
14946
14947 static void
14948 do_neon_sri (void)
14949 {
14950 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14951 struct neon_type_el et = neon_check_type (2, rs,
14952 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14953 int imm = inst.operands[2].imm;
14954 constraint (imm < 1 || (unsigned)imm > et.size,
14955 _("immediate out of range for insert"));
14956 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
14957 }
14958
14959 static void
14960 do_neon_qshlu_imm (void)
14961 {
14962 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14963 struct neon_type_el et = neon_check_type (2, rs,
14964 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
14965 int imm = inst.operands[2].imm;
14966 constraint (imm < 0 || (unsigned)imm >= et.size,
14967 _("immediate out of range for shift"));
14968 /* Only encodes the 'U present' variant of the instruction.
14969 In this case, signed types have OP (bit 8) set to 0.
14970 Unsigned types have OP set to 1. */
14971 inst.instruction |= (et.type == NT_unsigned) << 8;
14972 /* The rest of the bits are the same as other immediate shifts. */
14973 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14974 }
14975
14976 static void
14977 do_neon_qmovn (void)
14978 {
14979 struct neon_type_el et = neon_check_type (2, NS_DQ,
14980 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14981 /* Saturating move where operands can be signed or unsigned, and the
14982 destination has the same signedness. */
14983 NEON_ENCODE (INTEGER, inst);
14984 if (et.type == NT_unsigned)
14985 inst.instruction |= 0xc0;
14986 else
14987 inst.instruction |= 0x80;
14988 neon_two_same (0, 1, et.size / 2);
14989 }
14990
14991 static void
14992 do_neon_qmovun (void)
14993 {
14994 struct neon_type_el et = neon_check_type (2, NS_DQ,
14995 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14996 /* Saturating move with unsigned results. Operands must be signed. */
14997 NEON_ENCODE (INTEGER, inst);
14998 neon_two_same (0, 1, et.size / 2);
14999 }
15000
15001 static void
15002 do_neon_rshift_sat_narrow (void)
15003 {
15004 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15005 or unsigned. If operands are unsigned, results must also be unsigned. */
15006 struct neon_type_el et = neon_check_type (2, NS_DQI,
15007 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15008 int imm = inst.operands[2].imm;
15009 /* This gets the bounds check, size encoding and immediate bits calculation
15010 right. */
15011 et.size /= 2;
15012
15013 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15014 VQMOVN.I<size> <Dd>, <Qm>. */
15015 if (imm == 0)
15016 {
15017 inst.operands[2].present = 0;
15018 inst.instruction = N_MNEM_vqmovn;
15019 do_neon_qmovn ();
15020 return;
15021 }
15022
15023 constraint (imm < 1 || (unsigned)imm > et.size,
15024 _("immediate out of range"));
15025 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
15026 }
15027
15028 static void
15029 do_neon_rshift_sat_narrow_u (void)
15030 {
15031 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15032 or unsigned. If operands are unsigned, results must also be unsigned. */
15033 struct neon_type_el et = neon_check_type (2, NS_DQI,
15034 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15035 int imm = inst.operands[2].imm;
15036 /* This gets the bounds check, size encoding and immediate bits calculation
15037 right. */
15038 et.size /= 2;
15039
15040 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15041 VQMOVUN.I<size> <Dd>, <Qm>. */
15042 if (imm == 0)
15043 {
15044 inst.operands[2].present = 0;
15045 inst.instruction = N_MNEM_vqmovun;
15046 do_neon_qmovun ();
15047 return;
15048 }
15049
15050 constraint (imm < 1 || (unsigned)imm > et.size,
15051 _("immediate out of range"));
15052 /* FIXME: The manual is kind of unclear about what value U should have in
15053 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15054 must be 1. */
15055 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
15056 }
15057
15058 static void
15059 do_neon_movn (void)
15060 {
15061 struct neon_type_el et = neon_check_type (2, NS_DQ,
15062 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15063 NEON_ENCODE (INTEGER, inst);
15064 neon_two_same (0, 1, et.size / 2);
15065 }
15066
15067 static void
15068 do_neon_rshift_narrow (void)
15069 {
15070 struct neon_type_el et = neon_check_type (2, NS_DQI,
15071 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15072 int imm = inst.operands[2].imm;
15073 /* This gets the bounds check, size encoding and immediate bits calculation
15074 right. */
15075 et.size /= 2;
15076
15077 /* If immediate is zero then we are a pseudo-instruction for
15078 VMOVN.I<size> <Dd>, <Qm> */
15079 if (imm == 0)
15080 {
15081 inst.operands[2].present = 0;
15082 inst.instruction = N_MNEM_vmovn;
15083 do_neon_movn ();
15084 return;
15085 }
15086
15087 constraint (imm < 1 || (unsigned)imm > et.size,
15088 _("immediate out of range for narrowing operation"));
15089 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
15090 }
15091
15092 static void
15093 do_neon_shll (void)
15094 {
15095 /* FIXME: Type checking when lengthening. */
15096 struct neon_type_el et = neon_check_type (2, NS_QDI,
15097 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
15098 unsigned imm = inst.operands[2].imm;
15099
15100 if (imm == et.size)
15101 {
15102 /* Maximum shift variant. */
15103 NEON_ENCODE (INTEGER, inst);
15104 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15105 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15106 inst.instruction |= LOW4 (inst.operands[1].reg);
15107 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15108 inst.instruction |= neon_logbits (et.size) << 18;
15109
15110 neon_dp_fixup (&inst);
15111 }
15112 else
15113 {
15114 /* A more-specific type check for non-max versions. */
15115 et = neon_check_type (2, NS_QDI,
15116 N_EQK | N_DBL, N_SU_32 | N_KEY);
15117 NEON_ENCODE (IMMED, inst);
15118 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
15119 }
15120 }
15121
15122 /* Check the various types for the VCVT instruction, and return which version
15123 the current instruction is. */
15124
/* X-macro table of VCVT conversion "flavours".  Each CVT_VAR row gives:
   the flavour-name suffix, the destination (operand 0) type bits, the
   source (operand 1) type bits, extra check bits (N_VFP, or the
   whole_reg/key locals of get_neon_cvt_flavour), and the names of the
   bitshift, plain and round-to-zero VFP opcodes (NULL where no such VFP
   form exists).  It is expanded several times below with different
   definitions of CVT_VAR.  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions. */ \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* VFP instructions. */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift. */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
15149
/* Expand the table into enumerator names: neon_cvt_flavour_s32_f32,
   neon_cvt_flavour_u32_f32, ...  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do. */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  /* No table entry matched the operand types.  */
  neon_cvt_flavour_invalid,
  /* Flavours from here on (f32_f64 onwards) are VFP-only.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15162
/* Determine which conversion flavour the current instruction's operand
   types select, by trying each CVT_FLAVOUR_VAR row in turn against
   neon_check_type.  Returns neon_cvt_flavour_invalid when none match.  */
static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
/* One trial per table row: probe the types, clearing any error left by
   a failed earlier probe before returning a successful match.  */
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width. Hack that
     here by making the size equal to the key (wider, in this case) operand. */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
15188
/* Rounding behaviour selectors for the VCVT/VRINT families; each value
   corresponds to the suffix letter of the mnemonic.  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,	/* Round to nearest, ties away from zero.  */
  neon_cvt_mode_n,	/* Round to nearest, ties to even.  */
  neon_cvt_mode_p,	/* Round towards plus infinity.  */
  neon_cvt_mode_m,	/* Round towards minus infinity.  */
  neon_cvt_mode_z,	/* Round towards zero.  */
  neon_cvt_mode_x,	/* Use FPSCR rounding mode (used for VCVTR here).  */
  neon_cvt_mode_r	/* Use FPSCR rounding mode.  NOTE(review): the
			   distinction from _x matters for VRINT variants
			   handled elsewhere -- confirm against callers.  */
};
15199
15200 /* Neon-syntax VFP conversions. */
15201
/* Emit a Neon-syntax VCVT as the equivalent legacy VFP opcode, chosen
   from the CVT_FLAVOUR_VAR table: the "bitshift" opcode column for the
   fixed-point (immediate) shapes, otherwise the plain opcode column.
   Flavours whose table entry is NULL emit nothing.  */
static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
    {
      /* Conversions with immediate bitshift. */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  /* Drop the (identical) source register so the VFP opcode sees
	     only the destination and the shift immediate.  */
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift. */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);
}
15245
/* Emit a round-towards-zero VCVT using the legacy VFP "...z" opcode
   from the CVT_FLAVOUR_VAR table; flavours with no such opcode (NULL
   entries) emit nothing.  */
static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15262
/* Encode an FPv8/ARMv8 VCVT{A,N,P,M} float-to-integer conversion:
   SZ selects a double-precision source, OP a signed result, and RM the
   rounding mode.  Must be outside an IT block.  */
static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  set_it_insn_type (OUTSIDE_IT_INSN);

  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* These encodings are unconditional (0xF condition field).  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
15320
/* Shared worker for all VCVT variants.  Dispatches between the legacy
   VFP encodings, the ARMv8 rounding-mode encodings, the Neon
   fixed-point and integer forms, and the Advanced SIMD half-precision
   forms, based on the operand shape and conversion flavour.  Note the
   goto from the NS_DDI/NS_QQI arm into the NS_DD/NS_QQ arm below.  */
static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	/* Neon fixed-point conversion (with a fraction-bits immediate).  */
	unsigned immbits;
	/* Opcode bits per flavour: s32_f32, u32_f32, (f32_s32 implicit
	   zero), f32_u32 -- indexed directly by the flavour value.  */
	unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	immbits = 32 - inst.operands[2].imm;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	inst.instruction |= immbits << 16;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* ARMv8 Advanced SIMD VCVT{A,N,P,M} with explicit rounding
	     mode; unconditional, and illegal inside an IT block.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
	  /* Plain Neon integer <-> float conversion.  Also reached via
	     goto from the fixed-point arm when the immediate is #0.  */
    int_encode:
	  {
	    /* Opcode bits per flavour, as for enctab above.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      /* F32->F16 narrows (NS_DQ); F16->F32 widens (NS_QD).  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
15469
15470 static void
15471 do_neon_cvtr (void)
15472 {
15473 do_neon_cvt_1 (neon_cvt_mode_x);
15474 }
15475
15476 static void
15477 do_neon_cvt (void)
15478 {
15479 do_neon_cvt_1 (neon_cvt_mode_z);
15480 }
15481
15482 static void
15483 do_neon_cvta (void)
15484 {
15485 do_neon_cvt_1 (neon_cvt_mode_a);
15486 }
15487
15488 static void
15489 do_neon_cvtn (void)
15490 {
15491 do_neon_cvt_1 (neon_cvt_mode_n);
15492 }
15493
15494 static void
15495 do_neon_cvtp (void)
15496 {
15497 do_neon_cvt_1 (neon_cvt_mode_p);
15498 }
15499
15500 static void
15501 do_neon_cvtm (void)
15502 {
15503 do_neon_cvt_1 (neon_cvt_mode_m);
15504 }
15505
/* Encode VCVTB/VCVTT.  T selects the top half of the single-precision
   register, TO means converting to half precision, IS_DOUBLE selects
   the double-precision forms (ARMv8 only).  */
static void
do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
{
  if (is_double)
    mark_feature_used (&fpu_vfp_ext_armv8);

  /* The D register sits on the destination side when converting from
     half precision, and on the source side when converting to it.  */
  encode_arm_vfp_reg (inst.operands[0].reg,
		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg,
		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= to ? 0x10000 : 0;
  inst.instruction |= t ? 0x80 : 0;
  inst.instruction |= is_double ? 0x100 : 0;
  do_vfp_cond_or_thumb ();
}
15521
/* Worker for VCVTB/VCVTT: probe the operand types in order to determine
   the direction (to/from half precision) and precision of the
   conversion, then hand off to do_neon_cvttb_2.  Each failed
   neon_check_type probe leaves inst.error set, so it is cleared before
   encoding a successful match.  */
static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_DF, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* F32 -> F16, single precision.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* F16 -> F32, single precision.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    return;
}
15562
15563 static void
15564 do_neon_cvtb (void)
15565 {
15566 do_neon_cvttb_1 (FALSE);
15567 }
15568
15569
15570 static void
15571 do_neon_cvtt (void)
15572 {
15573 do_neon_cvttb_1 (TRUE);
15574 }
15575
/* Encode the immediate form of VMOV/VMVN.  Finds a cmode/op pair that
   can represent the immediate; if none exists for the requested
   polarity, the immediate is inverted and the opposite instruction is
   tried before giving up.  */
static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
  op = (inst.instruction & (1 << 5)) != 0;

  /* 64-bit immediates arrive split across imm (low) and reg (high).  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only. */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm. */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Install the (possibly flipped) MOV/MVN bit.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
15627
/* Encode VMVN.  The register form is a two-register Neon operation;
   the immediate form is handled by neon_move_immediate (as the MVN
   variant of immediate VMOV).  */

static void
do_neon_mvn (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);
}
15650
/* Encode instructions of form:

  |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
  | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |

   Shared encoder for the long/wide/narrow Neon dyadic operations.
   ET is the checked element type (its signedness sets the U bit) and
   SIZE is the element size to encode in bits [21:20] (callers pass a
   halved size for narrowing operations).  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
15670
15671 static void
15672 do_neon_dyadic_long (void)
15673 {
15674 /* FIXME: Type checking for lengthening op. */
15675 struct neon_type_el et = neon_check_type (3, NS_QDD,
15676 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
15677 neon_mixed_length (et, et.size);
15678 }
15679
15680 static void
15681 do_neon_abal (void)
15682 {
15683 struct neon_type_el et = neon_check_type (3, NS_QDD,
15684 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
15685 neon_mixed_length (et, et.size);
15686 }
15687
/* Shared encoder for long multiply(-accumulate) operations whose third
   operand may be either a scalar or a vector register.  REGTYPES and
   SCALARTYPES are N_* masks giving the accepted types for the key
   (third) operand in each form.
   NOTE(review): REGTYPES is applied to the scalar (NS_QDS) form and
   SCALARTYPES to the register (NS_QDD) form — the parameter names
   appear swapped relative to their use; verify against callers before
   relying on the names.  */

static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
15706
/* Encode VMLAL/VMLSL-style long multiply-accumulate: the scalar form
   accepts S16/S32/U16/U32, the vector form any signed/unsigned
   8/16/32-bit element type.  */

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
15712
15713 static void
15714 do_neon_dyadic_wide (void)
15715 {
15716 struct neon_type_el et = neon_check_type (3, NS_QQD,
15717 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
15718 neon_mixed_length (et, et.size);
15719 }
15720
15721 static void
15722 do_neon_dyadic_narrow (void)
15723 {
15724 struct neon_type_el et = neon_check_type (3, NS_QDD,
15725 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
15726 /* Operand sign is unimportant, and the U bit is part of the opcode,
15727 so force the operand type to integer. */
15728 et.type = NT_integer;
15729 neon_mixed_length (et, et.size / 2);
15730 }
15731
/* Encode VQDMLAL/VQDMLSL/VQDMULL-style saturating long multiplies:
   signed 16/32-bit elements only, in both the scalar and the vector
   form.  */

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
15737
/* Encode VMULL.  The scalar form is handled by the shared long
   multiply-accumulate path; otherwise the integer or polynomial
   encoding is selected from the checked type.  The P64 form requires
   the ARMv8 crypto extension.  */

static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Force size 32 so neon_mixed_length emits the 0b10 size
	     field required for P64.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
15769
/* Encode VEXT (extract bytes from a pair of vectors).  The parsed
   immediate is in elements; convert it to the byte position encoded in
   bits [11:8] and check it fits within the (8- or 16-byte) vector.  */

static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
15791
/* Encode VREV16/VREV32/VREV64 (reverse elements within regions).  */

static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  /* Which VREVn variant this is, from bits [8:7] of the base opcode.  */
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
15808
/* Encode VDUP: either duplicate a scalar (Dn[x]) into every lane of a
   vector (Neon data-processing encoding), or duplicate an ARM core
   register into every lane (shared ARM/Thumb encoding).  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Scalar index, pre-shifted into position above the size bits.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* Element size selects the B/E bits of the encoding.  */
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
15859
15860 /* VMOV has particularly many variations. It can be one of:
15861 0. VMOV<c><q> <Qd>, <Qm>
15862 1. VMOV<c><q> <Dd>, <Dm>
15863 (Register operations, which are VORR with Rm = Rn.)
15864 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15865 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15866 (Immediate loads.)
15867 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15868 (ARM register to scalar.)
15869 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15870 (Two ARM registers to vector.)
15871 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15872 (Scalar to ARM register.)
15873 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15874 (Vector to two ARM registers.)
15875 8. VMOV.F32 <Sd>, <Sm>
15876 9. VMOV.F64 <Dd>, <Dm>
15877 (VFP register moves.)
15878 10. VMOV.F32 <Sd>, #imm
15879 11. VMOV.F64 <Dd>, #imm
15880 (VFP float immediate load.)
15881 12. VMOV <Rd>, <Sm>
15882 (VFP single to ARM reg.)
15883 13. VMOV <Sd>, <Rm>
15884 (ARM reg to VFP single.)
15885 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15886 (Two ARM regs to two VFP singles.)
15887 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15888 (Two VFP singles to two ARM regs.)
15889
15890 These cases can be disambiguated using neon_select_shape, except cases 1/9
15891 and 3/11 which depend on the operand type too.
15892
15893 All the encoded bits are hardcoded by this function.
15894
15895 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15896 Cases 5, 7 may be used with VFPv2 and above.
15897
15898 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15899 can specify a type where it doesn't make sense to, and is ignored). */
15900
/* Encode VMOV.  Handles every variant enumerated in the case table in
   the comment above this function; variants are disambiguated primarily
   by neon_select_shape, with an extra float-type check splitting cases
   1/9 (register move) and 3/11 (immediate load).  */

static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  /* VFP load-constant mnemonic selected for cases 10/11, if any.  */
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* VORR takes the source register in both Rn and Rm.  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	/* Only 32-bit transfers are available without Neon.  */
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	/* Only 32-bit transfers are available without Neon.  */
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Signedness selects between the sign/zero-extending forms.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      /* Only immediates representable in the 8-bit VFP format are
	 encodable.  */
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
16135
/* Encode V{R}SHR by immediate.  A shift count of zero is assembled as
   VMOV; otherwise the count is encoded as (element size - imm) per the
   right-shift immediate encoding.  */

static void
do_neon_rshift_round_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
16156
16157 static void
16158 do_neon_movl (void)
16159 {
16160 struct neon_type_el et = neon_check_type (2, NS_QD,
16161 N_EQK | N_DBL, N_SU_32 | N_KEY);
16162 unsigned sizebits = et.size >> 3;
16163 inst.instruction |= sizebits << 19;
16164 neon_two_same (0, et.type == NT_unsigned, -1);
16165 }
16166
16167 static void
16168 do_neon_trn (void)
16169 {
16170 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16171 struct neon_type_el et = neon_check_type (2, rs,
16172 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16173 NEON_ENCODE (INTEGER, inst);
16174 neon_two_same (neon_quad (rs), 1, et.size);
16175 }
16176
/* Encode VZIP/VUZP.  The 32-bit D-register forms are architecturally
   equivalent to VTRN.32 and are encoded as such.  */

static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
16192
16193 static void
16194 do_neon_sat_abs_neg (void)
16195 {
16196 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16197 struct neon_type_el et = neon_check_type (2, rs,
16198 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16199 neon_two_same (neon_quad (rs), 1, et.size);
16200 }
16201
16202 static void
16203 do_neon_pair_long (void)
16204 {
16205 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16206 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
16207 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16208 inst.instruction |= (et.type == NT_unsigned) << 7;
16209 neon_two_same (neon_quad (rs), 1, et.size);
16210 }
16211
16212 static void
16213 do_neon_recip_est (void)
16214 {
16215 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16216 struct neon_type_el et = neon_check_type (2, rs,
16217 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
16218 inst.instruction |= (et.type == NT_float) << 8;
16219 neon_two_same (neon_quad (rs), 1, et.size);
16220 }
16221
16222 static void
16223 do_neon_cls (void)
16224 {
16225 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16226 struct neon_type_el et = neon_check_type (2, rs,
16227 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16228 neon_two_same (neon_quad (rs), 1, et.size);
16229 }
16230
16231 static void
16232 do_neon_clz (void)
16233 {
16234 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16235 struct neon_type_el et = neon_check_type (2, rs,
16236 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16237 neon_two_same (neon_quad (rs), 1, et.size);
16238 }
16239
16240 static void
16241 do_neon_cnt (void)
16242 {
16243 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16244 struct neon_type_el et = neon_check_type (2, rs,
16245 N_EQK | N_INT, N_8 | N_KEY);
16246 neon_two_same (neon_quad (rs), 1, et.size);
16247 }
16248
16249 static void
16250 do_neon_swp (void)
16251 {
16252 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16253 neon_two_same (neon_quad (rs), 1, -1);
16254 }
16255
/* Encode VTBL/VTBX (table lookup / lookup-with-default).  The table is
   a list of 1-4 D registers; the length minus one goes in bits [9:8].  */

static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
16279
/* Encode VLDM/VSTM and their DB variants.  Single-precision register
   lists are delegated to the VFP encoder; double-precision lists encode
   an offset of two words per register.  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
16309
/* Encode VLDR/VSTR via the FLDS/FSTS/FLDD/FSTD synonyms, diagnosing
   deprecated (ARM, v7) or UNPREDICTABLE (Thumb) uses of PC as the
   address register in a store.  */

static void
do_neon_ldr_str (void)
{
  /* The L bit distinguishes loads from stores.  */
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
16342
/* "interleave" version also handles non-interleaving register VLD1/VST1
   instructions.  Validates the alignment specifier and the register
   list, then installs the "type" field in bits [11:8] from a table
   keyed on list layout and VLD<n>/VST<n> number.  */

static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Alignment is parsed into the high byte of the second operand's
     immediate; only certain (alignment, list length) pairs are legal.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
16411
/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
   otherwise.  The variable arguments are a list of pairs of legal (size, align)
   values, terminated with -1.  Returns SUCCESS if (SIZE, ALIGN) matches
   a legal pair (or no alignment was given), FAIL otherwise; on failure
   an error has been recorded via first_error.  */

static int
neon_alignment_bit (int size, int align, int *do_align, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No alignment specifier: nothing to encode, and trivially legal.  */
  if (!inst.operands[1].immisalign)
    {
      *do_align = 0;
      return SUCCESS;
    }

  va_start (ap, do_align);

  /* Scan (size, align) pairs until the -1 terminator or a match.  */
  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
	break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
	result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_align = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
16452
/* Encode single n-element structure to/from one lane VLD<n>/VST<n>
   instructions: validate list length, lane index, stride and alignment,
   then install the per-size alignment, stride and lane-index fields.  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  /* Alignment, parsed into the high byte of the address immediate.  */
  int align = inst.operands[1].imm >> 8;
  /* <n> of VLD<n>/VST<n>, minus one, from bits [9:8] of the bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
16537
/* Encode single n-element structure to all lanes VLD<n> instructions.
   Validates alignment and list length per variant and installs the
   size, stride and alignment fields.  */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* VLD1 to all lanes accepts a list of one or two registers; the
	 two-register form is flagged in bit 5.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use the reserved size
	   encoding 0b11.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_align << 4;
}
16611
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
16614
16615 static void
16616 do_neon_ldx_stx (void)
16617 {
16618 if (inst.operands[1].isreg)
16619 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
16620
16621 switch (NEON_LANE (inst.operands[0].imm))
16622 {
16623 case NEON_INTERLEAVE_LANES:
16624 NEON_ENCODE (INTERLV, inst);
16625 do_neon_ld_st_interleave ();
16626 break;
16627
16628 case NEON_ALL_LANES:
16629 NEON_ENCODE (DUP, inst);
16630 if (inst.instruction == N_INV)
16631 {
16632 first_error ("only loads support such operands");
16633 break;
16634 }
16635 do_neon_ld_dup ();
16636 break;
16637
16638 default:
16639 NEON_ENCODE (LANE, inst);
16640 do_neon_ld_st_lane ();
16641 }
16642
16643 /* L bit comes from bit mask. */
16644 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16645 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16646 inst.instruction |= inst.operands[1].reg << 16;
16647
16648 if (inst.operands[1].postind)
16649 {
16650 int postreg = inst.operands[1].imm & 0xf;
16651 constraint (!inst.operands[1].immisreg,
16652 _("post-index must be a register"));
16653 constraint (postreg == 0xd || postreg == 0xf,
16654 _("bad register for post-index"));
16655 inst.instruction |= postreg;
16656 }
16657 else
16658 {
16659 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
16660 constraint (inst.reloc.exp.X_op != O_constant
16661 || inst.reloc.exp.X_add_number != 0,
16662 BAD_ADDR_MODE);
16663
16664 if (inst.operands[1].writeback)
16665 {
16666 inst.instruction |= 0xd;
16667 }
16668 else
16669 inst.instruction |= 0xf;
16670 }
16671
16672 if (thumb_mode)
16673 inst.instruction |= 0xf9000000;
16674 else
16675 inst.instruction |= 0xf4000000;
16676 }
16677
16678 /* FP v8. */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  /* Single-precision shapes use the SP dyadic encoder; everything else
     goes through the DP three-register form.  */
  if (rs == NS_FFF)
    do_vfp_sp_dyadic ();
  else
    do_vfp_dp_rd_rn_rm ();

  /* Set bit 8 (sz) for double-precision operands.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  /* FP v8 instructions carry 0xF in bits [31:28].  */
  inst.instruction |= 0xf0000000;
}
16700
16701 static void
16702 do_vsel (void)
16703 {
16704 set_it_insn_type (OUTSIDE_IT_INSN);
16705
16706 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
16707 first_error (_("invalid instruction shape"));
16708 }
16709
16710 static void
16711 do_vmaxnm (void)
16712 {
16713 set_it_insn_type (OUTSIDE_IT_INSN);
16714
16715 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
16716 return;
16717
16718 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
16719 return;
16720
16721 neon_dyadic_misc (NT_untyped, N_F32, 0);
16722 }
16723
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* Try the scalar VFP type first.  */
  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The directed-rounding variants (A/N/P/M) may not appear in an
	 IT block.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      /* Fold the rounding mode into the opcode bits.  */
      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 distinguishes double precision.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Rounding-mode field; VRINTR has no Neon form.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
16806
/* Entry points for the VRINT family: each selects a rounding mode and
   defers to do_vrint_1.  Mode semantics per the ARM ARM.  */

static void
do_vrintx (void)
{
  /* VRINTX: round to integral, signalling inexactness.  */
  do_vrint_1 (neon_cvt_mode_x);
}

static void
do_vrintz (void)
{
  /* VRINTZ: round towards zero.  */
  do_vrint_1 (neon_cvt_mode_z);
}

static void
do_vrintr (void)
{
  /* VRINTR: round using the FPSCR rounding mode.  */
  do_vrint_1 (neon_cvt_mode_r);
}

static void
do_vrinta (void)
{
  /* VRINTA: round to nearest, ties away from zero.  */
  do_vrint_1 (neon_cvt_mode_a);
}

static void
do_vrintn (void)
{
  /* VRINTN: round to nearest, ties to even.  */
  do_vrint_1 (neon_cvt_mode_n);
}

static void
do_vrintp (void)
{
  /* VRINTP: round towards plus infinity.  */
  do_vrint_1 (neon_cvt_mode_p);
}

static void
do_vrintm (void)
{
  /* VRINTM: round towards minus infinity.  */
  do_vrint_1 (neon_cvt_mode_m);
}
16848
16849 /* Crypto v1 instructions. */
16850 static void
16851 do_crypto_2op_1 (unsigned elttype, int op)
16852 {
16853 set_it_insn_type (OUTSIDE_IT_INSN);
16854
16855 if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
16856 == NT_invtype)
16857 return;
16858
16859 inst.error = NULL;
16860
16861 NEON_ENCODE (INTEGER, inst);
16862 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16863 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16864 inst.instruction |= LOW4 (inst.operands[1].reg);
16865 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16866 if (op != -1)
16867 inst.instruction |= op << 6;
16868
16869 if (thumb_mode)
16870 inst.instruction |= 0xfc000000;
16871 else
16872 inst.instruction |= 0xf0000000;
16873 }
16874
16875 static void
16876 do_crypto_3op_1 (int u, int op)
16877 {
16878 set_it_insn_type (OUTSIDE_IT_INSN);
16879
16880 if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
16881 N_32 | N_UNT | N_KEY).type == NT_invtype)
16882 return;
16883
16884 inst.error = NULL;
16885
16886 NEON_ENCODE (INTEGER, inst);
16887 neon_three_same (1, u, 8 << op);
16888 }
16889
static void
do_aese (void)
{
  /* AESE: AES single-round encryption (op field 0).  */
  do_crypto_2op_1 (N_8, 0);
}

static void
do_aesd (void)
{
  /* AESD: AES single-round decryption (op field 1).  */
  do_crypto_2op_1 (N_8, 1);
}

static void
do_aesmc (void)
{
  /* AESMC: AES mix columns (op field 2).  */
  do_crypto_2op_1 (N_8, 2);
}

static void
do_aesimc (void)
{
  /* AESIMC: AES inverse mix columns (op field 3).  */
  do_crypto_2op_1 (N_8, 3);
}
16913
static void
do_sha1c (void)
{
  /* SHA1C: SHA1 hash update, choose function (u=0, op=0).  */
  do_crypto_3op_1 (0, 0);
}

static void
do_sha1p (void)
{
  /* SHA1P: SHA1 hash update, parity function (u=0, op=1).  */
  do_crypto_3op_1 (0, 1);
}

static void
do_sha1m (void)
{
  /* SHA1M: SHA1 hash update, majority function (u=0, op=2).  */
  do_crypto_3op_1 (0, 2);
}

static void
do_sha1su0 (void)
{
  /* SHA1SU0: SHA1 schedule update 0 (u=0, op=3).  */
  do_crypto_3op_1 (0, 3);
}

static void
do_sha256h (void)
{
  /* SHA256H: SHA256 hash update, part 1 (u=1, op=0).  */
  do_crypto_3op_1 (1, 0);
}

static void
do_sha256h2 (void)
{
  /* SHA256H2: SHA256 hash update, part 2 (u=1, op=1).  */
  do_crypto_3op_1 (1, 1);
}

static void
do_sha256su1 (void)
{
  /* SHA256SU1: SHA256 schedule update 1 (u=1, op=2).  */
  do_crypto_3op_1 (1, 2);
}

static void
do_sha1h (void)
{
  /* SHA1H: SHA1 fixed rotate; two-register form with no op field.  */
  do_crypto_2op_1 (N_32, -1);
}

static void
do_sha1su1 (void)
{
  /* SHA1SU1: SHA1 schedule update 1 (op field 0).  */
  do_crypto_2op_1 (N_32, 0);
}

static void
do_sha256su0 (void)
{
  /* SHA256SU0: SHA256 schedule update 0 (op field 1).  */
  do_crypto_2op_1 (N_32, 1);
}
16973
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  /* POLY selects the polynomial (0: CRC32, 1: CRC32C) and SZ the operand
     size (0: byte, 1: halfword, 2: word) — see the do_crc32* wrappers.  */
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_it_insn_type (OUTSIDE_IT_INSN);
  /* The field positions differ between the ARM and Thumb encodings.  */
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* Using PC (and, in Thumb state, SP) is unpredictable — warn.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
  if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
    as_warn (UNPRED_REG ("r13"));
}
16993
static void
do_crc32b (void)
{
  /* CRC32B: CRC-32 of a byte.  */
  do_crc32_1 (0, 0);
}

static void
do_crc32h (void)
{
  /* CRC32H: CRC-32 of a halfword.  */
  do_crc32_1 (0, 1);
}

static void
do_crc32w (void)
{
  /* CRC32W: CRC-32 of a word.  */
  do_crc32_1 (0, 2);
}

static void
do_crc32cb (void)
{
  /* CRC32CB: CRC-32C of a byte.  */
  do_crc32_1 (1, 0);
}

static void
do_crc32ch (void)
{
  /* CRC32CH: CRC-32C of a halfword.  */
  do_crc32_1 (1, 1);
}

static void
do_crc32cw (void)
{
  /* CRC32CW: CRC-32C of a word.  */
  do_crc32_1 (1, 2);
}
17029
17030 \f
17031 /* Overall per-instruction processing. */
17032
17033 /* We need to be able to fix up arbitrary expressions in some statements.
17034 This is so that we can handle symbols that are an arbitrary distance from
17035 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17036 which returns part of an address in a form which will be valid for
17037 a data instruction. We do this by pushing the expression into a symbol
17038 in the expr_section, and creating a fix for that. */
17039
static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  /* Rewrite the expression as a reference to that symbol.  */
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex is pushed into an expression symbol so it
	 can be resolved later.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
17093
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the relocation expression into a symbol and an offset for the
     variant frag.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  /* Emit a machine-dependent frag; the final encoding is chosen during
     relaxation.  */
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
17125
17126 /* Write a 32-bit thumb instruction to buf. */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  /* A 32-bit Thumb instruction is stored as two halfwords, most
     significant halfword first.  */
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
17133
/* Write the instruction assembled in INST to the output frag, together
   with any relocation it needs.  STR is the source line, used only for
   diagnostics.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      /* The final size is not yet known; emit a relaxable frag.  */
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb instructions need halfword-swapped output.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-size ARM instructions repeat the same word twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
17180
17181 static char *
17182 output_it_inst (int cond, int mask, char * to)
17183 {
17184 unsigned long instruction = 0xbf00;
17185
17186 mask &= 0xf;
17187 instruction |= mask;
17188 instruction |= cond << 4;
17189
17190 if (to == NULL)
17191 {
17192 to = frag_more (2);
17193 #ifdef OBJ_ELF
17194 dwarf2_emit_insn (2);
17195 #endif
17196 }
17197
17198 md_number_to_chars (to, instruction, 2);
17199
17200 return to;
17201 }
17202
/* Tag values used in struct asm_opcode's tag field.  These describe how
   (and where) a conditional affix may be attached to the mnemonic.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17236
17237 /* Subroutine of md_assemble, responsible for looking up the primary
17238 opcode from the mnemonic the user wrote. STR points to the
17239 beginning of the mnemonic.
17240
17241 This is not simply a hash table lookup, because of conditional
17242 variants. Most instructions have conditional variants, which are
17243 expressed with a _conditional affix_ to the mnemonic. If we were
17244 to encode each conditional variant as a literal string in the opcode
17245 table, it would have approximately 20,000 entries.
17246
17247 Most mnemonics take this affix as a suffix, and in unified syntax,
17248 'most' is upgraded to 'all'. However, in the divided syntax, some
17249 instructions take the affix as an infix, notably the s-variants of
17250 the arithmetic instructions. Of those instructions, all but six
17251 have the infix appear after the third character of the mnemonic.
17252
17253 Accordingly, the algorithm for looking up primary opcodes given
17254 an identifier is:
17255
17256 1. Look up the identifier in the opcode table.
17257 If we find a match, go to step U.
17258
17259 2. Look up the last two characters of the identifier in the
17260 conditions table. If we find a match, look up the first N-2
17261 characters of the identifier in the opcode table. If we
17262 find a match, go to step CE.
17263
17264 3. Look up the fourth and fifth characters of the identifier in
17265 the conditions table. If we find a match, extract those
17266 characters from the identifier, and look up the remaining
17267 characters in the opcode table. If we find a match, go
17268 to step CM.
17269
17270 4. Fail.
17271
17272 U. Examine the tag field of the opcode structure, in case this is
17273 one of the six instructions with its conditional infix in an
17274 unusual place. If it is, the tag tells us where to find the
17275 infix; look it up in the conditions table and set inst.cond
17276 accordingly. Otherwise, this is an unconditional instruction.
17277 Again set inst.cond accordingly. Return the opcode structure.
17278
17279 CE. Examine the tag field to make sure this is an instruction that
17280 should receive a conditional suffix. If it is not, fail.
17281 Otherwise, set inst.cond from the suffix we already looked up,
17282 and return the opcode structure.
17283
17284 CM. Examine the tag field to make sure this is an instruction that
17285 should receive a conditional infix after the third character.
17286 If it is not, fail. Otherwise, undo the edits to the current
17287 line of input and proceed as for case CE. */
17288
17289 static const struct asm_opcode *
17290 opcode_lookup (char **str)
17291 {
17292 char *end, *base;
17293 char *affix;
17294 const struct asm_opcode *opcode;
17295 const struct asm_cond *cond;
17296 char save[2];
17297
17298 /* Scan up to the end of the mnemonic, which must end in white space,
17299 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17300 for (base = end = *str; *end != '\0'; end++)
17301 if (*end == ' ' || *end == '.')
17302 break;
17303
17304 if (end == base)
17305 return NULL;
17306
17307 /* Handle a possible width suffix and/or Neon type suffix. */
17308 if (end[0] == '.')
17309 {
17310 int offset = 2;
17311
17312 /* The .w and .n suffixes are only valid if the unified syntax is in
17313 use. */
17314 if (unified_syntax && end[1] == 'w')
17315 inst.size_req = 4;
17316 else if (unified_syntax && end[1] == 'n')
17317 inst.size_req = 2;
17318 else
17319 offset = 0;
17320
17321 inst.vectype.elems = 0;
17322
17323 *str = end + offset;
17324
17325 if (end[offset] == '.')
17326 {
17327 /* See if we have a Neon type suffix (possible in either unified or
17328 non-unified ARM syntax mode). */
17329 if (parse_neon_type (&inst.vectype, str) == FAIL)
17330 return NULL;
17331 }
17332 else if (end[offset] != '\0' && end[offset] != ' ')
17333 return NULL;
17334 }
17335 else
17336 *str = end;
17337
17338 /* Look for unaffixed or special-case affixed mnemonic. */
17339 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17340 end - base);
17341 if (opcode)
17342 {
17343 /* step U */
17344 if (opcode->tag < OT_odd_infix_0)
17345 {
17346 inst.cond = COND_ALWAYS;
17347 return opcode;
17348 }
17349
17350 if (warn_on_deprecated && unified_syntax)
17351 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17352 affix = base + (opcode->tag - OT_odd_infix_0);
17353 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17354 gas_assert (cond);
17355
17356 inst.cond = cond->value;
17357 return opcode;
17358 }
17359
17360 /* Cannot have a conditional suffix on a mnemonic of less than two
17361 characters. */
17362 if (end - base < 3)
17363 return NULL;
17364
17365 /* Look for suffixed mnemonic. */
17366 affix = end - 2;
17367 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17368 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17369 affix - base);
17370 if (opcode && cond)
17371 {
17372 /* step CE */
17373 switch (opcode->tag)
17374 {
17375 case OT_cinfix3_legacy:
17376 /* Ignore conditional suffixes matched on infix only mnemonics. */
17377 break;
17378
17379 case OT_cinfix3:
17380 case OT_cinfix3_deprecated:
17381 case OT_odd_infix_unc:
17382 if (!unified_syntax)
17383 return 0;
17384 /* else fall through */
17385
17386 case OT_csuffix:
17387 case OT_csuffixF:
17388 case OT_csuf_or_in3:
17389 inst.cond = cond->value;
17390 return opcode;
17391
17392 case OT_unconditional:
17393 case OT_unconditionalF:
17394 if (thumb_mode)
17395 inst.cond = cond->value;
17396 else
17397 {
17398 /* Delayed diagnostic. */
17399 inst.error = BAD_COND;
17400 inst.cond = COND_ALWAYS;
17401 }
17402 return opcode;
17403
17404 default:
17405 return NULL;
17406 }
17407 }
17408
17409 /* Cannot have a usual-position infix on a mnemonic of less than
17410 six characters (five would be a suffix). */
17411 if (end - base < 6)
17412 return NULL;
17413
17414 /* Look for infixed mnemonic in the usual position. */
17415 affix = base + 3;
17416 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17417 if (!cond)
17418 return NULL;
17419
17420 memcpy (save, affix, 2);
17421 memmove (affix, affix + 2, (end - affix) - 2);
17422 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17423 (end - base) - 2);
17424 memmove (affix + 2, affix, (end - affix) - 2);
17425 memcpy (affix, save, 2);
17426
17427 if (opcode
17428 && (opcode->tag == OT_cinfix3
17429 || opcode->tag == OT_cinfix3_deprecated
17430 || opcode->tag == OT_csuf_or_in3
17431 || opcode->tag == OT_cinfix3_legacy))
17432 {
17433 /* Step CM. */
17434 if (warn_on_deprecated && unified_syntax
17435 && (opcode->tag == OT_cinfix3
17436 || opcode->tag == OT_cinfix3_deprecated))
17437 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17438
17439 inst.cond = cond->value;
17440 return opcode;
17441 }
17442
17443 return NULL;
17444 }
17445
17446 /* This function generates an initial IT instruction, leaving its block
17447 virtually open for the new instructions. Eventually,
17448 the mask will be updated by now_it_add_mask () each time
17449 a new instruction needs to be included in the IT block.
17450 Finally, the block is closed with close_automatic_it_block ().
17451 The block closure can be requested either from md_assemble (),
17452 a tencode (), or due to a label hook. */
17453
static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  /* Mask for a block that so far holds a single insn (see
     now_it_add_mask).  */
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  /* IT is a Thumb instruction; keep the mapping symbols consistent.  */
  mapping_state (MAP_THUMB);
  /* Remember where the IT insn was emitted so its mask can be rewritten
     as the block grows.  */
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
  now_it.warn_deprecated = FALSE;
  now_it.insn_cond = TRUE;
}
17466
17467 /* Close an automatic IT block.
17468 See comments in new_automatic_it_block (). */
17469
static void
close_automatic_it_block (void)
{
  /* 0x10 is the terminal mask value — compare is_last in
     handle_it_state; no further insns will join this block.  */
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
17476
17477 /* Update the mask of the current automatically-generated IT
17478 instruction. See comments in new_automatic_it_block (). */
17479
static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit)		((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit)	(CLEAR_BIT (value, nbit) \
					  | ((bitvalue) << (nbit)))
  /* The low bit of the condition distinguishes "then" from "else".  */
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  /* Record this insn's then/else sense in its slot of the mask...  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       resulting_bit,
			      (5 - now_it.block_length));
  /* ...and move the terminating 1 bit one slot further down.  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       1,
			       ((5 - now_it.block_length) - 1) );
  /* Rewrite the IT instruction already emitted with the updated mask.  */
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
17500
17501 /* The IT blocks handling machinery is accessed through the these functions:
17502 it_fsm_pre_encode () from md_assemble ()
17503 set_it_insn_type () optional, from the tencode functions
17504 set_it_insn_type_last () ditto
17505 in_it_block () ditto
17506 it_fsm_post_encode () from md_assemble ()
   force_automatic_it_block_close () from label handling functions
17508
17509 Rationale:
17510 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17511 initializing the IT insn type with a generic initial value depending
17512 on the inst.condition.
17513 2) During the tencode function, two things may happen:
17514 a) The tencode function overrides the IT insn type by
17515 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17516 b) The tencode function queries the IT block state by
17517 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17518
17519 Both set_it_insn_type and in_it_block run the internal FSM state
17520 handling function (handle_it_state), because: a) setting the IT insn
17521 type may incur in an invalid state (exiting the function),
17522 and b) querying the state requires the FSM to be updated.
17523 Specifically we want to avoid creating an IT block for conditional
17524 branches, so it_fsm_pre_encode is actually a guess and we can't
17525 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
17527 Because of this, if set_it_insn_type and in_it_block have to be used,
17528 set_it_insn_type has to be called first.
17529
17530 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17531 determines the insn IT type depending on the inst.cond code.
17532 When a tencode () routine encodes an instruction that can be
17533 either outside an IT block, or, in the case of being inside, has to be
17534 the last one, set_it_insn_type_last () will determine the proper
17535 IT instruction type based on the inst.cond code. Otherwise,
17536 set_it_insn_type can be called for overriding that logic or
17537 for covering other cases.
17538
17539 Calling handle_it_state () may not transition the IT block state to
   OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17541 still queried. Instead, if the FSM determines that the state should
17542 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17543 after the tencode () function: that's what it_fsm_post_encode () does.
17544
17545 Since in_it_block () calls the state handling function to get an
17546 updated state, an error may occur (due to invalid insns combination).
17547 In that case, inst.error is set.
17548 Therefore, inst.error has to be checked after the execution of
17549 the tencode () routine.
17550
17551 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17552 any pending state change (if any) that didn't take place in
17553 handle_it_state () as explained above. */
17554
17555 static void
17556 it_fsm_pre_encode (void)
17557 {
17558 if (inst.cond != COND_ALWAYS)
17559 inst.it_insn_type = INSIDE_IT_INSN;
17560 else
17561 inst.it_insn_type = OUTSIDE_IT_INSN;
17562
17563 now_it.state_handled = 0;
17564 }
17565
17566 /* IT state FSM handling function. */
17567
/* Run one step of the IT-block FSM for the instruction being assembled.
   Returns SUCCESS, or FAIL with inst.error set.  */
static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      /* ARM state: conditional insns are fine without IT, but warn
		 if implicit-IT handling for ARM was not requested.  */
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction starts a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  Derive from the top mask bit the
	   then/else condition this slot of the block must carry.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
17729
/* A pattern/mask pair describing a class of 16-bit Thumb encodings,
   plus a translatable description used in deprecation diagnostics.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Bits expected after applying MASK.  */
  unsigned long mask;		/* Which encoding bits to compare.  */
  const char* description;	/* N_()-marked class name for the warning.  */
};
17736
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Matched against inst.instruction by it_fsm_post_encode;
   terminated by an all-zero sentinel (mask == 0).  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
17751
/* Run after an instruction has been encoded: make sure the IT state
   machine has processed it, emit the ARMv8 deprecation diagnostics for
   conditional instructions inside IT blocks, and leave the IT block
   once its last slot has been consumed.  */

static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_it.state_handled)
    handle_it_state ();

  /* Deprecation warnings are only emitted once per IT block
     (warn_deprecated latches), only when -mwarn-deprecated is active,
     and only for ARMv8 targets.  */
  if (now_it.insn_cond
      && !now_it.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    {
      /* Encodings above 0xffff are 32-bit Thumb instructions (cf. the
	 size computation in md_assemble).  */
      if (inst.instruction >= 0x10000)
	{
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		       "deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
      else
	{
	  const struct depr_insn_mask *p = depr_it_insns;

	  /* Scan the deprecated 16-bit instruction classes; the table
	     is terminated by a zero mask.  */
	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
			       "of the following class are deprecated in ARMv8: "
			       "%s"), p->description);
		  now_it.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      if (now_it.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		       "instruction are deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
    }

  /* A mask of 0x10 means all condition slots have been shifted out,
     i.e. this was the final instruction of the IT block.  */
  is_last = (now_it.mask == 0x10);
  if (is_last)
    {
      now_it.state = OUTSIDE_IT_BLOCK;
      now_it.mask = 0;
    }
}
17805
17806 static void
17807 force_automatic_it_block_close (void)
17808 {
17809 if (now_it.state == AUTOMATIC_IT_BLOCK)
17810 {
17811 close_automatic_it_block ();
17812 now_it.state = OUTSIDE_IT_BLOCK;
17813 now_it.mask = 0;
17814 }
17815 }
17816
17817 static int
17818 in_it_block (void)
17819 {
17820 if (!now_it.state_handled)
17821 handle_it_state ();
17822
17823 return now_it.state != OUTSIDE_IT_BLOCK;
17824 }
17825
17826 /* Whether OPCODE only has T32 encoding. Since this function is only used by
17827 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
17828 here, hence the "known" in the function name. */
17829
17830 static bfd_boolean
17831 known_t32_only_insn (const struct asm_opcode *opcode)
17832 {
17833 /* Original Thumb-1 wide instruction. */
17834 if (opcode->tencode == do_t_blx
17835 || opcode->tencode == do_t_branch23
17836 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
17837 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
17838 return TRUE;
17839
17840 /* Wide-only instruction added to ARMv8-M. */
17841 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m)
17842 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
17843 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
17844 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
17845 return TRUE;
17846
17847 return FALSE;
17848 }
17849
17850 /* Whether wide instruction variant can be used if available for a valid OPCODE
17851 in ARCH. */
17852
17853 static bfd_boolean
17854 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
17855 {
17856 if (known_t32_only_insn (opcode))
17857 return TRUE;
17858
17859 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
17860 of variant T3 of B.W is checked in do_t_branch. */
17861 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
17862 && opcode->tencode == do_t_branch)
17863 return TRUE;
17864
17865 /* Wide instruction variants of all instructions with narrow *and* wide
17866 variants become available with ARMv6t2. Other opcodes are either
17867 narrow-only or wide-only and are thus available if OPCODE is valid. */
17868 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
17869 return TRUE;
17870
17871 /* OPCODE with narrow only instruction variant or wide variant not
17872 available. */
17873 return FALSE;
17874 }
17875
/* GAS target hook: assemble a single source line STR.  Looks up the
   mnemonic, validates it against the selected CPU and mode (Thumb or
   ARM), parses the operands, runs the per-opcode encoder through the
   IT-block state machine, and finally emits the instruction.  */

void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 0xe800..0xffff is reserved for the first halfword of 32-bit
	     encodings; a 16-bit result must not fall in that range.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
18065
/* Warn about manually-opened IT blocks (IT instruction seen) that were
   never completed by the end of assembly.  */

static void
check_it_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  /* For ELF, IT state is tracked per section, so examine them all.  */
  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_it.state
	== MANUAL_IT_BLOCK)
      {
	as_warn (_("section '%s' finished with an open IT block."),
		 sect->name);
      }
#else
  if (now_it.state == MANUAL_IT_BLOCK)
    as_warn (_("file finished with an open IT block."));
#endif
}
18084
18085 /* Various frobbings of labels and their addresses. */
18086
/* Hook called at the start of each input line: forget the label
   recorded by arm_frob_label so md_assemble only re-anchors labels
   defined on the current line.  */

void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
18092
/* Hook called whenever a label SYM is defined.  Records the label for
   md_assemble, tags it with the current Thumb/interwork state, closes
   any open automatic IT block, and applies .thumb_func marking where
   appropriate.  */

void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any automatically-generated IT block.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
18151
18152 bfd_boolean
18153 arm_data_in_code (void)
18154 {
18155 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
18156 {
18157 *input_line_pointer = '/';
18158 input_line_pointer += 5;
18159 *input_line_pointer = 0;
18160 return TRUE;
18161 }
18162
18163 return FALSE;
18164 }
18165
18166 char *
18167 arm_canonicalize_symbol_name (char * name)
18168 {
18169 int len;
18170
18171 if (thumb_mode && (len = strlen (name)) > 5
18172 && streq (name + len - 5, "/data"))
18173 *(name + len - 5) = 0;
18174
18175 return name;
18176 }
18177 \f
18178 /* Table of all register names defined by default. The user can
18179 define additional names with .req. Note that all register names
18180 should appear in both upper and lowercase variants. Some registers
18181 also have mixed-case names. */
18182
/* REGDEF defines one reg_entry: name S, number N, type REG_TYPE_##T.
   REGNUM pastes prefix and number to form the name; REGNUM2 doubles
   the register number (used for Neon Q registers, which map onto D
   register pairs).  REGSET/REGSETH expand to registers 0-15/16-31;
   SPLRBANK defines the banked LR/SP/SPSR triple for one CPU mode.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18208
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c,  CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  The encoding is 512 | (sysm << 16), with
     SPSR_BIT marking an SPSR rather than a banked GPR.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
#undef REGDEF
#undef REGNUM
#undef REGSET
18326
18327 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18328 within psr_required_here. */
/* Every ordering of the s, f, x and c flag letters is accepted, so the
   table enumerates all permutations of one to four distinct flags.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
18405
18406 /* Table of V7M psr names. */
/* Table of V7M psr names, mapping each name to its SYSm value.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	  0 }, {"APSR",		0 },
  {"iapsr",	  1 }, {"IAPSR",	1 },
  {"eapsr",	  2 }, {"EAPSR",	2 },
  {"psr",	  3 }, {"PSR",		3 },
  {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
  {"ipsr",	  5 }, {"IPSR",		5 },
  {"epsr",	  6 }, {"EPSR",		6 },
  {"iepsr",	  7 }, {"IEPSR",	7 },
  {"msp",	  8 }, {"MSP",		8 },
  {"psp",	  9 }, {"PSP",		9 },
  {"primask",	  16}, {"PRIMASK",	16},
  {"basepri",	  17}, {"BASEPRI",	17},
  {"basepri_max", 18}, {"BASEPRI_MAX",	18},
  {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility.  */
  {"faultmask",	  19}, {"FAULTMASK",	19},
  {"control",	  20}, {"CONTROL",	20}
};
18426
18427 /* Table of all shift-in-operand names. */
/* Table of all shift-in-operand names.  "asl" is accepted as a
   synonym for "lsl".  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
18437
18438 /* Table of all explicit relocation names. */
18439 #ifdef OBJ_ELF
/* Table of all explicit relocation names, mapping each operand prefix
   (e.g. ":got:") to its BFD relocation code.  ELF only.  */
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
18461 #endif
18462
18463 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
/* Table of all conditional affixes.  0xF is not defined as a condition code.
   "hs"/"cs" and "ul"/"lo"/"cc" are synonym pairs.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
18482
/* UL_BARRIER defines both lower- and upper-case spellings of one
   barrier option with its 4-bit option CODE and the architecture
   feature that makes it available.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
18508
18509 /* Table of ARM-format instructions. */
18510
/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
18533
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* Each expansion below is one asm_opcode initializer, in the order:
     { mnemonic, operand parse codes, OT_* suffix/infix tag,
       ARM opcode, Thumb opcode, ARM arch variant, Thumb arch variant,
       ARM encoder function, Thumb encoder function }
   NOTE(review): field order inferred from the initializers here;
   confirm against the struct asm_opcode declaration.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* As TxC3, but tagged OT_cinfix3_deprecated: the infixed spelling is
   still accepted but presumably diagnosed as deprecated — confirm at
   the tag's use site.  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* TC3/TC3w take a numeric Thumb opcode; tC3/tC3w take a T_MNEM_xyz
   enumerator (same split as TCE vs. tCE above).  */
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18565
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above: no Thumb variant (Thumb opcode 0,
   Thumb encoder NULL).  Note that CE and CL take the mnemonic as a
   string literal (cf. their uses in the table), whereas C3 stringifies
   a bare identifier.  */
#define CE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18598
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.
   The Thumb opcode is formed by prefixing the ARM value with the
   condition-always nibble (0xe##op); both slots share the ARM arch
   variant and the same encoder.  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
18615
/* Build one table entry for mnemonic part M1, condition infix M2 and
   tail M3.  An empty infix makes sizeof (#m2) == 1 (just the NUL of
   ""), selecting the unconditional tag; otherwise the tag records the
   infix position, i.e. the length of the leading part M1 (M1 is a
   string literal, so sizeof (m1) - 1 is strlen (m1)).  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand to one entry per condition infix: the bare (uninfixed) form
   followed by every accepted condition spelling.
   NOTE(review): "hs" and "ul" appear to be legacy synonyms for cs/cc
   (unsigned higher-or-same / unsigned lower) — confirm against the
   assembler's condition table.  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)
18641
/* ARM-only counterparts of TUE/TUF: mnemonic is stringified from a bare
   identifier, and there is no Thumb variant (Thumb opcode 0, Thumb
   encoder NULL).  UF carries the OT_unconditionalF tag (the 0xF
   condition-nibble family, cf. TUF above).  */
#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
18647
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  Both opcode slots hold an
   N_MNEM_xyz enumerator rather than a literal encoding.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  TAG selects the OT_* suffix handling for the entry.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

/* As NCE, but with the OT_csuffixF tag.  */
#define NCEF(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types
   (N_MNEM_xyz enumerators in both opcode slots, as for nUF).  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Dummy encoder name: table entries that pass 0 for an encoding function
   (e.g. the Thumb-only cbz/cbnz entries below) paste to do_0, which
   expands to a null function pointer.  */
#define do_0 0
18685
18686 static const struct asm_opcode insns[] =
18687 {
18688 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
18689 #define THUMB_VARIANT & arm_ext_v4t
18690 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
18691 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
18692 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
18693 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
18694 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
18695 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
18696 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
18697 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
18698 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
18699 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
18700 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
18701 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
18702 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
18703 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
18704 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
18705 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
18706
18707 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18708 for setting PSR flag bits. They are obsolete in V6 and do not
18709 have Thumb equivalents. */
18710 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18711 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18712 CL("tstp", 110f000, 2, (RR, SH), cmp),
18713 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18714 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18715 CL("cmpp", 150f000, 2, (RR, SH), cmp),
18716 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18717 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18718 CL("cmnp", 170f000, 2, (RR, SH), cmp),
18719
18720 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
18721 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
18722 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
18723 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
18724
18725 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
18726 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18727 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
18728 OP_RRnpc),
18729 OP_ADDRGLDR),ldst, t_ldst),
18730 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18731
18732 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18733 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18734 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18735 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18736 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18737 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18738
18739 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
18740 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
18741 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
18742 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
18743
18744 /* Pseudo ops. */
18745 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
18746 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
18747 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
18748 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
18749
18750 /* Thumb-compatibility pseudo ops. */
18751 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
18752 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
18753 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
18754 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
18755 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
18756 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
18757 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
18758 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
18759 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
18760 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
18761 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
18762 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
18763
18764 /* These may simplify to neg. */
18765 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
18766 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
18767
18768 #undef THUMB_VARIANT
18769 #define THUMB_VARIANT & arm_ext_v6
18770
18771 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
18772
18773 /* V1 instructions with no Thumb analogue prior to V6T2. */
18774 #undef THUMB_VARIANT
18775 #define THUMB_VARIANT & arm_ext_v6t2
18776
18777 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18778 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18779 CL("teqp", 130f000, 2, (RR, SH), cmp),
18780
18781 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18782 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18783 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
18784 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18785
18786 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18787 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18788
18789 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18790 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18791
18792 /* V1 instructions with no Thumb analogue at all. */
18793 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
18794 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
18795
18796 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
18797 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
18798 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
18799 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
18800 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
18801 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
18802 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
18803 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
18804
18805 #undef ARM_VARIANT
18806 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
18807 #undef THUMB_VARIANT
18808 #define THUMB_VARIANT & arm_ext_v4t
18809
18810 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18811 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18812
18813 #undef THUMB_VARIANT
18814 #define THUMB_VARIANT & arm_ext_v6t2
18815
18816 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18817 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
18818
18819 /* Generic coprocessor instructions. */
18820 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18821 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18822 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18823 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18824 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18825 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18826 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
18827
18828 #undef ARM_VARIANT
18829 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
18830
18831 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18832 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18833
18834 #undef ARM_VARIANT
18835 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
18836 #undef THUMB_VARIANT
18837 #define THUMB_VARIANT & arm_ext_msr
18838
18839 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
18840 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
18841
18842 #undef ARM_VARIANT
18843 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
18844 #undef THUMB_VARIANT
18845 #define THUMB_VARIANT & arm_ext_v6t2
18846
18847 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18848 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18849 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18850 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18851 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18852 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18853 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18854 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18855
18856 #undef ARM_VARIANT
18857 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
18858 #undef THUMB_VARIANT
18859 #define THUMB_VARIANT & arm_ext_v4t
18860
18861 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18862 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18863 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18864 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18865 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18866 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18867
18868 #undef ARM_VARIANT
18869 #define ARM_VARIANT & arm_ext_v4t_5
18870
18871 /* ARM Architecture 4T. */
18872 /* Note: bx (and blx) are required on V5, even if the processor does
18873 not support Thumb. */
18874 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
18875
18876 #undef ARM_VARIANT
18877 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
18878 #undef THUMB_VARIANT
18879 #define THUMB_VARIANT & arm_ext_v5t
18880
18881 /* Note: blx has 2 variants; the .value coded here is for
18882 BLX(2). Only this variant has conditional execution. */
18883 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
18884 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
18885
18886 #undef THUMB_VARIANT
18887 #define THUMB_VARIANT & arm_ext_v6t2
18888
18889 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
18890 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18891 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18892 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18893 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18894 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18895 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18896 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18897
18898 #undef ARM_VARIANT
18899 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
18900 #undef THUMB_VARIANT
18901 #define THUMB_VARIANT & arm_ext_v5exp
18902
18903 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18904 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18905 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18906 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18907
18908 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18909 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18910
18911 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18912 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18913 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18914 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18915
18916 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18917 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18918 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18919 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18920
18921 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18922 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18923
18924 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18925 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18926 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18927 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18928
18929 #undef ARM_VARIANT
18930 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
18931 #undef THUMB_VARIANT
18932 #define THUMB_VARIANT & arm_ext_v6t2
18933
18934 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
18935 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
18936 ldrd, t_ldstd),
18937 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
18938 ADDRGLDRS), ldrd, t_ldstd),
18939
18940 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18941 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18942
18943 #undef ARM_VARIANT
18944 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
18945
18946 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
18947
18948 #undef ARM_VARIANT
18949 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
18950 #undef THUMB_VARIANT
18951 #define THUMB_VARIANT & arm_ext_v6
18952
18953 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
18954 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
18955 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18956 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18957 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18958 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18959 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18960 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18961 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18962 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
18963
18964 #undef THUMB_VARIANT
18965 #define THUMB_VARIANT & arm_ext_v6t2_v8m
18966
18967 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
18968 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18969 strex, t_strex),
18970 #undef THUMB_VARIANT
18971 #define THUMB_VARIANT & arm_ext_v6t2
18972
18973 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18974 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18975
18976 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
18977 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
18978
18979 /* ARM V6 not included in V7M. */
18980 #undef THUMB_VARIANT
18981 #define THUMB_VARIANT & arm_ext_v6_notm
18982 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18983 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18984 UF(rfeib, 9900a00, 1, (RRw), rfe),
18985 UF(rfeda, 8100a00, 1, (RRw), rfe),
18986 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18987 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18988 UF(rfefa, 8100a00, 1, (RRw), rfe),
18989 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18990 UF(rfeed, 9900a00, 1, (RRw), rfe),
18991 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18992 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18993 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18994 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
18995 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
18996 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
18997 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
18998 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
18999 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19000 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
19001
19002 /* ARM V6 not included in V7M (eg. integer SIMD). */
19003 #undef THUMB_VARIANT
19004 #define THUMB_VARIANT & arm_ext_v6_dsp
19005 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
19006 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
19007 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19008 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19009 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19010 /* Old name for QASX. */
19011 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19012 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19013 /* Old name for QSAX. */
19014 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19015 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19016 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19017 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19018 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19019 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19020 /* Old name for SASX. */
19021 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19022 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19023 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19024 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19025 /* Old name for SHASX. */
19026 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19027 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19028 /* Old name for SHSAX. */
19029 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19030 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19031 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19032 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19033 /* Old name for SSAX. */
19034 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19035 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19036 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19037 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19038 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19039 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19040 /* Old name for UASX. */
19041 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19042 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19043 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19044 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19045 /* Old name for UHASX. */
19046 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19047 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19048 /* Old name for UHSAX. */
19049 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19050 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19051 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19052 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19053 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19054 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19055 /* Old name for UQASX. */
19056 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19057 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19058 /* Old name for UQSAX. */
19059 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19060 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19061 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19062 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19063 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19064 /* Old name for USAX. */
19065 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19066 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19067 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19068 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19069 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19070 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19071 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19072 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19073 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19074 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19075 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19076 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19077 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19078 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19079 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19080 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19081 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19082 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19083 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19084 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19085 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19086 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19087 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19088 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19089 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19090 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19091 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19092 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19093 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19094 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
19095 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
19096 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19097 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19098 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
19099
19100 #undef ARM_VARIANT
19101 #define ARM_VARIANT & arm_ext_v6k
19102 #undef THUMB_VARIANT
19103 #define THUMB_VARIANT & arm_ext_v6k
19104
19105 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
19106 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
19107 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
19108 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
19109
19110 #undef THUMB_VARIANT
19111 #define THUMB_VARIANT & arm_ext_v6_notm
19112 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
19113 ldrexd, t_ldrexd),
19114 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
19115 RRnpcb), strexd, t_strexd),
19116
19117 #undef THUMB_VARIANT
19118 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19119 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
19120 rd_rn, rd_rn),
19121 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
19122 rd_rn, rd_rn),
19123 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19124 strex, t_strexbh),
19125 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19126 strex, t_strexbh),
19127 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
19128
19129 #undef ARM_VARIANT
19130 #define ARM_VARIANT & arm_ext_sec
19131 #undef THUMB_VARIANT
19132 #define THUMB_VARIANT & arm_ext_sec
19133
19134 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
19135
19136 #undef ARM_VARIANT
19137 #define ARM_VARIANT & arm_ext_virt
19138 #undef THUMB_VARIANT
19139 #define THUMB_VARIANT & arm_ext_virt
19140
19141 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
19142 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
19143
19144 #undef ARM_VARIANT
19145 #define ARM_VARIANT & arm_ext_pan
19146 #undef THUMB_VARIANT
19147 #define THUMB_VARIANT & arm_ext_pan
19148
19149 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
19150
19151 #undef ARM_VARIANT
19152 #define ARM_VARIANT & arm_ext_v6t2
19153 #undef THUMB_VARIANT
19154 #define THUMB_VARIANT & arm_ext_v6t2
19155
19156 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
19157 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
19158 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19159 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19160
19161 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19162 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
19163
19164 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19165 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19166 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19167 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19168
19169 #undef THUMB_VARIANT
19170 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19171 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
19172 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
19173
19174 /* Thumb-only instructions. */
19175 #undef ARM_VARIANT
19176 #define ARM_VARIANT NULL
19177 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
19178 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
19179
19180 /* ARM does not really have an IT instruction, so always allow it.
19181 The opcode is copied from Thumb in order to allow warnings in
19182 -mimplicit-it=[never | arm] modes. */
19183 #undef ARM_VARIANT
19184 #define ARM_VARIANT & arm_ext_v1
19185 #undef THUMB_VARIANT
19186 #define THUMB_VARIANT & arm_ext_v6t2
19187
19188 TUE("it", bf08, bf08, 1, (COND), it, t_it),
19189 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
19190 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
19191 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
19192 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
19193 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
19194 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
19195 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
19196 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
19197 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
19198 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
19199 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
19200 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
19201 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
19202 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
19203 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19204 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
19205 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
19206
19207 /* Thumb2 only instructions. */
19208 #undef ARM_VARIANT
19209 #define ARM_VARIANT NULL
19210
19211 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19212 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19213 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
19214 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
19215 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
19216 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
19217
19218 /* Hardware division instructions. */
19219 #undef ARM_VARIANT
19220 #define ARM_VARIANT & arm_ext_adiv
19221 #undef THUMB_VARIANT
19222 #define THUMB_VARIANT & arm_ext_div
19223
19224 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
19225 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
19226
19227 /* ARM V6M/V7 instructions. */
19228 #undef ARM_VARIANT
19229 #define ARM_VARIANT & arm_ext_barrier
19230 #undef THUMB_VARIANT
19231 #define THUMB_VARIANT & arm_ext_barrier
19232
19233 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
19234 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
19235 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
19236
19237 /* ARM V7 instructions. */
19238 #undef ARM_VARIANT
19239 #define ARM_VARIANT & arm_ext_v7
19240 #undef THUMB_VARIANT
19241 #define THUMB_VARIANT & arm_ext_v7
19242
19243 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
19244 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
19245
19246 #undef ARM_VARIANT
19247 #define ARM_VARIANT & arm_ext_mp
19248 #undef THUMB_VARIANT
19249 #define THUMB_VARIANT & arm_ext_mp
19250
19251 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
19252
19253 /* AArchv8 instructions. */
19254 #undef ARM_VARIANT
19255 #define ARM_VARIANT & arm_ext_v8
19256
19257 /* Instructions shared between armv8-a and armv8-m. */
19258 #undef THUMB_VARIANT
19259 #define THUMB_VARIANT & arm_ext_atomics
19260
19261 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19262 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19263 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19264 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19265 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19266 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19267 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19268 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
19269 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19270 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
19271 stlex, t_stlex),
19272 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
19273 stlex, t_stlex),
19274 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
19275 stlex, t_stlex),
19276 #undef THUMB_VARIANT
19277 #define THUMB_VARIANT & arm_ext_v8
19278
19279 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
19280 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
19281 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
19282 ldrexd, t_ldrexd),
19283 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
19284 strexd, t_strexd),
19285 /* ARMv8 T32 only. */
19286 #undef ARM_VARIANT
19287 #define ARM_VARIANT NULL
19288 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
19289 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
19290 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
19291
19292 /* FP for ARMv8. */
19293 #undef ARM_VARIANT
19294 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19295 #undef THUMB_VARIANT
19296 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19297
19298 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
19299 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
19300 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
19301 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
19302 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19303 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19304 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
19305 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
19306 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
19307 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
19308 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
19309 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
19310 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
19311 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
19312 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
19313 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
19314 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
19315
19316 /* Crypto v1 extensions. */
19317 #undef ARM_VARIANT
19318 #define ARM_VARIANT & fpu_crypto_ext_armv8
19319 #undef THUMB_VARIANT
19320 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19321
19322 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
19323 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
19324 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
19325 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
19326 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
19327 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
19328 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
19329 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
19330 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
19331 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
19332 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
19333 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
19334 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
19335 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
19336
19337 #undef ARM_VARIANT
19338 #define ARM_VARIANT & crc_ext_armv8
19339 #undef THUMB_VARIANT
19340 #define THUMB_VARIANT & crc_ext_armv8
19341 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19342 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19343 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19344 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19345 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19346 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19347
19348 /* ARMv8.2 RAS extension. */
19349 #undef ARM_VARIANT
19350 #define ARM_VARIANT & arm_ext_v8_2
19351 #undef THUMB_VARIANT
19352 #define THUMB_VARIANT & arm_ext_v8_2
19353 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
19354
19355 #undef ARM_VARIANT
19356 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19357 #undef THUMB_VARIANT
19358 #define THUMB_VARIANT NULL
19359
19360 cCE("wfs", e200110, 1, (RR), rd),
19361 cCE("rfs", e300110, 1, (RR), rd),
19362 cCE("wfc", e400110, 1, (RR), rd),
19363 cCE("rfc", e500110, 1, (RR), rd),
19364
19365 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
19366 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
19367 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
19368 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
19369
19370 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
19371 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
19372 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
19373 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
19374
19375 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
19376 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
19377 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
19378 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
19379 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
19380 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
19381 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
19382 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
19383 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
19384 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
19385 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
19386 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
19387
19388 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
19389 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
19390 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
19391 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
19392 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
19393 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
19394 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
19395 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
19396 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
19397 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
19398 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
19399 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
19400
19401 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
19402 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
19403 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
19404 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
19405 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
19406 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
19407 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
19408 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
19409 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
19410 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
19411 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
19412 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
19413
19414 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
19415 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
19416 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
19417 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
19418 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
19419 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
19420 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
19421 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
19422 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
19423 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
19424 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
19425 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
19426
19427 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
19428 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
19429 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
19430 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
19431 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
19432 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
19433 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
19434 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
19435 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
19436 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
19437 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
19438 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
19439
19440 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
19441 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
19442 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
19443 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
19444 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
19445 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
19446 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
19447 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
19448 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
19449 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
19450 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
19451 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
19452
19453 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
19454 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
19455 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
19456 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
19457 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
19458 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
19459 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
19460 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
19461 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
19462 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
19463 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
19464 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
19465
19466 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
19467 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
19468 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
19469 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
19470 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
19471 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
19472 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
19473 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
19474 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
19475 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
19476 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
19477 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
19478
19479 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
19480 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
19481 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
19482 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
19483 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
19484 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
19485 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
19486 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
19487 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
19488 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
19489 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
19490 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
19491
19492 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
19493 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
19494 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
19495 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
19496 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
19497 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
19498 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
19499 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
19500 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
19501 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
19502 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
19503 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
19504
19505 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
19506 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
19507 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
19508 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
19509 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
19510 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
19511 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
19512 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
19513 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
19514 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
19515 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
19516 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
19517
19518 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
19519 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
19520 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
19521 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
19522 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
19523 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
19524 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
19525 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
19526 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
19527 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
19528 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
19529 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
19530
19531 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
19532 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
19533 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
19534 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
19535 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
19536 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
19537 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
19538 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
19539 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
19540 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
19541 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
19542 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
19543
19544 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
19545 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
19546 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
19547 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
19548 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
19549 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
19550 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
19551 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
19552 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
19553 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
19554 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
19555 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
19556
19557 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
19558 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
19559 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
19560 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
19561 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
19562 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
19563 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
19564 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
19565 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
19566 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
19567 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
19568 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
19569
19570 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
19571 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
19572 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
19573 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
19574 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
19575 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
19576 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
19577 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
19578 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
19579 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
19580 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
19581 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
19582
19583 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
19584 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
19585 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
19586 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
19587 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
19588 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19589 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19590 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19591 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
19592 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
19593 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
19594 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
19595
19596 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
19597 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
19598 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
19599 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
19600 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
19601 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19602 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19603 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19604 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
19605 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
19606 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
19607 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
19608
19609 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
19610 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
19611 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
19612 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
19613 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
19614 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19615 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19616 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19617 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
19618 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
19619 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
19620 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
19621
19622 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
19623 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
19624 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
19625 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
19626 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
19627 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19628 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19629 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19630 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
19631 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
19632 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
19633 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
19634
19635 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
19636 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
19637 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
19638 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
19639 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
19640 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19641 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19642 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19643 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
19644 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
19645 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
19646 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
19647
19648 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
19649 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
19650 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
19651 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
19652 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
19653 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19654 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19655 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19656 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
19657 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
19658 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
19659 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
19660
19661 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
19662 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
19663 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
19664 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
19665 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
19666 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19667 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19668 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19669 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
19670 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
19671 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
19672 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
19673
19674 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
19675 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
19676 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
19677 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
19678 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
19679 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19680 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19681 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19682 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
19683 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
19684 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
19685 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
19686
19687 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
19688 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
19689 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
19690 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
19691 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
19692 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19693 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19694 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19695 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
19696 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
19697 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
19698 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
19699
19700 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
19701 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
19702 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
19703 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
19704 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
19705 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19706 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19707 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19708 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
19709 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
19710 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
19711 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
19712
19713 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19714 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19715 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19716 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19717 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19718 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19719 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19720 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19721 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19722 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19723 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19724 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19725
19726 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19727 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19728 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19729 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19730 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19731 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19732 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19733 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19734 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19735 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19736 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19737 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19738
19739 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19740 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19741 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19742 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19743 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19744 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19745 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19746 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19747 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19748 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19749 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19750 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19751
19752 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
19753 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
19754 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
19755 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
19756
19757 cCL("flts", e000110, 2, (RF, RR), rn_rd),
19758 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
19759 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
19760 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
19761 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
19762 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
19763 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
19764 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
19765 cCL("flte", e080110, 2, (RF, RR), rn_rd),
19766 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
19767 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
19768 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
19769
19770 /* The implementation of the FIX instruction is broken on some
19771 assemblers, in that it accepts a precision specifier as well as a
19772 rounding specifier, despite the fact that this is meaningless.
19773 To be more compatible, we accept it as well, though of course it
19774 does not set any bits. */
19775 cCE("fix", e100110, 2, (RR, RF), rd_rm),
19776 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
19777 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
19778 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
19779 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
19780 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
19781 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
19782 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
19783 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
19784 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
19785 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
19786 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
19787 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
19788
19789 /* Instructions that were new with the real FPA, call them V2. */
19790 #undef ARM_VARIANT
19791 #define ARM_VARIANT & fpu_fpa_ext_v2
19792
19793 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19794 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19795 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19796 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19797 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19798 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19799
19800 #undef ARM_VARIANT
19801 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
19802
19803 /* Moves and type conversions. */
19804 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
19805 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
19806 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
19807 cCE("fmstat", ef1fa10, 0, (), noargs),
19808 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
19809 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
19810 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
19811 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
19812 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
19813 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19814 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
19815 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19816 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
19817 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
19818
19819 /* Memory operations. */
19820 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19821 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19822 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19823 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19824 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19825 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19826 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19827 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19828 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19829 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19830 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19831 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19832 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19833 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19834 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19835 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19836 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19837 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19838
19839 /* Monadic operations. */
19840 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
19841 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
19842 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
19843
19844 /* Dyadic operations. */
19845 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19846 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19847 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19848 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19849 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19850 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19851 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19852 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19853 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19854
19855 /* Comparisons. */
19856 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
19857 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
19858 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
19859 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
19860
19861 /* Double precision load/store are still present on single precision
19862 implementations. */
19863 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19864 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19865 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19866 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19867 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19868 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19869 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19870 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19871 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19872 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19873
19874 #undef ARM_VARIANT
19875 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
19876
19877 /* Moves and type conversions. */
19878 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19879 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19880 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19881 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
19882 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
19883 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
19884 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
19885 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19886 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
19887 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19888 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19889 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19890 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19891
19892 /* Monadic operations. */
19893 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19894 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19895 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19896
19897 /* Dyadic operations. */
19898 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19899 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19900 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19901 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19902 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19903 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19904 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19905 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19906 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19907
19908 /* Comparisons. */
19909 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19910 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
19911 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19912 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
19913
19914 #undef ARM_VARIANT
19915 #define ARM_VARIANT & fpu_vfp_ext_v2
19916
19917 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
19918 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
19919 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
19920 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
19921
19922 /* Instructions which may belong to either the Neon or VFP instruction sets.
19923 Individual encoder functions perform additional architecture checks. */
19924 #undef ARM_VARIANT
19925 #define ARM_VARIANT & fpu_vfp_ext_v1xd
19926 #undef THUMB_VARIANT
19927 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
19928
19929 /* These mnemonics are unique to VFP. */
19930 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
19931 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
19932 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19933 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19934 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19935 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
19936 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
19937 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
19938 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
19939 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
19940
19941 /* Mnemonics shared by Neon and VFP. */
19942 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
19943 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19944 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19945
19946 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19947 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19948
19949 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19950 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19951
19952 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19953 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19954 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19955 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19956 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19957 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19958 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19959 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19960
19961 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
19962 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
19963 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
19964 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
19965
19966
19967 /* NOTE: All VMOV encoding is special-cased! */
19968 NCE(vmov, 0, 1, (VMOV), neon_mov),
19969 NCE(vmovq, 0, 1, (VMOV), neon_mov),
19970
19971 #undef THUMB_VARIANT
19972 #define THUMB_VARIANT & fpu_neon_ext_v1
19973 #undef ARM_VARIANT
19974 #define ARM_VARIANT & fpu_neon_ext_v1
19975
19976 /* Data processing with three registers of the same length. */
19977 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
19978 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
19979 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
19980 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19981 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19982 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19983 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19984 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19985 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19986 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
19987 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19988 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19989 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19990 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19991 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19992 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19993 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19994 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19995 /* If not immediate, fall back to neon_dyadic_i64_su.
19996 shl_imm should accept I8 I16 I32 I64,
19997 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
19998 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
19999 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
20000 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
20001 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
20002 /* Logic ops, types optional & ignored. */
20003 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20004 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20005 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20006 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20007 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20008 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20009 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20010 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20011 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
20012 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
20013 /* Bitfield ops, untyped. */
20014 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20015 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20016 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20017 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20018 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20019 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20020 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
20021 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20022 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20023 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20024 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20025 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20026 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20027 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20028 back to neon_dyadic_if_su. */
20029 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20030 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20031 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20032 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20033 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20034 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20035 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20036 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20037 /* Comparison. Type I8 I16 I32 F32. */
20038 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20039 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
20040 /* As above, D registers only. */
20041 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20042 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20043 /* Int and float variants, signedness unimportant. */
20044 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20045 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20046 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
20047 /* Add/sub take types I8 I16 I32 I64 F32. */
20048 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20049 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20050 /* vtst takes sizes 8, 16, 32. */
20051 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20052 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
20053 /* VMUL takes I8 I16 I32 F32 P8. */
20054 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
20055 /* VQD{R}MULH takes S16 S32. */
20056 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20057 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20058 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20059 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20060 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20061 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20062 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20063 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20064 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20065 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20066 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20067 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20068 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20069 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20070 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20071 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20072 /* ARM v8.1 extension. */
20073 nUF(vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20074 nUF(vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20075 nUF(vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20076 nUF(vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20077
20078 /* Two address, int/float. Types S8 S16 S32 F32. */
20079 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
20080 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
20081
20082 /* Data processing with two registers and a shift amount. */
20083 /* Right shifts, and variants with rounding.
20084 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20085 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20086 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20087 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20088 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20089 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20090 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20091 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20092 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20093 /* Shift and insert. Sizes accepted 8 16 32 64. */
20094 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
20095 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
20096 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
20097 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
20098 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20099 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
20100 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
20101 /* Right shift immediate, saturating & narrowing, with rounding variants.
20102 Types accepted S16 S32 S64 U16 U32 U64. */
20103 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20104 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20105 /* As above, unsigned. Types accepted S16 S32 S64. */
20106 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20107 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20108 /* Right shift narrowing. Types accepted I16 I32 I64. */
20109 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20110 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20111 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20112 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
20113 /* CVT with optional immediate for fixed-point variant. */
20114 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
20115
20116 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
20117 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
20118
20119 /* Data processing, three registers of different lengths. */
20120 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20121 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
20122 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
20123 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
20124 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
20125 /* If not scalar, fall back to neon_dyadic_long.
20126 Vector types as above, scalar types S16 S32 U16 U32. */
20127 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20128 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20129 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20130 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20131 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20132 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20133 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20134 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20135 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20136 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20137 /* Saturating doubling multiplies. Types S16 S32. */
20138 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20139 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20140 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20141 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20142 S16 S32 U16 U32. */
20143 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
20144
20145 /* Extract. Size 8. */
20146 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
20147 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
20148
20149 /* Two registers, miscellaneous. */
20150 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20151 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
20152 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
20153 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
20154 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
20155 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
20156 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
20157 /* Vector replicate. Sizes 8 16 32. */
20158 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
20159 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
20160 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20161 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
20162 /* VMOVN. Types I16 I32 I64. */
20163 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
20164 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20165 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
20166 /* VQMOVUN. Types S16 S32 S64. */
20167 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
20168 /* VZIP / VUZP. Sizes 8 16 32. */
20169 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
20170 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
20171 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
20172 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
20173 /* VQABS / VQNEG. Types S8 S16 S32. */
20174 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20175 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
20176 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20177 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
20178 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20179 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
20180 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
20181 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
20182 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
20183 /* Reciprocal estimates. Types U32 F32. */
20184 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
20185 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
20186 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
20187 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
20188 /* VCLS. Types S8 S16 S32. */
20189 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
20190 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
20191 /* VCLZ. Types I8 I16 I32. */
20192 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
20193 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
20194 /* VCNT. Size 8. */
20195 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
20196 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
20197 /* Two address, untyped. */
20198 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
20199 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
20200 /* VTRN. Sizes 8 16 32. */
20201 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
20202 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
20203
20204 /* Table lookup. Size 8. */
20205 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20206 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20207
20208 #undef THUMB_VARIANT
20209 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20210 #undef ARM_VARIANT
20211 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20212
20213 /* Neon element/structure load/store. */
20214 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20215 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20216 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20217 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20218 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20219 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20220 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20221 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20222
20223 #undef THUMB_VARIANT
20224 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20225 #undef ARM_VARIANT
20226 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20227 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
20228 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20229 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20230 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20231 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20232 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20233 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20234 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20235 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20236
20237 #undef THUMB_VARIANT
20238 #define THUMB_VARIANT & fpu_vfp_ext_v3
20239 #undef ARM_VARIANT
20240 #define ARM_VARIANT & fpu_vfp_ext_v3
20241
20242 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
20243 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20244 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20245 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20246 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20247 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20248 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20249 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20250 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20251
20252 #undef ARM_VARIANT
20253 #define ARM_VARIANT & fpu_vfp_ext_fma
20254 #undef THUMB_VARIANT
20255 #define THUMB_VARIANT & fpu_vfp_ext_fma
20256 /* Mnemonics shared by Neon and VFP. These are included in the
20257 VFP FMA variant; NEON and VFP FMA always includes the NEON
20258 FMA instructions. */
20259 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20260 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20261 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20262 the v form should always be used. */
20263 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20264 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20265 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20266 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20267 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20268 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20269
20270 #undef THUMB_VARIANT
20271 #undef ARM_VARIANT
20272 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20273
20274 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20275 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20276 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20277 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20278 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20279 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20280 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
20281 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
20282
20283 #undef ARM_VARIANT
20284 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20285
20286 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
20287 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
20288 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
20289 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
20290 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
20291 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
20292 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
20293 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
20294 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
20295 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20296 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20297 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20298 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20299 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20300 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20301 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20302 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20303 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20304 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
20305 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
20306 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20307 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20308 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20309 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20310 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20311 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20312 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
20313 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
20314 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
20315 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
20316 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
20317 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
20318 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
20319 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
20320 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
20321 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
20322 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
20323 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20324 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20325 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20326 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20327 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20328 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20329 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20330 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20331 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20332 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
20333 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20334 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20335 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20336 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20337 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20338 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20339 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20340 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20341 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20342 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20343 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20344 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20345 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20346 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20347 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20348 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20349 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20350 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20351 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20352 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20353 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20354 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20355 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20356 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20357 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20358 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20359 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20360 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20361 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20362 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20363 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20364 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20365 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20366 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20367 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20368 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20369 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20370 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20371 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20372 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20373 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20374 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
20375 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20376 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20377 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20378 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20379 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20380 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20381 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20382 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20383 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20384 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20385 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20386 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20387 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20388 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20389 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20390 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20391 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20392 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20393 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20394 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20395 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20396 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
20397 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20398 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20399 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20400 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20401 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20402 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20403 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20404 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20405 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20406 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20407 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20408 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20409 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20410 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20411 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20412 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20413 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20414 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20415 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20416 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20417 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20418 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20419 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20420 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20421 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20422 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20423 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20424 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20425 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20426 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20427 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20428 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
20429 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
20430 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
20431 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
20432 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
20433 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
20434 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20435 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20436 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20437 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
20438 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
20439 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
20440 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
20441 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
20442 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
20443 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20444 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20445 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20446 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20447 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
20448
20449 #undef ARM_VARIANT
20450 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20451
20452 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
20453 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
20454 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
20455 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
20456 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
20457 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
20458 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20459 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20460 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20461 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20462 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20463 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20464 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20465 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20466 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20467 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20468 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20469 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20470 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20471 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20472 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
20473 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20474 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20475 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20476 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20477 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20478 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20479 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20480 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20481 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20482 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20483 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20484 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20485 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20486 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20487 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20488 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20489 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20490 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20491 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20492 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20493 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20494 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20495 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20496 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20497 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20498 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20499 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20500 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20501 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20502 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20503 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20504 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20505 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20506 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20507 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20508 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20509
20510 #undef ARM_VARIANT
20511 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20512
20513 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20514 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20515 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20516 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20517 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20518 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20519 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20520 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20521 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
20522 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
20523 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
20524 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
20525 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
20526 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
20527 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
20528 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
20529 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
20530 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
20531 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
20532 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
20533 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
20534 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
20535 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
20536 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
20537 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
20538 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
20539 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
20540 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
20541 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
20542 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
20543 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
20544 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
20545 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
20546 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
20547 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
20548 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
20549 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
20550 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
20551 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
20552 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
20553 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
20554 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
20555 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
20556 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
20557 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
20558 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
20559 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
20560 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
20561 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
20562 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
20563 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
20564 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
20565 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
20566 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
20567 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
20568 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
20569 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
20570 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
20571 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
20572 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
20573 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
20574 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
20575 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
20576 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
20577 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20578 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20579 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20580 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20581 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20582 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20583 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20584 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20585 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20586 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20587 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20588 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20589
20590 #undef ARM_VARIANT
20591 #define ARM_VARIANT NULL
20592 #undef THUMB_VARIANT
20593 #define THUMB_VARIANT & arm_ext_v8m
20594 TUE("tt", 0, e840f000, 2, (RRnpc, RRnpc), 0, tt),
20595 TUE("ttt", 0, e840f040, 2, (RRnpc, RRnpc), 0, tt),
20596 };
20597 #undef ARM_VARIANT
20598 #undef THUMB_VARIANT
20599 #undef TCE
20600 #undef TUE
20601 #undef TUF
20602 #undef TCC
20603 #undef cCE
20604 #undef cCL
20605 #undef C3E
20606 #undef CE
20607 #undef CM
20608 #undef UE
20609 #undef UF
20610 #undef UT
20611 #undef NUF
20612 #undef nUF
20613 #undef NCE
20614 #undef nCE
20615 #undef OPS0
20616 #undef OPS1
20617 #undef OPS2
20618 #undef OPS3
20619 #undef OPS4
20620 #undef OPS5
20621 #undef OPS6
20622 #undef do_0
20623 \f
20624 /* MD interface: bits in the object file. */
20625
20626 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20627 for use in the a.out file, and stores them in the array pointed to by buf.
20628 This knows about the endian-ness of the target machine and does
20629 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
20630 2 (short) and 4 (long) Floating numbers are put out as a series of
20631 LITTLENUMS (shorts, here at least). */
20632
20633 void
20634 md_number_to_chars (char * buf, valueT val, int n)
20635 {
20636 if (target_big_endian)
20637 number_to_chars_bigendian (buf, val, n);
20638 else
20639 number_to_chars_littleendian (buf, val, n);
20640 }
20641
20642 static valueT
20643 md_chars_to_number (char * buf, int n)
20644 {
20645 valueT result = 0;
20646 unsigned char * where = (unsigned char *) buf;
20647
20648 if (target_big_endian)
20649 {
20650 while (n--)
20651 {
20652 result <<= 8;
20653 result |= (*where++ & 255);
20654 }
20655 }
20656 else
20657 {
20658 while (n--)
20659 {
20660 result <<= 8;
20661 result |= (where[n] & 255);
20662 }
20663 }
20664
20665 return result;
20666 }
20667
20668 /* MD interface: Sections. */
20669
20670 /* Calculate the maximum variable size (i.e., excluding fr_fix)
20671 that an rs_machine_dependent frag may reach. */
20672
20673 unsigned int
20674 arm_frag_max_var (fragS *fragp)
20675 {
20676 /* We only use rs_machine_dependent for variable-size Thumb instructions,
20677 which are either THUMB_SIZE (2) or INSN_SIZE (4).
20678
20679 Note that we generate relaxable instructions even for cases that don't
20680 really need it, like an immediate that's a trivial constant. So we're
20681 overestimating the instruction size for some of those cases. Rather
20682 than putting more intelligence here, it would probably be better to
20683 avoid generating a relaxation frag in the first place when it can be
20684 determined up front that a short instruction will suffice. */
20685
20686 gas_assert (fragp->fr_type == rs_machine_dependent);
20687 return INSN_SIZE;
20688 }
20689
20690 /* Estimate the size of a frag before relaxing. Assume everything fits in
20691 2 bytes. */
20692
20693 int
20694 md_estimate_size_before_relax (fragS * fragp,
20695 segT segtype ATTRIBUTE_UNUSED)
20696 {
20697 fragp->fr_var = 2;
20698 return 2;
20699 }
20700
/* Convert a machine dependent frag.  Called once relaxation has settled:
   pick the final (2- or 4-byte) encoding of the relaxable Thumb
   instruction held in FRAGP, rewrite the instruction bytes if the wide
   form was chosen, and emit the fixup that will fill in the
   immediate/offset field.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The variable part of the frag starts after its fixed part.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* The narrow (16-bit) opcode previously emitted; its register fields
     are copied into the wide encoding below when needed.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  /* Widen to the 32-bit encoding, transplanting the register
	     fields out of the old 16-bit opcode.  The field placement
	     differs for the SP/PC-relative forms (top nibble 4 or 9)
	     versus the register-offset forms.  */
	  insn = THUMB_OP32 (opcode);
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Of this group only the ldr_pc2 form is PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Narrow adr is encoded relative to PC + 4.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs put the destination register at bit 0 of the wide
	     encoding's Rd field; cmp/cmn shift it up by 8.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Copy the condition field into the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 distinguishes the flag-setting form, which takes a
	     different immediate relocation.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  /* Emit the fixup against the variable part, attributed to the
     original source location.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
20874
20875 /* Return the size of a relaxable immediate operand instruction.
20876 SHIFT and SIZE specify the form of the allowable immediate. */
20877 static int
20878 relax_immediate (fragS *fragp, int size, int shift)
20879 {
20880 offsetT offset;
20881 offsetT mask;
20882 offsetT low;
20883
20884 /* ??? Should be able to do better than this. */
20885 if (fragp->fr_symbol)
20886 return 4;
20887
20888 low = (1 << shift) - 1;
20889 mask = (1 << (shift + size)) - (1 << shift);
20890 offset = fragp->fr_offset;
20891 /* Force misaligned offsets to 32-bit variant. */
20892 if (offset & low)
20893 return 4;
20894 if (offset & ~mask)
20895 return 4;
20896 return 2;
20897 }
20898
/* Get the address of a symbol during relaxation.  FRAGP is the frag
   being relaxed and STRETCH the net distance by which frags already
   processed on this pass have moved.  Returns the symbol's address plus
   the frag's offset, adjusted for STRETCH where applicable.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round STRETCH towards zero to the alignment boundary:
		 an intervening alignment frag absorbs part of the
		 movement.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Nothing left to propagate past this point.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the adjustment if the symbol's frag lies ahead of
	 us (the walk reached it, or ran to the end of the chain).  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
20948
20949 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
20950 load. */
20951 static int
20952 relax_adr (fragS *fragp, asection *sec, long stretch)
20953 {
20954 addressT addr;
20955 offsetT val;
20956
20957 /* Assume worst case for symbols not known to be in the same section. */
20958 if (fragp->fr_symbol == NULL
20959 || !S_IS_DEFINED (fragp->fr_symbol)
20960 || sec != S_GET_SEGMENT (fragp->fr_symbol)
20961 || S_IS_WEAK (fragp->fr_symbol))
20962 return 4;
20963
20964 val = relaxed_symbol_addr (fragp, stretch);
20965 addr = fragp->fr_address + fragp->fr_fix;
20966 addr = (addr + 4) & ~3;
20967 /* Force misaligned targets to 32-bit variant. */
20968 if (val & 3)
20969 return 4;
20970 val -= addr;
20971 if (val < 0 || val > 1020)
20972 return 4;
20973 return 2;
20974 }
20975
20976 /* Return the size of a relaxable add/sub immediate instruction. */
20977 static int
20978 relax_addsub (fragS *fragp, asection *sec)
20979 {
20980 char *buf;
20981 int op;
20982
20983 buf = fragp->fr_literal + fragp->fr_fix;
20984 op = bfd_get_16(sec->owner, buf);
20985 if ((op & 0xf) == ((op >> 4) & 0xf))
20986 return relax_immediate (fragp, 8, 0);
20987 else
20988 return relax_immediate (fragp, 3, 0);
20989 }
20990
/* Return TRUE iff the definition of symbol S could be pre-empted
   (overridden) at link or load time.  Used by relax_branch: a branch to
   a pre-emptible symbol keeps the wide encoding.  */
static bfd_boolean
symbol_preemptible (symbolS *s)
{
  /* Weak symbols can always be pre-empted.  */
  if (S_IS_WEAK (s))
    return TRUE;

  /* Non-global symbols cannot be pre-empted.  */
  if (! S_IS_EXTERNAL (s))
    return FALSE;

#ifdef OBJ_ELF
  /* In ELF, a global symbol can be marked protected, or private.  In that
     case it can't be pre-empted (other definitions in the same link unit
     would violate the ODR).  */
  if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
    return FALSE;
#endif

  /* Other global symbols might be pre-empted.  */
  return TRUE;
}
21015
/* Return the size of a relaxable branch instruction.  BITS is the
   size of the offset field in the narrow instruction.  Returns 2 when
   the narrow encoding can reach the target, 4 otherwise.  */

static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

#ifdef OBJ_ELF
  /* A branch to a function in ARM state will require interworking.  */
  if (S_IS_DEFINED (fragp->fr_symbol)
      && ARM_IS_FUNC (fragp->fr_symbol))
    return 4;
#endif

  /* NOTE(review): presumably kept wide because a pre-emptible symbol
     may be resolved to a different definition at link time, so the
     narrow range cannot be relied upon — confirm.  */
  if (symbol_preemptible (fragp->fr_symbol))
    return 4;

  /* Branch offsets are relative to the instruction's address plus 4.  */
  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2 */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;
  return 2;
}
21052
21053
/* Relax a machine dependent frag.  Dispatches on the Thumb mnemonic
   recorded in fr_subtype to the appropriate size computation, records
   the new variable size, and returns the amount by which the current
   size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* The (size, shift) arguments to relax_immediate below describe the
     narrow encoding's immediate field: number of bits and the implicit
     scaling of the operand.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
21132
/* Round up a section size to the appropriate boundary.  On targets
   other than a.out, SIZE is returned unchanged.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED,
		  valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
	 this, BFD will align it for us, but it will not write out the
	 final bytes of the section.  This may be a bug in BFD, but it is
	 easier to fix it here since that is how the other a.out targets
	 work.  */
      int align;

      align = bfd_get_section_alignment (stdoutput, segment);
      /* Round SIZE up to a multiple of 2**align.  */
      size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
    }
#endif

  return size;
}
21156
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: pad with zero bytes up to instruction
   alignment, then with the appropriate no-op instruction for the
   recorded ARM/Thumb state and the selected architecture.  */

void
arm_handle_align (fragS * fragP)
{
  /* No-op encodings, indexed by [architecture variant][endianness].  */
  static char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const char * noop;
  const char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed to reach the next frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb frag.  With Thumb-2 available, a narrow noop handles an
	 odd halfword and wide noops fill the rest; otherwise only the
	 2-byte Thumb-1 noop is used.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      /* ARM frag: pick the v6k NOP when available, else mov r0, r0.  */
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  if (bytes & (noop_size - 1))
    {
      /* Zero-pad up to instruction alignment; mark the zeros as data
	 with a mapping symbol on ELF targets.  */
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
21276
21277 /* Called from md_do_align. Used to create an alignment
21278 frag in a code section. */
21279
21280 void
21281 arm_frag_align_code (int n, int max)
21282 {
21283 char * p;
21284
21285 /* We assume that there will never be a requirement
21286 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21287 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
21288 {
21289 char err_msg[128];
21290
21291 sprintf (err_msg,
21292 _("alignments greater than %d bytes not supported in .text sections."),
21293 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
21294 as_fatal ("%s", err_msg);
21295 }
21296
21297 p = frag_var (rs_align_code,
21298 MAX_MEM_FOR_RS_ALIGN_CODE,
21299 1,
21300 (relax_substateT) max,
21301 (symbolS *) NULL,
21302 (offsetT) n,
21303 (char *) NULL);
21304 *p = 0;
21305 }
21306
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  MODE_RECORDED
     marks the field as having been set.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
21320
21321 #else /* OBJ_ELF is defined. */
21322 void
21323 arm_init_frag (fragS * fragP, int max_chars)
21324 {
21325 int frag_thumb_mode;
21326
21327 /* If the current ARM vs THUMB mode has not already
21328 been recorded into this frag then do so now. */
21329 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
21330 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
21331
21332 frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;
21333
21334 /* Record a mapping symbol for alignment frags. We will delete this
21335 later if the alignment ends up empty. */
21336 switch (fragP->fr_type)
21337 {
21338 case rs_align:
21339 case rs_align_test:
21340 case rs_fill:
21341 mapping_state_2 (MAP_DATA, max_chars);
21342 break;
21343 case rs_align_code:
21344 mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
21345 break;
21346 default:
21347 break;
21348 }
21349 }
21350
21351 /* When we change sections we need to issue a new mapping symbol. */
21352
21353 void
21354 arm_elf_change_section (void)
21355 {
21356 /* Link an unlinked unwind index table section to the .text section. */
21357 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
21358 && elf_linked_to_section (now_seg) == NULL)
21359 elf_linked_to_section (now_seg) = text_section;
21360 }
21361
21362 int
21363 arm_elf_section_type (const char * str, size_t len)
21364 {
21365 if (len == 5 && strncmp (str, "exidx", 5) == 0)
21366 return SHT_ARM_EXIDX;
21367
21368 return -1;
21369 }
21370 \f
21371 /* Code to deal with unwinding tables. */
21372
21373 static void add_unwind_adjustsp (offsetT);
21374
21375 /* Generate any deferred unwind frame offset. */
21376
21377 static void
21378 flush_pending_unwind (void)
21379 {
21380 offsetT offset;
21381
21382 offset = unwind.pending_offset;
21383 unwind.pending_offset = 0;
21384 if (offset != 0)
21385 add_unwind_adjustsp (offset);
21386 }
21387
21388 /* Add an opcode to this list for this function. Two-byte opcodes should
21389 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21390 order. */
21391
21392 static void
21393 add_unwind_opcode (valueT op, int length)
21394 {
21395 /* Add any deferred stack adjustment. */
21396 if (unwind.pending_offset)
21397 flush_pending_unwind ();
21398
21399 unwind.sp_restored = 0;
21400
21401 if (unwind.opcode_count + length > unwind.opcode_alloc)
21402 {
21403 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
21404 if (unwind.opcodes)
21405 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
21406 unwind.opcode_alloc);
21407 else
21408 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
21409 }
21410 while (length > 0)
21411 {
21412 length--;
21413 unwind.opcodes[unwind.opcode_count] = op & 0xff;
21414 op >>= 8;
21415 unwind.opcode_count++;
21416 }
21417 }
21418
21419 /* Add unwind opcodes to adjust the stack pointer. */
21420
static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  /* Positive offsets increase vsp; three encodings of increasing size
     are available.  The biases (4, 0x104, 0x204) below come from the
     ranges already covered by the shorter forms.  */
  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	/* A zero uleb128 still needs one explicit byte; the loop below
	   would emit nothing.  */
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;	/* Continuation bit on all but the last byte.  */
	  n++;
	}
      /* Add the insn.  Bytes go in most-significant-first because the
	 opcode list itself is reversed later.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes: a maximal 0x3f adjustment followed by the
	 remainder.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode: 0x00-0x3f encodes vsp += (op << 2) + 4.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit 0x7f (maximal decrement) opcodes for
	 each whole 0x100, then 0x40-0x7f for the remainder.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
21480
21481 /* Finish the list of unwind opcodes for this function. */
21482 static void
21483 finish_unwind_opcodes (void)
21484 {
21485 valueT op;
21486
21487 if (unwind.fp_used)
21488 {
21489 /* Adjust sp as necessary. */
21490 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
21491 flush_pending_unwind ();
21492
21493 /* After restoring sp from the frame pointer. */
21494 op = 0x90 | unwind.fp_reg;
21495 add_unwind_opcode (op, 1);
21496 }
21497 else
21498 flush_pending_unwind ();
21499 }
21500
21501
21502 /* Start an exception table entry. If idx is nonzero this is an index table
21503 entry. */
21504
21505 static void
21506 start_unwind_section (const segT text_seg, int idx)
21507 {
21508 const char * text_name;
21509 const char * prefix;
21510 const char * prefix_once;
21511 const char * group_name;
21512 size_t prefix_len;
21513 size_t text_len;
21514 char * sec_name;
21515 size_t sec_name_len;
21516 int type;
21517 int flags;
21518 int linkonce;
21519
21520 if (idx)
21521 {
21522 prefix = ELF_STRING_ARM_unwind;
21523 prefix_once = ELF_STRING_ARM_unwind_once;
21524 type = SHT_ARM_EXIDX;
21525 }
21526 else
21527 {
21528 prefix = ELF_STRING_ARM_unwind_info;
21529 prefix_once = ELF_STRING_ARM_unwind_info_once;
21530 type = SHT_PROGBITS;
21531 }
21532
21533 text_name = segment_name (text_seg);
21534 if (streq (text_name, ".text"))
21535 text_name = "";
21536
21537 if (strncmp (text_name, ".gnu.linkonce.t.",
21538 strlen (".gnu.linkonce.t.")) == 0)
21539 {
21540 prefix = prefix_once;
21541 text_name += strlen (".gnu.linkonce.t.");
21542 }
21543
21544 prefix_len = strlen (prefix);
21545 text_len = strlen (text_name);
21546 sec_name_len = prefix_len + text_len;
21547 sec_name = (char *) xmalloc (sec_name_len + 1);
21548 memcpy (sec_name, prefix, prefix_len);
21549 memcpy (sec_name + prefix_len, text_name, text_len);
21550 sec_name[prefix_len + text_len] = '\0';
21551
21552 flags = SHF_ALLOC;
21553 linkonce = 0;
21554 group_name = 0;
21555
21556 /* Handle COMDAT group. */
21557 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
21558 {
21559 group_name = elf_group_name (text_seg);
21560 if (group_name == NULL)
21561 {
21562 as_bad (_("Group section `%s' has no group signature"),
21563 segment_name (text_seg));
21564 ignore_rest_of_line ();
21565 return;
21566 }
21567 flags |= SHF_GROUP;
21568 linkonce = 1;
21569 }
21570
21571 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
21572
21573 /* Set the section link for index tables. */
21574 if (idx)
21575 elf_linked_to_section (now_seg) = text_seg;
21576 }
21577
21578
21579 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
21580 personality routine data. Returns zero, or the index table value for
21581 an inline entry. */
21582
static valueT
create_unwind_entry (int have_data)
{
  /* Number of extra 32-bit words of opcode data (beyond the first).  */
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  /* Switch to the unwind info section (idx == 0).  */
  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  /* Routine 0 can hold at most three opcode bytes inline.  */
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  Build the
		 word byte-by-byte, consuming the (reversed) opcode list
		 from its end so the result is in forward order.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Convert the byte count to a whole number of 32-bit words, rounding
     up.  The on-disk field holding it is a single byte.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Align the table entry to a 4-byte (2^2) boundary.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  /* Current word is full - flush it and start a new one.  */
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
21747
21748
21749 /* Initialize the DWARF-2 unwind information for this procedure. */
21750
void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
21756 #endif /* OBJ_ELF */
21757
21758 /* Convert REGNAME to a DWARF-2 register number. */
21759
21760 int
21761 tc_arm_regname_to_dw2regnum (char *regname)
21762 {
21763 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
21764 if (reg != FAIL)
21765 return reg;
21766
21767 /* PR 16694: Allow VFP registers as well. */
21768 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
21769 if (reg != FAIL)
21770 return 64 + reg;
21771
21772 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
21773 if (reg != FAIL)
21774 return reg + 256;
21775
21776 return -1;
21777 }
21778
21779 #ifdef TE_PE
21780 void
21781 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
21782 {
21783 expressionS exp;
21784
21785 exp.X_op = O_secrel;
21786 exp.X_add_symbol = symbol;
21787 exp.X_add_number = 0;
21788 emit_expr (&exp, size);
21789 }
21790 #endif
21791
21792 /* MD interface: Symbol and relocation handling. */
21793
21794 /* Return the address within the segment that a PC-relative fixup is
21795 relative to. For ARM, PC-relative fixups applied to instructions
21796 are generally relative to the location of the fixup plus 8 bytes.
21797 Thumb branches are offset by 4, and Thumb loads relative to PC
21798 require special handling. */
21799
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Default base: the address of the fixup itself.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      /* Thumb loads/address-adds: PC bias of 4, low two bits forced
	 to zero.  */
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* NOTE (review): in this and the following v5T cases, BASE is
	 restored to the real fixup address when the target is a defined
	 function of the "other" or matching type in the same section -
	 presumably because md_apply_fix will resolve the BL<->BLX
	 conversion locally; confirm against the fixup handling.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
21921
21922 static bfd_boolean flag_warn_syms = TRUE;
21923
21924 bfd_boolean
21925 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
21926 {
21927 /* PR 18347 - Warn if the user attempts to create a symbol with the same
21928 name as an ARM instruction. Whilst strictly speaking it is allowed, it
21929 does mean that the resulting code might be very confusing to the reader.
21930 Also this warning can be triggered if the user omits an operand before
21931 an immediate address, eg:
21932
21933 LDR =foo
21934
21935 GAS treats this as an assignment of the value of the symbol foo to a
21936 symbol LDR, and so (without this code) it will not issue any kind of
21937 warning or error message.
21938
21939 Note - ARM instructions are case-insensitive but the strings in the hash
21940 table are all stored in lower case, so we must first ensure that name is
21941 lower case too. */
21942 if (flag_warn_syms && arm_ops_hsh)
21943 {
21944 char * nbuf = strdup (name);
21945 char * p;
21946
21947 for (p = nbuf; *p; p++)
21948 *p = TOLOWER (*p);
21949 if (hash_find (arm_ops_hsh, nbuf) != NULL)
21950 {
21951 static struct hash_control * already_warned = NULL;
21952
21953 if (already_warned == NULL)
21954 already_warned = hash_new ();
21955 /* Only warn about the symbol once. To keep the code
21956 simple we let hash_insert do the lookup for us. */
21957 if (hash_insert (already_warned, name, NULL) == NULL)
21958 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
21959 }
21960 else
21961 free (nbuf);
21962 }
21963
21964 return FALSE;
21965 }
21966
21967 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
21968 Otherwise we have no need to default values of symbols. */
21969
21970 symbolS *
21971 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
21972 {
21973 #ifdef OBJ_ELF
21974 if (name[0] == '_' && name[1] == 'G'
21975 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
21976 {
21977 if (!GOT_symbol)
21978 {
21979 if (symbol_find (name))
21980 as_bad (_("GOT already in the symbol table"));
21981
21982 GOT_symbol = symbol_new (name, undefined_section,
21983 (valueT) 0, & zero_address_frag);
21984 }
21985
21986 return GOT_symbol;
21987 }
21988 #endif
21989
21990 return NULL;
21991 }
21992
21993 /* Subroutine of md_apply_fix. Check to see if an immediate can be
21994 computed as two separate immediate values, added together. We
21995 already know that this value cannot be computed by just one ARM
21996 instruction. */
21997
21998 static unsigned int
21999 validate_immediate_twopart (unsigned int val,
22000 unsigned int * highpart)
22001 {
22002 unsigned int a;
22003 unsigned int i;
22004
22005 for (i = 0; i < 32; i += 2)
22006 if (((a = rotate_left (val, i)) & 0xff) != 0)
22007 {
22008 if (a & 0xff00)
22009 {
22010 if (a & ~ 0xffff)
22011 continue;
22012 * highpart = (a >> 8) | ((i + 24) << 7);
22013 }
22014 else if (a & 0xff0000)
22015 {
22016 if (a & 0xff000000)
22017 continue;
22018 * highpart = (a >> 16) | ((i + 16) << 7);
22019 }
22020 else
22021 {
22022 gas_assert (a & 0xff000000);
22023 * highpart = (a >> 24) | ((i + 8) << 7);
22024 }
22025
22026 return (a & 0xff) | (i << 7);
22027 }
22028
22029 return FAIL;
22030 }
22031
22032 static int
22033 validate_offset_imm (unsigned int val, int hwse)
22034 {
22035 if ((hwse && val > 255) || val > 4095)
22036 return FAIL;
22037 return val;
22038 }
22039
22040 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22041 negative immediate constant by altering the instruction. A bit of
22042 a hack really.
22043 MOV <-> MVN
22044 AND <-> BIC
22045 ADC <-> SBC
22046 by inverting the second operand, and
22047 ADD <-> SUB
22048 CMP <-> CMN
22049 by negating the second operand. */
22050
static int
negate_data_op (unsigned long * instruction,
		unsigned long value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  /* Pre-compute both candidate encodings; either may be FAIL.  */
  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  /* Extract the data-processing opcode field and pick the partner
     instruction plus the matching transformed immediate.  */
  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:		 /* ADD <-> SUB */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:		 /* CMP <-> CMN */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:		 /* MOV <-> MVN */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:		 /* AND <-> BIC */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:		  /* ADC <-> SBC */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* The transformed immediate may not be encodable either.  */
  if (value == (unsigned) FAIL)
    return FAIL;

  /* Patch the opcode field in place and return the new immediate.  */
  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
22128
22129 /* Like negate_data_op, but for Thumb-2. */
22130
static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  /* Pre-compute both candidate encodings; either may be FAIL.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  /* Rd is needed to spot the TST special case below.  */
  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* Rd == 15 marks the TST form (AND discarding its result), which
	 cannot be converted.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* The transformed immediate may not be encodable either.  */
  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Patch the opcode field in place and return the new immediate.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
22204
22205 /* Read a 32-bit thumb instruction from buf. */
22206 static unsigned long
22207 get_thumb32_insn (char * buf)
22208 {
22209 unsigned long insn;
22210 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
22211 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22212
22213 return insn;
22214 }
22215
22216
22217 /* We usually want to set the low bit on the address of thumb function
22218 symbols. In particular .word foo - . should have the low bit set.
22219 Generic code tries to fold the difference of two symbols to
22220 a constant. Prevent this and force a relocation when the first symbols
22221 is a thumb function. */
22222
22223 bfd_boolean
22224 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
22225 {
22226 if (op == O_subtract
22227 && l->X_op == O_symbol
22228 && r->X_op == O_symbol
22229 && THUMB_IS_FUNC (l->X_add_symbol))
22230 {
22231 l->X_op = O_subtract;
22232 l->X_op_symbol = r->X_add_symbol;
22233 l->X_add_number -= r->X_add_number;
22234 return TRUE;
22235 }
22236
22237 /* Process as normal. */
22238 return FALSE;
22239 }
22240
22241 /* Encode Thumb2 unconditional branches and calls. The encoding
22242 for the 2 are identical for the immediate values. */
22243
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
  /* Mask covering the J1 (bit 13) and J2 (bit 11) bits of the second
     halfword.  */
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split the branch offset into the T2 encoding fields: S is the sign
     bit, I1/I2 feed J1/J2, hi is imm10 and lo is imm11 (bit 0 of the
     offset is discarded - halfword aligned).  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* J1/J2 are stored inverted relative to I1^S / I2^S - the final xor
     with T2I1I2MASK flips just those two bits.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
22265
22266 void
22267 md_apply_fix (fixS * fixP,
22268 valueT * valP,
22269 segT seg)
22270 {
22271 offsetT value = * valP;
22272 offsetT newval;
22273 unsigned int newimm;
22274 unsigned long temp;
22275 int sign;
22276 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
22277
22278 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
22279
22280 /* Note whether this will delete the relocation. */
22281
22282 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
22283 fixP->fx_done = 1;
22284
22285 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22286 consistency with the behaviour on 32-bit hosts. Remember value
22287 for emit_reloc. */
22288 value &= 0xffffffff;
22289 value ^= 0x80000000;
22290 value -= 0x80000000;
22291
22292 *valP = value;
22293 fixP->fx_addnumber = value;
22294
22295 /* Same treatment for fixP->fx_offset. */
22296 fixP->fx_offset &= 0xffffffff;
22297 fixP->fx_offset ^= 0x80000000;
22298 fixP->fx_offset -= 0x80000000;
22299
22300 switch (fixP->fx_r_type)
22301 {
22302 case BFD_RELOC_NONE:
22303 /* This will need to go in the object file. */
22304 fixP->fx_done = 0;
22305 break;
22306
22307 case BFD_RELOC_ARM_IMMEDIATE:
22308 /* We claim that this fixup has been processed here,
22309 even if in fact we generate an error because we do
22310 not have a reloc for it, so tc_gen_reloc will reject it. */
22311 fixP->fx_done = 1;
22312
22313 if (fixP->fx_addsy)
22314 {
22315 const char *msg = 0;
22316
22317 if (! S_IS_DEFINED (fixP->fx_addsy))
22318 msg = _("undefined symbol %s used as an immediate value");
22319 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22320 msg = _("symbol %s is in a different section");
22321 else if (S_IS_WEAK (fixP->fx_addsy))
22322 msg = _("symbol %s is weak and may be overridden later");
22323
22324 if (msg)
22325 {
22326 as_bad_where (fixP->fx_file, fixP->fx_line,
22327 msg, S_GET_NAME (fixP->fx_addsy));
22328 break;
22329 }
22330 }
22331
22332 temp = md_chars_to_number (buf, INSN_SIZE);
22333
22334 /* If the offset is negative, we should use encoding A2 for ADR. */
22335 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
22336 newimm = negate_data_op (&temp, value);
22337 else
22338 {
22339 newimm = encode_arm_immediate (value);
22340
22341 /* If the instruction will fail, see if we can fix things up by
22342 changing the opcode. */
22343 if (newimm == (unsigned int) FAIL)
22344 newimm = negate_data_op (&temp, value);
22345 }
22346
22347 if (newimm == (unsigned int) FAIL)
22348 {
22349 as_bad_where (fixP->fx_file, fixP->fx_line,
22350 _("invalid constant (%lx) after fixup"),
22351 (unsigned long) value);
22352 break;
22353 }
22354
22355 newimm |= (temp & 0xfffff000);
22356 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22357 break;
22358
22359 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22360 {
22361 unsigned int highpart = 0;
22362 unsigned int newinsn = 0xe1a00000; /* nop. */
22363
22364 if (fixP->fx_addsy)
22365 {
22366 const char *msg = 0;
22367
22368 if (! S_IS_DEFINED (fixP->fx_addsy))
22369 msg = _("undefined symbol %s used as an immediate value");
22370 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22371 msg = _("symbol %s is in a different section");
22372 else if (S_IS_WEAK (fixP->fx_addsy))
22373 msg = _("symbol %s is weak and may be overridden later");
22374
22375 if (msg)
22376 {
22377 as_bad_where (fixP->fx_file, fixP->fx_line,
22378 msg, S_GET_NAME (fixP->fx_addsy));
22379 break;
22380 }
22381 }
22382
22383 newimm = encode_arm_immediate (value);
22384 temp = md_chars_to_number (buf, INSN_SIZE);
22385
22386 /* If the instruction will fail, see if we can fix things up by
22387 changing the opcode. */
22388 if (newimm == (unsigned int) FAIL
22389 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
22390 {
22391 /* No ? OK - try using two ADD instructions to generate
22392 the value. */
22393 newimm = validate_immediate_twopart (value, & highpart);
22394
22395 /* Yes - then make sure that the second instruction is
22396 also an add. */
22397 if (newimm != (unsigned int) FAIL)
22398 newinsn = temp;
22399 /* Still No ? Try using a negated value. */
22400 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
22401 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
22402 /* Otherwise - give up. */
22403 else
22404 {
22405 as_bad_where (fixP->fx_file, fixP->fx_line,
22406 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22407 (long) value);
22408 break;
22409 }
22410
22411 /* Replace the first operand in the 2nd instruction (which
22412 is the PC) with the destination register. We have
22413 already added in the PC in the first instruction and we
22414 do not want to do it again. */
22415 newinsn &= ~ 0xf0000;
22416 newinsn |= ((newinsn & 0x0f000) << 4);
22417 }
22418
22419 newimm |= (temp & 0xfffff000);
22420 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22421
22422 highpart |= (newinsn & 0xfffff000);
22423 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
22424 }
22425 break;
22426
    case BFD_RELOC_ARM_OFFSET_IMM:
      /* For RELA targets the addend is carried in the relocation, not
	 the instruction, so encode a zero offset in that case.  */
      if (!fixP->fx_done && seg->use_rela_p)
	value = 0;
      /* Fall through.  */

    case BFD_RELOC_ARM_LITERAL:
      /* 12-bit unsigned offset plus a U (add/subtract) bit; record the
	 sign separately and work with the magnitude.  */
      sign = value > 0;

      if (value < 0)
	value = - value;

      if (validate_offset_imm (value, 0) == FAIL)
	{
	  if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid literal constant: pool needs to be closer"));
	  else
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("bad immediate value for offset (%ld)"),
			  (long) value);
	  break;
	}

      newval = md_chars_to_number (buf, INSN_SIZE);
      if (value == 0)
	/* Zero offset: clear the 12-bit immediate field, leave U alone.  */
	newval &= 0xfffff000;
      else
	{
	  /* Clear the old immediate and the U bit, then insert the
	     magnitude and set U (INDEX_UP) for a positive offset.  */
	  newval &= 0xff7ff000;
	  newval |= value | (sign ? INDEX_UP : 0);
	}
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
22459
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
      /* 8-bit split offset (halfword / signed-byte / doubleword forms):
	 the magnitude is stored as two nibbles, high nibble in bits
	 8..11 and low nibble in bits 0..3, with a separate U bit.  */
      sign = value > 0;

      if (value < 0)
	value = - value;

      if (validate_offset_imm (value, 1) == FAIL)
	{
	  if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid literal constant: pool needs to be closer"));
	  else
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("bad immediate value for 8-bit offset (%ld)"),
			  (long) value);
	  break;
	}

      newval = md_chars_to_number (buf, INSN_SIZE);
      if (value == 0)
	/* Zero offset: clear both immediate nibbles.  */
	newval &= 0xfffff0f0;
      else
	{
	  /* Clear immediate nibbles and U bit, then re-insert.  */
	  newval &= 0xff7ff0f0;
	  newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
	}
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
22489
    case BFD_RELOC_ARM_T32_OFFSET_U8:
      /* Thumb-2 unsigned 8-bit offset, scaled by 4 (e.g. VLDR-style
	 encodings): range 0..1020, word-aligned.  */
      if (value < 0 || value > 1020 || value % 4 != 0)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("bad immediate value for offset (%ld)"), (long) value);
      value /= 4;

      /* The immediate lives in the second (low) halfword.  */
      newval = md_chars_to_number (buf+2, THUMB_SIZE);
      newval |= value;
      md_number_to_chars (buf+2, newval, THUMB_SIZE);
      break;
22500
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
      /* This is a complicated relocation used for all varieties of Thumb32
	 load/store instruction with immediate offset:

	 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
	                                           *4, optional writeback(W)
						   (doubleword load/store)

	 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
	 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
	 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
	 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
	 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit

	 Uppercase letters indicate bits that are already encoded at
	 this point.  Lowercase letters are our problem.  For the
	 second block of instructions, the secondary opcode nybble
	 (bits 8..11) is present, and bit 23 is zero, even if this is
	 a PC-relative operation.  */
      /* Combine both halfwords into one 32-bit value so the bit tests
	 below can distinguish the encoding variants.  */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      newval <<= 16;
      newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);

      if ((newval & 0xf0000000) == 0xe0000000)
	{
	  /* Doubleword load/store: 8-bit offset, scaled by 4.  */
	  if (value >= 0)
	    newval |= (1 << 23);	/* Set the U (add) bit.  */
	  else
	    value = -value;
	  if (value % 4 != 0)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset not a multiple of 4"));
	      break;
	    }
	  value /= 4;
	  if (value > 0xff)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~0xff;
	}
      else if ((newval & 0x000f0000) == 0x000f0000)
	{
	  /* PC-relative, 12-bit offset.  */
	  if (value >= 0)
	    newval |= (1 << 23);	/* U bit.  */
	  else
	    value = -value;
	  if (value > 0xfff)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~0xfff;
	}
      else if ((newval & 0x00000100) == 0x00000100)
	{
	  /* Writeback: 8-bit, +/- offset.  */
	  if (value >= 0)
	    newval |= (1 << 9);		/* U bit for this form.  */
	  else
	    value = -value;
	  if (value > 0xff)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~0xff;
	}
      else if ((newval & 0x00000f00) == 0x00000e00)
	{
	  /* T-instruction: positive 8-bit offset.  */
	  if (value < 0 || value > 0xff)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~0xff;
	  newval |= value;
	}
      else
	{
	  /* Positive 12-bit or negative 8-bit offset.  */
	  int limit;
	  if (value >= 0)
	    {
	      newval |= (1 << 23);
	      limit = 0xfff;
	    }
	  else
	    {
	      value = -value;
	      limit = 0xff;
	    }
	  if (value > limit)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~limit;
	}

      /* Insert the (possibly negated) magnitude and write both
	 halfwords back.  */
      newval |= value;
      md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
      md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
      break;
22615
    case BFD_RELOC_ARM_SHIFT_IMM:
      /* ARM register-shift immediate (bits 7..11).  A shift of 32 is
	 only representable for LSR/ASR (encoded as 0); it is invalid
	 for LSL (0x00) and ROR (0x60) shift types.  */
      newval = md_chars_to_number (buf, INSN_SIZE);
      if (((unsigned long) value) > 32
	  || (value == 32
	      && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("shift expression is too large"));
	  break;
	}

      if (value == 0)
	/* Shifts of zero must be done as lsl.  */
	newval &= ~0x60;
      else if (value == 32)
	value = 0;	/* LSR/ASR #32 is encoded as 0.  */
      newval &= 0xfffff07f;
      newval |= (value & 0x1f) << 7;
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
22636
    case BFD_RELOC_ARM_T32_IMMEDIATE:
    case BFD_RELOC_ARM_T32_ADD_IMM:
    case BFD_RELOC_ARM_T32_IMM12:
    case BFD_RELOC_ARM_T32_ADD_PC12:
      /* We claim that this fixup has been processed here,
	 even if in fact we generate an error because we do
	 not have a reloc for it, so tc_gen_reloc will reject it.  */
      fixP->fx_done = 1;

      if (fixP->fx_addsy
	  && ! S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  break;
	}

      /* Combine the two halfwords for the encoders below.  */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      newval <<= 16;
      newval |= md_chars_to_number (buf+2, THUMB_SIZE);

      newimm = FAIL;
      if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
	  || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
	{
	  /* First try the modified-immediate encoding, then try
	     negating the data-processing op to make the value fit.  */
	  newimm = encode_thumb32_immediate (value);
	  if (newimm == (unsigned int) FAIL)
	    newimm = thumb32_negate_data_op (&newval, value);
	}
      if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
	  && newimm == (unsigned int) FAIL)
	{
	  /* Turn add/sub into addw/subw.  */
	  if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
	    newval = (newval & 0xfeffffff) | 0x02000000;
	  /* No flat 12-bit imm encoding for addsw/subsw.  */
	  if ((newval & 0x00100000) == 0)
	    {
	      /* 12 bit immediate for addw/subw.  */
	      if (value < 0)
		{
		  value = -value;
		  newval ^= 0x00a00000;	/* Flip between addw and subw.  */
		}
	      if (value > 0xfff)
		newimm = (unsigned int) FAIL;
	      else
		newimm = value;
	    }
	}

      if (newimm == (unsigned int)FAIL)
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("invalid constant (%lx) after fixup"),
			(unsigned long) value);
	  break;
	}

      /* Scatter the i:imm3:imm8 fields of the Thumb-2 immediate.  */
      newval |= (newimm & 0x800) << 15;
      newval |= (newimm & 0x700) << 4;
      newval |= (newimm & 0x0ff);

      md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
      md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
      break;
22704
    case BFD_RELOC_ARM_SMC:
      /* SMC: 16-bit immediate split across bits 0..3 and 8..19.  */
      if (((unsigned long) value) > 0xffff)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid smc expression"));
      newval = md_chars_to_number (buf, INSN_SIZE);
      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;

    case BFD_RELOC_ARM_HVC:
      /* HVC: same 16-bit split-immediate layout as SMC.  */
      if (((unsigned long) value) > 0xffff)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid hvc expression"));
      newval = md_chars_to_number (buf, INSN_SIZE);
      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;

    case BFD_RELOC_ARM_SWI:
      /* tc_fix_data non-zero marks the Thumb form (8-bit comment
	 field); the ARM form takes a 24-bit comment field.  */
      if (fixP->tc_fix_data != 0)
	{
	  if (((unsigned long) value) > 0xff)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid swi expression"));
	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval |= value;
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	}
      else
	{
	  if (((unsigned long) value) > 0x00ffffff)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid swi expression"));
	  newval = md_chars_to_number (buf, INSN_SIZE);
	  newval |= value;
	  md_number_to_chars (buf, newval, INSN_SIZE);
	}
      break;

    case BFD_RELOC_ARM_MULTI:
      /* LDM/STM register list: 16-bit mask OR-ed into the low half.  */
      if (((unsigned long) value) > 0xffff)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid expression in load/store multiple"));
      newval = value | md_chars_to_number (buf, INSN_SIZE);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
22751
#ifdef OBJ_ELF
    case BFD_RELOC_ARM_PCREL_CALL:

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
	  && fixP->fx_addsy
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && THUMB_IS_FUNC (fixP->fx_addsy))
	/* Flip the bl to blx. This is a simple flip
	   bit here because we generate PCREL_CALL for
	   unconditional bls.  */
	{
	  newval = md_chars_to_number (buf, INSN_SIZE);
	  newval = newval | 0x10000000;
	  md_number_to_chars (buf, newval, INSN_SIZE);
	  temp = 1;	/* BLX: halfword alignment allowed below.  */
	  fixP->fx_done = 1;
	}
      else
	temp = 3;	/* BL: destination must be word-aligned.  */
      goto arm_branch_common;

    case BFD_RELOC_ARM_PCREL_JUMP:
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
	  && fixP->fx_addsy
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && THUMB_IS_FUNC (fixP->fx_addsy))
	{
	  /* This would map to a bl<cond>, b<cond>,
	     b<always> to a Thumb function. We
	     need to force a relocation for this particular
	     case.  */
	  newval = md_chars_to_number (buf, INSN_SIZE);
	  fixP->fx_done = 0;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_PLT32:
#endif
    case BFD_RELOC_ARM_PCREL_BRANCH:
      temp = 3;		/* B/BL: word-aligned destination.  */
      goto arm_branch_common;

    case BFD_RELOC_ARM_PCREL_BLX:

      temp = 1;
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
	  && fixP->fx_addsy
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && ARM_IS_FUNC (fixP->fx_addsy))
	{
	  /* Flip the blx to a bl and warn.  */
	  const char *name = S_GET_NAME (fixP->fx_addsy);
	  newval = 0xeb000000;
	  as_warn_where (fixP->fx_file, fixP->fx_line,
			 _("blx to '%s' an ARM ISA state function changed to bl"),
			 name);
	  md_number_to_chars (buf, newval, INSN_SIZE);
	  temp = 3;
	  fixP->fx_done = 1;
	}

#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
#endif

    arm_branch_common:
      /* We are going to store value (shifted right by two) in the
	 instruction, in a 24 bit, signed field.  Bits 26 through 32 either
	 all clear or all set and bit 0 must be clear.  For B/BL bit 1 must
	 also be clear.  */
      /* temp is the alignment mask set above: 3 for B/BL, 1 for BLX.  */
      if (value & temp)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("misaligned branch destination"));
      if ((value & (offsetT)0xfe000000) != (offsetT)0
	  && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

      if (fixP->fx_done || !seg->use_rela_p)
	{
	  newval = md_chars_to_number (buf, INSN_SIZE);
	  newval |= (value >> 2) & 0x00ffffff;
	  /* Set the H bit on BLX instructions.  */
	  if (temp == 1)
	    {
	      if (value & 2)
		newval |= 0x01000000;
	      else
		newval &= ~0x01000000;
	    }
	  md_number_to_chars (buf, newval, INSN_SIZE);
	}
      break;
22847
    case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
      /* CBZ can only branch forward.  */

      /* Attempts to use CBZ to branch to the next instruction
	 (which, strictly speaking, are prohibited) will be turned into
	 no-ops.

	 FIXME: It may be better to remove the instruction completely and
	 perform relaxation.  */
      if (value == -2)
	{
	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval = 0xbf00; /* NOP encoding T1 */
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	}
      else
	{
	  /* Offset must be a positive even value fitting in 7 bits.  */
	  if (value & ~0x7e)
	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

	  if (fixP->fx_done || !seg->use_rela_p)
	    {
	      newval = md_chars_to_number (buf, THUMB_SIZE);
	      /* imm5 goes in bits 3..7, the i bit in bit 9.  */
	      newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
	      md_number_to_chars (buf, newval, THUMB_SIZE);
	    }
	}
      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.  */
      /* Signed 9-bit (even) offset; accept only in-range values.  */
      if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

      if (fixP->fx_done || !seg->use_rela_p)
	{
	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval |= (value & 0x1ff) >> 1;
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	}
      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
      /* Signed 12-bit (even) offset.  */
      if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

      if (fixP->fx_done || !seg->use_rela_p)
	{
	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval |= (value & 0xfff) >> 1;
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	}
      break;
22900
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
      /* Thumb-2 conditional branch (B<cond>.W): 20-bit signed offset
	 split into S:J2:J1:imm6:imm11 across two halfwords.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	{
	  /* Force a relocation for a branch 20 bits wide.  */
	  fixP->fx_done = 0;
	}
      if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("conditional branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
	{
	  offsetT newval2;
	  addressT S, J1, J2, lo, hi;

	  /* Slice the offset into the encoding fields.  */
	  S = (value & 0x00100000) >> 20;
	  J2 = (value & 0x00080000) >> 19;
	  J1 = (value & 0x00040000) >> 18;
	  hi = (value & 0x0003f000) >> 12;
	  lo = (value & 0x00000ffe) >> 1;

	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
	  newval |= (S << 10) | hi;
	  newval2 |= (J1 << 13) | (J2 << 11) | lo;
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
	}
      break;
22934
    case BFD_RELOC_THUMB_PCREL_BLX:
      /* If there is a blx from a thumb state function to
	 another thumb function flip this to a bl and warn
	 about it.  */

      if (fixP->fx_addsy
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && THUMB_IS_FUNC (fixP->fx_addsy))
	{
	  const char *name = S_GET_NAME (fixP->fx_addsy);
	  as_warn_where (fixP->fx_file, fixP->fx_line,
			 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
			 name);
	  /* Setting bit 12 in the second halfword turns BLX into BL.  */
	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
	  newval = newval | 0x1000;
	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
	  fixP->fx_done = 1;
	}


      goto thumb_bl_common;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* A bl from Thumb state ISA to an internal ARM state function
	 is converted to a blx.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	{
	  /* Clearing bit 12 turns BL into BLX.  */
	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
	  newval = newval & ~0x1000;
	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
	  fixP->fx_done = 1;
	}

    thumb_bl_common:

      if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
	/* For a BLX instruction, make sure that the relocation is rounded up
	   to a word boundary.  This follows the semantics of the instruction
	   which specifies that bit 1 of the target address will come from bit
	   1 of the base address.  */
	value = (value + 3) & ~ 3;

#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
	  && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
	fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
#endif

      /* Range check: 22-bit offset pre-v6T2, 25-bit with Thumb-2.  */
      if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
	{
	  if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
	  else if ((value & ~0x1ffffff)
		   && ((value & ~0x1ffffff) != ~0x1ffffff))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("Thumb2 branch out of range"));
	}

      if (fixP->fx_done || !seg->use_rela_p)
	encode_thumb2_b_bl_offset (buf, value);

      break;
23004
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      /* Thumb-2 25-bit branch: range-check the signed offset, then let
	 the shared encoder lay out the S/J1/J2/imm fields.  */
      if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

      if (fixP->fx_done || !seg->use_rela_p)
	encode_thumb2_b_bl_offset (buf, value);

      break;

    case BFD_RELOC_8:
      /* Plain one-byte data relocation.  */
      if (fixP->fx_done || !seg->use_rela_p)
	*buf = value;
      break;

    case BFD_RELOC_16:
      /* Plain two-byte data relocation.  */
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;
23023
#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDO32:
      /* TLS relocations only mark the symbol thread-local here; the
	 real work is done by the linker.  */
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      break;

    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
      /* Resolved entirely at link time; nothing to patch.  */
      break;

    case BFD_RELOC_ARM_GOT_PREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_ARM_TARGET2:
      /* TARGET2 is not partial-inplace, so we need to write the
	 addend here for REL targets, because it won't be written out
	 during reloc processing later.  */
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, fixP->fx_offset, 4);
      break;
#endif

    case BFD_RELOC_RVA:
    case BFD_RELOC_32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_32_PCREL:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      if (fixP->fx_done || !seg->use_rela_p)
#ifdef TE_WINCE
	/* For WinCE we only do this for pcrel fixups.  */
	if (fixP->fx_done || fixP->fx_pcrel)
#endif
	  md_number_to_chars (buf, value, 4);
      break;
23072
#ifdef OBJ_ELF
    case BFD_RELOC_ARM_PREL31:
      /* 31-bit PC-relative value; bit 31 of the existing word is
	 preserved.  Overflow occurs when bits 30 and 31 of the offset
	 disagree (i.e. the value does not fit in 31 signed bits).  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  newval = md_chars_to_number (buf, 4) & 0x80000000;
	  if ((value ^ (value >> 1)) & 0x40000000)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("rel31 relocation overflow"));
	    }
	  newval |= value & 0x7fffffff;
	  md_number_to_chars (buf, newval, 4);
	}
      break;
#endif
23088
    case BFD_RELOC_ARM_CP_OFF_IMM:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      /* Coprocessor load/store offset: word-scaled, 8-bit magnitude,
	 so the byte offset must be a multiple of 4 in [-1020, 1020].  */
      if (value < -1023 || value > 1023 || (value & 3))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("co-processor offset out of range"));
    cp_off_common:
      /* Shared tail, also reached from the _S2 cases below with a
	 pre-scaled value.  */
      sign = value > 0;
      if (value < 0)
	value = -value;
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
	newval = md_chars_to_number (buf, INSN_SIZE);
      else
	newval = get_thumb32_insn (buf);
      if (value == 0)
	/* Zero offset: clear the 8-bit immediate, leave U alone.  */
	newval &= 0xffffff00;
      else
	{
	  /* Clear immediate and U bit, insert word-scaled magnitude.  */
	  newval &= 0xff7fff00;
	  newval |= (value >> 2) | (sign ? INDEX_UP : 0);
	}
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
	md_number_to_chars (buf, newval, INSN_SIZE);
      else
	put_thumb32_insn (buf, newval);
      break;

    case BFD_RELOC_ARM_CP_OFF_IMM_S2:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
      /* _S2 variant: the offset is in words already; scale to bytes
	 before sharing the common encoder above.  */
      if (value < -255 || value > 255)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("co-processor offset out of range"));
      value *= 4;
      goto cp_off_common;
23124
23125 case BFD_RELOC_ARM_THUMB_OFFSET:
23126 newval = md_chars_to_number (buf, THUMB_SIZE);
23127 /* Exactly what ranges, and where the offset is inserted depends
23128 on the type of instruction, we can establish this from the
23129 top 4 bits. */
23130 switch (newval >> 12)
23131 {
23132 case 4: /* PC load. */
23133 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23134 forced to zero for these loads; md_pcrel_from has already
23135 compensated for this. */
23136 if (value & 3)
23137 as_bad_where (fixP->fx_file, fixP->fx_line,
23138 _("invalid offset, target not word aligned (0x%08lX)"),
23139 (((unsigned long) fixP->fx_frag->fr_address
23140 + (unsigned long) fixP->fx_where) & ~3)
23141 + (unsigned long) value);
23142
23143 if (value & ~0x3fc)
23144 as_bad_where (fixP->fx_file, fixP->fx_line,
23145 _("invalid offset, value too big (0x%08lX)"),
23146 (long) value);
23147
23148 newval |= value >> 2;
23149 break;
23150
23151 case 9: /* SP load/store. */
23152 if (value & ~0x3fc)
23153 as_bad_where (fixP->fx_file, fixP->fx_line,
23154 _("invalid offset, value too big (0x%08lX)"),
23155 (long) value);
23156 newval |= value >> 2;
23157 break;
23158
23159 case 6: /* Word load/store. */
23160 if (value & ~0x7c)
23161 as_bad_where (fixP->fx_file, fixP->fx_line,
23162 _("invalid offset, value too big (0x%08lX)"),
23163 (long) value);
23164 newval |= value << 4; /* 6 - 2. */
23165 break;
23166
23167 case 7: /* Byte load/store. */
23168 if (value & ~0x1f)
23169 as_bad_where (fixP->fx_file, fixP->fx_line,
23170 _("invalid offset, value too big (0x%08lX)"),
23171 (long) value);
23172 newval |= value << 6;
23173 break;
23174
23175 case 8: /* Halfword load/store. */
23176 if (value & ~0x3e)
23177 as_bad_where (fixP->fx_file, fixP->fx_line,
23178 _("invalid offset, value too big (0x%08lX)"),
23179 (long) value);
23180 newval |= value << 5; /* 6 - 1. */
23181 break;
23182
23183 default:
23184 as_bad_where (fixP->fx_file, fixP->fx_line,
23185 "Unable to process relocation for thumb opcode: %lx",
23186 (unsigned long) newval);
23187 break;
23188 }
23189 md_number_to_chars (buf, newval, THUMB_SIZE);
23190 break;
23191
    case BFD_RELOC_ARM_THUMB_ADD:
      /* This is a complicated relocation, since we use it for all of
	 the following immediate relocations:

	    3bit ADD/SUB
	    8bit ADD/SUB
	    9bit ADD/SUB SP word-aligned
	   10bit ADD PC/SP word-aligned

	 The type of instruction being processed is encoded in the
	 instruction field:

	   0x8000  SUB
	   0x00F0  Rd
	   0x000F  Rs
      */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      {
	int rd = (newval >> 4) & 0xf;
	int rs = newval & 0xf;
	int subtract = !!(newval & 0x8000);

	/* Check for HI regs, only very restricted cases allowed:
	   Adjusting SP, and using PC or SP to get an address.  */
	if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
	    || (rs > 7 && rs != REG_SP && rs != REG_PC))
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("invalid Hi register with immediate"));

	/* If value is negative, choose the opposite instruction.  */
	if (value < 0)
	  {
	    value = -value;
	    subtract = !subtract;
	    /* Negating the most negative value overflows; catch it.  */
	    if (value < 0)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("immediate value out of range"));
	  }

	if (rd == REG_SP)
	  {
	    /* 9-bit word-aligned SP adjustment.  */
	    if (value & ~0x1fc)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("invalid immediate for stack address calculation"));
	    newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
	    newval |= value >> 2;
	  }
	else if (rs == REG_PC || rs == REG_SP)
	  {
	    /* PR gas/18541.  If the addition is for a defined symbol
	       within range of an ADR instruction then accept it.  */
	    if (subtract
		&& value == 4
		&& fixP->fx_addsy != NULL)
	      {
		subtract = 0;

		if (! S_IS_DEFINED (fixP->fx_addsy)
		    || S_GET_SEGMENT (fixP->fx_addsy) != seg
		    || S_IS_WEAK (fixP->fx_addsy))
		  {
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("address calculation needs a strongly defined nearby symbol"));
		  }
		else
		  {
		    offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;

		    /* Round up to the next 4-byte boundary.  */
		    if (v & 3)
		      v = (v + 3) & ~ 3;
		    else
		      v += 4;
		    v = S_GET_VALUE (fixP->fx_addsy) - v;

		    if (v & ~0x3fc)
		      {
			as_bad_where (fixP->fx_file, fixP->fx_line,
				      _("symbol too far away"));
		      }
		    else
		      {
			fixP->fx_done = 1;
			value = v;
		      }
		  }
	      }

	    /* 10-bit word-aligned ADD from PC or SP; SUB is invalid.  */
	    if (subtract || value & ~0x3fc)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("invalid immediate for address calculation (value = 0x%08lX)"),
			    (unsigned long) (subtract ? - value : value));
	    newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
	    newval |= rd << 8;
	    newval |= value >> 2;
	  }
	else if (rs == rd)
	  {
	    /* 8-bit immediate form (Rd unchanged).  */
	    if (value & ~0xff)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("immediate value out of range"));
	    newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
	    newval |= (rd << 8) | value;
	  }
	else
	  {
	    /* 3-bit immediate form.  */
	    if (value & ~0x7)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("immediate value out of range"));
	    newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
	    newval |= rd | (rs << 3) | (value << 6);
	  }
      }
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;
23307
    case BFD_RELOC_ARM_THUMB_IMM:
      /* Thumb-1 8-bit immediate (MOV/CMP etc.): 0..255 in bits 0..7.  */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      if (value < 0 || value > 255)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate: %ld is out of range"),
		      (long) value);
      newval |= value;
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;

    case BFD_RELOC_ARM_THUMB_SHIFT:
      /* 5bit shift value (0..32).  LSL cannot take 32.  */
      newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
      temp = newval & 0xf800;	/* Opcode bits identify the shift kind.  */
      if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid shift value: %ld"), (long) value);
      /* Shifts of zero must be encoded as LSL.  */
      if (value == 0)
	newval = (newval & 0x003f) | T_OPCODE_LSL_I;
      /* Shifts of 32 are encoded as zero.  */
      else if (value == 32)
	value = 0;
      newval |= value << 6;
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;
23334
    case BFD_RELOC_VTABLE_INHERIT:
    case BFD_RELOC_VTABLE_ENTRY:
      /* GC-related vtable relocations are never resolved by the
	 assembler; keep the fixup pending and return immediately.  */
      fixP->fx_done = 0;
      return;
23339
    case BFD_RELOC_ARM_MOVW:
    case BFD_RELOC_ARM_MOVT:
    case BFD_RELOC_ARM_THUMB_MOVW:
    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* REL format relocations are limited to a 16-bit addend.  */
	  if (!fixP->fx_done)
	    {
	      if (value < -0x8000 || value > 0x7fff)
		  as_bad_where (fixP->fx_file, fixP->fx_line,
				_("offset out of range"));
	    }
	  else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
		   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
	    {
	      /* MOVT encodes the high half of the value.  */
	      value >>= 16;
	    }

	  if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
	      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
	    {
	      /* Thumb-2 encoding: imm4:i:imm3:imm8 scattered fields.  */
	      newval = get_thumb32_insn (buf);
	      newval &= 0xfbf08f00;
	      newval |= (value & 0xf000) << 4;
	      newval |= (value & 0x0800) << 15;
	      newval |= (value & 0x0700) << 4;
	      newval |= (value & 0x00ff);
	      put_thumb32_insn (buf, newval);
	    }
	  else
	    {
	      /* ARM encoding: imm4 in bits 16..19, imm12 in bits 0..11.  */
	      newval = md_chars_to_number (buf, 4);
	      newval &= 0xfff0f000;
	      newval |= value & 0x0fff;
	      newval |= (value & 0xf000) << 4;
	      md_number_to_chars (buf, newval, 4);
	    }
	}
      return;
23380
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
      /* Group relocations selecting one byte of an absolute address
	 for a Thumb-1 MOV or ADD immediate; always emitted as real
	 relocations, never fully resolved here.  */
      gas_assert (!fixP->fx_done);
      {
	bfd_vma insn;
	bfd_boolean is_mov;
	bfd_vma encoded_addend = value;

	/* Check that addend can be encoded in instruction.  */
	if (!seg->use_rela_p && (value < 0 || value > 255))
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("the offset 0x%08lX is not representable"),
			(unsigned long) encoded_addend);

	/* Extract the instruction.  */
	insn = md_chars_to_number (buf, THUMB_SIZE);
	is_mov = (insn & 0xf800) == 0x2000;

	/* Encode insn.  */
	if (is_mov)
	  {
	    /* MOV immediate: addend goes straight into bits 0..7.  */
	    if (!seg->use_rela_p)
	      insn |= encoded_addend;
	  }
	else
	  {
	    int rd, rs;

	    /* Extract the instruction.  */
	    /* Encoding is the following
	       0x8000  SUB
	       0x00F0  Rd
	       0x000F  Rs
	    */
	    /* The following conditions must be true :
	       - ADD
	       - Rd == Rs
	       - Rd <= 7
	    */
	    rd = (insn >> 4) & 0xf;
	    rs = insn & 0xf;
	    if ((insn & 0x8000) || (rd != rs) || rd > 7)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("Unable to process relocation for thumb opcode: %lx"),
			    (unsigned long) insn);

	    /* Encode as ADD immediate8 thumb 1 code.  */
	    insn = 0x3000 | (rd << 8);

	    /* Place the encoded addend into the first 8 bits of the
	       instruction.  */
	    if (!seg->use_rela_p)
	      insn |= encoded_addend;
	  }

	/* Update the instruction.  */
	md_number_to_chars (buf, insn, THUMB_SIZE);
      }
      break;
23442
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
      /* Group relocations on ADD/SUB: rewrite the data-processing
	 opcode to match the addend's sign and encode its magnitude as
	 an ARM modified immediate.  Always emitted as relocations.  */
      gas_assert (!fixP->fx_done);
      if (!seg->use_rela_p)
	{
	  bfd_vma insn;
	  bfd_vma encoded_addend;
	  /* NOTE(review): abs() takes int; if offsetT is wider than int
	     an out-of-range addend could be silently truncated before
	     the representability check -- confirm value range here.  */
	  bfd_vma addend_abs = abs (value);

	  /* Check that the absolute value of the addend can be
	     expressed as an 8-bit constant plus a rotation.  */
	  encoded_addend = encode_arm_immediate (addend_abs);
	  if (encoded_addend == (unsigned int) FAIL)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("the offset 0x%08lX is not representable"),
			  (unsigned long) addend_abs);

	  /* Extract the instruction.  */
	  insn = md_chars_to_number (buf, INSN_SIZE);

	  /* If the addend is positive, use an ADD instruction.
	     Otherwise use a SUB.  Take care not to destroy the S bit.  */
	  insn &= 0xff1fffff;
	  if (value < 0)
	    insn |= 1 << 22;
	  else
	    insn |= 1 << 23;

	  /* Place the encoded addend into the first 12 bits of the
	     instruction.  */
	  insn &= 0xfffff000;
	  insn |= encoded_addend;

	  /* Update the instruction.  */
	  md_number_to_chars (buf, insn, INSN_SIZE);
	}
      break;
23488
23489 case BFD_RELOC_ARM_LDR_PC_G0:
23490 case BFD_RELOC_ARM_LDR_PC_G1:
23491 case BFD_RELOC_ARM_LDR_PC_G2:
23492 case BFD_RELOC_ARM_LDR_SB_G0:
23493 case BFD_RELOC_ARM_LDR_SB_G1:
23494 case BFD_RELOC_ARM_LDR_SB_G2:
23495 gas_assert (!fixP->fx_done);
23496 if (!seg->use_rela_p)
23497 {
23498 bfd_vma insn;
23499 bfd_vma addend_abs = abs (value);
23500
23501 /* Check that the absolute value of the addend can be
23502 encoded in 12 bits. */
23503 if (addend_abs >= 0x1000)
23504 as_bad_where (fixP->fx_file, fixP->fx_line,
23505 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23506 (unsigned long) addend_abs);
23507
23508 /* Extract the instruction. */
23509 insn = md_chars_to_number (buf, INSN_SIZE);
23510
23511 /* If the addend is negative, clear bit 23 of the instruction.
23512 Otherwise set it. */
23513 if (value < 0)
23514 insn &= ~(1 << 23);
23515 else
23516 insn |= 1 << 23;
23517
23518 /* Place the absolute value of the addend into the first 12 bits
23519 of the instruction. */
23520 insn &= 0xfffff000;
23521 insn |= addend_abs;
23522
23523 /* Update the instruction. */
23524 md_number_to_chars (buf, insn, INSN_SIZE);
23525 }
23526 break;
23527
23528 case BFD_RELOC_ARM_LDRS_PC_G0:
23529 case BFD_RELOC_ARM_LDRS_PC_G1:
23530 case BFD_RELOC_ARM_LDRS_PC_G2:
23531 case BFD_RELOC_ARM_LDRS_SB_G0:
23532 case BFD_RELOC_ARM_LDRS_SB_G1:
23533 case BFD_RELOC_ARM_LDRS_SB_G2:
23534 gas_assert (!fixP->fx_done);
23535 if (!seg->use_rela_p)
23536 {
23537 bfd_vma insn;
23538 bfd_vma addend_abs = abs (value);
23539
23540 /* Check that the absolute value of the addend can be
23541 encoded in 8 bits. */
23542 if (addend_abs >= 0x100)
23543 as_bad_where (fixP->fx_file, fixP->fx_line,
23544 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23545 (unsigned long) addend_abs);
23546
23547 /* Extract the instruction. */
23548 insn = md_chars_to_number (buf, INSN_SIZE);
23549
23550 /* If the addend is negative, clear bit 23 of the instruction.
23551 Otherwise set it. */
23552 if (value < 0)
23553 insn &= ~(1 << 23);
23554 else
23555 insn |= 1 << 23;
23556
23557 /* Place the first four bits of the absolute value of the addend
23558 into the first 4 bits of the instruction, and the remaining
23559 four into bits 8 .. 11. */
23560 insn &= 0xfffff0f0;
23561 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
23562
23563 /* Update the instruction. */
23564 md_number_to_chars (buf, insn, INSN_SIZE);
23565 }
23566 break;
23567
23568 case BFD_RELOC_ARM_LDC_PC_G0:
23569 case BFD_RELOC_ARM_LDC_PC_G1:
23570 case BFD_RELOC_ARM_LDC_PC_G2:
23571 case BFD_RELOC_ARM_LDC_SB_G0:
23572 case BFD_RELOC_ARM_LDC_SB_G1:
23573 case BFD_RELOC_ARM_LDC_SB_G2:
23574 gas_assert (!fixP->fx_done);
23575 if (!seg->use_rela_p)
23576 {
23577 bfd_vma insn;
23578 bfd_vma addend_abs = abs (value);
23579
23580 /* Check that the absolute value of the addend is a multiple of
23581 four and, when divided by four, fits in 8 bits. */
23582 if (addend_abs & 0x3)
23583 as_bad_where (fixP->fx_file, fixP->fx_line,
23584 _("bad offset 0x%08lX (must be word-aligned)"),
23585 (unsigned long) addend_abs);
23586
23587 if ((addend_abs >> 2) > 0xff)
23588 as_bad_where (fixP->fx_file, fixP->fx_line,
23589 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23590 (unsigned long) addend_abs);
23591
23592 /* Extract the instruction. */
23593 insn = md_chars_to_number (buf, INSN_SIZE);
23594
23595 /* If the addend is negative, clear bit 23 of the instruction.
23596 Otherwise set it. */
23597 if (value < 0)
23598 insn &= ~(1 << 23);
23599 else
23600 insn |= 1 << 23;
23601
23602 /* Place the addend (divided by four) into the first eight
23603 bits of the instruction. */
23604 insn &= 0xfffffff0;
23605 insn |= addend_abs >> 2;
23606
23607 /* Update the instruction. */
23608 md_number_to_chars (buf, insn, INSN_SIZE);
23609 }
23610 break;
23611
23612 case BFD_RELOC_ARM_V4BX:
23613 /* This will need to go in the object file. */
23614 fixP->fx_done = 0;
23615 break;
23616
23617 case BFD_RELOC_UNUSED:
23618 default:
23619 as_bad_where (fixP->fx_file, fixP->fx_line,
23620 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
23621 }
23622 }
23623
23624 /* Translate internal representation of relocation info to BFD target
23625 format. */
23626
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = (arelent *) xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type onto the BFD relocation code.  Note
     that the first few cases deliberately fall through: a non-pcrel
     BFD_RELOC_8/16/32/MOVW/MOVT cascades down to the big pass-through
     list at the bottom, which keeps CODE equal to the fixup type.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      /* These pass straight through to the object file.  */
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* For EABI v4 and later a Thumb BLX is emitted as a plain
	 branch relocation; the linker is responsible for any needed
	 mode change.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Remaining internal fixup types should have been resolved by
	   md_apply_fix; reaching here means one escaped, so report it
	   by name.  */
	char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit (pc-relative or not) reference to _GLOBAL_OFFSET_TABLE_
     becomes a GOTPC relocation with the place itself as the addend.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
23886
23887 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
23888
23889 void
23890 cons_fix_new_arm (fragS * frag,
23891 int where,
23892 int size,
23893 expressionS * exp,
23894 bfd_reloc_code_real_type reloc)
23895 {
23896 int pcrel = 0;
23897
23898 /* Pick a reloc.
23899 FIXME: @@ Should look at CPU word size. */
23900 switch (size)
23901 {
23902 case 1:
23903 reloc = BFD_RELOC_8;
23904 break;
23905 case 2:
23906 reloc = BFD_RELOC_16;
23907 break;
23908 case 4:
23909 default:
23910 reloc = BFD_RELOC_32;
23911 break;
23912 case 8:
23913 reloc = BFD_RELOC_64;
23914 break;
23915 }
23916
23917 #ifdef TE_PE
23918 if (exp->X_op == O_secrel)
23919 {
23920 exp->X_op = O_symbol;
23921 reloc = BFD_RELOC_32_SECREL;
23922 }
23923 #endif
23924
23925 fix_new_exp (frag, where, size, exp, pcrel, reloc);
23926 }
23927
23928 #if defined (OBJ_COFF)
23929 void
23930 arm_validate_fix (fixS * fixP)
23931 {
23932 /* If the destination of the branch is a defined symbol which does not have
23933 the THUMB_FUNC attribute, then we must be calling a function which has
23934 the (interfacearm) attribute. We look for the Thumb entry point to that
23935 function and change the branch to refer to that function instead. */
23936 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
23937 && fixP->fx_addsy != NULL
23938 && S_IS_DEFINED (fixP->fx_addsy)
23939 && ! THUMB_IS_FUNC (fixP->fx_addsy))
23940 {
23941 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
23942 }
23943 }
23944 #endif
23945
23946
23947 int
23948 arm_force_relocation (struct fix * fixp)
23949 {
23950 #if defined (OBJ_COFF) && defined (TE_PE)
23951 if (fixp->fx_r_type == BFD_RELOC_RVA)
23952 return 1;
23953 #endif
23954
23955 /* In case we have a call or a branch to a function in ARM ISA mode from
23956 a thumb function or vice-versa force the relocation. These relocations
23957 are cleared off for some cores that might have blx and simple transformations
23958 are possible. */
23959
23960 #ifdef OBJ_ELF
23961 switch (fixp->fx_r_type)
23962 {
23963 case BFD_RELOC_ARM_PCREL_JUMP:
23964 case BFD_RELOC_ARM_PCREL_CALL:
23965 case BFD_RELOC_THUMB_PCREL_BLX:
23966 if (THUMB_IS_FUNC (fixp->fx_addsy))
23967 return 1;
23968 break;
23969
23970 case BFD_RELOC_ARM_PCREL_BLX:
23971 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23972 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23973 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23974 if (ARM_IS_FUNC (fixp->fx_addsy))
23975 return 1;
23976 break;
23977
23978 default:
23979 break;
23980 }
23981 #endif
23982
23983 /* Resolve these relocations even if the symbol is extern or weak.
23984 Technically this is probably wrong due to symbol preemption.
23985 In practice these relocations do not have enough range to be useful
23986 at dynamic link time, and some code (e.g. in the Linux kernel)
23987 expects these references to be resolved. */
23988 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
23989 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
23990 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
23991 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
23992 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23993 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
23994 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
23995 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
23996 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23997 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
23998 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
23999 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
24000 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
24001 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
24002 return 0;
24003
24004 /* Always leave these relocations for the linker. */
24005 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24006 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24007 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24008 return 1;
24009
24010 /* Always generate relocations against function symbols. */
24011 if (fixp->fx_r_type == BFD_RELOC_32
24012 && fixp->fx_addsy
24013 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
24014 return 1;
24015
24016 return generic_force_reloc (fixp);
24017 }
24018
24019 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24020 /* Relocations against function names must be left unadjusted,
24021 so that the linker can use this information to generate interworking
24022 stubs. The MIPS version of this function
24023 also prevents relocations that are mips-16 specific, but I do not
24024 know why it does this.
24025
24026 FIXME:
24027 There is one other problem that ought to be addressed here, but
24028 which currently is not: Taking the address of a label (rather
24029 than a function) and then later jumping to that address. Such
24030 addresses also ought to have their bottom bit set (assuming that
24031 they reside in Thumb code), but at the moment they will not. */
24032
24033 bfd_boolean
24034 arm_fix_adjustable (fixS * fixP)
24035 {
24036 if (fixP->fx_addsy == NULL)
24037 return 1;
24038
24039 /* Preserve relocations against symbols with function type. */
24040 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
24041 return FALSE;
24042
24043 if (THUMB_IS_FUNC (fixP->fx_addsy)
24044 && fixP->fx_subsy == NULL)
24045 return FALSE;
24046
24047 /* We need the symbol name for the VTABLE entries. */
24048 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
24049 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
24050 return FALSE;
24051
24052 /* Don't allow symbols to be discarded on GOT related relocs. */
24053 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
24054 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
24055 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
24056 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
24057 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
24058 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
24059 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
24060 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
24061 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
24062 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
24063 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
24064 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
24065 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
24066 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
24067 return FALSE;
24068
24069 /* Similarly for group relocations. */
24070 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24071 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24072 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24073 return FALSE;
24074
24075 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
24076 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
24077 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24078 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
24079 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
24080 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24081 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
24082 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
24083 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
24084 return FALSE;
24085
24086 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24087 offsets, so keep these symbols. */
24088 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24089 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
24090 return FALSE;
24091
24092 return TRUE;
24093 }
24094 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24095
24096 #ifdef OBJ_ELF
24097 const char *
24098 elf32_arm_target_format (void)
24099 {
24100 #ifdef TE_SYMBIAN
24101 return (target_big_endian
24102 ? "elf32-bigarm-symbian"
24103 : "elf32-littlearm-symbian");
24104 #elif defined (TE_VXWORKS)
24105 return (target_big_endian
24106 ? "elf32-bigarm-vxworks"
24107 : "elf32-littlearm-vxworks");
24108 #elif defined (TE_NACL)
24109 return (target_big_endian
24110 ? "elf32-bigarm-nacl"
24111 : "elf32-littlearm-nacl");
24112 #else
24113 if (target_big_endian)
24114 return "elf32-bigarm";
24115 else
24116 return "elf32-littlearm";
24117 #endif
24118 }
24119
/* Per-symbol hook run while emitting the symbol table: simply
   delegate to the generic ELF symbol frobber.  */

void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
24126 #endif
24127
24128 /* MD interface: Finalization. */
24129
24130 void
24131 arm_cleanup (void)
24132 {
24133 literal_pool * pool;
24134
24135 /* Ensure that all the IT blocks are properly closed. */
24136 check_it_blocks_finished ();
24137
24138 for (pool = list_of_pools; pool; pool = pool->next)
24139 {
24140 /* Put it at the end of the relevant section. */
24141 subseg_set (pool->section, pool->sub_section);
24142 #ifdef OBJ_ELF
24143 arm_elf_change_section ();
24144 #endif
24145 s_ltorg (0);
24146 }
24147 }
24148
24149 #ifdef OBJ_ELF
24150 /* Remove any excess mapping symbols generated for alignment frags in
24151 SEC. We may have created a mapping symbol before a zero byte
24152 alignment; remove it if there's a mapping symbol after the
24153 alignment. */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* Only the last mapping symbol of a frag can be made redundant
	 by a symbol in a following frag.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward through (possibly empty) frags to decide whether
	 SYM is superseded by a later mapping symbol.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
24214 #endif
24215
24216 /* Adjust the symbol table. This marks Thumb symbols as distinct from
24217 ARM ones. */
24218
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* For COFF, Thumb-ness is encoded in the symbol's storage class.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (	 S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a, $t, $d, ...) are left alone.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_target_internal
		  = ST_BRANCH_TO_THUMB;
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
24297
24298 /* MD interface: Initialization. */
24299
24300 static void
24301 set_constant_flonums (void)
24302 {
24303 int i;
24304
24305 for (i = 0; i < NUM_FLOAT_VALS; i++)
24306 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
24307 abort ();
24308 }
24309
24310 /* Auto-select Thumb mode if it's the only available instruction set for the
24311 given architecture. */
24312
24313 static void
24314 autoselect_thumb_from_cpu_variant (void)
24315 {
24316 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
24317 opcode_select (16);
24318 }
24319
/* Main assembler initialization hook: build the opcode/operand hash
   tables, resolve the CPU/FPU selection from the command line, and
   record the resulting flags and machine type in the output BFD.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the lookup tables used while parsing instructions.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate each table from its static description array.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  /* Last resort defaults if no FPU was determined above.  */
  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
  else if (no_cpu_selected ())
    selected_cpu = cpu_default;
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    /* NOTE: this switch is intentionally split across the #if blocks
       so the EF_ARM_EABI_UNKNOWN case shares the legacy-flag code with
       the COFF path.  */
    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.  */
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Most specific feature first.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
24546
24547 /* Command line processing. */
24548
24549 /* md_parse_option
24550 Invocation line includes a switch not recognized by the base assembler.
24551 See if it's a processor-specific option.
24552
24553 This routine is somewhat complicated by the need for backwards
24554 compatibility (since older releases of gcc can't be changed).
24555 The new options try to make the interface as compatible as
24556 possible with GCC.
24557
24558 New options (supported) are:
24559
24560 -mcpu=<cpu name> Assemble for selected processor
24561 -march=<architecture name> Assemble for selected architecture
24562 -mfpu=<fpu architecture> Assemble for selected FPU.
24563 -EB/-mbig-endian Big-endian
24564 -EL/-mlittle-endian Little-endian
24565 -k Generate PIC code
24566 -mthumb Start in Thumb mode
24567 -mthumb-interwork Code supports ARM/Thumb interworking
24568
24569 -m[no-]warn-deprecated Warn about deprecated features
24570 -m[no-]warn-syms Warn when symbols match instructions
24571
24572 For now we will also provide support for:
24573
24574 -mapcs-32 32-bit Program counter
24575 -mapcs-26 26-bit Program counter
-mapcs-float		Floats passed in FP registers
24577 -mapcs-reentrant Reentrant code
24578 -matpcs
(at some point these will probably be replaced with -mapcs=<list of options>
and -matpcs=<list of options>)
24581
The remaining options are only supported for backwards compatibility.
24583 Cpu variants, the arm part is optional:
24584 -m[arm]1 Currently not supported.
24585 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
24586 -m[arm]3 Arm 3 processor
24587 -m[arm]6[xx], Arm 6 processors
24588 -m[arm]7[xx][t][[d]m] Arm 7 processors
24589 -m[arm]8[10] Arm 8 processors
24590 -m[arm]9[20][tdmi] Arm 9 processors
24591 -mstrongarm[110[0]] StrongARM processors
24592 -mxscale XScale processors
24593 -m[arm]v[2345[t[e]]] Arm architectures
24594 -mall All (except the ARM1)
24595 FP variants:
24596 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
24597 -mfpe-old (No float load/store multiples)
24598 -mvfpxd VFP Single precision
24599 -mvfp All VFP
24600 -mno-fpu Disable all floating point instructions
24601
24602 The following CPU names are recognized:
24603 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24604 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
24606 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24607 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
arm10t, arm10e, arm1020t, arm1020e, arm10200e,
24609 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
24610
24611 */
24612
/* Short options understood by the ARM backend: -m<arg> (processor/ABI
   selection, argument required) and -k (generate PIC code).  */
const char * md_shortopts = "m:k";

/* Numeric codes for the long options below.  -EB/-EL are only defined
   when the configuration can actually produce that byte order: both for
   a bi-endian target, otherwise only the native one.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)

/* Long options passed to the base assembler's getopt-style parser.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
24641
/* Describes one simple on/off command-line option: when OPTION is seen,
   *VAR is set to VALUE.  Used by the arm_opts[] table below.  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int *var;		/* Variable to change.  */
  int value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};
24650
24651 struct arm_option_table arm_opts[] =
24652 {
24653 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
24654 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
24655 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
24656 &support_interwork, 1, NULL},
24657 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
24658 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
24659 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
24660 1, NULL},
24661 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
24662 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
24663 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
24664 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
24665 NULL},
24666
24667 /* These are recognized by the assembler, but have no affect on code. */
24668 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
24669 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
24670
24671 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
24672 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
24673 &warn_on_deprecated, 0, NULL},
24674 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
24675 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
24676 {NULL, NULL, NULL, 0, NULL}
24677 };
24678
/* Describes one deprecated -m<cpu/arch/fpu> option: matching OPTION
   stores &VALUE into *VAR and, if DEPRECATED is set, prints that
   message directing the user at the modern replacement.  */
struct arm_legacy_option_table
{
  char *option;			/* Option name to match.  */
  const arm_feature_set **var;	/* Variable to change.  */
  const arm_feature_set value;	/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};
24686
/* Legacy command-line options, kept only for backwards compatibility.
   Each entry selects a CPU/architecture/FPU feature set and carries a
   deprecation message pointing at the equivalent -mcpu=/-march=/-mfpu=
   spelling.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
24799
/* Describes one -mcpu= candidate: the feature set it enables, the FPU
   assumed when the user gives no -mfpu=, and an optional display name.  */
struct arm_cpu_option_table
{
  char *name;			/* CPU name accepted on the command line.  */
  size_t name_len;		/* Length of NAME (precomputed for matching).  */
  const arm_feature_set value;	/* Features enabled by this CPU.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
24812
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Terminated by an all-NULL/ARM_ARCH_NONE entry.  */
#define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm250",	ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm600",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm610",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm620",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dm",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7di",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dmi",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700i",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm740t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710c",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7100",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500fe",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm810",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm920",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"),
  ARM_CPU_OPT ("arm920t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm922t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm940t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL),
  ARM_CPU_OPT ("fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
  ARM_CPU_OPT ("fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm926ej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"),
  ARM_CPU_OPT ("arm946e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"),
  ARM_CPU_OPT ("arm966e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm968e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm10t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10tdmi",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1020",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"),
  ARM_CPU_OPT ("arm1020t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm1020e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1022e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2,
	       "ARM1026EJ-S"),
  ARM_CPU_OPT ("arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa606te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa616te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa626te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fmp626",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa726te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1136js",	ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"),
  ARM_CPU_OPT ("arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2,
	       "ARM1136JF-S"),
  ARM_CPU_OPT ("arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("mpcore",	ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, "MPCore"),
  ARM_CPU_OPT ("mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  "MPCore"),
  ARM_CPU_OPT ("arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1176jz-s",	ARM_ARCH_V6KZ,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1176jzf-s",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("cortex-a5",	ARM_ARCH_V7A_MP_SEC,
	       FPU_NONE,	"Cortex-A5"),
  ARM_CPU_OPT ("cortex-a7",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
	       "Cortex-A7"),
  ARM_CPU_OPT ("cortex-a8",	ARM_ARCH_V7A_SEC,
	       ARM_FEATURE_COPROC (FPU_VFP_V3
				   | FPU_NEON_EXT_V1),
	       "Cortex-A8"),
  ARM_CPU_OPT ("cortex-a9",	ARM_ARCH_V7A_MP_SEC,
	       ARM_FEATURE_COPROC (FPU_VFP_V3
				   | FPU_NEON_EXT_V1),
	       "Cortex-A9"),
  ARM_CPU_OPT ("cortex-a12",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
	       "Cortex-A12"),
  ARM_CPU_OPT ("cortex-a15",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
	       "Cortex-A15"),
  ARM_CPU_OPT ("cortex-a17",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
	       "Cortex-A17"),
  ARM_CPU_OPT ("cortex-a35",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Cortex-A35"),
  ARM_CPU_OPT ("cortex-a53",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Cortex-A53"),
  ARM_CPU_OPT ("cortex-a57",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Cortex-A57"),
  ARM_CPU_OPT ("cortex-a72",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Cortex-A72"),
  ARM_CPU_OPT ("cortex-r4",	ARM_ARCH_V7R,	 FPU_NONE,	  "Cortex-R4"),
  ARM_CPU_OPT ("cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,
	       "Cortex-R4F"),
  ARM_CPU_OPT ("cortex-r5",	ARM_ARCH_V7R_IDIV,
	       FPU_NONE,	"Cortex-R5"),
  ARM_CPU_OPT ("cortex-r7",	ARM_ARCH_V7R_IDIV,
	       FPU_ARCH_VFP_V3D16,
	       "Cortex-R7"),
  ARM_CPU_OPT ("cortex-m7",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M7"),
  ARM_CPU_OPT ("cortex-m4",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M4"),
  ARM_CPU_OPT ("cortex-m3",	ARM_ARCH_V7M,	 FPU_NONE,	  "Cortex-M3"),
  ARM_CPU_OPT ("cortex-m1",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M1"),
  ARM_CPU_OPT ("cortex-m0",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0"),
  ARM_CPU_OPT ("cortex-m0plus",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0+"),
  ARM_CPU_OPT ("exynos-m1",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Samsung " \
	       "Exynos M1"),
  ARM_CPU_OPT ("qdf24xx",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Qualcomm "
	       "QDF24XX"),

  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("i80200",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* Maverick */
  ARM_CPU_OPT ("ep9312",	ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
		FPU_ARCH_MAVERICK, "ARM920T"),
  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4",   ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						  | ARM_EXT_SEC,
						  ARM_EXT2_V6T2_V8M),
		FPU_ARCH_VFP_V3D16, NULL),
  ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						    | ARM_EXT_SEC,
						    ARM_EXT2_V6T2_V8M),
	       FPU_ARCH_NEON_VFP_V4, NULL),
  /* APM X-Gene family.  */
  ARM_CPU_OPT ("xgene1",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	        "APM X-Gene 1"),
  ARM_CPU_OPT ("xgene2",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	        "APM X-Gene 2"),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
24978
/* Describes one -march= candidate: the architecture's feature set and
   the FPU assumed when the user gives no -mfpu=.  */
struct arm_arch_option_table
{
  char *name;			/* Architecture name accepted on the command line.  */
  size_t name_len;		/* Length of NAME (precomputed for matching).  */
  const arm_feature_set value;	/* Features enabled by this architecture.  */
  const arm_feature_set default_fpu;	/* FPU assumed when -mfpu= is absent.  */
};
24986
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Terminated by an all-NULL/ARM_ARCH_NONE entry.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kz",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kzt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.base",	ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.main",	ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.1-a",	ARM_ARCH_V8_1A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.2-a",	ARM_ARCH_V8_2A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_ARCH_OPT
25048
/* ISA extensions in the co-processor and main instruction set space.
   Each entry names an extension and the feature bits it adds/removes.  */
struct arm_option_extension_value_table
{
  char *name;			/* Extension name, e.g. "crc".  */
  size_t name_len;		/* Length of NAME (precomputed for matching).  */
  const arm_feature_set merge_value;	/* Features added when enabled.  */
  /* NOTE(review): presumably the features removed when the extension is
     negated -- confirm against arm_parse_extension.  */
  const arm_feature_set clear_value;
  const arm_feature_set allowed_archs;	/* Architectures the extension is valid for.  */
};
25058
25059 /* The following table must be in alphabetical order with a NULL last entry.
25060 */
25061 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, AA }
25062 static const struct arm_option_extension_value_table arm_extensions[] =
25063 {
25064 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
25065 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25066 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25067 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
25068 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25069 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
25070 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25071 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
25072 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
25073 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A | ARM_EXT_V7R)),
25074 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
25075 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ANY),
25076 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
25077 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ANY),
25078 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
25079 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ANY),
25080 ARM_EXT_OPT ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
25081 ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
25082 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A | ARM_EXT_V7R)),
25083 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
25084 ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
25085 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25086 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
25087 ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
25088 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
25089 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
25090 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
25091 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25092 ARM_EXT_OPT ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
25093 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
25094 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V7A)),
25095 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
25096 | ARM_EXT_DIV),
25097 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
25098 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
25099 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8,
25100 ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
25101 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25102 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
25103 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ANY),
25104 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE }
25105 };
25106 #undef ARM_EXT_OPT
25107
/* ISA floating-point and Advanced SIMD extensions.
   Maps one -mfpu= name onto the feature set it enables.  */
struct arm_option_fpu_value_table
{
  char *name;			/* FPU name accepted on the command line.  */
  const arm_feature_set value;	/* Features enabled by this FPU.  */
};
25114
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Terminated by a NULL/ARM_ARCH_NONE entry.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}
};
25164
/* Generic table entry mapping an option keyword onto an integer value
   (used for -mfloat-abi= and -meabi=).  */
struct arm_option_value_table
{
  char *name;	/* Keyword accepted on the command line.  */
  long value;	/* Value stored when the keyword matches.  */
};
25170
/* Floating point ABI names accepted by -mfloat-abi=.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
25178
25179 #ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
/* EABI version names accepted by -meabi=.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
25188 #endif
25189
/* Table entry for a multi-character option such as -mcpu=; matched by
   prefix in md_parse_option, with the suffix handed to FUNC.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
25197
25198 static bfd_boolean
25199 arm_parse_extension (char *str, const arm_feature_set **opt_p)
25200 {
25201 arm_feature_set *ext_set = (arm_feature_set *)
25202 xmalloc (sizeof (arm_feature_set));
25203
25204 /* We insist on extensions being specified in alphabetical order, and with
25205 extensions being added before being removed. We achieve this by having
25206 the global ARM_EXTENSIONS table in alphabetical order, and using the
25207 ADDING_VALUE variable to indicate whether we are adding an extension (1)
25208 or removing it (0) and only allowing it to change in the order
25209 -1 -> 1 -> 0. */
25210 const struct arm_option_extension_value_table * opt = NULL;
25211 int adding_value = -1;
25212
25213 /* Copy the feature set, so that we can modify it. */
25214 *ext_set = **opt_p;
25215 *opt_p = ext_set;
25216
25217 while (str != NULL && *str != 0)
25218 {
25219 char *ext;
25220 size_t len;
25221
25222 if (*str != '+')
25223 {
25224 as_bad (_("invalid architectural extension"));
25225 return FALSE;
25226 }
25227
25228 str++;
25229 ext = strchr (str, '+');
25230
25231 if (ext != NULL)
25232 len = ext - str;
25233 else
25234 len = strlen (str);
25235
25236 if (len >= 2 && strncmp (str, "no", 2) == 0)
25237 {
25238 if (adding_value != 0)
25239 {
25240 adding_value = 0;
25241 opt = arm_extensions;
25242 }
25243
25244 len -= 2;
25245 str += 2;
25246 }
25247 else if (len > 0)
25248 {
25249 if (adding_value == -1)
25250 {
25251 adding_value = 1;
25252 opt = arm_extensions;
25253 }
25254 else if (adding_value != 1)
25255 {
25256 as_bad (_("must specify extensions to add before specifying "
25257 "those to remove"));
25258 return FALSE;
25259 }
25260 }
25261
25262 if (len == 0)
25263 {
25264 as_bad (_("missing architectural extension"));
25265 return FALSE;
25266 }
25267
25268 gas_assert (adding_value != -1);
25269 gas_assert (opt != NULL);
25270
25271 /* Scan over the options table trying to find an exact match. */
25272 for (; opt->name != NULL; opt++)
25273 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25274 {
25275 /* Check we can apply the extension to this architecture. */
25276 if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
25277 {
25278 as_bad (_("extension does not apply to the base architecture"));
25279 return FALSE;
25280 }
25281
25282 /* Add or remove the extension. */
25283 if (adding_value)
25284 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
25285 else
25286 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);
25287
25288 break;
25289 }
25290
25291 if (opt->name == NULL)
25292 {
25293 /* Did we fail to find an extension because it wasn't specified in
25294 alphabetical order, or because it does not exist? */
25295
25296 for (opt = arm_extensions; opt->name != NULL; opt++)
25297 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25298 break;
25299
25300 if (opt->name == NULL)
25301 as_bad (_("unknown architectural extension `%s'"), str);
25302 else
25303 as_bad (_("architectural extensions must be specified in "
25304 "alphabetical order"));
25305
25306 return FALSE;
25307 }
25308 else
25309 {
25310 /* We should skip the extension we've just matched the next time
25311 round. */
25312 opt++;
25313 }
25314
25315 str = ext;
25316 };
25317
25318 return TRUE;
25319 }
25320
25321 static bfd_boolean
25322 arm_parse_cpu (char *str)
25323 {
25324 const struct arm_cpu_option_table *opt;
25325 char *ext = strchr (str, '+');
25326 size_t len;
25327
25328 if (ext != NULL)
25329 len = ext - str;
25330 else
25331 len = strlen (str);
25332
25333 if (len == 0)
25334 {
25335 as_bad (_("missing cpu name `%s'"), str);
25336 return FALSE;
25337 }
25338
25339 for (opt = arm_cpus; opt->name != NULL; opt++)
25340 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25341 {
25342 mcpu_cpu_opt = &opt->value;
25343 mcpu_fpu_opt = &opt->default_fpu;
25344 if (opt->canonical_name)
25345 {
25346 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
25347 strcpy (selected_cpu_name, opt->canonical_name);
25348 }
25349 else
25350 {
25351 size_t i;
25352
25353 if (len >= sizeof selected_cpu_name)
25354 len = (sizeof selected_cpu_name) - 1;
25355
25356 for (i = 0; i < len; i++)
25357 selected_cpu_name[i] = TOUPPER (opt->name[i]);
25358 selected_cpu_name[i] = 0;
25359 }
25360
25361 if (ext != NULL)
25362 return arm_parse_extension (ext, &mcpu_cpu_opt);
25363
25364 return TRUE;
25365 }
25366
25367 as_bad (_("unknown cpu `%s'"), str);
25368 return FALSE;
25369 }
25370
25371 static bfd_boolean
25372 arm_parse_arch (char *str)
25373 {
25374 const struct arm_arch_option_table *opt;
25375 char *ext = strchr (str, '+');
25376 size_t len;
25377
25378 if (ext != NULL)
25379 len = ext - str;
25380 else
25381 len = strlen (str);
25382
25383 if (len == 0)
25384 {
25385 as_bad (_("missing architecture name `%s'"), str);
25386 return FALSE;
25387 }
25388
25389 for (opt = arm_archs; opt->name != NULL; opt++)
25390 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25391 {
25392 march_cpu_opt = &opt->value;
25393 march_fpu_opt = &opt->default_fpu;
25394 strcpy (selected_cpu_name, opt->name);
25395
25396 if (ext != NULL)
25397 return arm_parse_extension (ext, &march_cpu_opt);
25398
25399 return TRUE;
25400 }
25401
25402 as_bad (_("unknown architecture `%s'\n"), str);
25403 return FALSE;
25404 }
25405
25406 static bfd_boolean
25407 arm_parse_fpu (char * str)
25408 {
25409 const struct arm_option_fpu_value_table * opt;
25410
25411 for (opt = arm_fpus; opt->name != NULL; opt++)
25412 if (streq (opt->name, str))
25413 {
25414 mfpu_opt = &opt->value;
25415 return TRUE;
25416 }
25417
25418 as_bad (_("unknown floating point format `%s'\n"), str);
25419 return FALSE;
25420 }
25421
25422 static bfd_boolean
25423 arm_parse_float_abi (char * str)
25424 {
25425 const struct arm_option_value_table * opt;
25426
25427 for (opt = arm_float_abis; opt->name != NULL; opt++)
25428 if (streq (opt->name, str))
25429 {
25430 mfloat_abi_opt = opt->value;
25431 return TRUE;
25432 }
25433
25434 as_bad (_("unknown floating point abi `%s'\n"), str);
25435 return FALSE;
25436 }
25437
25438 #ifdef OBJ_ELF
25439 static bfd_boolean
25440 arm_parse_eabi (char * str)
25441 {
25442 const struct arm_option_value_table *opt;
25443
25444 for (opt = arm_eabis; opt->name != NULL; opt++)
25445 if (streq (opt->name, str))
25446 {
25447 meabi_flags = opt->value;
25448 return TRUE;
25449 }
25450 as_bad (_("unknown EABI `%s'\n"), str);
25451 return FALSE;
25452 }
25453 #endif
25454
25455 static bfd_boolean
25456 arm_parse_it_mode (char * str)
25457 {
25458 bfd_boolean ret = TRUE;
25459
25460 if (streq ("arm", str))
25461 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
25462 else if (streq ("thumb", str))
25463 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
25464 else if (streq ("always", str))
25465 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
25466 else if (streq ("never", str))
25467 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
25468 else
25469 {
25470 as_bad (_("unknown implicit IT mode `%s', should be "\
25471 "arm, thumb, always, or never."), str);
25472 ret = FALSE;
25473 }
25474
25475 return ret;
25476 }
25477
25478 static bfd_boolean
25479 arm_ccs_mode (char * unused ATTRIBUTE_UNUSED)
25480 {
25481 codecomposer_syntax = TRUE;
25482 arm_comment_chars[0] = ';';
25483 arm_line_separator_chars[0] = 0;
25484 return TRUE;
25485 }
25486
/* Long (multi-character) ARM-specific options.  md_parse_option matches
   these by prefix and passes the remainder to the decode function.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
25507
/* gas target hook: handle a target-specific command line option.
   C is the option character and ARG its argument (or NULL).  Returns
   1 if the option was recognised, 0 otherwise.  */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Try the simple on/off options first.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then the legacy options, which store a pointer to their value.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      /* Finally the long options such as -mcpu=, matched by prefix.  */
      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  ARG does not include the
		 leading option character (already in C), hence the -1.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
25598
/* gas target hook: print the ARM-specific command line options to FP
   for --help output.  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  /* Short options that carry their own help text.  */
  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  /* Long options such as -mcpu=.  */
  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));
}
25628
25629
25630 #ifdef OBJ_ELF
/* Entry pairing an EABI Tag_CPU_arch value with the feature set of the
   corresponding architecture.  */
typedef struct
{
  int val;			/* EABI CPU arch value.  */
  arm_feature_set flags;	/* Features of that architecture.  */
} cpu_arch_ver_table;
25636
/* Mapping from CPU features to EABI CPU arch values.  As a general rule, table
   must be sorted least features first but some reordering is needed, eg. for
   Thumb-2 instructions to be detected as coming from ARMv6T2.  */
/* NOTE: aeabi_set_public_attributes scans this table in order and keeps
   the last entry that contributes a new feature, so the ordering below
   (including the deliberately out-of-sequence values) is significant.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {3, ARM_ARCH_V5T},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {9, ARM_ARCH_V6K},
    {7, ARM_ARCH_V6Z},
    {11, ARM_ARCH_V6M},
    {12, ARM_ARCH_V6SM},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7VE},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {14, ARM_ARCH_V8A},
    {16, ARM_ARCH_V8M_BASE},
    {17, ARM_ARCH_V8M_MAIN},
    {0, ARM_ARCH_NONE}
};
25662
25663 /* Set an attribute if it has not already been set by the user. */
25664 static void
25665 aeabi_set_attribute_int (int tag, int value)
25666 {
25667 if (tag < 1
25668 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25669 || !attributes_set_explicitly[tag])
25670 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
25671 }
25672
25673 static void
25674 aeabi_set_attribute_string (int tag, const char *value)
25675 {
25676 if (tag < 1
25677 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25678 || !attributes_set_explicitly[tag])
25679 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
25680 }
25681
/* Set the public EABI object attributes.  */
void
aeabi_set_public_attributes (void)
{
  int arch;
  char profile;
  int virt_sec = 0;
  int fp16_optional = 0;
  arm_feature_set flags;
  arm_feature_set tmp;
  arm_feature_set arm_arch_v8m_base = ARM_ARCH_V8M_BASE;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);

  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

  selected_cpu = flags;

  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* We need to make sure that the attributes do not identify us as v6S-M
     when the only v6S-M feature in use is the Operating System Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
    if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
      ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);

  tmp = flags;
  arch = 0;
  /* Keep the value of the last table entry that contributes a feature
     not already accounted for.  */
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == TAG_CPU_ARCH_V7
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    arch = TAG_CPU_ARCH_V7E_M;

  /* Anything beyond the v8-M baseline promotes the tag to mainline.  */
  ARM_CLEAR_FEATURE (tmp, flags, arm_arch_v8m_base);
  if (arch == TAG_CPU_ARCH_V8M_BASE && ARM_CPU_HAS_FEATURE (tmp, arm_arch_any))
    arch = TAG_CPU_ARCH_V8M_MAIN;

  /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
     coming from ARMv8-A.  However, since ARMv8-A has more instructions than
     ARMv8-M, -march=all must be detected as ARMv8-A.  */
  if (arch == TAG_CPU_ARCH_V8M_MAIN
      && ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    arch = TAG_CPU_ARCH_V8;

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* Architecture names are reported upper-cased without the "armv"
	 prefix.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m)))
    profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    profile = 'M';
  else
    profile = '\0';

  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    {
      int thumb_isa_use;

      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8
	      || (arch >= TAG_CPU_ARCH_V8M_BASE
		  && arch <= TAG_CPU_ARCH_V8M_MAIN));
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  */
  /* Bit 0: Security Extensions (SMC); bit 1: Virtualization Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
25905
25906 /* Add the default contents for the .ARM.attributes section. */
25907 void
25908 arm_md_end (void)
25909 {
25910 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
25911 return;
25912
25913 aeabi_set_public_attributes ();
25914 }
25915 #endif /* OBJ_ELF */
25916
25917
25918 /* Parse a .cpu directive. */
25919
25920 static void
25921 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
25922 {
25923 const struct arm_cpu_option_table *opt;
25924 char *name;
25925 char saved_char;
25926
25927 name = input_line_pointer;
25928 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25929 input_line_pointer++;
25930 saved_char = *input_line_pointer;
25931 *input_line_pointer = 0;
25932
25933 /* Skip the first "all" entry. */
25934 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
25935 if (streq (opt->name, name))
25936 {
25937 mcpu_cpu_opt = &opt->value;
25938 selected_cpu = opt->value;
25939 if (opt->canonical_name)
25940 strcpy (selected_cpu_name, opt->canonical_name);
25941 else
25942 {
25943 int i;
25944 for (i = 0; opt->name[i]; i++)
25945 selected_cpu_name[i] = TOUPPER (opt->name[i]);
25946
25947 selected_cpu_name[i] = 0;
25948 }
25949 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25950 *input_line_pointer = saved_char;
25951 demand_empty_rest_of_line ();
25952 return;
25953 }
25954 as_bad (_("unknown cpu `%s'"), name);
25955 *input_line_pointer = saved_char;
25956 ignore_rest_of_line ();
25957 }
25958
25959
25960 /* Parse a .arch directive. */
25961
25962 static void
25963 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
25964 {
25965 const struct arm_arch_option_table *opt;
25966 char saved_char;
25967 char *name;
25968
25969 name = input_line_pointer;
25970 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25971 input_line_pointer++;
25972 saved_char = *input_line_pointer;
25973 *input_line_pointer = 0;
25974
25975 /* Skip the first "all" entry. */
25976 for (opt = arm_archs + 1; opt->name != NULL; opt++)
25977 if (streq (opt->name, name))
25978 {
25979 mcpu_cpu_opt = &opt->value;
25980 selected_cpu = opt->value;
25981 strcpy (selected_cpu_name, opt->name);
25982 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25983 *input_line_pointer = saved_char;
25984 demand_empty_rest_of_line ();
25985 return;
25986 }
25987
25988 as_bad (_("unknown architecture `%s'\n"), name);
25989 *input_line_pointer = saved_char;
25990 ignore_rest_of_line ();
25991 }
25992
25993
25994 /* Parse a .object_arch directive. */
25995
25996 static void
25997 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
25998 {
25999 const struct arm_arch_option_table *opt;
26000 char saved_char;
26001 char *name;
26002
26003 name = input_line_pointer;
26004 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26005 input_line_pointer++;
26006 saved_char = *input_line_pointer;
26007 *input_line_pointer = 0;
26008
26009 /* Skip the first "all" entry. */
26010 for (opt = arm_archs + 1; opt->name != NULL; opt++)
26011 if (streq (opt->name, name))
26012 {
26013 object_arch = &opt->value;
26014 *input_line_pointer = saved_char;
26015 demand_empty_rest_of_line ();
26016 return;
26017 }
26018
26019 as_bad (_("unknown architecture `%s'\n"), name);
26020 *input_line_pointer = saved_char;
26021 ignore_rest_of_line ();
26022 }
26023
26024 /* Parse a .arch_extension directive. */
26025
26026 static void
26027 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
26028 {
26029 const struct arm_option_extension_value_table *opt;
26030 char saved_char;
26031 char *name;
26032 int adding_value = 1;
26033
26034 name = input_line_pointer;
26035 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26036 input_line_pointer++;
26037 saved_char = *input_line_pointer;
26038 *input_line_pointer = 0;
26039
26040 if (strlen (name) >= 2
26041 && strncmp (name, "no", 2) == 0)
26042 {
26043 adding_value = 0;
26044 name += 2;
26045 }
26046
26047 for (opt = arm_extensions; opt->name != NULL; opt++)
26048 if (streq (opt->name, name))
26049 {
26050 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
26051 {
26052 as_bad (_("architectural extension `%s' is not allowed for the "
26053 "current base architecture"), name);
26054 break;
26055 }
26056
26057 if (adding_value)
26058 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu,
26059 opt->merge_value);
26060 else
26061 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->clear_value);
26062
26063 mcpu_cpu_opt = &selected_cpu;
26064 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26065 *input_line_pointer = saved_char;
26066 demand_empty_rest_of_line ();
26067 return;
26068 }
26069
26070 if (opt->name == NULL)
26071 as_bad (_("unknown architecture extension `%s'\n"), name);
26072
26073 *input_line_pointer = saved_char;
26074 ignore_rest_of_line ();
26075 }
26076
26077 /* Parse a .fpu directive. */
26078
26079 static void
26080 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
26081 {
26082 const struct arm_option_fpu_value_table *opt;
26083 char saved_char;
26084 char *name;
26085
26086 name = input_line_pointer;
26087 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26088 input_line_pointer++;
26089 saved_char = *input_line_pointer;
26090 *input_line_pointer = 0;
26091
26092 for (opt = arm_fpus; opt->name != NULL; opt++)
26093 if (streq (opt->name, name))
26094 {
26095 mfpu_opt = &opt->value;
26096 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26097 *input_line_pointer = saved_char;
26098 demand_empty_rest_of_line ();
26099 return;
26100 }
26101
26102 as_bad (_("unknown floating point format `%s'\n"), name);
26103 *input_line_pointer = saved_char;
26104 ignore_rest_of_line ();
26105 }
26106
/* Copy symbol information.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  /* Propagate the ARM-specific symbol flag bits from SRC to DEST.  */
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
26114
26115 #ifdef OBJ_ELF
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  /* Name/tag pairs recognised in .eabi_attribute directives.  */
  static const struct
  {
    const char * name;
    const int    tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
	 also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_name),
      T (Tag_CPU_arch),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_FP_arch),
      T (Tag_VFP_arch),
      T (Tag_WMMX_arch),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align_needed),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align_preserved),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_FP_HP_extension),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_MPextension_use),
      T (Tag_DIV_use),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_T2EE_use),
      T (Tag_Virtualization_use),
      /* We deliberately do not include Tag_MPextension_use_legacy.  */
#undef T
    };
  unsigned int i;

  if (name == NULL)
    return -1;

  /* Linear search is fine: this is only called while parsing
     directives.  */
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
26189
26190
26191 /* Apply sym value for relocations only in the case that they are for
26192 local symbols in the same segment as the fixup and you have the
26193 respective architectural feature for blx and simple switches. */
26194 int
26195 arm_apply_sym_value (struct fix * fixP, segT this_seg)
26196 {
26197 if (fixP->fx_addsy
26198 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
26199 /* PR 17444: If the local symbol is in a different section then a reloc
26200 will always be generated for it, so applying the symbol value now
26201 will result in a double offset being stored in the relocation. */
26202 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
26203 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
26204 {
26205 switch (fixP->fx_r_type)
26206 {
26207 case BFD_RELOC_ARM_PCREL_BLX:
26208 case BFD_RELOC_THUMB_PCREL_BRANCH23:
26209 if (ARM_IS_FUNC (fixP->fx_addsy))
26210 return 1;
26211 break;
26212
26213 case BFD_RELOC_ARM_PCREL_CALL:
26214 case BFD_RELOC_THUMB_PCREL_BLX:
26215 if (THUMB_IS_FUNC (fixP->fx_addsy))
26216 return 1;
26217 break;
26218
26219 default:
26220 break;
26221 }
26222
26223 }
26224 return 0;
26225 }
26226 #endif /* OBJ_ELF */