]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-arm.c
Prepare gas for 64-bit obstacks
[thirdparty/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2014 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
49 static struct
50 {
51 symbolS * proc_start;
52 symbolS * table_entry;
53 symbolS * personality_routine;
54 int personality_index;
55 /* The segment containing the function. */
56 segT saved_seg;
57 subsegT saved_subseg;
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes;
60 int opcode_count;
61 int opcode_alloc;
62 /* The number of bytes pushed to the stack. */
63 offsetT frame_size;
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
70 offsetT fp_offset;
71 int fp_reg;
72 /* Nonzero if an unwind_setfp directive has been seen. */
73 unsigned fp_used:1;
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored:1;
76 } unwind;
77
78 #endif /* OBJ_ELF */
79
80 /* Results from operand parsing worker functions. */
81
82 typedef enum
83 {
84 PARSE_OPERAND_SUCCESS,
85 PARSE_OPERAND_FAIL,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result;
88
89 enum arm_float_abi
90 {
91 ARM_FLOAT_ABI_HARD,
92 ARM_FLOAT_ABI_SOFTFP,
93 ARM_FLOAT_ABI_SOFT
94 };
95
96 /* Types of processor to assemble for. */
97 #ifndef CPU_DEFAULT
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
101
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
104 #endif
105
106 #ifndef FPU_DEFAULT
107 # ifdef TE_LINUX
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
110 # ifdef OBJ_ELF
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
112 # else
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
115 # endif
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
118 # else
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
121 # endif
122 #endif /* ifndef FPU_DEFAULT */
123
124 #define streq(a, b) (strcmp (a, b) == 0)
125
126 static arm_feature_set cpu_variant;
127 static arm_feature_set arm_arch_used;
128 static arm_feature_set thumb_arch_used;
129
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26 = FALSE;
132 static int atpcs = FALSE;
133 static int support_interwork = FALSE;
134 static int uses_apcs_float = FALSE;
135 static int pic_code = FALSE;
136 static int fix_v4bx = FALSE;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated = TRUE;
139
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax = FALSE;
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
155
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
166
167 #ifdef CPU_DEFAULT
168 static const arm_feature_set cpu_default = CPU_DEFAULT;
169 #endif
170
171 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
172 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
173 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
174 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
175 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
176 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
177 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
178 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
179 static const arm_feature_set arm_ext_v4t_5 =
180 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
188 static const arm_feature_set arm_ext_v6m = ARM_FEATURE (ARM_EXT_V6M, 0);
189 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
190 static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
191 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
192 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
193 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
194 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
195 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
196 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
197 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
198 static const arm_feature_set arm_ext_v8 = ARM_FEATURE (ARM_EXT_V8, 0);
199 static const arm_feature_set arm_ext_m =
200 ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, 0);
201 static const arm_feature_set arm_ext_mp = ARM_FEATURE (ARM_EXT_MP, 0);
202 static const arm_feature_set arm_ext_sec = ARM_FEATURE (ARM_EXT_SEC, 0);
203 static const arm_feature_set arm_ext_os = ARM_FEATURE (ARM_EXT_OS, 0);
204 static const arm_feature_set arm_ext_adiv = ARM_FEATURE (ARM_EXT_ADIV, 0);
205 static const arm_feature_set arm_ext_virt = ARM_FEATURE (ARM_EXT_VIRT, 0);
206
207 static const arm_feature_set arm_arch_any = ARM_ANY;
208 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
209 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
210 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
211 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
212
213 static const arm_feature_set arm_cext_iwmmxt2 =
214 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
215 static const arm_feature_set arm_cext_iwmmxt =
216 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
217 static const arm_feature_set arm_cext_xscale =
218 ARM_FEATURE (0, ARM_CEXT_XSCALE);
219 static const arm_feature_set arm_cext_maverick =
220 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
221 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
222 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
223 static const arm_feature_set fpu_vfp_ext_v1xd =
224 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
225 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
226 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
227 static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
228 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
229 static const arm_feature_set fpu_vfp_ext_d32 =
230 ARM_FEATURE (0, FPU_VFP_EXT_D32);
231 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
232 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
233 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
234 static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
235 static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
236 static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
237 static const arm_feature_set fpu_vfp_ext_armv8 =
238 ARM_FEATURE (0, FPU_VFP_EXT_ARMV8);
239 static const arm_feature_set fpu_neon_ext_armv8 =
240 ARM_FEATURE (0, FPU_NEON_EXT_ARMV8);
241 static const arm_feature_set fpu_crypto_ext_armv8 =
242 ARM_FEATURE (0, FPU_CRYPTO_EXT_ARMV8);
243 static const arm_feature_set crc_ext_armv8 =
244 ARM_FEATURE (0, CRC_EXT_ARMV8);
245
246 static int mfloat_abi_opt = -1;
247 /* Record user cpu selection for object attributes. */
248 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
249 /* Must be long enough to hold any of the names in arm_cpus. */
250 static char selected_cpu_name[16];
251
252 /* Return if no cpu was selected on command-line. */
253 static bfd_boolean
254 no_cpu_selected (void)
255 {
256 return selected_cpu.core == arm_arch_none.core
257 && selected_cpu.coproc == arm_arch_none.coproc;
258 }
259
260 #ifdef OBJ_ELF
261 # ifdef EABI_DEFAULT
262 static int meabi_flags = EABI_DEFAULT;
263 # else
264 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
265 # endif
266
267 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
268
269 bfd_boolean
270 arm_is_eabi (void)
271 {
272 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
273 }
274 #endif
275
276 #ifdef OBJ_ELF
277 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
278 symbolS * GOT_symbol;
279 #endif
280
281 /* 0: assemble for ARM,
282 1: assemble for Thumb,
283 2: assemble for Thumb even though target CPU does not support thumb
284 instructions. */
285 static int thumb_mode = 0;
286 /* A value distinct from the possible values for thumb_mode that we
287 can use to record whether thumb_mode has been copied into the
288 tc_frag_data field of a frag. */
289 #define MODE_RECORDED (1 << 4)
290
291 /* Specifies the intrinsic IT insn behavior mode. */
292 enum implicit_it_mode
293 {
294 IMPLICIT_IT_MODE_NEVER = 0x00,
295 IMPLICIT_IT_MODE_ARM = 0x01,
296 IMPLICIT_IT_MODE_THUMB = 0x02,
297 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
298 };
299 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
300
301 /* If unified_syntax is true, we are processing the new unified
302 ARM/Thumb syntax. Important differences from the old ARM mode:
303
304 - Immediate operands do not require a # prefix.
305 - Conditional affixes always appear at the end of the
306 instruction. (For backward compatibility, those instructions
307 that formerly had them in the middle, continue to accept them
308 there.)
309 - The IT instruction may appear, and if it does is validated
310 against subsequent conditional affixes. It does not generate
311 machine code.
312
313 Important differences from the old Thumb mode:
314
315 - Immediate operands do not require a # prefix.
316 - Most of the V6T2 instructions are only available in unified mode.
317 - The .N and .W suffixes are recognized and honored (it is an error
318 if they cannot be honored).
319 - All instructions set the flags if and only if they have an 's' affix.
320 - Conditional affixes may be used. They are validated against
321 preceding IT instructions. Unlike ARM mode, you cannot use a
322 conditional affix except in the scope of an IT instruction. */
323
324 static bfd_boolean unified_syntax = FALSE;
325
326 /* An immediate operand can start with #, and ld*, st*, pld operands
327 can contain [ and ]. We need to tell APP not to elide whitespace
328 before a [, which can appear as the first operand for pld.
329 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
330 const char arm_symbol_chars[] = "#[]{}";
331
332 enum neon_el_type
333 {
334 NT_invtype,
335 NT_untyped,
336 NT_integer,
337 NT_float,
338 NT_poly,
339 NT_signed,
340 NT_unsigned
341 };
342
343 struct neon_type_el
344 {
345 enum neon_el_type type;
346 unsigned size;
347 };
348
349 #define NEON_MAX_TYPE_ELS 4
350
351 struct neon_type
352 {
353 struct neon_type_el el[NEON_MAX_TYPE_ELS];
354 unsigned elems;
355 };
356
357 enum it_instruction_type
358 {
359 OUTSIDE_IT_INSN,
360 INSIDE_IT_INSN,
361 INSIDE_IT_LAST_INSN,
362 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
363 if inside, should be the last one. */
364 NEUTRAL_IT_INSN, /* This could be either inside or outside,
365 i.e. BKPT and NOP. */
366 IT_INSN /* The IT insn has been parsed. */
367 };
368
369 /* The maximum number of operands we need. */
370 #define ARM_IT_MAX_OPERANDS 6
371
372 struct arm_it
373 {
374 const char * error;
375 unsigned long instruction;
376 int size;
377 int size_req;
378 int cond;
379 /* "uncond_value" is set to the value in place of the conditional field in
380 unconditional versions of the instruction, or -1 if nothing is
381 appropriate. */
382 int uncond_value;
383 struct neon_type vectype;
384 /* This does not indicate an actual NEON instruction, only that
385 the mnemonic accepts neon-style type suffixes. */
386 int is_neon;
387 /* Set to the opcode if the instruction needs relaxation.
388 Zero if the instruction is not relaxed. */
389 unsigned long relax;
390 struct
391 {
392 bfd_reloc_code_real_type type;
393 expressionS exp;
394 int pc_rel;
395 } reloc;
396
397 enum it_instruction_type it_insn_type;
398
399 struct
400 {
401 unsigned reg;
402 signed int imm;
403 struct neon_type_el vectype;
404 unsigned present : 1; /* Operand present. */
405 unsigned isreg : 1; /* Operand was a register. */
406 unsigned immisreg : 1; /* .imm field is a second register. */
407 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
408 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
409 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
410 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
411 instructions. This allows us to disambiguate ARM <-> vector insns. */
412 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
413 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
414 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
415 unsigned issingle : 1; /* Operand is VFP single-precision register. */
416 unsigned hasreloc : 1; /* Operand has relocation suffix. */
417 unsigned writeback : 1; /* Operand has trailing ! */
418 unsigned preind : 1; /* Preindexed address. */
419 unsigned postind : 1; /* Postindexed address. */
420 unsigned negative : 1; /* Index register was negated. */
421 unsigned shifted : 1; /* Shift applied to operation. */
422 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
423 } operands[ARM_IT_MAX_OPERANDS];
424 };
425
426 static struct arm_it inst;
427
428 #define NUM_FLOAT_VALS 8
429
430 const char * fp_const[] =
431 {
432 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
433 };
434
435 /* Number of littlenums required to hold an extended precision number. */
436 #define MAX_LITTLENUMS 6
437
438 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
439
440 #define FAIL (-1)
441 #define SUCCESS (0)
442
443 #define SUFF_S 1
444 #define SUFF_D 2
445 #define SUFF_E 3
446 #define SUFF_P 4
447
448 #define CP_T_X 0x00008000
449 #define CP_T_Y 0x00400000
450
451 #define CONDS_BIT 0x00100000
452 #define LOAD_BIT 0x00100000
453
454 #define DOUBLE_LOAD_FLAG 0x00000001
455
456 struct asm_cond
457 {
458 const char * template_name;
459 unsigned long value;
460 };
461
462 #define COND_ALWAYS 0xE
463
464 struct asm_psr
465 {
466 const char * template_name;
467 unsigned long field;
468 };
469
470 struct asm_barrier_opt
471 {
472 const char * template_name;
473 unsigned long value;
474 const arm_feature_set arch;
475 };
476
477 /* The bit that distinguishes CPSR and SPSR. */
478 #define SPSR_BIT (1 << 22)
479
480 /* The individual PSR flag bits. */
481 #define PSR_c (1 << 16)
482 #define PSR_x (1 << 17)
483 #define PSR_s (1 << 18)
484 #define PSR_f (1 << 19)
485
486 struct reloc_entry
487 {
488 char * name;
489 bfd_reloc_code_real_type reloc;
490 };
491
492 enum vfp_reg_pos
493 {
494 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
495 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
496 };
497
498 enum vfp_ldstm_type
499 {
500 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
501 };
502
503 /* Bits for DEFINED field in neon_typed_alias. */
504 #define NTA_HASTYPE 1
505 #define NTA_HASINDEX 2
506
507 struct neon_typed_alias
508 {
509 unsigned char defined;
510 unsigned char index;
511 struct neon_type_el eltype;
512 };
513
514 /* ARM register categories. This includes coprocessor numbers and various
515 architecture extensions' registers. */
516 enum arm_reg_type
517 {
518 REG_TYPE_RN,
519 REG_TYPE_CP,
520 REG_TYPE_CN,
521 REG_TYPE_FN,
522 REG_TYPE_VFS,
523 REG_TYPE_VFD,
524 REG_TYPE_NQ,
525 REG_TYPE_VFSD,
526 REG_TYPE_NDQ,
527 REG_TYPE_NSDQ,
528 REG_TYPE_VFC,
529 REG_TYPE_MVF,
530 REG_TYPE_MVD,
531 REG_TYPE_MVFX,
532 REG_TYPE_MVDX,
533 REG_TYPE_MVAX,
534 REG_TYPE_DSPSC,
535 REG_TYPE_MMXWR,
536 REG_TYPE_MMXWC,
537 REG_TYPE_MMXWCG,
538 REG_TYPE_XSCALE,
539 REG_TYPE_RNB
540 };
541
542 /* Structure for a hash table entry for a register.
543 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
544 information which states whether a vector type or index is specified (for a
545 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
546 struct reg_entry
547 {
548 const char * name;
549 unsigned int number;
550 unsigned char type;
551 unsigned char builtin;
552 struct neon_typed_alias * neon;
553 };
554
555 /* Diagnostics used when we don't get a register of the expected type. */
556 const char * const reg_expected_msgs[] =
557 {
558 N_("ARM register expected"),
559 N_("bad or missing co-processor number"),
560 N_("co-processor register expected"),
561 N_("FPA register expected"),
562 N_("VFP single precision register expected"),
563 N_("VFP/Neon double precision register expected"),
564 N_("Neon quad precision register expected"),
565 N_("VFP single or double precision register expected"),
566 N_("Neon double or quad precision register expected"),
567 N_("VFP single, double or Neon quad precision register expected"),
568 N_("VFP system register expected"),
569 N_("Maverick MVF register expected"),
570 N_("Maverick MVD register expected"),
571 N_("Maverick MVFX register expected"),
572 N_("Maverick MVDX register expected"),
573 N_("Maverick MVAX register expected"),
574 N_("Maverick DSPSC register expected"),
575 N_("iWMMXt data register expected"),
576 N_("iWMMXt control register expected"),
577 N_("iWMMXt scalar register expected"),
578 N_("XScale accumulator register expected"),
579 };
580
581 /* Some well known registers that we refer to directly elsewhere. */
582 #define REG_R12 12
583 #define REG_SP 13
584 #define REG_LR 14
585 #define REG_PC 15
586
587 /* ARM instructions take 4bytes in the object file, Thumb instructions
588 take 2: */
589 #define INSN_SIZE 4
590
591 struct asm_opcode
592 {
593 /* Basic string to match. */
594 const char * template_name;
595
596 /* Parameters to instruction. */
597 unsigned int operands[8];
598
599 /* Conditional tag - see opcode_lookup. */
600 unsigned int tag : 4;
601
602 /* Basic instruction code. */
603 unsigned int avalue : 28;
604
605 /* Thumb-format instruction code. */
606 unsigned int tvalue;
607
608 /* Which architecture variant provides this instruction. */
609 const arm_feature_set * avariant;
610 const arm_feature_set * tvariant;
611
612 /* Function to call to encode instruction in ARM format. */
613 void (* aencode) (void);
614
615 /* Function to call to encode instruction in Thumb format. */
616 void (* tencode) (void);
617 };
618
619 /* Defines for various bits that we will want to toggle. */
620 #define INST_IMMEDIATE 0x02000000
621 #define OFFSET_REG 0x02000000
622 #define HWOFFSET_IMM 0x00400000
623 #define SHIFT_BY_REG 0x00000010
624 #define PRE_INDEX 0x01000000
625 #define INDEX_UP 0x00800000
626 #define WRITE_BACK 0x00200000
627 #define LDM_TYPE_2_OR_3 0x00400000
628 #define CPSI_MMOD 0x00020000
629
630 #define LITERAL_MASK 0xf000f000
631 #define OPCODE_MASK 0xfe1fffff
632 #define V4_STR_BIT 0x00000020
633 #define VLDR_VMOV_SAME 0x0040f000
634
635 #define T2_SUBS_PC_LR 0xf3de8f00
636
637 #define DATA_OP_SHIFT 21
638
639 #define T2_OPCODE_MASK 0xfe1fffff
640 #define T2_DATA_OP_SHIFT 21
641
642 #define A_COND_MASK 0xf0000000
643 #define A_PUSH_POP_OP_MASK 0x0fff0000
644
645 /* Opcodes for pushing/poping registers to/from the stack. */
646 #define A1_OPCODE_PUSH 0x092d0000
647 #define A2_OPCODE_PUSH 0x052d0004
648 #define A2_OPCODE_POP 0x049d0004
649
650 /* Codes to distinguish the arithmetic instructions. */
651 #define OPCODE_AND 0
652 #define OPCODE_EOR 1
653 #define OPCODE_SUB 2
654 #define OPCODE_RSB 3
655 #define OPCODE_ADD 4
656 #define OPCODE_ADC 5
657 #define OPCODE_SBC 6
658 #define OPCODE_RSC 7
659 #define OPCODE_TST 8
660 #define OPCODE_TEQ 9
661 #define OPCODE_CMP 10
662 #define OPCODE_CMN 11
663 #define OPCODE_ORR 12
664 #define OPCODE_MOV 13
665 #define OPCODE_BIC 14
666 #define OPCODE_MVN 15
667
668 #define T2_OPCODE_AND 0
669 #define T2_OPCODE_BIC 1
670 #define T2_OPCODE_ORR 2
671 #define T2_OPCODE_ORN 3
672 #define T2_OPCODE_EOR 4
673 #define T2_OPCODE_ADD 8
674 #define T2_OPCODE_ADC 10
675 #define T2_OPCODE_SBC 11
676 #define T2_OPCODE_SUB 13
677 #define T2_OPCODE_RSB 14
678
679 #define T_OPCODE_MUL 0x4340
680 #define T_OPCODE_TST 0x4200
681 #define T_OPCODE_CMN 0x42c0
682 #define T_OPCODE_NEG 0x4240
683 #define T_OPCODE_MVN 0x43c0
684
685 #define T_OPCODE_ADD_R3 0x1800
686 #define T_OPCODE_SUB_R3 0x1a00
687 #define T_OPCODE_ADD_HI 0x4400
688 #define T_OPCODE_ADD_ST 0xb000
689 #define T_OPCODE_SUB_ST 0xb080
690 #define T_OPCODE_ADD_SP 0xa800
691 #define T_OPCODE_ADD_PC 0xa000
692 #define T_OPCODE_ADD_I8 0x3000
693 #define T_OPCODE_SUB_I8 0x3800
694 #define T_OPCODE_ADD_I3 0x1c00
695 #define T_OPCODE_SUB_I3 0x1e00
696
697 #define T_OPCODE_ASR_R 0x4100
698 #define T_OPCODE_LSL_R 0x4080
699 #define T_OPCODE_LSR_R 0x40c0
700 #define T_OPCODE_ROR_R 0x41c0
701 #define T_OPCODE_ASR_I 0x1000
702 #define T_OPCODE_LSL_I 0x0000
703 #define T_OPCODE_LSR_I 0x0800
704
705 #define T_OPCODE_MOV_I8 0x2000
706 #define T_OPCODE_CMP_I8 0x2800
707 #define T_OPCODE_CMP_LR 0x4280
708 #define T_OPCODE_MOV_HR 0x4600
709 #define T_OPCODE_CMP_HR 0x4500
710
711 #define T_OPCODE_LDR_PC 0x4800
712 #define T_OPCODE_LDR_SP 0x9800
713 #define T_OPCODE_STR_SP 0x9000
714 #define T_OPCODE_LDR_IW 0x6800
715 #define T_OPCODE_STR_IW 0x6000
716 #define T_OPCODE_LDR_IH 0x8800
717 #define T_OPCODE_STR_IH 0x8000
718 #define T_OPCODE_LDR_IB 0x7800
719 #define T_OPCODE_STR_IB 0x7000
720 #define T_OPCODE_LDR_RW 0x5800
721 #define T_OPCODE_STR_RW 0x5000
722 #define T_OPCODE_LDR_RH 0x5a00
723 #define T_OPCODE_STR_RH 0x5200
724 #define T_OPCODE_LDR_RB 0x5c00
725 #define T_OPCODE_STR_RB 0x5400
726
727 #define T_OPCODE_PUSH 0xb400
728 #define T_OPCODE_POP 0xbc00
729
730 #define T_OPCODE_BRANCH 0xe000
731
732 #define THUMB_SIZE 2 /* Size of thumb instruction. */
733 #define THUMB_PP_PC_LR 0x0100
734 #define THUMB_LOAD_BIT 0x0800
735 #define THUMB2_LOAD_BIT 0x00100000
736
/* Canned diagnostic strings.  Fix: BAD_ADDR_MODE used to carry a stray
   trailing semicolon inside the #define, which is harmless in statement
   context but breaks expansion inside expressions (e.g. a ternary).  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP		_("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
758
759 static struct hash_control * arm_ops_hsh;
760 static struct hash_control * arm_cond_hsh;
761 static struct hash_control * arm_shift_hsh;
762 static struct hash_control * arm_psr_hsh;
763 static struct hash_control * arm_v7m_psr_hsh;
764 static struct hash_control * arm_reg_hsh;
765 static struct hash_control * arm_reloc_hsh;
766 static struct hash_control * arm_barrier_opt_hsh;
767
768 /* Stuff needed to resolve the label ambiguity
769 As:
770 ...
771 label: <insn>
772 may differ from:
773 ...
774 label:
775 <insn> */
776
777 symbolS * last_label_seen;
778 static int label_is_thumb_function_name = FALSE;
779
780 /* Literal pool structure. Held on a per-section
781 and per-sub-section basis. */
782
783 #define MAX_LITERAL_POOL_SIZE 1024
784 typedef struct literal_pool
785 {
786 expressionS literals [MAX_LITERAL_POOL_SIZE];
787 unsigned int next_free_entry;
788 unsigned int id;
789 symbolS * symbol;
790 segT section;
791 subsegT sub_section;
792 #ifdef OBJ_ELF
793 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
794 #endif
795 struct literal_pool * next;
796 unsigned int alignment;
797 } literal_pool;
798
799 /* Pointer to a linked list of literal pools. */
800 literal_pool * list_of_pools = NULL;
801
802 typedef enum asmfunc_states
803 {
804 OUTSIDE_ASMFUNC,
805 WAITING_ASMFUNC_NAME,
806 WAITING_ENDASMFUNC
807 } asmfunc_states;
808
809 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
810
811 #ifdef OBJ_ELF
812 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
813 #else
814 static struct current_it now_it;
815 #endif
816
817 static inline int
818 now_it_compatible (int cond)
819 {
820 return (cond & ~1) == (now_it.cc & ~1);
821 }
822
823 static inline int
824 conditional_insn (void)
825 {
826 return inst.cond != COND_ALWAYS;
827 }
828
829 static int in_it_block (void);
830
831 static int handle_it_state (void);
832
833 static void force_automatic_it_block_close (void);
834
835 static void it_fsm_post_encode (void);
836
837 #define set_it_insn_type(type) \
838 do \
839 { \
840 inst.it_insn_type = type; \
841 if (handle_it_state () == FAIL) \
842 return; \
843 } \
844 while (0)
845
846 #define set_it_insn_type_nonvoid(type, failret) \
847 do \
848 { \
849 inst.it_insn_type = type; \
850 if (handle_it_state () == FAIL) \
851 return failret; \
852 } \
853 while(0)
854
855 #define set_it_insn_type_last() \
856 do \
857 { \
858 if (inst.cond == COND_ALWAYS) \
859 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
860 else \
861 set_it_insn_type (INSIDE_IT_LAST_INSN); \
862 } \
863 while (0)
864
865 /* Pure syntax. */
866
867 /* This array holds the chars that always start a comment. If the
868 pre-processor is disabled, these aren't very useful. */
869 char arm_comment_chars[] = "@";
870
871 /* This array holds the chars that only start a comment at the beginning of
872 a line. If the line seems to have the form '# 123 filename'
873 .line and .file directives will appear in the pre-processed output. */
874 /* Note that input_file.c hand checks for '#' at the beginning of the
875 first line of the input file. This is because the compiler outputs
876 #NO_APP at the beginning of its output. */
877 /* Also note that comments like this one will always work. */
878 const char line_comment_chars[] = "#";
879
880 char arm_line_separator_chars[] = ";";
881
882 /* Chars that can be used to separate mant
883 from exp in floating point numbers. */
884 const char EXP_CHARS[] = "eE";
885
886 /* Chars that mean this number is a floating point constant. */
887 /* As in 0f12.456 */
888 /* or 0d1.2345e12 */
889
890 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
891
892 /* Prefix characters that indicate the start of an immediate
893 value. */
894 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
895
896 /* Separator character handling. */
897
898 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
899
900 static inline int
901 skip_past_char (char ** str, char c)
902 {
903 /* PR gas/14987: Allow for whitespace before the expected character. */
904 skip_whitespace (*str);
905
906 if (**str == c)
907 {
908 (*str)++;
909 return SUCCESS;
910 }
911 else
912 return FAIL;
913 }
914
915 #define skip_past_comma(str) skip_past_char (str, ',')
916
917 /* Arithmetic expressions (possibly involving symbols). */
918
919 /* Return TRUE if anything in the expression is a bignum. */
920
921 static int
922 walk_no_bignums (symbolS * sp)
923 {
924 if (symbol_get_value_expression (sp)->X_op == O_big)
925 return 1;
926
927 if (symbol_get_value_expression (sp)->X_add_symbol)
928 {
929 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
930 || (symbol_get_value_expression (sp)->X_op_symbol
931 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
932 }
933
934 return 0;
935 }
936
937 static int in_my_get_expression = 0;
938
939 /* Third argument to my_get_expression. */
940 #define GE_NO_PREFIX 0
941 #define GE_IMM_PREFIX 1
942 #define GE_OPT_PREFIX 2
943 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
944 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
945 #define GE_OPT_PREFIX_BIG 3
946
/* Parse an expression starting at *STR into EP, enforcing the
   immediate-prefix rule selected by PREFIX_MODE (one of the GE_* values
   above).  Returns 0 on success with *STR advanced past the expression;
   nonzero on failure with inst.error set.  (Note the missing-'#' path
   returns FAIL rather than 1; callers only test for nonzero.)  */

static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression parser works on input_line_pointer, so point
     it at our operand text for the duration.  While in_my_get_expression
     is set, md_operand marks parse failures as O_illegal instead of
     reporting them itself.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
1032
1033 /* Turn a string in input_line_pointer into a floating point constant
1034 of type TYPE, and store the appropriate bytes in *LITP. The number
1035 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1036 returned, or NULL on OK.
1037
1038 Note that fp constants aren't represent in the normal way on the ARM.
1039 In big endian mode, things are as expected. However, in little endian
1040 mode fp constants are big-endian word-wise, and little-endian byte-wise
1041 within the words. For example, (double) 1.1 in big endian mode is
1042 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1043 the byte sequence 99 99 f1 3f 9a 99 99 99.
1044
1045 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1046
1047 char *
1048 md_atof (int type, char * litP, int * sizeP)
1049 {
1050 int prec;
1051 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1052 char *t;
1053 int i;
1054
1055 switch (type)
1056 {
1057 case 'f':
1058 case 'F':
1059 case 's':
1060 case 'S':
1061 prec = 2;
1062 break;
1063
1064 case 'd':
1065 case 'D':
1066 case 'r':
1067 case 'R':
1068 prec = 4;
1069 break;
1070
1071 case 'x':
1072 case 'X':
1073 prec = 5;
1074 break;
1075
1076 case 'p':
1077 case 'P':
1078 prec = 5;
1079 break;
1080
1081 default:
1082 *sizeP = 0;
1083 return _("Unrecognized or unsupported floating point constant");
1084 }
1085
1086 t = atof_ieee (input_line_pointer, type, words);
1087 if (t)
1088 input_line_pointer = t;
1089 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1090
1091 if (target_big_endian)
1092 {
1093 for (i = 0; i < prec; i++)
1094 {
1095 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1096 litP += sizeof (LITTLENUM_TYPE);
1097 }
1098 }
1099 else
1100 {
1101 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1102 for (i = prec - 1; i >= 0; i--)
1103 {
1104 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1105 litP += sizeof (LITTLENUM_TYPE);
1106 }
1107 else
1108 /* For a 4 byte float the order of elements in `words' is 1 0.
1109 For an 8 byte float the order is 1 0 3 2. */
1110 for (i = 0; i < prec; i += 2)
1111 {
1112 md_number_to_chars (litP, (valueT) words[i + 1],
1113 sizeof (LITTLENUM_TYPE));
1114 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1115 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1116 litP += 2 * sizeof (LITTLENUM_TYPE);
1117 }
1118 }
1119
1120 return NULL;
1121 }
1122
1123 /* We handle all bad expressions here, so that we can report the faulty
1124 instruction in the error message. */
1125 void
1126 md_operand (expressionS * exp)
1127 {
1128 if (in_my_get_expression)
1129 exp->X_op = O_illegal;
1130 }
1131
1132 /* Immediate values. */
1133
1134 /* Generic immediate-value read function for use in directives.
1135 Accepts anything that 'expression' can fold to a constant.
1136 *val receives the number. */
1137 #ifdef OBJ_ELF
1138 static int
1139 immediate_for_directive (int *val)
1140 {
1141 expressionS exp;
1142 exp.X_op = O_illegal;
1143
1144 if (is_immediate_prefix (*input_line_pointer))
1145 {
1146 input_line_pointer++;
1147 expression (&exp);
1148 }
1149
1150 if (exp.X_op != O_constant)
1151 {
1152 as_bad (_("expected #constant"));
1153 ignore_rest_of_line ();
1154 return FAIL;
1155 }
1156 *val = exp.X_add_number;
1157 return SUCCESS;
1158 }
1159 #endif
1160
1161 /* Register parsing. */
1162
1163 /* Generic register parser. CCP points to what should be the
1164 beginning of a register name. If it is indeed a valid register
1165 name, advance CCP over it and return the reg_entry structure;
1166 otherwise return NULL. Does not issue diagnostics. */
1167
1168 static struct reg_entry *
1169 arm_reg_parse_multi (char **ccp)
1170 {
1171 char *start = *ccp;
1172 char *p;
1173 struct reg_entry *reg;
1174
1175 skip_whitespace (start);
1176
1177 #ifdef REGISTER_PREFIX
1178 if (*start != REGISTER_PREFIX)
1179 return NULL;
1180 start++;
1181 #endif
1182 #ifdef OPTIONAL_REGISTER_PREFIX
1183 if (*start == OPTIONAL_REGISTER_PREFIX)
1184 start++;
1185 #endif
1186
1187 p = start;
1188 if (!ISALPHA (*p) || !is_name_beginner (*p))
1189 return NULL;
1190
1191 do
1192 p++;
1193 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1194
1195 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1196
1197 if (!reg)
1198 return NULL;
1199
1200 *ccp = p;
1201 return reg;
1202 }
1203
/* Accept alternative spellings for a few register classes.  START/CCP
   bracket the text being parsed, REG is the entry found by the generic
   parser (may be NULL), TYPE is the class the caller wants.  Returns a
   register number, or FAIL.  */

static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1241
1242 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1243 return value is the register number or FAIL. */
1244
1245 static int
1246 arm_reg_parse (char **ccp, enum arm_reg_type type)
1247 {
1248 char *start = *ccp;
1249 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1250 int ret;
1251
1252 /* Do not allow a scalar (reg+index) to parse as a register. */
1253 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1254 return FAIL;
1255
1256 if (reg && reg->type == type)
1257 return reg->number;
1258
1259 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1260 return ret;
1261
1262 *ccp = start;
1263 return FAIL;
1264 }
1265
1266 /* Parse a Neon type specifier. *STR should point at the leading '.'
1267 character. Does no verification at this stage that the type fits the opcode
1268 properly. E.g.,
1269
1270 .i32.i32.s16
1271 .s32.f32
1272 .u16
1273
1274 Can all be legally parsed by this function.
1275
1276 Fills in neon_type struct pointer with parsed information, and updates STR
1277 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1278 type, FAIL if not. */
1279
1280 static int
1281 parse_neon_type (struct neon_type *type, char **str)
1282 {
1283 char *ptr = *str;
1284
1285 if (type)
1286 type->elems = 0;
1287
1288 while (type->elems < NEON_MAX_TYPE_ELS)
1289 {
1290 enum neon_el_type thistype = NT_untyped;
1291 unsigned thissize = -1u;
1292
1293 if (*ptr != '.')
1294 break;
1295
1296 ptr++;
1297
1298 /* Just a size without an explicit type. */
1299 if (ISDIGIT (*ptr))
1300 goto parsesize;
1301
1302 switch (TOLOWER (*ptr))
1303 {
1304 case 'i': thistype = NT_integer; break;
1305 case 'f': thistype = NT_float; break;
1306 case 'p': thistype = NT_poly; break;
1307 case 's': thistype = NT_signed; break;
1308 case 'u': thistype = NT_unsigned; break;
1309 case 'd':
1310 thistype = NT_float;
1311 thissize = 64;
1312 ptr++;
1313 goto done;
1314 default:
1315 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1316 return FAIL;
1317 }
1318
1319 ptr++;
1320
1321 /* .f is an abbreviation for .f32. */
1322 if (thistype == NT_float && !ISDIGIT (*ptr))
1323 thissize = 32;
1324 else
1325 {
1326 parsesize:
1327 thissize = strtoul (ptr, &ptr, 10);
1328
1329 if (thissize != 8 && thissize != 16 && thissize != 32
1330 && thissize != 64)
1331 {
1332 as_bad (_("bad size %d in type specifier"), thissize);
1333 return FAIL;
1334 }
1335 }
1336
1337 done:
1338 if (type)
1339 {
1340 type->el[type->elems].type = thistype;
1341 type->el[type->elems].size = thissize;
1342 type->elems++;
1343 }
1344 }
1345
1346 /* Empty/missing type is not a successful parse. */
1347 if (type->elems == 0)
1348 return FAIL;
1349
1350 *str = ptr;
1351
1352 return SUCCESS;
1353 }
1354
1355 /* Errors may be set multiple times during parsing or bit encoding
1356 (particularly in the Neon bits), but usually the earliest error which is set
1357 will be the most meaningful. Avoid overwriting it with later (cascading)
1358 errors by calling this function. */
1359
1360 static void
1361 first_error (const char *err)
1362 {
1363 if (!inst.error)
1364 inst.error = err;
1365 }
1366
1367 /* Parse a single type, e.g. ".s32", leading period included. */
1368 static int
1369 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1370 {
1371 char *str = *ccp;
1372 struct neon_type optype;
1373
1374 if (*str == '.')
1375 {
1376 if (parse_neon_type (&optype, &str) == SUCCESS)
1377 {
1378 if (optype.elems == 1)
1379 *vectype = optype.el[0];
1380 else
1381 {
1382 first_error (_("only one type should be specified for operand"));
1383 return FAIL;
1384 }
1385 }
1386 else
1387 {
1388 first_error (_("vector type expected"));
1389 return FAIL;
1390 }
1391 }
1392 else
1393 return FAIL;
1394
1395 *ccp = str;
1396
1397 return SUCCESS;
1398 }
1399
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES 15	/* E.g. "d0[]" -- operate on every lane.  */
#define NEON_INTERLEAVE_LANES 14	/* Structure list, no lane given.  */
1405
/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  Returns FAIL (and leaves *CCP
   untouched) if nothing acceptable is found.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with "no type, no index" attached.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* Pick up any type/index already attached by a .dn/.qn alias.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix may not conflict with a type already
     attached to the alias.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional scalar index: "[n]" selects a lane, "[]" means all lanes.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  /* Return value deliberately ignored: a failed parse leaves
	     X_op != O_constant and is caught just below.  */
	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1514
1515 /* Like arm_reg_parse, but allow allow the following extra features:
1516 - If RTYPE is non-zero, return the (possibly restricted) type of the
1517 register (e.g. Neon double or quad reg when either has been requested).
1518 - If this is a Neon vector type with additional type information, fill
1519 in the struct pointed to by VECTYPE (if non-NULL).
1520 This function will fault on encountering a scalar. */
1521
1522 static int
1523 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1524 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1525 {
1526 struct neon_typed_alias atype;
1527 char *str = *ccp;
1528 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1529
1530 if (reg == FAIL)
1531 return FAIL;
1532
1533 /* Do not allow regname(... to parse as a register. */
1534 if (*str == '(')
1535 return FAIL;
1536
1537 /* Do not allow a scalar (reg+index) to parse as a register. */
1538 if ((atype.defined & NTA_HASINDEX) != 0)
1539 {
1540 first_error (_("register operand expected, but got scalar"));
1541 return FAIL;
1542 }
1543
1544 if (vectype)
1545 *vectype = atype.eltype;
1546
1547 *ccp = str;
1548
1549 return reg;
1550 }
1551
/* A parsed scalar packs the D register number in the high bits and the
   lane index in the low four bits (see parse_scalar).  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1554
1555 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1556 have enough information to be able to do a good job bounds-checking. So, we
1557 just do easy checks here, and do further checks later. */
1558
1559 static int
1560 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1561 {
1562 int reg;
1563 char *str = *ccp;
1564 struct neon_typed_alias atype;
1565
1566 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1567
1568 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1569 return FAIL;
1570
1571 if (atype.index == NEON_ALL_LANES)
1572 {
1573 first_error (_("scalar must have an index"));
1574 return FAIL;
1575 }
1576 else if (atype.index >= 64 / elsize)
1577 {
1578 first_error (_("scalar index out of range"));
1579 return FAIL;
1580 }
1581
1582 if (type)
1583 *type = atype.eltype;
1584
1585 *ccp = str;
1586
1587 return reg * 16 + atype.index;
1588 }
1589
1590 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1591
1592 static long
1593 parse_reg_list (char ** strp)
1594 {
1595 char * str = * strp;
1596 long range = 0;
1597 int another_range;
1598
1599 /* We come back here if we get ranges concatenated by '+' or '|'. */
1600 do
1601 {
1602 skip_whitespace (str);
1603
1604 another_range = 0;
1605
1606 if (*str == '{')
1607 {
1608 int in_range = 0;
1609 int cur_reg = -1;
1610
1611 str++;
1612 do
1613 {
1614 int reg;
1615
1616 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1617 {
1618 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1619 return FAIL;
1620 }
1621
1622 if (in_range)
1623 {
1624 int i;
1625
1626 if (reg <= cur_reg)
1627 {
1628 first_error (_("bad range in register list"));
1629 return FAIL;
1630 }
1631
1632 for (i = cur_reg + 1; i < reg; i++)
1633 {
1634 if (range & (1 << i))
1635 as_tsktsk
1636 (_("Warning: duplicated register (r%d) in register list"),
1637 i);
1638 else
1639 range |= 1 << i;
1640 }
1641 in_range = 0;
1642 }
1643
1644 if (range & (1 << reg))
1645 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1646 reg);
1647 else if (reg <= cur_reg)
1648 as_tsktsk (_("Warning: register range not in ascending order"));
1649
1650 range |= 1 << reg;
1651 cur_reg = reg;
1652 }
1653 while (skip_past_comma (&str) != FAIL
1654 || (in_range = 1, *str++ == '-'));
1655 str--;
1656
1657 if (skip_past_char (&str, '}') == FAIL)
1658 {
1659 first_error (_("missing `}'"));
1660 return FAIL;
1661 }
1662 }
1663 else
1664 {
1665 expressionS exp;
1666
1667 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1668 return FAIL;
1669
1670 if (exp.X_op == O_constant)
1671 {
1672 if (exp.X_add_number
1673 != (exp.X_add_number & 0x0000ffff))
1674 {
1675 inst.error = _("invalid register mask");
1676 return FAIL;
1677 }
1678
1679 if ((range & exp.X_add_number) != 0)
1680 {
1681 int regno = range & exp.X_add_number;
1682
1683 regno &= -regno;
1684 regno = (1 << regno) - 1;
1685 as_tsktsk
1686 (_("Warning: duplicated register (r%d) in register list"),
1687 regno);
1688 }
1689
1690 range |= exp.X_add_number;
1691 }
1692 else
1693 {
1694 if (inst.reloc.type != 0)
1695 {
1696 inst.error = _("expression too complex");
1697 return FAIL;
1698 }
1699
1700 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1701 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1702 inst.reloc.pc_rel = 0;
1703 }
1704 }
1705
1706 if (*str == '|' || *str == '+')
1707 {
1708 str++;
1709 another_range = 1;
1710 }
1711 }
1712 while (another_range);
1713
1714 *strp = str;
1715 return range;
1716 }
1717
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision S registers.  */
  REGLIST_VFP_D,	/* Double-precision D registers.  */
  REGLIST_NEON_D	/* Neon D/Q registers (see parse_vfp_reg_list).  */
};
1726
/* Parse a VFP register list.  If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register.  Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
	 FIXME: This is not implemented, as it would require backtracking in
	 some cases, e.g.:
	   vtbl.8 d3,d4,d5
	 This could be done (the meaning isn't really ambiguous), but doesn't
	 fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug.  */

static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Map the list kind onto a register class.  MAX_REGS for the
     D-register cases depends on the FPU and is filled in below.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Add every register in the range to the mask.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): this steps past the terminating character without
     checking that it is '}', so "{d0" appears to be accepted here --
     confirm and consider validating with skip_past_char (&str, '}').  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1905
1906 /* True if two alias types are the same. */
1907
1908 static bfd_boolean
1909 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1910 {
1911 if (!a && !b)
1912 return TRUE;
1913
1914 if (!a || !b)
1915 return FALSE;
1916
1917 if (a->defined != b->defined)
1918 return FALSE;
1919
1920 if ((a->defined & NTA_HASTYPE) != 0
1921 && (a->eltype.type != b->eltype.type
1922 || a->eltype.size != b->eltype.size))
1923 return FALSE;
1924
1925 if ((a->defined & NTA_HASINDEX) != 0
1926 && (a->index != b->index))
1927 return FALSE;
1928
1929 return TRUE;
1930 }
1931
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

/* Accessors for the encoding described above.  */
#define NEON_LANE(X) ((X) & 0xf)
#define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1943
/* Parse a Neon element/structure list (see the encoding comment above).
   Accepts D registers, Q registers (counted as two D registers), Dn-Dm /
   Qn-Qm ranges and per-element "[n]"/"[]" indices; all entries must agree
   on type, stride and lane.  Returns the packed lane/stride/length value,
   or FAIL.  */

static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;	/* Register stride; -1 until established.  */
  int count = 0;	/* Number of D registers seen so far.  */
  int lane = -1;	/* Common lane index; -1 until established.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First entry fixes the base register and the common type.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second entry fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Later entries must continue the established stride.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register in the range (Q registers are two).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All indexed entries must select the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  /* A singleton list implies unit stride.  */
  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as documented above the function.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2096
2097 /* Parse an explicit relocation suffix on an expression. This is
2098 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2099 arm_reloc_hsh contains no entries, so this function can only
2100 succeed if there is no () after the word. Returns -1 on error,
2101 BFD_RELOC_UNUSED if there wasn't any suffix. */
2102
2103 static int
2104 parse_reloc (char **str)
2105 {
2106 struct reloc_entry *r;
2107 char *p, *q;
2108
2109 if (**str != '(')
2110 return BFD_RELOC_UNUSED;
2111
2112 p = *str + 1;
2113 q = p;
2114
2115 while (*q && *q != ')' && *q != ',')
2116 q++;
2117 if (*q != ')')
2118 return -1;
2119
2120 if ((r = (struct reloc_entry *)
2121 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2122 return -1;
2123
2124 *str = q + 1;
2125 return r->reloc;
2126 }
2127
2128 /* Directives: register aliases. */
2129
2130 static struct reg_entry *
2131 insert_reg_alias (char *str, unsigned number, int type)
2132 {
2133 struct reg_entry *new_reg;
2134 const char *name;
2135
2136 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2137 {
2138 if (new_reg->builtin)
2139 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2140
2141 /* Only warn about a redefinition if it's not defined as the
2142 same register. */
2143 else if (new_reg->number != number || new_reg->type != type)
2144 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2145
2146 return NULL;
2147 }
2148
2149 name = xstrdup (str);
2150 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2151
2152 new_reg->name = name;
2153 new_reg->number = number;
2154 new_reg->type = type;
2155 new_reg->builtin = FALSE;
2156 new_reg->neon = NULL;
2157
2158 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2159 abort ();
2160
2161 return new_reg;
2162 }
2163
2164 static void
2165 insert_neon_reg_alias (char *str, int number, int type,
2166 struct neon_typed_alias *atype)
2167 {
2168 struct reg_entry *reg = insert_reg_alias (str, number, type);
2169
2170 if (!reg)
2171 {
2172 first_error (_("attempt to redefine typed alias"));
2173 return;
2174 }
2175
2176 if (atype)
2177 {
2178 reg->neon = (struct neon_typed_alias *)
2179 xmalloc (sizeof (struct neon_typed_alias));
2180 *reg->neon = *atype;
2181 }
2182 }
2183
2184 /* Look for the .req directive. This is of the form:
2185
2186 new_register_name .req existing_register_name
2187
2188 If we find one, or if it looks sufficiently like one that we want to
2189 handle any error here, return TRUE. Otherwise return FALSE. */
2190
2191 static bfd_boolean
2192 create_register_alias (char * newname, char *p)
2193 {
2194 struct reg_entry *old;
2195 char *oldname, *nbuf;
2196 size_t nlen;
2197
2198 /* The input scrubber ensures that whitespace after the mnemonic is
2199 collapsed to single spaces. */
2200 oldname = p;
2201 if (strncmp (oldname, " .req ", 6) != 0)
2202 return FALSE;
2203
2204 oldname += 6;
2205 if (*oldname == '\0')
2206 return FALSE;
2207
2208 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2209 if (!old)
2210 {
2211 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2212 return TRUE;
2213 }
2214
2215 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2216 the desired alias name, and p points to its end. If not, then
2217 the desired alias name is in the global original_case_string. */
2218 #ifdef TC_CASE_SENSITIVE
2219 nlen = p - newname;
2220 #else
2221 newname = original_case_string;
2222 nlen = strlen (newname);
2223 #endif
2224
2225 nbuf = (char *) alloca (nlen + 1);
2226 memcpy (nbuf, newname, nlen);
2227 nbuf[nlen] = '\0';
2228
2229 /* Create aliases under the new name as stated; an all-lowercase
2230 version of the new name; and an all-uppercase version of the new
2231 name. */
2232 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2233 {
2234 for (p = nbuf; *p; p++)
2235 *p = TOUPPER (*p);
2236
2237 if (strncmp (nbuf, newname, nlen))
2238 {
2239 /* If this attempt to create an additional alias fails, do not bother
2240 trying to create the all-lower case alias. We will fail and issue
2241 a second, duplicate error message. This situation arises when the
2242 programmer does something like:
2243 foo .req r0
2244 Foo .req r1
2245 The second .req creates the "Foo" alias but then fails to create
2246 the artificial FOO alias because it has already been created by the
2247 first .req. */
2248 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2249 return TRUE;
2250 }
2251
2252 for (p = nbuf; *p; p++)
2253 *p = TOLOWER (*p);
2254
2255 if (strncmp (nbuf, newname, nlen))
2256 insert_reg_alias (nbuf, old->number, old->type);
2257 }
2258
2259 return TRUE;
2260 }
2261
2262 /* Create a Neon typed/indexed register alias using directives, e.g.:
2263 X .dn d5.s32[1]
2264 Y .qn 6.s16
2265 Z .dn d7
2266 T .dn Z[0]
2267 These typed registers can be used instead of the types specified after the
2268 Neon mnemonic, so long as all operands given have types. Types can also be
2269 specified directly, e.g.:
2270 vadd d0.s32, d1.s32, d2.s32 */
2271
2272 static bfd_boolean
2273 create_neon_reg_alias (char *newname, char *p)
2274 {
2275 enum arm_reg_type basetype;
2276 struct reg_entry *basereg;
2277 struct reg_entry mybasereg;
2278 struct neon_type ntype;
2279 struct neon_typed_alias typeinfo;
2280 char *namebuf, *nameend ATTRIBUTE_UNUSED;
2281 int namelen;
2282
2283 typeinfo.defined = 0;
2284 typeinfo.eltype.type = NT_invtype;
2285 typeinfo.eltype.size = -1;
2286 typeinfo.index = -1;
2287
2288 nameend = p;
2289
2290 if (strncmp (p, " .dn ", 5) == 0)
2291 basetype = REG_TYPE_VFD;
2292 else if (strncmp (p, " .qn ", 5) == 0)
2293 basetype = REG_TYPE_NQ;
2294 else
2295 return FALSE;
2296
2297 p += 5;
2298
2299 if (*p == '\0')
2300 return FALSE;
2301
2302 basereg = arm_reg_parse_multi (&p);
2303
2304 if (basereg && basereg->type != basetype)
2305 {
2306 as_bad (_("bad type for register"));
2307 return FALSE;
2308 }
2309
2310 if (basereg == NULL)
2311 {
2312 expressionS exp;
2313 /* Try parsing as an integer. */
2314 my_get_expression (&exp, &p, GE_NO_PREFIX);
2315 if (exp.X_op != O_constant)
2316 {
2317 as_bad (_("expression must be constant"));
2318 return FALSE;
2319 }
2320 basereg = &mybasereg;
2321 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2322 : exp.X_add_number;
2323 basereg->neon = 0;
2324 }
2325
2326 if (basereg->neon)
2327 typeinfo = *basereg->neon;
2328
2329 if (parse_neon_type (&ntype, &p) == SUCCESS)
2330 {
2331 /* We got a type. */
2332 if (typeinfo.defined & NTA_HASTYPE)
2333 {
2334 as_bad (_("can't redefine the type of a register alias"));
2335 return FALSE;
2336 }
2337
2338 typeinfo.defined |= NTA_HASTYPE;
2339 if (ntype.elems != 1)
2340 {
2341 as_bad (_("you must specify a single type only"));
2342 return FALSE;
2343 }
2344 typeinfo.eltype = ntype.el[0];
2345 }
2346
2347 if (skip_past_char (&p, '[') == SUCCESS)
2348 {
2349 expressionS exp;
2350 /* We got a scalar index. */
2351
2352 if (typeinfo.defined & NTA_HASINDEX)
2353 {
2354 as_bad (_("can't redefine the index of a scalar alias"));
2355 return FALSE;
2356 }
2357
2358 my_get_expression (&exp, &p, GE_NO_PREFIX);
2359
2360 if (exp.X_op != O_constant)
2361 {
2362 as_bad (_("scalar index must be constant"));
2363 return FALSE;
2364 }
2365
2366 typeinfo.defined |= NTA_HASINDEX;
2367 typeinfo.index = exp.X_add_number;
2368
2369 if (skip_past_char (&p, ']') == FAIL)
2370 {
2371 as_bad (_("expecting ]"));
2372 return FALSE;
2373 }
2374 }
2375
2376 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2377 the desired alias name, and p points to its end. If not, then
2378 the desired alias name is in the global original_case_string. */
2379 #ifdef TC_CASE_SENSITIVE
2380 namelen = nameend - newname;
2381 #else
2382 newname = original_case_string;
2383 namelen = strlen (newname);
2384 #endif
2385
2386 namebuf = (char *) alloca (namelen + 1);
2387 strncpy (namebuf, newname, namelen);
2388 namebuf[namelen] = '\0';
2389
2390 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2391 typeinfo.defined != 0 ? &typeinfo : NULL);
2392
2393 /* Insert name in all uppercase. */
2394 for (p = namebuf; *p; p++)
2395 *p = TOUPPER (*p);
2396
2397 if (strncmp (namebuf, newname, namelen))
2398 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2399 typeinfo.defined != 0 ? &typeinfo : NULL);
2400
2401 /* Insert name in all lowercase. */
2402 for (p = namebuf; *p; p++)
2403 *p = TOLOWER (*p);
2404
2405 if (strncmp (namebuf, newname, namelen))
2406 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2407 typeinfo.defined != 0 ? &typeinfo : NULL);
2408
2409 return TRUE;
2410 }
2411
2412 /* Should never be called, as .req goes between the alias and the
2413 register name, not at the beginning of the line. */
2414
/* Error handler for a ".req" at the start of a statement; valid uses
   ("alias .req reg") are intercepted earlier by create_register_alias.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
2420
/* Error handler for a ".dn" at the start of a statement; valid uses
   are intercepted earlier by create_neon_reg_alias.  */
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2426
/* Error handler for a ".qn" at the start of a statement; valid uses
   are intercepted earlier by create_neon_reg_alias.  */
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2432
2433 /* The .unreq directive deletes an alias which was previously defined
2434 by .req. For example:
2435
2436 my_alias .req r11
2437 .unreq my_alias */
2438
2439 static void
2440 s_unreq (int a ATTRIBUTE_UNUSED)
2441 {
2442 char * name;
2443 char saved_char;
2444
2445 name = input_line_pointer;
2446
2447 while (*input_line_pointer != 0
2448 && *input_line_pointer != ' '
2449 && *input_line_pointer != '\n')
2450 ++input_line_pointer;
2451
2452 saved_char = *input_line_pointer;
2453 *input_line_pointer = 0;
2454
2455 if (!*name)
2456 as_bad (_("invalid syntax for .unreq directive"));
2457 else
2458 {
2459 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2460 name);
2461
2462 if (!reg)
2463 as_bad (_("unknown register alias '%s'"), name);
2464 else if (reg->builtin)
2465 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2466 name);
2467 else
2468 {
2469 char * p;
2470 char * nbuf;
2471
2472 hash_delete (arm_reg_hsh, name, FALSE);
2473 free ((char *) reg->name);
2474 if (reg->neon)
2475 free (reg->neon);
2476 free (reg);
2477
2478 /* Also locate the all upper case and all lower case versions.
2479 Do not complain if we cannot find one or the other as it
2480 was probably deleted above. */
2481
2482 nbuf = strdup (name);
2483 for (p = nbuf; *p; p++)
2484 *p = TOUPPER (*p);
2485 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2486 if (reg)
2487 {
2488 hash_delete (arm_reg_hsh, nbuf, FALSE);
2489 free ((char *) reg->name);
2490 if (reg->neon)
2491 free (reg->neon);
2492 free (reg);
2493 }
2494
2495 for (p = nbuf; *p; p++)
2496 *p = TOLOWER (*p);
2497 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2498 if (reg)
2499 {
2500 hash_delete (arm_reg_hsh, nbuf, FALSE);
2501 free ((char *) reg->name);
2502 if (reg->neon)
2503 free (reg->neon);
2504 free (reg);
2505 }
2506
2507 free (nbuf);
2508 }
2509 }
2510
2511 *input_line_pointer = saved_char;
2512 demand_empty_rest_of_line ();
2513 }
2514
2515 /* Directives: Instruction set selection. */
2516
2517 #ifdef OBJ_ELF
2518 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2519 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2520 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2521 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2522
2523 /* Create a new mapping symbol for the transition to STATE. */
2524
/* STATE selects the mapping symbol name ($a, $t or $d); VALUE is the
   offset within FRAG at which the symbol is placed.  */
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Record the ARM/Thumb/interworking state on the new symbol.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Two symbols at the same offset: keep only the newest one.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2598
2599 /* We must sometimes convert a region marked as code to data during
2600 code alignment, if an odd number of bytes have to be padded. The
2601 code mapping symbol is pushed to an aligned address. */
2602
/* Mark BYTES bytes of alignment padding starting at offset VALUE in
   FRAG as data ($d), then restore the STATE mapping just after the
   padding.  */
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The symbol being removed was also the first in the frag.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2625
2626 static void mapping_state_2 (enum mstate state, int max_chars);
2627
2628 /* Set the mapping state to STATE. Only call this when about to
2629 emit some STATE bytes to the file. */
2630
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /*  PR gas/12931
	All ARM instructions require 4-byte alignment.
	(Almost) all Thumb instructions require 2-byte alignment.

	When emitting instructions into any section, mark the section
	appropriately.

	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
	but themselves require 2-byte alignment; this applies to some
	PC- relative forms.  However, these cases will involve implicit
	literal pool generation or an explicit .align >=2, both of
	which will cause the section to be marked with sufficient
	alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
	  || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  mapping_state_2 (state, 0);
#undef TRANSITION
}
2679
2680 /* Same as mapping_state, but MAX_CHARS bytes have already been
2681 allocated. Put the mapping symbol that far back. */
2682
2683 static void
2684 mapping_state_2 (enum mstate state, int max_chars)
2685 {
2686 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2687
2688 if (!SEG_NORMAL (now_seg))
2689 return;
2690
2691 if (mapstate == state)
2692 /* The mapping symbol has already been emitted.
2693 There is nothing else to do. */
2694 return;
2695
2696 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2697 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2698 }
2699 #else
2700 #define mapping_state(x) ((void)0)
2701 #define mapping_state_2(x, y) ((void)0)
2702 #endif
2703
2704 /* Find the real, Thumb encoded start of a Thumb function. */
2705
2706 #ifdef OBJ_COFF
2707 static symbolS *
2708 find_real_start (symbolS * symbolP)
2709 {
2710 char * real_start;
2711 const char * name = S_GET_NAME (symbolP);
2712 symbolS * new_target;
2713
2714 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2715 #define STUB_NAME ".real_start_of"
2716
2717 if (name == NULL)
2718 abort ();
2719
2720 /* The compiler may generate BL instructions to local labels because
2721 it needs to perform a branch to a far away location. These labels
2722 do not have a corresponding ".real_start_of" label. We check
2723 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2724 the ".real_start_of" convention for nonlocal branches. */
2725 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2726 return symbolP;
2727
2728 real_start = ACONCAT ((STUB_NAME, name, NULL));
2729 new_target = symbol_find (real_start);
2730
2731 if (new_target == NULL)
2732 {
2733 as_warn (_("Failed to find real start of function: %s\n"), name);
2734 new_target = symbolP;
2735 }
2736
2737 return new_target;
2738 }
2739 #endif
2740
/* Switch the assembler between 16-bit (Thumb) and 32-bit (ARM)
   instruction encoding, diagnosing CPUs that lack the requested
   instruction set and recording section-alignment requirements.  */
static void
opcode_select (int width)
{
  switch (width)
    {
    case 16:
      if (! thumb_mode)
	{
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	    as_bad (_("selected processor does not support THUMB opcodes"));

	  thumb_mode = 1;
	  /* No need to force the alignment, since we will have been
	     coming from ARM mode, which is word-aligned.  */
	  record_alignment (now_seg, 1);
	}
      break;

    case 32:
      if (thumb_mode)
	{
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
	    as_bad (_("selected processor does not support ARM opcodes"));

	  thumb_mode = 0;

	  /* ARM instructions are word-aligned; pad if we are coming
	     from (possibly halfword-aligned) Thumb code.  */
	  if (!need_pass_2)
	    frag_align (2, 0, 0);

	  record_alignment (now_seg, 1);
	}
      break;

    default:
      as_bad (_("invalid instruction size selected (%d)"), width);
    }
}
2778
/* Handle the ".arm" directive: switch to 32-bit ARM encoding.  */
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2785
/* Handle the ".thumb" directive: switch to 16-bit Thumb encoding.  */
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2792
2793 static void
2794 s_code (int unused ATTRIBUTE_UNUSED)
2795 {
2796 int temp;
2797
2798 temp = get_absolute_expression ();
2799 switch (temp)
2800 {
2801 case 16:
2802 case 32:
2803 opcode_select (temp);
2804 break;
2805
2806 default:
2807 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2808 }
2809 }
2810
/* Handle the ".force_thumb" directive.  */
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* thumb_mode == 2 marks the forced variant, as opposed to the
	 value 1 set by an ordinary mode switch in opcode_select.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2827
/* Handle the ".thumb_func" directive: switch to Thumb encoding and
   flag the next label as naming a Thumb function.  */
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2837
2838 /* Perform a .set directive, but also mark the alias as
2839 being a thumb function. */
2840
/* Handle ".thumb_set NAME, EXPR": behaves like ".set" (or "=" when
   EQUIV is non-zero) but additionally marks NAME as a Thumb function
   for interworking purposes.  */
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  name = input_line_pointer;
  delim = get_symbol_end ();
  end_name = input_line_pointer;
  *end_name = delim;

  if (*input_line_pointer != ',')
    {
      /* Temporarily NUL-terminate the name for the diagnostic.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2927
2928 /* Directives: Mode selection. */
2929
2930 /* .syntax [unified|divided] - choose the new unified syntax
2931 (same for Arm and Thumb encoding, modulo slight differences in what
2932 can be represented) or the old divergent syntax for each mode. */
2933 static void
2934 s_syntax (int unused ATTRIBUTE_UNUSED)
2935 {
2936 char *name, delim;
2937
2938 name = input_line_pointer;
2939 delim = get_symbol_end ();
2940
2941 if (!strcasecmp (name, "unified"))
2942 unified_syntax = TRUE;
2943 else if (!strcasecmp (name, "divided"))
2944 unified_syntax = FALSE;
2945 else
2946 {
2947 as_bad (_("unrecognized syntax mode \"%s\""), name);
2948 return;
2949 }
2950 *input_line_pointer = delim;
2951 demand_empty_rest_of_line ();
2952 }
2953
2954 /* Directives: sectioning and alignment. */
2955
2956 /* Same as s_align_ptwo but align 0 => align 2. */
2957
2958 static void
2959 s_align (int unused ATTRIBUTE_UNUSED)
2960 {
2961 int temp;
2962 bfd_boolean fill_p;
2963 long temp_fill;
2964 long max_alignment = 15;
2965
2966 temp = get_absolute_expression ();
2967 if (temp > max_alignment)
2968 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2969 else if (temp < 0)
2970 {
2971 as_bad (_("alignment negative. 0 assumed."));
2972 temp = 0;
2973 }
2974
2975 if (*input_line_pointer == ',')
2976 {
2977 input_line_pointer++;
2978 temp_fill = get_absolute_expression ();
2979 fill_p = TRUE;
2980 }
2981 else
2982 {
2983 fill_p = FALSE;
2984 temp_fill = 0;
2985 }
2986
2987 if (!temp)
2988 temp = 2;
2989
2990 /* Only make a frag if we HAVE to. */
2991 if (temp && !need_pass_2)
2992 {
2993 if (!fill_p && subseg_text_p (now_seg))
2994 frag_align_code (temp, 0);
2995 else
2996 frag_align (temp, (int) temp_fill, 0);
2997 }
2998 demand_empty_rest_of_line ();
2999
3000 record_alignment (now_seg, temp);
3001 }
3002
/* Handle the ".bss" directive: switch output to the BSS section.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3015
/* Handle the ".even" directive: align to a 2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3027
3028 /* Directives: CodeComposer Studio. */
3029
3030 /* .ref (for CodeComposer Studio syntax only). */
/* .ref (for CodeComposer Studio syntax only): accepted and ignored
   when -mccs is in effect, rejected otherwise.  */
static void
s_ccs_ref (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    ignore_rest_of_line ();
  else
    as_bad (_(".ref pseudo-op only available with -mccs flag."));
}
3039
3040 /* If name is not NULL, then it is used for marking the beginning of a
3041 function, wherease if it is NULL then it means the function end. */
3042 static void
3043 asmfunc_debug (const char * name)
3044 {
3045 static const char * last_name = NULL;
3046
3047 if (name != NULL)
3048 {
3049 gas_assert (last_name == NULL);
3050 last_name = name;
3051
3052 if (debug_type == DEBUG_STABS)
3053 stabs_generate_asm_func (name, name);
3054 }
3055 else
3056 {
3057 gas_assert (last_name != NULL);
3058
3059 if (debug_type == DEBUG_STABS)
3060 stabs_generate_asm_endfunc (last_name, last_name);
3061
3062 last_name = NULL;
3063 }
3064 }
3065
/* Handle ".asmfunc" (CodeComposer Studio syntax): begin waiting for
   the function-name label that opens an asmfunc region.  */
static void
s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  asmfunc_state = WAITING_ASMFUNC_NAME;
	  break;

	case WAITING_ASMFUNC_NAME:
	  as_bad (_(".asmfunc repeated."));
	  break;

	case WAITING_ENDASMFUNC:
	  as_bad (_(".asmfunc without function."));
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
}
3090
/* Handle ".endasmfunc" (CodeComposer Studio syntax): close the asmfunc
   region opened by ".asmfunc" and its label.  */
static void
s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  as_bad (_(".endasmfunc without a .asmfunc."));
	  break;

	case WAITING_ASMFUNC_NAME:
	  as_bad (_(".endasmfunc without function."));
	  break;

	case WAITING_ENDASMFUNC:
	  asmfunc_state = OUTSIDE_ASMFUNC;
	  /* NULL marks the function end for the debug-info emitter.  */
	  asmfunc_debug (NULL);
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
}
3116
/* Handle ".def" (CodeComposer Studio syntax): treated as ".global".  */
static void
s_ccs_def (int name)
{
  if (codecomposer_syntax)
    s_globl (name);
  else
    as_bad (_(".def pseudo-op only available with -mccs flag."));
}
3125
3126 /* Directives: Literal pools. */
3127
3128 static literal_pool *
3129 find_literal_pool (void)
3130 {
3131 literal_pool * pool;
3132
3133 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3134 {
3135 if (pool->section == now_seg
3136 && pool->sub_section == now_subseg)
3137 break;
3138 }
3139
3140 return pool;
3141 }
3142
3143 static literal_pool *
3144 find_or_make_literal_pool (void)
3145 {
3146 /* Next literal pool ID number. */
3147 static unsigned int latest_pool_num = 1;
3148 literal_pool * pool;
3149
3150 pool = find_literal_pool ();
3151
3152 if (pool == NULL)
3153 {
3154 /* Create a new pool. */
3155 pool = (literal_pool *) xmalloc (sizeof (* pool));
3156 if (! pool)
3157 return NULL;
3158
3159 pool->next_free_entry = 0;
3160 pool->section = now_seg;
3161 pool->sub_section = now_subseg;
3162 pool->next = list_of_pools;
3163 pool->symbol = NULL;
3164 pool->alignment = 2;
3165
3166 /* Add it to the list. */
3167 list_of_pools = pool;
3168 }
3169
3170 /* New pools, and emptied pools, will have a NULL symbol. */
3171 if (pool->symbol == NULL)
3172 {
3173 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3174 (valueT) 0, &zero_address_frag);
3175 pool->id = latest_pool_num ++;
3176 }
3177
3178 /* Done. */
3179 return pool;
3180 }
3181
3182 /* Add the literal in the global 'inst'
3183 structure to the relevant literal pool. */
3184
/* Add the literal described by the global 'inst' (inst.reloc.exp and,
   for 8-byte literals, inst.operands[1]) to the current pool, reusing
   an existing identical entry when possible.  NBYTES is 4 or 8.  On
   success, rewrites inst.reloc.exp to a symbol+offset reference into
   the pool and returns SUCCESS; returns FAIL on overflow or bad input.

   Each pool entry's X_md encodes the entry size in its low byte and
   the PADDING_SLOT flag in the next byte (see the <<8 uses below).  */
static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1;
  unsigned imm2 = 0;

  if (nbytes == 8)
    {
      /* Split the 64-bit value into two 32-bit halves, ordered to
	 match the target endianness.  */
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.reloc.exp.X_unsigned ? 0
	       : ((int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match either an identical constant...  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.reloc.exp.X_unsigned))
	    break;

	  /* ... or an identical symbolic expression.  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.reloc.exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.reloc.exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte literal is stored as two consecutive 4-byte entries
	 at an 8-byte-aligned offset; both halves must match.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.reloc.exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.reloc.exp.X_unsigned))
	break;

      /* A 4-byte literal may reuse a padding slot left by an earlier
	 8-byte alignment.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.reloc.exp.X_op == O_constant
	        || inst.reloc.exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Insert a 4-byte padding slot (tagged PADDING_SLOT in
		 the high byte of X_md) to reach 8-byte alignment.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.reloc.exp;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Overwrite a padding slot with the new 4-byte literal.  */
      pool->literals[entry] = inst.reloc.exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's reloc to point at the pool entry.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = pool_size;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3345
/* Hook called when a label without a trailing colon starts a line.
   In CodeComposer mode, a label following ".asmfunc" names the
   function being opened.  UNUSED1 is the first label character; REST
   points just past it.  Returns FALSE if the label is invalid.  */
bfd_boolean
tc_start_label_without_colon (char unused1 ATTRIBUTE_UNUSED, const char * rest)
{
  bfd_boolean ret = TRUE;

  if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
    {
      const char *label = rest;

      /* Walk backwards from REST to the start of the label (the
	 character just after the preceding end-of-line).  */
      while (!is_end_of_line[(int) label[-1]])
	--label;

      /* CCS function labels may not start with a dot.  */
      if (*label == '.')
	{
	  as_bad (_("Invalid label '%s'"), label);
	  ret = FALSE;
	}

      asmfunc_debug (label);

      asmfunc_state = WAITING_ENDASMFUNC;
    }

  return ret;
}
3371
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */

static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.	 */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.	 */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its name.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
3422
/* Directive: .ltorg / .pool.  Dump the current literal pool at this
   point in the output, suitably aligned, naming it with the pool's id.
   Does nothing if there is no pool or the pool is empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Literal pools are data: update the mapping state and place a data
     mapping symbol at the start of the pool.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The \002 byte keeps the name out of the user's symbol namespace.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.
	 The entry size was recorded in the low bits of X_md.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3474
3475 #ifdef OBJ_ELF
3476 /* Forward declarations for functions below, in the MD interface
3477 section. */
3478 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3479 static valueT create_unwind_entry (int);
3480 static void start_unwind_section (const segT, int);
3481 static void add_unwind_opcode (valueT, int);
3482 static void flush_pending_unwind (void);
3483
3484 /* Directives: Data. */
3485
3486 static void
3487 s_arm_elf_cons (int nbytes)
3488 {
3489 expressionS exp;
3490
3491 #ifdef md_flush_pending_output
3492 md_flush_pending_output ();
3493 #endif
3494
3495 if (is_it_end_of_statement ())
3496 {
3497 demand_empty_rest_of_line ();
3498 return;
3499 }
3500
3501 #ifdef md_cons_align
3502 md_cons_align (nbytes);
3503 #endif
3504
3505 mapping_state (MAP_DATA);
3506 do
3507 {
3508 int reloc;
3509 char *base = input_line_pointer;
3510
3511 expression (& exp);
3512
3513 if (exp.X_op != O_symbol)
3514 emit_expr (&exp, (unsigned int) nbytes);
3515 else
3516 {
3517 char *before_reloc = input_line_pointer;
3518 reloc = parse_reloc (&input_line_pointer);
3519 if (reloc == -1)
3520 {
3521 as_bad (_("unrecognized relocation suffix"));
3522 ignore_rest_of_line ();
3523 return;
3524 }
3525 else if (reloc == BFD_RELOC_UNUSED)
3526 emit_expr (&exp, (unsigned int) nbytes);
3527 else
3528 {
3529 reloc_howto_type *howto = (reloc_howto_type *)
3530 bfd_reloc_type_lookup (stdoutput,
3531 (bfd_reloc_code_real_type) reloc);
3532 int size = bfd_get_reloc_size (howto);
3533
3534 if (reloc == BFD_RELOC_ARM_PLT32)
3535 {
3536 as_bad (_("(plt) is only valid on branch targets"));
3537 reloc = BFD_RELOC_UNUSED;
3538 size = 0;
3539 }
3540
3541 if (size > nbytes)
3542 as_bad (_("%s relocations do not fit in %d bytes"),
3543 howto->name, nbytes);
3544 else
3545 {
3546 /* We've parsed an expression stopping at O_symbol.
3547 But there may be more expression left now that we
3548 have parsed the relocation marker. Parse it again.
3549 XXX Surely there is a cleaner way to do this. */
3550 char *p = input_line_pointer;
3551 int offset;
3552 char *save_buf = (char *) alloca (input_line_pointer - base);
3553 memcpy (save_buf, base, input_line_pointer - base);
3554 memmove (base + (input_line_pointer - before_reloc),
3555 base, before_reloc - base);
3556
3557 input_line_pointer = base + (input_line_pointer-before_reloc);
3558 expression (&exp);
3559 memcpy (base, save_buf, p - base);
3560
3561 offset = nbytes - size;
3562 p = frag_more (nbytes);
3563 memset (p, 0, nbytes);
3564 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3565 size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3566 }
3567 }
3568 }
3569 }
3570 while (*input_line_pointer++ == ',');
3571
3572 /* Put terminator back into stream. */
3573 input_line_pointer --;
3574 demand_empty_rest_of_line ();
3575 }
3576
3577 /* Emit an expression containing a 32-bit thumb instruction.
3578 Implementation based on put_thumb32_insn. */
3579
3580 static void
3581 emit_thumb32_expr (expressionS * exp)
3582 {
3583 expressionS exp_high = *exp;
3584
3585 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3586 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3587 exp->X_add_number &= 0xffff;
3588 emit_expr (exp, (unsigned int) THUMB_SIZE);
3589 }
3590
/* Guess the size in bytes of a Thumb instruction from its opcode
   value: 2 for a 16-bit encoding, 4 for a 32-bit encoding, 0 if the
   value is ambiguous and the user must pick .inst.n/.inst.w.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int value = (unsigned int) opcode;

  if (value < 0xe800u)
    return 2;
  if (value >= 0xe8000000u)
    return 4;
  return 0;
}
3603
/* Emit the constant expression EXP as one instruction of NBYTES bytes
   (2 or 4); NBYTES == 0 means deduce the width from the opcode value
   (plain .inst).  Returns TRUE if an instruction was emitted.  */
static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width given: guess from the opcode pattern.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  /* A 16-bit encoding cannot hold a value above 0xffff.  */
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block state machine in step with this
		 hand-written opcode.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* 32-bit Thumb instructions go out as two halfwords on
		 little-endian targets.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3648
3649 /* Like s_arm_elf_cons but do not use md_cons_align and
3650 set the mapping state to MAP_ARM/MAP_THUMB. */
3651
3652 static void
3653 s_arm_elf_inst (int nbytes)
3654 {
3655 if (is_it_end_of_statement ())
3656 {
3657 demand_empty_rest_of_line ();
3658 return;
3659 }
3660
3661 /* Calling mapping_state () here will not change ARM/THUMB,
3662 but will ensure not to be in DATA state. */
3663
3664 if (thumb_mode)
3665 mapping_state (MAP_THUMB);
3666 else
3667 {
3668 if (nbytes != 0)
3669 {
3670 as_bad (_("width suffixes are invalid in ARM mode"));
3671 ignore_rest_of_line ();
3672 return;
3673 }
3674
3675 nbytes = 4;
3676
3677 mapping_state (MAP_ARM);
3678 }
3679
3680 do
3681 {
3682 expressionS exp;
3683
3684 expression (& exp);
3685
3686 if (! emit_insn (& exp, nbytes))
3687 {
3688 ignore_rest_of_line ();
3689 return;
3690 }
3691 }
3692 while (*input_line_pointer++ == ',');
3693
3694 /* Put terminator back into stream. */
3695 input_line_pointer --;
3696 demand_empty_rest_of_line ();
3697 }
3698
/* Parse a .rel31 directive: "<bit>, <expression>".  Emits a 32-bit
   word whose top bit is <bit> and whose low 31 bits carry a
   pc-relative PREL31 reference to <expression>.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  /* The first operand selects the value of bit 31.  */
  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  /* Emit the high bit now; the relocation fills in the low 31 bits
     (pc-relative fix, hence the 1 argument).  */
  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3738
3739 /* Directives: AEABI stack-unwind tables. */
3740
3741 /* Parse an unwind_fnstart directive. Simply records the current location. */
3742
3743 static void
3744 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3745 {
3746 demand_empty_rest_of_line ();
3747 if (unwind.proc_start)
3748 {
3749 as_bad (_("duplicate .fnstart directive"));
3750 return;
3751 }
3752
3753 /* Mark the start of the function. */
3754 unwind.proc_start = expr_build_dot ();
3755
3756 /* Reset the rest of the unwind info. */
3757 unwind.opcode_count = 0;
3758 unwind.table_entry = NULL;
3759 unwind.personality_routine = NULL;
3760 unwind.personality_index = -1;
3761 unwind.frame_size = 0;
3762 unwind.fp_offset = 0;
3763 unwind.fp_reg = REG_SP;
3764 unwind.fp_used = 0;
3765 unwind.sp_restored = 0;
3766 }
3767
3768
3769 /* Parse a handlerdata directive. Creates the exception handling table entry
3770 for the function. */
3771
3772 static void
3773 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3774 {
3775 demand_empty_rest_of_line ();
3776 if (!unwind.proc_start)
3777 as_bad (MISSING_FNSTART);
3778
3779 if (unwind.table_entry)
3780 as_bad (_("duplicate .handlerdata directive"));
3781
3782 create_unwind_entry (1);
3783 }
3784
/* Parse an unwind_fnend directive.  Generates the two-word index table
   entry for the function and, where EHABI personality routines are
   used, records the linker dependency on them.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  VAL is non-zero only when the entry could be
     encoded inline.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      /* A zero-sized BFD_RELOC_NONE fix records the reference without
	 emitting any bytes.  */
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3854
3855
3856 /* Parse an unwind_cantunwind directive. */
3857
3858 static void
3859 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3860 {
3861 demand_empty_rest_of_line ();
3862 if (!unwind.proc_start)
3863 as_bad (MISSING_FNSTART);
3864
3865 if (unwind.personality_routine || unwind.personality_index != -1)
3866 as_bad (_("personality routine specified for cantunwind frame"));
3867
3868 unwind.personality_index = -2;
3869 }
3870
3871
3872 /* Parse a personalityindex directive. */
3873
3874 static void
3875 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3876 {
3877 expressionS exp;
3878
3879 if (!unwind.proc_start)
3880 as_bad (MISSING_FNSTART);
3881
3882 if (unwind.personality_routine || unwind.personality_index != -1)
3883 as_bad (_("duplicate .personalityindex directive"));
3884
3885 expression (&exp);
3886
3887 if (exp.X_op != O_constant
3888 || exp.X_add_number < 0 || exp.X_add_number > 15)
3889 {
3890 as_bad (_("bad personality routine number"));
3891 ignore_rest_of_line ();
3892 return;
3893 }
3894
3895 unwind.personality_index = exp.X_add_number;
3896
3897 demand_empty_rest_of_line ();
3898 }
3899
3900
3901 /* Parse a personality directive. */
3902
3903 static void
3904 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3905 {
3906 char *name, *p, c;
3907
3908 if (!unwind.proc_start)
3909 as_bad (MISSING_FNSTART);
3910
3911 if (unwind.personality_routine || unwind.personality_index != -1)
3912 as_bad (_("duplicate .personality directive"));
3913
3914 name = input_line_pointer;
3915 c = get_symbol_end ();
3916 p = input_line_pointer;
3917 unwind.personality_routine = symbol_find_or_make (name);
3918 *p = c;
3919 demand_empty_rest_of_line ();
3920 }
3921
3922
/* Parse a directive saving core registers.  RANGE is a bitmask with
   bit N set for each saved register rN.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  (0x3000 covers r12/r13; the
     pending "movsp ip" opcode is dropped and ip rewritten as sp.)  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  The 0xbff0 mask excludes
	 r14, which the short form can encode on its own.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
3998
3999
4000 /* Parse a directive saving FPA registers. */
4001
4002 static void
4003 s_arm_unwind_save_fpa (int reg)
4004 {
4005 expressionS exp;
4006 int num_regs;
4007 valueT op;
4008
4009 /* Get Number of registers to transfer. */
4010 if (skip_past_comma (&input_line_pointer) != FAIL)
4011 expression (&exp);
4012 else
4013 exp.X_op = O_illegal;
4014
4015 if (exp.X_op != O_constant)
4016 {
4017 as_bad (_("expected , <constant>"));
4018 ignore_rest_of_line ();
4019 return;
4020 }
4021
4022 num_regs = exp.X_add_number;
4023
4024 if (num_regs < 1 || num_regs > 4)
4025 {
4026 as_bad (_("number of registers must be in the range [1:4]"));
4027 ignore_rest_of_line ();
4028 return;
4029 }
4030
4031 demand_empty_rest_of_line ();
4032
4033 if (reg == 4)
4034 {
4035 /* Short form. */
4036 op = 0xb4 | (num_regs - 1);
4037 add_unwind_opcode (op, 1);
4038 }
4039 else
4040 {
4041 /* Long form. */
4042 op = 0xc800 | (reg << 4) | (num_regs - 1);
4043 add_unwind_opcode (op, 2);
4044 }
4045 unwind.frame_size += num_regs * 12;
4046 }
4047
4048
/* Parse a directive saving VFP registers for ARMv6 and above.  The
   register list may span D0..D31; registers at or above D16 need a
   separate opcode.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* The opcode encodes the first register relative to D16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* D registers are 8 bytes each.  */
  unwind.frame_size += count * 8;
}
4097
4098
4099 /* Parse a directive saving VFP registers for pre-ARMv6. */
4100
4101 static void
4102 s_arm_unwind_save_vfp (void)
4103 {
4104 int count;
4105 unsigned int reg;
4106 valueT op;
4107
4108 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4109 if (count == FAIL)
4110 {
4111 as_bad (_("expected register list"));
4112 ignore_rest_of_line ();
4113 return;
4114 }
4115
4116 demand_empty_rest_of_line ();
4117
4118 if (reg == 8)
4119 {
4120 /* Short form. */
4121 op = 0xb8 | (count - 1);
4122 add_unwind_opcode (op, 1);
4123 }
4124 else
4125 {
4126 /* Long form. */
4127 op = 0xb300 | (reg << 4) | (count - 1);
4128 add_unwind_opcode (op, 2);
4129 }
4130 unwind.frame_size += count * 8 + 4;
4131 }
4132
4133
4134 /* Parse a directive saving iWMMXt data registers. */
4135
4136 static void
4137 s_arm_unwind_save_mmxwr (void)
4138 {
4139 int reg;
4140 int hi_reg;
4141 int i;
4142 unsigned mask = 0;
4143 valueT op;
4144
4145 if (*input_line_pointer == '{')
4146 input_line_pointer++;
4147
4148 do
4149 {
4150 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4151
4152 if (reg == FAIL)
4153 {
4154 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4155 goto error;
4156 }
4157
4158 if (mask >> reg)
4159 as_tsktsk (_("register list not in ascending order"));
4160 mask |= 1 << reg;
4161
4162 if (*input_line_pointer == '-')
4163 {
4164 input_line_pointer++;
4165 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4166 if (hi_reg == FAIL)
4167 {
4168 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4169 goto error;
4170 }
4171 else if (reg >= hi_reg)
4172 {
4173 as_bad (_("bad register range"));
4174 goto error;
4175 }
4176 for (; reg < hi_reg; reg++)
4177 mask |= 1 << reg;
4178 }
4179 }
4180 while (skip_past_comma (&input_line_pointer) != FAIL);
4181
4182 skip_past_char (&input_line_pointer, '}');
4183
4184 demand_empty_rest_of_line ();
4185
4186 /* Generate any deferred opcodes because we're going to be looking at
4187 the list. */
4188 flush_pending_unwind ();
4189
4190 for (i = 0; i < 16; i++)
4191 {
4192 if (mask & (1 << i))
4193 unwind.frame_size += 8;
4194 }
4195
4196 /* Attempt to combine with a previous opcode. We do this because gcc
4197 likes to output separate unwind directives for a single block of
4198 registers. */
4199 if (unwind.opcode_count > 0)
4200 {
4201 i = unwind.opcodes[unwind.opcode_count - 1];
4202 if ((i & 0xf8) == 0xc0)
4203 {
4204 i &= 7;
4205 /* Only merge if the blocks are contiguous. */
4206 if (i < 6)
4207 {
4208 if ((mask & 0xfe00) == (1 << 9))
4209 {
4210 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4211 unwind.opcode_count--;
4212 }
4213 }
4214 else if (i == 6 && unwind.opcode_count >= 2)
4215 {
4216 i = unwind.opcodes[unwind.opcode_count - 2];
4217 reg = i >> 4;
4218 i &= 0xf;
4219
4220 op = 0xffff << (reg - 1);
4221 if (reg > 0
4222 && ((mask & op) == (1u << (reg - 1))))
4223 {
4224 op = (1 << (reg + i + 1)) - 1;
4225 op &= ~((1 << reg) - 1);
4226 mask |= op;
4227 unwind.opcode_count -= 2;
4228 }
4229 }
4230 }
4231 }
4232
4233 hi_reg = 15;
4234 /* We want to generate opcodes in the order the registers have been
4235 saved, ie. descending order. */
4236 for (reg = 15; reg >= -1; reg--)
4237 {
4238 /* Save registers in blocks. */
4239 if (reg < 0
4240 || !(mask & (1 << reg)))
4241 {
4242 /* We found an unsaved reg. Generate opcodes to save the
4243 preceding block. */
4244 if (reg != hi_reg)
4245 {
4246 if (reg == 9)
4247 {
4248 /* Short form. */
4249 op = 0xc0 | (hi_reg - 10);
4250 add_unwind_opcode (op, 1);
4251 }
4252 else
4253 {
4254 /* Long form. */
4255 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4256 add_unwind_opcode (op, 2);
4257 }
4258 }
4259 hi_reg = reg - 1;
4260 }
4261 }
4262
4263 return;
4264 error:
4265 ignore_rest_of_line ();
4266 }
4267
4268 static void
4269 s_arm_unwind_save_mmxwcg (void)
4270 {
4271 int reg;
4272 int hi_reg;
4273 unsigned mask = 0;
4274 valueT op;
4275
4276 if (*input_line_pointer == '{')
4277 input_line_pointer++;
4278
4279 skip_whitespace (input_line_pointer);
4280
4281 do
4282 {
4283 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4284
4285 if (reg == FAIL)
4286 {
4287 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4288 goto error;
4289 }
4290
4291 reg -= 8;
4292 if (mask >> reg)
4293 as_tsktsk (_("register list not in ascending order"));
4294 mask |= 1 << reg;
4295
4296 if (*input_line_pointer == '-')
4297 {
4298 input_line_pointer++;
4299 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4300 if (hi_reg == FAIL)
4301 {
4302 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4303 goto error;
4304 }
4305 else if (reg >= hi_reg)
4306 {
4307 as_bad (_("bad register range"));
4308 goto error;
4309 }
4310 for (; reg < hi_reg; reg++)
4311 mask |= 1 << reg;
4312 }
4313 }
4314 while (skip_past_comma (&input_line_pointer) != FAIL);
4315
4316 skip_past_char (&input_line_pointer, '}');
4317
4318 demand_empty_rest_of_line ();
4319
4320 /* Generate any deferred opcodes because we're going to be looking at
4321 the list. */
4322 flush_pending_unwind ();
4323
4324 for (reg = 0; reg < 16; reg++)
4325 {
4326 if (mask & (1 << reg))
4327 unwind.frame_size += 4;
4328 }
4329 op = 0xc700 | mask;
4330 add_unwind_opcode (op, 2);
4331 return;
4332 error:
4333 ignore_rest_of_line ();
4334 }
4335
4336
4337 /* Parse an unwind_save directive.
4338 If the argument is non-zero, this is a .vsave directive. */
4339
4340 static void
4341 s_arm_unwind_save (int arch_v6)
4342 {
4343 char *peek;
4344 struct reg_entry *reg;
4345 bfd_boolean had_brace = FALSE;
4346
4347 if (!unwind.proc_start)
4348 as_bad (MISSING_FNSTART);
4349
4350 /* Figure out what sort of save we have. */
4351 peek = input_line_pointer;
4352
4353 if (*peek == '{')
4354 {
4355 had_brace = TRUE;
4356 peek++;
4357 }
4358
4359 reg = arm_reg_parse_multi (&peek);
4360
4361 if (!reg)
4362 {
4363 as_bad (_("register expected"));
4364 ignore_rest_of_line ();
4365 return;
4366 }
4367
4368 switch (reg->type)
4369 {
4370 case REG_TYPE_FN:
4371 if (had_brace)
4372 {
4373 as_bad (_("FPA .unwind_save does not take a register list"));
4374 ignore_rest_of_line ();
4375 return;
4376 }
4377 input_line_pointer = peek;
4378 s_arm_unwind_save_fpa (reg->number);
4379 return;
4380
4381 case REG_TYPE_RN:
4382 s_arm_unwind_save_core ();
4383 return;
4384
4385 case REG_TYPE_VFD:
4386 if (arch_v6)
4387 s_arm_unwind_save_vfp_armv6 ();
4388 else
4389 s_arm_unwind_save_vfp ();
4390 return;
4391
4392 case REG_TYPE_MMXWR:
4393 s_arm_unwind_save_mmxwr ();
4394 return;
4395
4396 case REG_TYPE_MMXWCG:
4397 s_arm_unwind_save_mmxwcg ();
4398 return;
4399
4400 default:
4401 as_bad (_(".unwind_save does not support this kind of register"));
4402 ignore_rest_of_line ();
4403 }
4404 }
4405
4406
4407 /* Parse an unwind_movsp directive. */
4408
4409 static void
4410 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4411 {
4412 int reg;
4413 valueT op;
4414 int offset;
4415
4416 if (!unwind.proc_start)
4417 as_bad (MISSING_FNSTART);
4418
4419 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4420 if (reg == FAIL)
4421 {
4422 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4423 ignore_rest_of_line ();
4424 return;
4425 }
4426
4427 /* Optional constant. */
4428 if (skip_past_comma (&input_line_pointer) != FAIL)
4429 {
4430 if (immediate_for_directive (&offset) == FAIL)
4431 return;
4432 }
4433 else
4434 offset = 0;
4435
4436 demand_empty_rest_of_line ();
4437
4438 if (reg == REG_SP || reg == REG_PC)
4439 {
4440 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4441 return;
4442 }
4443
4444 if (unwind.fp_reg != REG_SP)
4445 as_bad (_("unexpected .unwind_movsp directive"));
4446
4447 /* Generate opcode to restore the value. */
4448 op = 0x90 | reg;
4449 add_unwind_opcode (op, 1);
4450
4451 /* Record the information for later. */
4452 unwind.fp_reg = reg;
4453 unwind.fp_offset = unwind.frame_size - offset;
4454 unwind.sp_restored = 1;
4455 }
4456
4457 /* Parse an unwind_pad directive. */
4458
4459 static void
4460 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4461 {
4462 int offset;
4463
4464 if (!unwind.proc_start)
4465 as_bad (MISSING_FNSTART);
4466
4467 if (immediate_for_directive (&offset) == FAIL)
4468 return;
4469
4470 if (offset & 3)
4471 {
4472 as_bad (_("stack increment must be multiple of 4"));
4473 ignore_rest_of_line ();
4474 return;
4475 }
4476
4477 /* Don't generate any opcodes, just record the details for later. */
4478 unwind.frame_size += offset;
4479 unwind.pending_offset += offset;
4480
4481 demand_empty_rest_of_line ();
4482 }
4483
4484 /* Parse an unwind_setfp directive. */
4485
4486 static void
4487 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4488 {
4489 int sp_reg;
4490 int fp_reg;
4491 int offset;
4492
4493 if (!unwind.proc_start)
4494 as_bad (MISSING_FNSTART);
4495
4496 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4497 if (skip_past_comma (&input_line_pointer) == FAIL)
4498 sp_reg = FAIL;
4499 else
4500 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4501
4502 if (fp_reg == FAIL || sp_reg == FAIL)
4503 {
4504 as_bad (_("expected <reg>, <reg>"));
4505 ignore_rest_of_line ();
4506 return;
4507 }
4508
4509 /* Optional constant. */
4510 if (skip_past_comma (&input_line_pointer) != FAIL)
4511 {
4512 if (immediate_for_directive (&offset) == FAIL)
4513 return;
4514 }
4515 else
4516 offset = 0;
4517
4518 demand_empty_rest_of_line ();
4519
4520 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4521 {
4522 as_bad (_("register must be either sp or set by a previous"
4523 "unwind_movsp directive"));
4524 return;
4525 }
4526
4527 /* Don't generate any opcodes, just record the information for later. */
4528 unwind.fp_reg = fp_reg;
4529 unwind.fp_used = 1;
4530 if (sp_reg == REG_SP)
4531 unwind.fp_offset = unwind.frame_size - offset;
4532 else
4533 unwind.fp_offset -= offset;
4534 }
4535
4536 /* Parse an unwind_raw directive. */
4537
4538 static void
4539 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4540 {
4541 expressionS exp;
4542 /* This is an arbitrary limit. */
4543 unsigned char op[16];
4544 int count;
4545
4546 if (!unwind.proc_start)
4547 as_bad (MISSING_FNSTART);
4548
4549 expression (&exp);
4550 if (exp.X_op == O_constant
4551 && skip_past_comma (&input_line_pointer) != FAIL)
4552 {
4553 unwind.frame_size += exp.X_add_number;
4554 expression (&exp);
4555 }
4556 else
4557 exp.X_op = O_illegal;
4558
4559 if (exp.X_op != O_constant)
4560 {
4561 as_bad (_("expected <offset>, <opcode>"));
4562 ignore_rest_of_line ();
4563 return;
4564 }
4565
4566 count = 0;
4567
4568 /* Parse the opcode. */
4569 for (;;)
4570 {
4571 if (count >= 16)
4572 {
4573 as_bad (_("unwind opcode too long"));
4574 ignore_rest_of_line ();
4575 }
4576 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4577 {
4578 as_bad (_("invalid unwind opcode"));
4579 ignore_rest_of_line ();
4580 return;
4581 }
4582 op[count++] = exp.X_add_number;
4583
4584 /* Parse the next byte. */
4585 if (skip_past_comma (&input_line_pointer) == FAIL)
4586 break;
4587
4588 expression (&exp);
4589 }
4590
4591 /* Add the opcode bytes in reverse order. */
4592 while (count--)
4593 add_unwind_opcode (op[count], 1);
4594
4595 demand_empty_rest_of_line ();
4596 }
4597
4598
4599 /* Parse a .eabi_attribute directive. */
4600
4601 static void
4602 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4603 {
4604 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4605
4606 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4607 attributes_set_explicitly[tag] = 1;
4608 }
4609
/* Emit a tls fix for the symbol.  Attaches a TLS descriptor-sequence
   relocation at the current output position without emitting data.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* The fix lands at the current frag position; the reloc variant
     follows the current instruction mode.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4633 #endif /* OBJ_ELF */
4634
4635 static void s_arm_arch (int);
4636 static void s_arm_object_arch (int);
4637 static void s_arm_cpu (int);
4638 static void s_arm_fpu (int);
4639 static void s_arm_arch_extension (int);
4640
4641 #ifdef TE_PE
4642
4643 static void
4644 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4645 {
4646 expressionS exp;
4647
4648 do
4649 {
4650 expression (&exp);
4651 if (exp.X_op == O_symbol)
4652 exp.X_op = O_secrel;
4653
4654 emit_expr (&exp, 4);
4655 }
4656 while (*input_line_pointer++ == ',');
4657
4658 input_line_pointer--;
4659 demand_empty_rest_of_line ();
4660 }
4661 #endif /* TE_PE */
4662
4663 /* This table describes all the machine specific pseudo-ops the assembler
4664 has to support. The fields are:
4665 pseudo-op name without dot
4666 function to call to execute this pseudo-op
4667 Integer arg to pass to the function. */
4668
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,	  0 },
  { "qn",	   s_qn,	  0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align,	  0 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  /* ELF-only: data directives that may need ARM relocations, raw
     instruction emission, and the EHABI unwind-table directives.  */
  { "word",	   s_arm_elf_cons, 4 },
  { "long",	   s_arm_elf_cons, 4 },
  { "inst.n",	   s_arm_elf_inst, 2 },
  { "inst.w",	   s_arm_elf_inst, 4 },
  { "inst",	   s_arm_elf_inst, 0 },
  { "rel31",	   s_arm_rel31,	  0 },
  { "fnstart",	   s_arm_unwind_fnstart,  0 },
  { "fnend",	   s_arm_unwind_fnend,	  0 },
  { "cantunwind",  s_arm_unwind_cantunwind, 0 },
  { "personality", s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata", s_arm_unwind_handlerdata, 0 },
  { "save",	   s_arm_unwind_save,	0 },
  /* ".vsave" shares the handler with ".save"; the arg selects the
     VFP-register variant.  */
  { "vsave",	   s_arm_unwind_save,	1 },
  { "movsp",	   s_arm_unwind_movsp,	0 },
  { "pad",	   s_arm_unwind_pad,	0 },
  { "setfp",	   s_arm_unwind_setfp,	0 },
  { "unwind_raw",  s_arm_unwind_raw,	0 },
  { "eabi_attribute", s_arm_eabi_attribute, 0 },
  { "tlsdescseq",  s_arm_tls_descseq,   0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4742 \f
4743 /* Parser functions used exclusively in instruction operands. */
4744
4745 /* Generic immediate-value read function for use in insn parsing.
4746 STR points to the beginning of the immediate (the leading #);
4747 VAL receives the value; if the value is outside [MIN, MAX]
4748 issue an error. PREFIX_OPT is true if the immediate prefix is
4749 optional. */
4750
4751 static int
4752 parse_immediate (char **str, int *val, int min, int max,
4753 bfd_boolean prefix_opt)
4754 {
4755 expressionS exp;
4756 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4757 if (exp.X_op != O_constant)
4758 {
4759 inst.error = _("constant expression required");
4760 return FAIL;
4761 }
4762
4763 if (exp.X_add_number < min || exp.X_add_number > max)
4764 {
4765 inst.error = _("immediate value out of range");
4766 return FAIL;
4767 }
4768
4769 *val = exp.X_add_number;
4770 return SUCCESS;
4771 }
4772
4773 /* Less-generic immediate-value read function with the possibility of loading a
4774 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4775 instructions. Puts the result directly in inst.operands[i]. */
4776
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  /* Scratch expression used only when the caller did not supply one.  */
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      /* Low 32 bits always go in .imm; the high 32 bits (if any) go
	 in .reg with .regisimm set.  */
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number is the count of littlenums in
	 generic_bignum.  PARTS is how many littlenums make 32 bits.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm and the next 32 into .reg,
	 one littlenum at a time.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4845
4846 /* Returns the pseudo-register number of an FPA immediate constant,
4847 or FAIL if there isn't a valid constant here. */
4848
static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Partial match only - restore the input pointer.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA constant;
	 a full match yields the pseudo-register number (i + 8).  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  /* expression () works on input_line_pointer, so temporarily redirect
     it to our string and restore it afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
      if (gen_to_words (words, 5, (long) 15) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* No match: restore input_line_pointer and report failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4936
4937 /* Returns 1 if a number has "quarter-precision" float format
4938 0baBbbbbbc defgh000 00000000 00000000. */
4939
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000, else 0.
   Bit 31 (a) is the sign; bit 30 (B) must be the complement of bit 29,
   and bits 29-25 (bbbbb) must all equal bit 29; bits 18-0 must be
   zero.  Bits 24-19 (c, defgh) are unconstrained.  */

static int
is_quarter_float (unsigned imm)
{
  /* Expected pattern of bits 30-25, selected by bit 29.  */
  unsigned expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;

  return (imm & 0x7e000000) == expected;
}
4946
4947 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4948 0baBbbbbbc defgh000 00000000 00000000.
4949 The zero and minus-zero cases need special handling, since they can't be
4950 encoded in the "quarter-precision" float format, but can nonetheless be
4951 loaded as integer constants. */
4952
static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan up to the end of the token looking for a character that
	 marks a floating-point literal.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept either a valid quarter-precision pattern or (minus-)zero,
	 which cannot be encoded as quarter-precision but can still be
	 loaded as an integer constant.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5010
5011 /* Shift operands. */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a textual shift mnemonic (including the ASL alias) onto its
   shift_kind; instances live in the arm_shift_hsh hash table.  */
struct asm_shift_name
{
  const char	  *name;
  enum shift_kind  kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
};
5032
5033 /* Parse a <shift> specifier on an ARM data processing instruction.
5034 This has three forms:
5035
5036 (LSL|LSR|ASL|ASR|ROR) Rs
5037 (LSL|LSR|ASL|ASR|ROR) #imm
5038 RRX
5039
5040 Note that ASL is assimilated to LSL in the instruction encoding, and
5041 RRX to ROR #0 (which cannot be written as such). */
5042
static int
parse_shift (char **str, int i, enum parse_shift_mode mode)
{
  const struct asm_shift_name *shift_name;
  enum shift_kind shift;
  char *s = *str;
  char *p = s;
  int reg;

  /* The shift mnemonic is the leading run of alphabetic characters.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
							    p - *str);

  if (shift_name == NULL)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift = shift_name->kind;

  /* Enforce the caller's restriction on which shift kinds are legal.  */
  switch (mode)
    {
    case NO_SHIFT_RESTRICT:
    case SHIFT_IMMEDIATE:   break;

    case SHIFT_LSL_OR_ASR_IMMEDIATE:
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
	{
	  inst.error = _("'LSL' or 'ASR' required");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_IMMEDIATE:
      if (shift != SHIFT_LSL)
	{
	  inst.error = _("'LSL' required");
	  return FAIL;
	}
      break;

    case SHIFT_ASR_IMMEDIATE:
      if (shift != SHIFT_ASR)
	{
	  inst.error = _("'ASR' required");
	  return FAIL;
	}
      break;

    default: abort ();
    }

  /* RRX takes no operand; everything else takes a register (only when
     unrestricted) or an immediate, left in inst.reloc.exp.  */
  if (shift != SHIFT_RRX)
    {
      /* Whitespace can appear here if the next thing is a bare digit.	*/
      skip_whitespace (p);

      if (mode == NO_SHIFT_RESTRICT
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;
	}
      else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	return FAIL;
    }
  inst.operands[i].shift_kind = shift;
  inst.operands[i].shifted = 1;
  *str = p;
  return SUCCESS;
}
5123
5124 /* Parse a <shifter_operand> for an ARM data processing instruction:
5125
5126 #<immediate>
5127 #<immediate>, <rotate>
5128 <Rm>
5129 <Rm>, <shift>
5130
5131 where <shift> is defined by parse_shift above, and <rotate> is a
5132 multiple of 2 between 0 and 30. Validation of immediate operands
5133 is deferred to md_apply_fix. */
5134
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Not a register: must be an immediate, stored in inst.reloc.exp.  */
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even amount in [0, 30], and the base
	 constant must fit in 8 bits, per the ARM immediate encoding.  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  */
      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* No explicit rotation: defer validation/encoding to md_apply_fix.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
5193
5194 /* Group relocation information. Each entry in the table contains the
5195 textual name of the relocation as may appear in assembler source
5196 and must end with a colon.
5197 Along with this textual name are the relocation codes to be used if
5198 the corresponding instruction is an ALU instruction (ADD or SUB only),
5199 an LDR, an LDRS, or an LDC. */
5200
/* One group relocation: the source-level name plus the BFD reloc code
   to use for each instruction class that may carry it (0 means the
   combination is not permitted).  */
struct group_reloc_table_entry
{
  const char *name;
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
5218
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 }	};	/* LDC */
5272
5273 /* Given the address of a pointer pointing to the textual name of a group
5274 relocation as may appear in assembler source, attempt to find its details
5275 in group_reloc_table. The pointer will be updated to the character after
5276 the trailing colon. On failure, FAIL will be returned; SUCCESS
5277 otherwise. On success, *entry will be updated to point at the relevant
5278 group_reloc_table entry. */
5279
5280 static int
5281 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5282 {
5283 unsigned int i;
5284 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5285 {
5286 int length = strlen (group_reloc_table[i].name);
5287
5288 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5289 && (*str)[length] == ':')
5290 {
5291 *out = &group_reloc_table[i];
5292 *str += (length + 1);
5293 return SUCCESS;
5294 }
5295 }
5296
5297 return FAIL;
5298 }
5299
5300 /* Parse a <shifter_operand> for an ARM data processing instruction
5301 (as for parse_shifter_operand) where group relocations are allowed:
5302
5303 #<immediate>
5304 #<immediate>, <rotate>
5305 #:<group_reloc>:<expression>
5306 <Rm>
5307 <Rm>, <shift>
5308
5309 where <group_reloc> is one of the strings defined in group_reloc_table.
5310 The hashes are optional.
5311
5312 Everything else is as for parse_shifter_operand. */
5313
static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
{
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.  */

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
    {
      struct group_reloc_table_entry *entry;

      /* Skip over the "#:" or ":" introducer.  */
      if ((*str)[0] == '#')
	(*str) += 2;
      else
	(*str)++;

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
	{
	  inst.error = _("unknown group relocation");
	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
	}

      /* We now have the group relocation table entry corresponding to
	 the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).  */
      inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
      gas_assert (inst.reloc.type != 0);

      return PARSE_OPERAND_SUCCESS;
    }
  else
    return parse_shifter_operand (str, i) == SUCCESS
	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
}
5355
5356 /* Parse a Neon alignment expression. Information is written to
5357 inst.operands[i]. We assume the initial ':' has been skipped.
5358
5359 align .imm = align << 8, .immisalign=1, .preind=0 */
5360 static parse_operand_result
5361 parse_neon_alignment (char **str, int i)
5362 {
5363 char *p = *str;
5364 expressionS exp;
5365
5366 my_get_expression (&exp, &p, GE_NO_PREFIX);
5367
5368 if (exp.X_op != O_constant)
5369 {
5370 inst.error = _("alignment must be constant");
5371 return PARSE_OPERAND_FAIL;
5372 }
5373
5374 inst.operands[i].imm = exp.X_add_number << 8;
5375 inst.operands[i].immisalign = 1;
5376 /* Alignments are not pre-indexes. */
5377 inst.operands[i].preind = 0;
5378
5379 *str = p;
5380 return PARSE_OPERAND_SUCCESS;
5381 }
5382
5383 /* Parse all forms of an ARM address expression. Information is written
5384 to inst.operands[i] and/or inst.reloc.
5385
5386 Preindexed addressing (.preind=1):
5387
5388 [Rn, #offset] .reg=Rn .reloc.exp=offset
5389 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5390 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5391 .shift_kind=shift .reloc.exp=shift_imm
5392
5393 These three may have a trailing ! which causes .writeback to be set also.
5394
5395 Postindexed addressing (.postind=1, .writeback=1):
5396
5397 [Rn], #offset .reg=Rn .reloc.exp=offset
5398 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5399 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5400 .shift_kind=shift .reloc.exp=shift_imm
5401
5402 Unindexed addressing (.preind=0, .postind=0):
5403
5404 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5405
5406 Other:
5407
5408 [Rn]{!} shorthand for [Rn,#0]{!}
5409 =immediate .isreg=0 .reloc.exp=immediate
5410 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5411
5412 It is the caller's responsibility to check for addressing modes not
5413 supported by the instruction, and to set inst.reloc.type. */
5414
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[': either a literal-pool load ("=imm") or a bare label.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* The first thing inside the brackets must be the base register.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* [Rn, ... : pre-indexed addressing.  */
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm ... : register offset, optionally shifted.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Immediate offset (possibly a group relocation).  Undo the
	     sign consumption above: the expression parser handles it.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero code in the table marks a reloc/instruction-class
		 combination that the ABI does not permit.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* [Rn:align - Neon alignment specifier on the base register.  */
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  /* Trailing '!' selects writeback for the pre-indexed forms.  */
  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* [Rn], offset - post-indexed addressing, which always
	     writes the base register back.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5666
5667 static int
5668 parse_address (char **str, int i)
5669 {
5670 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5671 ? SUCCESS : FAIL;
5672 }
5673
/* As parse_address, but group relocations are permitted; TYPE selects
   which reloc code column of group_reloc_table applies, and the full
   tri-state parse_operand_result is returned.  */
static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  return parse_address_main (str, i, 1, type);
}
5679
5680 /* Parse an operand for a MOVW or MOVT instruction. */
/* Parse an operand for a MOVW or MOVT instruction.  */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  /* An explicit ":lower16:"/":upper16:" prefix selects the half via a
     MOVW/MOVT relocation instead of a literal constant.  */
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      /* Both prefixes are exactly 9 characters long.  */
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* No relocation prefix: the operand must be a constant that fits
	 in 16 bits.  */
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
5719
5720 /* Miscellaneous. */
5721
/* Parse a PSR flag operand from *STR.  LHS is TRUE when the operand is
   the destination of an MSR instruction (a write), FALSE when it is the
   source of an MRS (a read).  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.  On
   success *STR is advanced past the operand.  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  /* M-profile cores use special-register names (xPSR, IAPSR, ...) looked up
     in arm_v7m_psr_hsh rather than CPSR/SPSR.  */
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (selected_cpu.core == arm_arch_any.core)
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* Collect the whole identifier, then look it up in the M-profile
	 special-register table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For names ending in "psr", stop the token at the final 'r'/'R' so
	 that any "_<bits>" suffix is left for the suffix parser below.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Skip over the four characters of "SPSR"/"CPSR"/"APSR".  */
  p += 4;
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  Each of
	     n/z/c/v/q may appear at most once; a repeated bit sets the 0x20
	     marker so the validity check below rejects it.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v and q present selects the flags field.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  /* The 'g' bit (GE flags) requires the DSP extension.  */
	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated bits (0x20 marker) and partial nzcvq sets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* Ordinary CPSR/SPSR suffix: look the field name up directly.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5918
5919 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5920 value suitable for splatting into the AIF field of the instruction. */
5921
5922 static int
5923 parse_cps_flags (char **str)
5924 {
5925 int val = 0;
5926 int saw_a_flag = 0;
5927 char *s = *str;
5928
5929 for (;;)
5930 switch (*s++)
5931 {
5932 case '\0': case ',':
5933 goto done;
5934
5935 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5936 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5937 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5938
5939 default:
5940 inst.error = _("unrecognized CPS flag");
5941 return FAIL;
5942 }
5943
5944 done:
5945 if (saw_a_flag == 0)
5946 {
5947 inst.error = _("missing CPS flags");
5948 return FAIL;
5949 }
5950
5951 *str = s - 1;
5952 return val;
5953 }
5954
5955 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5956 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5957
5958 static int
5959 parse_endian_specifier (char **str)
5960 {
5961 int little_endian;
5962 char *s = *str;
5963
5964 if (strncasecmp (s, "BE", 2))
5965 little_endian = 0;
5966 else if (strncasecmp (s, "LE", 2))
5967 little_endian = 1;
5968 else
5969 {
5970 inst.error = _("valid endian specifiers are be or le");
5971 return FAIL;
5972 }
5973
5974 if (ISALNUM (s[2]) || s[2] == '_')
5975 {
5976 inst.error = _("valid endian specifiers are be or le");
5977 return FAIL;
5978 }
5979
5980 *str = s + 2;
5981 return little_endian;
5982 }
5983
5984 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
5985 value suitable for poking into the rotate field of an sxt or sxta
5986 instruction, or FAIL on error. */
5987
5988 static int
5989 parse_ror (char **str)
5990 {
5991 int rot;
5992 char *s = *str;
5993
5994 if (strncasecmp (s, "ROR", 3) == 0)
5995 s += 3;
5996 else
5997 {
5998 inst.error = _("missing rotation field after comma");
5999 return FAIL;
6000 }
6001
6002 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6003 return FAIL;
6004
6005 switch (rot)
6006 {
6007 case 0: *str = s; return 0x0;
6008 case 8: *str = s; return 0x1;
6009 case 16: *str = s; return 0x2;
6010 case 24: *str = s; return 0x3;
6011
6012 default:
6013 inst.error = _("rotation can only be 0, 8, 16, or 24");
6014 return FAIL;
6015 }
6016 }
6017
6018 /* Parse a conditional code (from conds[] below). The value returned is in the
6019 range 0 .. 14, or FAIL. */
6020 static int
6021 parse_cond (char **str)
6022 {
6023 char *q;
6024 const struct asm_cond *c;
6025 int n;
6026 /* Condition codes are always 2 characters, so matching up to
6027 3 characters is sufficient. */
6028 char cond[3];
6029
6030 q = *str;
6031 n = 0;
6032 while (ISALPHA (*q) && n < 3)
6033 {
6034 cond[n] = TOLOWER (*q);
6035 q++;
6036 n++;
6037 }
6038
6039 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6040 if (!c)
6041 {
6042 inst.error = _("condition required");
6043 return FAIL;
6044 }
6045
6046 *str = q;
6047 return c->value;
6048 }
6049
/* If the given feature available in the selected CPU, mark it as used.
   Returns TRUE iff feature is available.  FEATURE is a single
   architecture-extension bit set (e.g. the arch field of a barrier
   option).  */
static bfd_boolean
mark_feature_used (const arm_feature_set *feature)
{
  /* Ensure the option is valid on the current architecture.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
    return FALSE;

  /* Add the appropriate architecture feature for the barrier option used.
     Record against the Thumb or ARM "used" set depending on the current
     instruction-set state, so later attribute emission sees it.  */
  if (thumb_mode)
    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
  else
    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);

  return TRUE;
}
6068
6069 /* Parse an option for a barrier instruction. Returns the encoding for the
6070 option, or FAIL. */
6071 static int
6072 parse_barrier (char **str)
6073 {
6074 char *p, *q;
6075 const struct asm_barrier_opt *o;
6076
6077 p = q = *str;
6078 while (ISALPHA (*q))
6079 q++;
6080
6081 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6082 q - p);
6083 if (!o)
6084 return FAIL;
6085
6086 if (!mark_feature_used (&o->arch))
6087 return FAIL;
6088
6089 *str = q;
6090 return o->value;
6091 }
6092
6093 /* Parse the operands of a table branch instruction. Similar to a memory
6094 operand. */
6095 static int
6096 parse_tb (char **str)
6097 {
6098 char * p = *str;
6099 int reg;
6100
6101 if (skip_past_char (&p, '[') == FAIL)
6102 {
6103 inst.error = _("'[' expected");
6104 return FAIL;
6105 }
6106
6107 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6108 {
6109 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6110 return FAIL;
6111 }
6112 inst.operands[0].reg = reg;
6113
6114 if (skip_past_comma (&p) == FAIL)
6115 {
6116 inst.error = _("',' expected");
6117 return FAIL;
6118 }
6119
6120 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6121 {
6122 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6123 return FAIL;
6124 }
6125 inst.operands[0].imm = reg;
6126
6127 if (skip_past_comma (&p) == SUCCESS)
6128 {
6129 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6130 return FAIL;
6131 if (inst.reloc.exp.X_add_number != 1)
6132 {
6133 inst.error = _("invalid shift");
6134 return FAIL;
6135 }
6136 inst.operands[0].shifted = 1;
6137 }
6138
6139 if (skip_past_char (&p, ']') == FAIL)
6140 {
6141 inst.error = _("']' expected");
6142 return FAIL;
6143 }
6144 *str = p;
6145 return SUCCESS;
6146 }
6147
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  The numbered "cases" in the comments below refer to
   the VMOV variants distinguished in do_neon_mov.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* The leading operand decides which family of VMOV forms we are in:
     a D-register scalar, a vector register, or an ARM core register.  */
  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      /* Second operand: core register, another vector register, or an
	 immediate.  */
      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D register destination: a second core register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two core registers follow the S-register pair.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is an ARM core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: VMOV <Rd>, <Rn>, <Sm>, <Se> — a second
		 single-precision register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6370
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM code lives in the low 16 bits
   and the Thumb code in the high 16; parse_operands selects one based on
   the instruction-set state.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  Codes with an "OP_o" prefix denote
   optional operands; their ordering relative to OP_FIRST_OPTIONAL below
   is significant, as parse_operands uses it to decide when to record a
   backtrack position.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,	 /*				1 .. 32 */
  OP_oI32z,	 /*				0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,	 /* Optional Neon double precision register */
  OP_oRNQ,	 /* Optional Neon quad precision register */
  OP_oRNDQ,	 /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* First optional code — everything at or above this value may be
     backtracked over by parse_operands.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
6505
6506 /* Generic instruction operand parser. This does no encoding and no
6507 semantic validation; it merely squirrels values away in the inst
6508 structure. Returns SUCCESS or FAIL depending on whether the
6509 specified grammar matched. */
6510 static int
6511 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6512 {
6513 unsigned const int *upat = pattern;
6514 char *backtrack_pos = 0;
6515 const char *backtrack_error = 0;
6516 int i, val = 0, backtrack_index = 0;
6517 enum arm_reg_type rtype;
6518 parse_operand_result result;
6519 unsigned int op_parse_code;
6520
6521 #define po_char_or_fail(chr) \
6522 do \
6523 { \
6524 if (skip_past_char (&str, chr) == FAIL) \
6525 goto bad_args; \
6526 } \
6527 while (0)
6528
6529 #define po_reg_or_fail(regtype) \
6530 do \
6531 { \
6532 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6533 & inst.operands[i].vectype); \
6534 if (val == FAIL) \
6535 { \
6536 first_error (_(reg_expected_msgs[regtype])); \
6537 goto failure; \
6538 } \
6539 inst.operands[i].reg = val; \
6540 inst.operands[i].isreg = 1; \
6541 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6542 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6543 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6544 || rtype == REG_TYPE_VFD \
6545 || rtype == REG_TYPE_NQ); \
6546 } \
6547 while (0)
6548
6549 #define po_reg_or_goto(regtype, label) \
6550 do \
6551 { \
6552 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6553 & inst.operands[i].vectype); \
6554 if (val == FAIL) \
6555 goto label; \
6556 \
6557 inst.operands[i].reg = val; \
6558 inst.operands[i].isreg = 1; \
6559 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6560 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6561 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6562 || rtype == REG_TYPE_VFD \
6563 || rtype == REG_TYPE_NQ); \
6564 } \
6565 while (0)
6566
6567 #define po_imm_or_fail(min, max, popt) \
6568 do \
6569 { \
6570 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6571 goto failure; \
6572 inst.operands[i].imm = val; \
6573 } \
6574 while (0)
6575
6576 #define po_scalar_or_goto(elsz, label) \
6577 do \
6578 { \
6579 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6580 if (val == FAIL) \
6581 goto label; \
6582 inst.operands[i].reg = val; \
6583 inst.operands[i].isscalar = 1; \
6584 } \
6585 while (0)
6586
6587 #define po_misc_or_fail(expr) \
6588 do \
6589 { \
6590 if (expr) \
6591 goto failure; \
6592 } \
6593 while (0)
6594
6595 #define po_misc_or_fail_no_backtrack(expr) \
6596 do \
6597 { \
6598 result = expr; \
6599 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6600 backtrack_pos = 0; \
6601 if (result != PARSE_OPERAND_SUCCESS) \
6602 goto failure; \
6603 } \
6604 while (0)
6605
6606 #define po_barrier_or_imm(str) \
6607 do \
6608 { \
6609 val = parse_barrier (&str); \
6610 if (val == FAIL && ! ISALPHA (*str)) \
6611 goto immediate; \
6612 if (val == FAIL \
6613 /* ISB can only take SY as an option. */ \
6614 || ((inst.instruction & 0xf0) == 0x60 \
6615 && val != 0xf)) \
6616 { \
6617 inst.error = _("invalid barrier type"); \
6618 backtrack_pos = 0; \
6619 goto failure; \
6620 } \
6621 } \
6622 while (0)
6623
6624 skip_whitespace (str);
6625
6626 for (i = 0; upat[i] != OP_stop; i++)
6627 {
6628 op_parse_code = upat[i];
6629 if (op_parse_code >= 1<<16)
6630 op_parse_code = thumb ? (op_parse_code >> 16)
6631 : (op_parse_code & ((1<<16)-1));
6632
6633 if (op_parse_code >= OP_FIRST_OPTIONAL)
6634 {
6635 /* Remember where we are in case we need to backtrack. */
6636 gas_assert (!backtrack_pos);
6637 backtrack_pos = str;
6638 backtrack_error = inst.error;
6639 backtrack_index = i;
6640 }
6641
6642 if (i > 0 && (i > 1 || inst.operands[0].present))
6643 po_char_or_fail (',');
6644
6645 switch (op_parse_code)
6646 {
6647 /* Registers */
6648 case OP_oRRnpc:
6649 case OP_oRRnpcsp:
6650 case OP_RRnpc:
6651 case OP_RRnpcsp:
6652 case OP_oRR:
6653 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6654 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6655 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6656 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6657 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6658 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6659 case OP_oRND:
6660 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6661 case OP_RVC:
6662 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6663 break;
6664 /* Also accept generic coprocessor regs for unknown registers. */
6665 coproc_reg:
6666 po_reg_or_fail (REG_TYPE_CN);
6667 break;
6668 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6669 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6670 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6671 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6672 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6673 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6674 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6675 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6676 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6677 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6678 case OP_oRNQ:
6679 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6680 case OP_oRNDQ:
6681 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6682 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6683 case OP_oRNSDQ:
6684 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6685
6686 /* Neon scalar. Using an element size of 8 means that some invalid
6687 scalars are accepted here, so deal with those in later code. */
6688 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6689
6690 case OP_RNDQ_I0:
6691 {
6692 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6693 break;
6694 try_imm0:
6695 po_imm_or_fail (0, 0, TRUE);
6696 }
6697 break;
6698
6699 case OP_RVSD_I0:
6700 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6701 break;
6702
6703 case OP_RR_RNSC:
6704 {
6705 po_scalar_or_goto (8, try_rr);
6706 break;
6707 try_rr:
6708 po_reg_or_fail (REG_TYPE_RN);
6709 }
6710 break;
6711
6712 case OP_RNSDQ_RNSC:
6713 {
6714 po_scalar_or_goto (8, try_nsdq);
6715 break;
6716 try_nsdq:
6717 po_reg_or_fail (REG_TYPE_NSDQ);
6718 }
6719 break;
6720
6721 case OP_RNDQ_RNSC:
6722 {
6723 po_scalar_or_goto (8, try_ndq);
6724 break;
6725 try_ndq:
6726 po_reg_or_fail (REG_TYPE_NDQ);
6727 }
6728 break;
6729
6730 case OP_RND_RNSC:
6731 {
6732 po_scalar_or_goto (8, try_vfd);
6733 break;
6734 try_vfd:
6735 po_reg_or_fail (REG_TYPE_VFD);
6736 }
6737 break;
6738
6739 case OP_VMOV:
6740 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6741 not careful then bad things might happen. */
6742 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6743 break;
6744
6745 case OP_RNDQ_Ibig:
6746 {
6747 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6748 break;
6749 try_immbig:
6750 /* There's a possibility of getting a 64-bit immediate here, so
6751 we need special handling. */
6752 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6753 == FAIL)
6754 {
6755 inst.error = _("immediate value is out of range");
6756 goto failure;
6757 }
6758 }
6759 break;
6760
6761 case OP_RNDQ_I63b:
6762 {
6763 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6764 break;
6765 try_shimm:
6766 po_imm_or_fail (0, 63, TRUE);
6767 }
6768 break;
6769
6770 case OP_RRnpcb:
6771 po_char_or_fail ('[');
6772 po_reg_or_fail (REG_TYPE_RN);
6773 po_char_or_fail (']');
6774 break;
6775
6776 case OP_RRnpctw:
6777 case OP_RRw:
6778 case OP_oRRw:
6779 po_reg_or_fail (REG_TYPE_RN);
6780 if (skip_past_char (&str, '!') == SUCCESS)
6781 inst.operands[i].writeback = 1;
6782 break;
6783
6784 /* Immediates */
6785 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6786 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6787 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6788 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6789 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6790 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6791 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6792 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6793 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6794 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6795 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6796 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6797
6798 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6799 case OP_oI7b:
6800 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6801 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6802 case OP_oI31b:
6803 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6804 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6805 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6806 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6807
6808 /* Immediate variants */
6809 case OP_oI255c:
6810 po_char_or_fail ('{');
6811 po_imm_or_fail (0, 255, TRUE);
6812 po_char_or_fail ('}');
6813 break;
6814
6815 case OP_I31w:
6816 /* The expression parser chokes on a trailing !, so we have
6817 to find it first and zap it. */
6818 {
6819 char *s = str;
6820 while (*s && *s != ',')
6821 s++;
6822 if (s[-1] == '!')
6823 {
6824 s[-1] = '\0';
6825 inst.operands[i].writeback = 1;
6826 }
6827 po_imm_or_fail (0, 31, TRUE);
6828 if (str == s - 1)
6829 str = s;
6830 }
6831 break;
6832
6833 /* Expressions */
6834 case OP_EXPi: EXPi:
6835 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6836 GE_OPT_PREFIX));
6837 break;
6838
6839 case OP_EXP:
6840 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6841 GE_NO_PREFIX));
6842 break;
6843
6844 case OP_EXPr: EXPr:
6845 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6846 GE_NO_PREFIX));
6847 if (inst.reloc.exp.X_op == O_symbol)
6848 {
6849 val = parse_reloc (&str);
6850 if (val == -1)
6851 {
6852 inst.error = _("unrecognized relocation suffix");
6853 goto failure;
6854 }
6855 else if (val != BFD_RELOC_UNUSED)
6856 {
6857 inst.operands[i].imm = val;
6858 inst.operands[i].hasreloc = 1;
6859 }
6860 }
6861 break;
6862
6863 /* Operand for MOVW or MOVT. */
6864 case OP_HALF:
6865 po_misc_or_fail (parse_half (&str));
6866 break;
6867
6868 /* Register or expression. */
6869 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6870 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6871
6872 /* Register or immediate. */
6873 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6874 I0: po_imm_or_fail (0, 0, FALSE); break;
6875
6876 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6877 IF:
6878 if (!is_immediate_prefix (*str))
6879 goto bad_args;
6880 str++;
6881 val = parse_fpa_immediate (&str);
6882 if (val == FAIL)
6883 goto failure;
6884 /* FPA immediates are encoded as registers 8-15.
6885 parse_fpa_immediate has already applied the offset. */
6886 inst.operands[i].reg = val;
6887 inst.operands[i].isreg = 1;
6888 break;
6889
6890 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6891 I32z: po_imm_or_fail (0, 32, FALSE); break;
6892
6893 /* Two kinds of register. */
6894 case OP_RIWR_RIWC:
6895 {
6896 struct reg_entry *rege = arm_reg_parse_multi (&str);
6897 if (!rege
6898 || (rege->type != REG_TYPE_MMXWR
6899 && rege->type != REG_TYPE_MMXWC
6900 && rege->type != REG_TYPE_MMXWCG))
6901 {
6902 inst.error = _("iWMMXt data or control register expected");
6903 goto failure;
6904 }
6905 inst.operands[i].reg = rege->number;
6906 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6907 }
6908 break;
6909
6910 case OP_RIWC_RIWG:
6911 {
6912 struct reg_entry *rege = arm_reg_parse_multi (&str);
6913 if (!rege
6914 || (rege->type != REG_TYPE_MMXWC
6915 && rege->type != REG_TYPE_MMXWCG))
6916 {
6917 inst.error = _("iWMMXt control register expected");
6918 goto failure;
6919 }
6920 inst.operands[i].reg = rege->number;
6921 inst.operands[i].isreg = 1;
6922 }
6923 break;
6924
6925 /* Misc */
6926 case OP_CPSF: val = parse_cps_flags (&str); break;
6927 case OP_ENDI: val = parse_endian_specifier (&str); break;
6928 case OP_oROR: val = parse_ror (&str); break;
6929 case OP_COND: val = parse_cond (&str); break;
6930 case OP_oBARRIER_I15:
6931 po_barrier_or_imm (str); break;
6932 immediate:
6933 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6934 goto failure;
6935 break;
6936
6937 case OP_wPSR:
6938 case OP_rPSR:
6939 po_reg_or_goto (REG_TYPE_RNB, try_psr);
6940 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
6941 {
6942 inst.error = _("Banked registers are not available with this "
6943 "architecture.");
6944 goto failure;
6945 }
6946 break;
6947 try_psr:
6948 val = parse_psr (&str, op_parse_code == OP_wPSR);
6949 break;
6950
6951 case OP_APSR_RR:
6952 po_reg_or_goto (REG_TYPE_RN, try_apsr);
6953 break;
6954 try_apsr:
6955 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
6956 instruction). */
6957 if (strncasecmp (str, "APSR_", 5) == 0)
6958 {
6959 unsigned found = 0;
6960 str += 5;
6961 while (found < 15)
6962 switch (*str++)
6963 {
6964 case 'c': found = (found & 1) ? 16 : found | 1; break;
6965 case 'n': found = (found & 2) ? 16 : found | 2; break;
6966 case 'z': found = (found & 4) ? 16 : found | 4; break;
6967 case 'v': found = (found & 8) ? 16 : found | 8; break;
6968 default: found = 16;
6969 }
6970 if (found != 15)
6971 goto failure;
6972 inst.operands[i].isvec = 1;
6973 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
6974 inst.operands[i].reg = REG_PC;
6975 }
6976 else
6977 goto failure;
6978 break;
6979
6980 case OP_TB:
6981 po_misc_or_fail (parse_tb (&str));
6982 break;
6983
6984 /* Register lists. */
6985 case OP_REGLST:
6986 val = parse_reg_list (&str);
6987 if (*str == '^')
6988 {
6989 inst.operands[1].writeback = 1;
6990 str++;
6991 }
6992 break;
6993
6994 case OP_VRSLST:
6995 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
6996 break;
6997
6998 case OP_VRDLST:
6999 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7000 break;
7001
7002 case OP_VRSDLST:
7003 /* Allow Q registers too. */
7004 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7005 REGLIST_NEON_D);
7006 if (val == FAIL)
7007 {
7008 inst.error = NULL;
7009 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7010 REGLIST_VFP_S);
7011 inst.operands[i].issingle = 1;
7012 }
7013 break;
7014
7015 case OP_NRDLST:
7016 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7017 REGLIST_NEON_D);
7018 break;
7019
7020 case OP_NSTRLST:
7021 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7022 &inst.operands[i].vectype);
7023 break;
7024
7025 /* Addressing modes */
7026 case OP_ADDR:
7027 po_misc_or_fail (parse_address (&str, i));
7028 break;
7029
7030 case OP_ADDRGLDR:
7031 po_misc_or_fail_no_backtrack (
7032 parse_address_group_reloc (&str, i, GROUP_LDR));
7033 break;
7034
7035 case OP_ADDRGLDRS:
7036 po_misc_or_fail_no_backtrack (
7037 parse_address_group_reloc (&str, i, GROUP_LDRS));
7038 break;
7039
7040 case OP_ADDRGLDC:
7041 po_misc_or_fail_no_backtrack (
7042 parse_address_group_reloc (&str, i, GROUP_LDC));
7043 break;
7044
7045 case OP_SH:
7046 po_misc_or_fail (parse_shifter_operand (&str, i));
7047 break;
7048
7049 case OP_SHG:
7050 po_misc_or_fail_no_backtrack (
7051 parse_shifter_operand_group_reloc (&str, i));
7052 break;
7053
7054 case OP_oSHll:
7055 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7056 break;
7057
7058 case OP_oSHar:
7059 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7060 break;
7061
7062 case OP_oSHllar:
7063 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7064 break;
7065
7066 default:
7067 as_fatal (_("unhandled operand code %d"), op_parse_code);
7068 }
7069
7070 /* Various value-based sanity checks and shared operations. We
7071 do not signal immediate failures for the register constraints;
7072 this allows a syntax error to take precedence. */
7073 switch (op_parse_code)
7074 {
7075 case OP_oRRnpc:
7076 case OP_RRnpc:
7077 case OP_RRnpcb:
7078 case OP_RRw:
7079 case OP_oRRw:
7080 case OP_RRnpc_I0:
7081 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7082 inst.error = BAD_PC;
7083 break;
7084
7085 case OP_oRRnpcsp:
7086 case OP_RRnpcsp:
7087 if (inst.operands[i].isreg)
7088 {
7089 if (inst.operands[i].reg == REG_PC)
7090 inst.error = BAD_PC;
7091 else if (inst.operands[i].reg == REG_SP)
7092 inst.error = BAD_SP;
7093 }
7094 break;
7095
7096 case OP_RRnpctw:
7097 if (inst.operands[i].isreg
7098 && inst.operands[i].reg == REG_PC
7099 && (inst.operands[i].writeback || thumb))
7100 inst.error = BAD_PC;
7101 break;
7102
7103 case OP_CPSF:
7104 case OP_ENDI:
7105 case OP_oROR:
7106 case OP_wPSR:
7107 case OP_rPSR:
7108 case OP_COND:
7109 case OP_oBARRIER_I15:
7110 case OP_REGLST:
7111 case OP_VRSLST:
7112 case OP_VRDLST:
7113 case OP_VRSDLST:
7114 case OP_NRDLST:
7115 case OP_NSTRLST:
7116 if (val == FAIL)
7117 goto failure;
7118 inst.operands[i].imm = val;
7119 break;
7120
7121 default:
7122 break;
7123 }
7124
7125 /* If we get here, this operand was successfully parsed. */
7126 inst.operands[i].present = 1;
7127 continue;
7128
7129 bad_args:
7130 inst.error = BAD_ARGS;
7131
7132 failure:
7133 if (!backtrack_pos)
7134 {
7135 /* The parse routine should already have set inst.error, but set a
7136 default here just in case. */
7137 if (!inst.error)
7138 inst.error = _("syntax error");
7139 return FAIL;
7140 }
7141
7142 /* Do not backtrack over a trailing optional argument that
7143 absorbed some text. We will only fail again, with the
7144 'garbage following instruction' error message, which is
7145 probably less helpful than the current one. */
7146 if (backtrack_index == i && backtrack_pos != str
7147 && upat[i+1] == OP_stop)
7148 {
7149 if (!inst.error)
7150 inst.error = _("syntax error");
7151 return FAIL;
7152 }
7153
7154 /* Try again, skipping the optional argument at backtrack_pos. */
7155 str = backtrack_pos;
7156 inst.error = backtrack_error;
7157 inst.operands[backtrack_index].present = 0;
7158 i = backtrack_index;
7159 backtrack_pos = 0;
7160 }
7161
7162 /* Check that we have parsed all the arguments. */
7163 if (*str != '\0' && !inst.error)
7164 inst.error = _("garbage following instruction");
7165
7166 return inst.error ? FAIL : SUCCESS;
7167 }
7168
7169 #undef po_char_or_fail
7170 #undef po_reg_or_fail
7171 #undef po_reg_or_goto
7172 #undef po_imm_or_fail
7173 #undef po_scalar_or_fail
7174 #undef po_barrier_or_imm
7175
/* Shorthand macro for instruction encoding functions issuing errors.
   Evaluates EXPR; if it is true, records ERR in inst.error and returns
   from the enclosing (void) function.  ERR is only evaluated when the
   constraint fails.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7187
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   Sets inst.error (BAD_SP or BAD_PC) and returns from the enclosing
   function if REG is SP or PC.  */
#define reject_bad_reg(reg)				\
  do							\
    if (reg == REG_SP || reg == REG_PC)			\
      {							\
	inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
	return;						\
      }							\
  while (0)
7199
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  The warning is suppressed unless the user asked for
   deprecation warnings (warn_on_deprecated).  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
      as_warn (_("use of r13 is deprecated"));	\
  while (0)
7207
7208 /* Functions for operand encoding. ARM, then Thumb. */
7209
/* Rotate V left by N bits.  The shift counts are masked to 0-31 so
   that a rotation by zero does not perform "v >> 32", which is
   undefined behaviour for 32-bit operands.  */
#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7211
7212 /* If VAL can be encoded in the immediate field of an ARM instruction,
7213 return the encoded form. Otherwise, return FAIL. */
7214
7215 static unsigned int
7216 encode_arm_immediate (unsigned int val)
7217 {
7218 unsigned int a, i;
7219
7220 for (i = 0; i < 32; i += 2)
7221 if ((a = rotate_left (val, i)) <= 0xff)
7222 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7223
7224 return FAIL;
7225 }
7226
7227 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7228 return the encoded form. Otherwise, return FAIL. */
7229 static unsigned int
7230 encode_thumb32_immediate (unsigned int val)
7231 {
7232 unsigned int a, i;
7233
7234 if (val <= 0xff)
7235 return val;
7236
7237 for (i = 1; i <= 24; i++)
7238 {
7239 a = val >> i;
7240 if ((val & ~(0xff << i)) == 0)
7241 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7242 }
7243
7244 a = val & 0xff;
7245 if (val == ((a << 16) | a))
7246 return 0x100 | a;
7247 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7248 return 0x300 | a;
7249
7250 a = val & 0xff00;
7251 if (val == ((a << 16) | a))
7252 return 0x200 | (a >> 8);
7253
7254 return FAIL;
7255 }
7256 /* Encode a VFP SP or DP register number into inst.instruction. */
7257
7258 static void
7259 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
7260 {
7261 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
7262 && reg > 15)
7263 {
7264 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7265 {
7266 if (thumb_mode)
7267 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7268 fpu_vfp_ext_d32);
7269 else
7270 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7271 fpu_vfp_ext_d32);
7272 }
7273 else
7274 {
7275 first_error (_("D register out of range for selected VFP version"));
7276 return;
7277 }
7278 }
7279
7280 switch (pos)
7281 {
7282 case VFP_REG_Sd:
7283 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7284 break;
7285
7286 case VFP_REG_Sn:
7287 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7288 break;
7289
7290 case VFP_REG_Sm:
7291 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7292 break;
7293
7294 case VFP_REG_Dd:
7295 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7296 break;
7297
7298 case VFP_REG_Dn:
7299 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7300 break;
7301
7302 case VFP_REG_Dm:
7303 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7304 break;
7305
7306 default:
7307 abort ();
7308 }
7309 }
7310
7311 /* Encode a <shift> in an ARM-format instruction. The immediate,
7312 if any, is handled by md_apply_fix. */
7313 static void
7314 encode_arm_shift (int i)
7315 {
7316 if (inst.operands[i].shift_kind == SHIFT_RRX)
7317 inst.instruction |= SHIFT_ROR << 5;
7318 else
7319 {
7320 inst.instruction |= inst.operands[i].shift_kind << 5;
7321 if (inst.operands[i].immisreg)
7322 {
7323 inst.instruction |= SHIFT_BY_REG;
7324 inst.instruction |= inst.operands[i].imm << 8;
7325 }
7326 else
7327 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7328 }
7329 }
7330
7331 static void
7332 encode_arm_shifter_operand (int i)
7333 {
7334 if (inst.operands[i].isreg)
7335 {
7336 inst.instruction |= inst.operands[i].reg;
7337 encode_arm_shift (i);
7338 }
7339 else
7340 {
7341 inst.instruction |= INST_IMMEDIATE;
7342 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7343 inst.instruction |= inst.operands[i].imm;
7344 }
7345 }
7346
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register and the P (pre-index) / W (write-back)
   bits common to both addressing modes, and diagnoses the forms
   neither mode accepts.  IS_T selects the user-mode (xxxT) variants,
   which only permit post-indexed addressing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register Rn goes in bits 19:16.  */
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* For the T variants the W bit selects the user-mode access
	 (post-indexing already implies write-back).  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the transfer register (bits 15:12) aliases a base that
     will be written back.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7389
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  /* Base register plus the shared P/W bits.  */
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, optionally scaled.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;	/* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    /* RRX is encoded as ROR with an implicit zero amount.  */
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      /* Shift amount is filled in by md_apply_fix.  */
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_warn (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7449
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifter for the register offset.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      /* Immediate form: the split 8-bit value is inserted later via
	 the relocation chosen below.  */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7493
7494 /* Write immediate bits [7:0] to the following locations:
7495
7496 |28/24|23 19|18 16|15 4|3 0|
7497 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7498
7499 This function is used by VMOV/VMVN/VORR/VBIC. */
7500
7501 static void
7502 neon_write_immbits (unsigned immbits)
7503 {
7504 inst.instruction |= immbits & 0xf;
7505 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7506 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7507 }
7508
/* Invert the low-order SIZE bits of the pair XHI:XLO, treating a NULL
   pointer as a zero input that is not written back.  SIZE must be 8,
   16, 32 or 64.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  if (size == 8)
    lo = ~lo & 0xff;
  else if (size == 16)
    lo = ~lo & 0xffff;
  else if (size == 32 || size == 64)
    {
      if (size == 64)
	hi = ~hi & 0xffffffff;
      lo = ~lo & 0xffffffff;
    }
  else
    abort ();

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
7545
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D -- i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  unsigned byte_idx;

  for (byte_idx = 0; byte_idx < 4; byte_idx++)
    {
      unsigned byte = (imm >> (byte_idx * 8)) & 0xff;

      if (byte != 0 && byte != 0xff)
	return 0;
    }
  return 1;
}
7557
/* For immediate of above form, return 0bABCD: the low bit of each
   byte, collected lowest byte first.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  unsigned byte_idx;

  for (byte_idx = 0; byte_idx < 4; byte_idx++)
    result |= ((imm >> (byte_idx * 8)) & 1) << byte_idx;

  return result;
}
7566
/* Compress quarter-float representation to 0b...000 abcdefgh: the
   sign bit followed by seven exponent/mantissa bits.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned exp_mantissa = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return sign | exp_mantissa;
}
7574
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  Returns FAIL if the value cannot be
   encoded for the requested (or any smaller) element size.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float constant: cmode 0xf, 32-bit elements,
     never MVN.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* Every byte 0x00 or 0xff: the 64-bit bitmask form, cmode 0xe
	 with op forced to 1.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Other 64-bit constants are only encodable when both halves
	 match; if so, fall through to the 32-bit forms.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* A single non-zero byte: cmode 0x0/0x2/0x4/0x6 according to
	 byte position.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* One byte above a run of ones: the "ones" variants,
	 cmode 0xc/0xd.  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Retry as a 16-bit element if both halfwords match.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* A single non-zero byte of the halfword: cmode 0x8/0xa.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Retry as an 8-bit element if both bytes match.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7684
/* Kind of "=expr" literal being loaded; selects which move-immediate
   encodings move_or_literal_pool may substitute for the pool load.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb core-register load.  */
  CONST_ARM,	/* ARM core-register load.  */
  CONST_VEC	/* VFP/Neon vector-register load.  */
};
7691
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);
  bfd_boolean vec64_p = (t == CONST_VEC) && !inst.operands[i].issingle;

  /* Only load instructions may take an "=expr" operand; pick the
     load bit appropriate to the encoding width.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }
  if (inst.reloc.exp.X_op != O_constant
      && inst.reloc.exp.X_op != O_symbol
      && inst.reloc.exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }
  /* Try to replace the pool load with a move-immediate.  */
  if ((inst.reloc.exp.X_op == O_constant
       || inst.reloc.exp.X_op == O_big)
      && !inst.operands[i].issingle)
    {
      if (thumb_p && inst.reloc.exp.X_op == O_constant)
	{
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return TRUE;
	    }
	}
      else if (arm_p && inst.reloc.exp.X_op == O_constant)
	{
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }

	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }
	}
      else if (vec64_p)
	{
	  /* Try a 64-bit VMOV immediate, or VMVN via inversion.
	     NOTE(review): the fixed index 1 below (rather than i) is
	     presumably because the constant is always operand 1 of
	     the vector-load pseudo -- confirm against callers.  */
	  int op = 0;
	  unsigned immbits = 0;
	  unsigned immlo = inst.operands[1].imm;
	  /* High word: the parsed 64-bit high part when present,
	     otherwise the sign-extension of the low word (the >> 32
	     of the widened value yields 0 or 0xffffffff).  */
	  unsigned immhi = inst.operands[1].regisimm
			   ? inst.operands[1].reg
			   : inst.reloc.exp.X_unsigned
			     ? 0
			     : ((int64_t)((int) immlo)) >> 32;
	  int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
					       &op, 64, NT_invtype);

	  if (cmode == FAIL)
	    {
	      /* Not encodable as VMOV; try the inverse as VMVN.  */
	      neon_invert_size (&immlo, &immhi, 64);
	      op = !op;
	      cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
					       &op, 64, NT_invtype);
	    }
	  if (cmode != FAIL)
	    {
	      inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
				 | (1 << 23)
				 | (cmode << 8)
				 | (op << 5)
				 | (1 << 4);
	      /* Fill other bits in vmov encoding for both thumb and arm.  */
	      if (thumb_mode)
		inst.instruction |= (0x7 << 29) | (0xF << 24);
	      else
		inst.instruction |= (0xF << 28) | (0x1 << 25);
	      neon_write_immbits (immbits);
	      return TRUE;
	    }
	}
    }

  /* No move was possible: reserve a pool slot (4 bytes for core or
     single-precision loads, 8 for 64-bit vector loads).  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  /* Rewrite the operand as a PC-relative load from the pool entry.  */
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
7815
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  Returns SUCCESS or FAIL.  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* "=expr" operand: try a vector move-immediate, falling back
	 on a literal-pool load.  */
      gas_assert (inst.operands[0].isvec);
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  /* Base register Rn in bits 19:16.  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Insert the option value and set the U bit.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Select the offset relocation: an explicit override wins; a group
     relocation already chosen by the parser is kept; otherwise use
     the generic coprocessor-offset relocation.  */
  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
7887
7888 /* Functions for instruction encoding, sorted by sub-architecture.
7889 First some generics; their names are taken from the conventional
7890 bit positions for register arguments in ARM format instructions. */
7891
/* Encoder for instructions with no operands: the opcode template is
   already complete.  */
static void
do_noargs (void)
{
}
7896
static void
do_rd (void)
{
  /* Rd in bits 15:12.  */
  inst.instruction |= inst.operands[0].reg << 12;
}
7902
7903 static void
7904 do_rd_rm (void)
7905 {
7906 inst.instruction |= inst.operands[0].reg << 12;
7907 inst.instruction |= inst.operands[1].reg;
7908 }
7909
7910 static void
7911 do_rm_rn (void)
7912 {
7913 inst.instruction |= inst.operands[0].reg;
7914 inst.instruction |= inst.operands[1].reg << 16;
7915 }
7916
7917 static void
7918 do_rd_rn (void)
7919 {
7920 inst.instruction |= inst.operands[0].reg << 12;
7921 inst.instruction |= inst.operands[1].reg << 16;
7922 }
7923
7924 static void
7925 do_rn_rd (void)
7926 {
7927 inst.instruction |= inst.operands[0].reg << 16;
7928 inst.instruction |= inst.operands[1].reg << 12;
7929 }
7930
7931 static bfd_boolean
7932 check_obsolete (const arm_feature_set *feature, const char *msg)
7933 {
7934 if (ARM_CPU_IS_ANY (cpu_variant))
7935 {
7936 as_warn ("%s", msg);
7937 return TRUE;
7938 }
7939 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
7940 {
7941 as_bad ("%s", msg);
7942 return TRUE;
7943 }
7944
7945 return FALSE;
7946 }
7947
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      /* Rn overlapping Rd or Rm makes the swap unpredictable.  */
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_warn (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  /* Rd in bits 15:12, Rm in bits 3:0, Rn in bits 19:16.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
7971
7972 static void
7973 do_rd_rn_rm (void)
7974 {
7975 inst.instruction |= inst.operands[0].reg << 12;
7976 inst.instruction |= inst.operands[1].reg << 16;
7977 inst.instruction |= inst.operands[2].reg;
7978 }
7979
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* The address operand must be a bare register: any offset
     expression other than a literal zero is rejected.  */
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  /* Rm in bits 3:0, Rd in bits 15:12, Rn in bits 19:16.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
7992
static void
do_imm0 (void)
{
  /* Insert the immediate operand into the low bits of the opcode.  */
  inst.instruction |= inst.operands[0].imm;
}
7998
static void
do_rd_cpaddr (void)
{
  /* Rd in bits 15:12, plus a coprocessor addressing mode (writeback
     and unindexed forms both permitted).  */
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8005
8006 /* ARM instructions, in alphabetical order by function name (except
8007 that wrapper functions appear immediately after the function they
8008 wrap). */
8009
8010 /* This is a pseudo-op of the form "adr rd, label" to be converted
8011 into a relative address of the form "add rd, pc, #label-.-8". */
8012
8013 static void
8014 do_adr (void)
8015 {
8016 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8017
8018 /* Frag hacking will turn this into a sub instruction if the offset turns
8019 out to be negative. */
8020 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8021 inst.reloc.pc_rel = 1;
8022 inst.reloc.exp.X_add_number -= 8;
8023 }
8024
8025 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8026 into a relative address of the form:
8027 add rd, pc, #low(label-.-8)"
8028 add rd, rd, #high(label-.-8)" */
8029
8030 static void
8031 do_adrl (void)
8032 {
8033 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8034
8035 /* Frag hacking will turn this into a sub instruction if the offset turns
8036 out to be negative. */
8037 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
8038 inst.reloc.pc_rel = 1;
8039 inst.size = INSN_SIZE * 2;
8040 inst.reloc.exp.X_add_number -= 8;
8041 }
8042
8043 static void
8044 do_arit (void)
8045 {
8046 if (!inst.operands[1].present)
8047 inst.operands[1].reg = inst.operands[0].reg;
8048 inst.instruction |= inst.operands[0].reg << 12;
8049 inst.instruction |= inst.operands[1].reg << 16;
8050 encode_arm_shifter_operand (2);
8051 }
8052
8053 static void
8054 do_barrier (void)
8055 {
8056 if (inst.operands[0].present)
8057 inst.instruction |= inst.operands[0].imm;
8058 else
8059 inst.instruction |= 0xf;
8060 }
8061
8062 static void
8063 do_bfc (void)
8064 {
8065 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8066 constraint (msb > 32, _("bit-field extends past end of register"));
8067 /* The instruction encoding stores the LSB and MSB,
8068 not the LSB and width. */
8069 inst.instruction |= inst.operands[0].reg << 12;
8070 inst.instruction |= inst.operands[1].imm << 7;
8071 inst.instruction |= (msb - 1) << 16;
8072 }
8073
8074 static void
8075 do_bfi (void)
8076 {
8077 unsigned int msb;
8078
8079 /* #0 in second position is alternative syntax for bfc, which is
8080 the same instruction but with REG_PC in the Rm field. */
8081 if (!inst.operands[1].isreg)
8082 inst.operands[1].reg = REG_PC;
8083
8084 msb = inst.operands[2].imm + inst.operands[3].imm;
8085 constraint (msb > 32, _("bit-field extends past end of register"));
8086 /* The instruction encoding stores the LSB and MSB,
8087 not the LSB and width. */
8088 inst.instruction |= inst.operands[0].reg << 12;
8089 inst.instruction |= inst.operands[1].reg;
8090 inst.instruction |= inst.operands[2].imm << 7;
8091 inst.instruction |= (msb - 1) << 16;
8092 }
8093
8094 static void
8095 do_bfx (void)
8096 {
8097 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8098 _("bit-field extends past end of register"));
8099 inst.instruction |= inst.operands[0].reg << 12;
8100 inst.instruction |= inst.operands[1].reg;
8101 inst.instruction |= inst.operands[2].imm << 7;
8102 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8103 }
8104
8105 /* ARM V5 breakpoint instruction (argument parse)
8106 BKPT <16 bit unsigned immediate>
8107 Instruction is not conditional.
8108 The bit pattern given in insns[] has the COND_ALWAYS condition,
8109 and it is an error if the caller tried to override that. */
8110
8111 static void
8112 do_bkpt (void)
8113 {
8114 /* Top 12 of 16 bits to bits 19:8. */
8115 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8116
8117 /* Bottom 4 of 16 bits to bits 3:0. */
8118 inst.instruction |= inst.operands[0].imm & 0xf;
8119 }
8120
/* Set up the relocation for a branch-type instruction: a '(plt)' or
   '(tlscall)' suffix selects the corresponding reloc, otherwise
   DEFAULT_RELOC is used.  The reloc is always pc-relative.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      /* TLS calls use the Thumb variant of the reloc when assembling
	 Thumb code.  */
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
8137
/* B{cond}: plain branch.  EABI v4+ objects use the PCREL_JUMP reloc,
   older (and non-ELF) objects the generic PCREL_BRANCH reloc.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8148
/* BL{cond}: branch with link.  On EABI v4+, an unconditional BL gets
   the PCREL_CALL reloc and a conditional BL the PCREL_JUMP reloc;
   otherwise the generic PCREL_BRANCH reloc is used.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8164
8165 /* ARM V5 branch-link-exchange instruction (argument parse)
8166 BLX <target_addr> ie BLX(1)
8167 BLX{<condition>} <Rm> ie BLX(2)
8168 Unfortunately, there are two different opcodes for this mnemonic.
8169 So, the insns[].value is not used, and the code here zaps values
8170 into inst.instruction.
8171 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8172
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* Replace the insns[] value with the unconditional BLX(1) opcode.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8196
/* BX{cond} <Rm>: branch and exchange instruction set.  */
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    /* Non-ELF and pre-v4 EABI objects never get the marker reloc.  */
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8220
8221
8222 /* ARM v5TEJ. Jump to Jazelle code. */
8223
8224 static void
8225 do_bxj (void)
8226 {
8227 if (inst.operands[0].reg == REG_PC)
8228 as_tsktsk (_("use of r15 in bxj is not really useful"));
8229
8230 inst.instruction |= inst.operands[0].reg;
8231 }
8232
8233 /* Co-processor data operation:
8234 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8235 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8236 static void
8237 do_cdp (void)
8238 {
8239 inst.instruction |= inst.operands[0].reg << 8;
8240 inst.instruction |= inst.operands[1].imm << 20;
8241 inst.instruction |= inst.operands[2].reg << 12;
8242 inst.instruction |= inst.operands[3].reg << 16;
8243 inst.instruction |= inst.operands[4].reg;
8244 inst.instruction |= inst.operands[5].imm << 5;
8245 }
8246
8247 static void
8248 do_cmp (void)
8249 {
8250 inst.instruction |= inst.operands[0].reg << 16;
8251 encode_arm_shifter_operand (1);
8252 }
8253
8254 /* Transfer between coprocessor and ARM registers.
8255 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8256 MRC2
8257 MCR{cond}
8258 MCR2
8259
8260 No special properties. */
8261
/* Description of one coprocessor register whose access is deprecated
   and/or obsoleted from some architecture version onwards.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opcode_1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opcode_2 field.  */
  arm_feature_set deprecated;	/* Architectures where access is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where access is obsoleted.  */
  const char *dep_msg;		/* Diagnostic for a deprecated access.  */
  const char *obs_msg;		/* Diagnostic for an obsoleted access.  */
};
8274
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 5, 4,					/* CP15ISB.  */
     ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1, 0, 0,					/* TEEHBR.  */
     ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0, 0, 0,					/* TEECR.  */
     ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in deprecated_coproc_regs[].  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8302
/* Encode MRC/MRC2/MCR/MCR2: check register restrictions, warn about
   deprecated coprocessor register accesses, then assemble the fields.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn when the access matches an entry in the deprecation table,
     unless assembling for the "any" CPU or warnings are disabled.  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_warn ("%s", r->dep_msg);
	}
    }

  /* Fields, in operand order: coproc, opcode_1, Rd, CRn, CRm, opcode_2.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8352
8353 /* Transfer between coprocessor register and pair of ARM registers.
8354 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8355 MCRR2
8356 MRRC{cond}
8357 MRRC2
8358
8359 Two XScale instructions are special cases of these:
8360
8361 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8362 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8363
8364 Result unpredictable if Rd or Rn is R15. */
8365
8366 static void
8367 do_co_reg2c (void)
8368 {
8369 unsigned Rd, Rn;
8370
8371 Rd = inst.operands[2].reg;
8372 Rn = inst.operands[3].reg;
8373
8374 if (thumb_mode)
8375 {
8376 reject_bad_reg (Rd);
8377 reject_bad_reg (Rn);
8378 }
8379 else
8380 {
8381 constraint (Rd == REG_PC, BAD_PC);
8382 constraint (Rn == REG_PC, BAD_PC);
8383 }
8384
8385 inst.instruction |= inst.operands[0].reg << 8;
8386 inst.instruction |= inst.operands[1].imm << 4;
8387 inst.instruction |= Rd << 12;
8388 inst.instruction |= Rn << 16;
8389 inst.instruction |= inst.operands[4].reg;
8390 }
8391
8392 static void
8393 do_cpsi (void)
8394 {
8395 inst.instruction |= inst.operands[0].imm << 6;
8396 if (inst.operands[1].present)
8397 {
8398 inst.instruction |= CPSI_MMOD;
8399 inst.instruction |= inst.operands[1].imm;
8400 }
8401 }
8402
8403 static void
8404 do_dbg (void)
8405 {
8406 inst.instruction |= inst.operands[0].imm;
8407 }
8408
8409 static void
8410 do_div (void)
8411 {
8412 unsigned Rd, Rn, Rm;
8413
8414 Rd = inst.operands[0].reg;
8415 Rn = (inst.operands[1].present
8416 ? inst.operands[1].reg : Rd);
8417 Rm = inst.operands[2].reg;
8418
8419 constraint ((Rd == REG_PC), BAD_PC);
8420 constraint ((Rn == REG_PC), BAD_PC);
8421 constraint ((Rm == REG_PC), BAD_PC);
8422
8423 inst.instruction |= Rd << 16;
8424 inst.instruction |= Rn << 0;
8425 inst.instruction |= Rm << 8;
8426 }
8427
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emits no bytes in ARM mode.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record the mask and condition so following instructions can be
	 validated against the IT block state.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8444
8445 /* If there is only one register in the register list,
8446 then return its register number. Otherwise return -1. */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* ffs returns 0 for an empty mask, making I negative; the old code
     then evaluated "1 << -1", which is undefined behaviour.  Guard the
     empty and out-of-range cases explicitly.  */
  if (i < 0 || i > 15)
    return -1;
  return (range == (1 << i)) ? i : -1;
}
8453
/* Common encoder for LDM/STM and the PUSH/POP aliases: emit the base
   register, register list and writeback bits, diagnose UNPREDICTABLE
   writeback combinations, and switch single-register PUSH/POP over to
   the A2 (single-transfer) encoding.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A flag on the register-list operand selects the LDM type 2/3
     forms.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      /* Keep only the condition field and rebuild as LDR/STR.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8504
8505 static void
8506 do_ldmstm (void)
8507 {
8508 encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
8509 }
8510
8511 /* ARMv5TE load-consecutive (argument parse)
8512 Mode is like LDRH.
8513
8514 LDRccD R, mode
8515 STRccD R, mode. */
8516
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* An even register other than r14 leaves room for the pair.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second transfer register defaults to Rt+1.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8552
/* LDREX Rt, [Rn]: the address must be a plain register with no offset.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* NOTE(review): unreachable — the PC case was already rejected above
     with BAD_ADDR_MODE, so this BAD_PC diagnostic never fires.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* Offset already validated to be zero; no fixup needed.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8584
8585 static void
8586 do_ldrexd (void)
8587 {
8588 constraint (inst.operands[0].reg % 2 != 0,
8589 _("even register required"));
8590 constraint (inst.operands[1].present
8591 && inst.operands[1].reg != inst.operands[0].reg + 1,
8592 _("can only load two consecutive registers"));
8593 /* If op 1 were present and equal to PC, this function wouldn't
8594 have been called in the first place. */
8595 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8596
8597 inst.instruction |= inst.operands[0].reg << 12;
8598 inst.instruction |= inst.operands[2].reg << 16;
8599 }
8600
8601 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8602 which is not a multiple of four is UNPREDICTABLE. */
8603 static void
8604 check_ldr_r15_aligned (void)
8605 {
8606 constraint (!(inst.operands[1].immisreg)
8607 && (inst.operands[0].reg == REG_PC
8608 && inst.operands[1].reg == REG_PC
8609 && (inst.reloc.exp.X_add_number & 0x3)),
8610 _("ldr to register 15 must be 4-byte alligned"));
8611 }
8612
/* LDR/STR word and byte forms: Rt plus an addressing-mode-2 operand.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* A non-register operand may be satisfied via the literal pool (or an
     equivalent instruction); if so we are done.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
8623
/* LDRT/STRT: unprivileged load/store, word and byte forms.  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
8642
8643 /* Halfword and signed-byte load/store operations. */
8644
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  /* A non-register operand may be satisfied via the literal pool (or an
     equivalent instruction); if so we are done.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
8655
/* Unprivileged halfword/signed-byte load/store (addressing mode 3).  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
8674
8675 /* Co-processor register load/store.
8676 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8677 static void
8678 do_lstc (void)
8679 {
8680 inst.instruction |= inst.operands[0].reg << 8;
8681 inst.instruction |= inst.operands[1].reg << 12;
8682 encode_arm_cp_address (2, TRUE, TRUE, 0);
8683 }
8684
8685 static void
8686 do_mlas (void)
8687 {
8688 /* This restriction does not apply to mls (nor to mla in v6 or later). */
8689 if (inst.operands[0].reg == inst.operands[1].reg
8690 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
8691 && !(inst.instruction & 0x00400000))
8692 as_tsktsk (_("Rd and Rm should be different in mla"));
8693
8694 inst.instruction |= inst.operands[0].reg << 16;
8695 inst.instruction |= inst.operands[1].reg;
8696 inst.instruction |= inst.operands[2].reg << 8;
8697 inst.instruction |= inst.operands[3].reg << 12;
8698 }
8699
8700 static void
8701 do_mov (void)
8702 {
8703 inst.instruction |= inst.operands[0].reg << 12;
8704 encode_arm_shifter_operand (1);
8705 }
8706
8707 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8708 static void
8709 do_mov16 (void)
8710 {
8711 bfd_vma imm;
8712 bfd_boolean top;
8713
8714 top = (inst.instruction & 0x00400000) != 0;
8715 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
8716 _(":lower16: not allowed this instruction"));
8717 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
8718 _(":upper16: not allowed instruction"));
8719 inst.instruction |= inst.operands[0].reg << 12;
8720 if (inst.reloc.type == BFD_RELOC_UNUSED)
8721 {
8722 imm = inst.reloc.exp.X_add_number;
8723 /* The value is in two pieces: 0:11, 16:19. */
8724 inst.instruction |= (imm & 0x00000fff);
8725 inst.instruction |= (imm & 0x0000f000) << 4;
8726 }
8727 }
8728
8729 static void do_vfp_nsyn_opcode (const char *);
8730
/* Handle the VFP-register forms of MRS.  Returns SUCCESS if the
   instruction was dispatched to a VFP encoding, FAIL if the operands
   do not name VFP system registers.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* The flags-transfer form uses the dedicated fmstat opcode with
	 implicit operands.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
8749
8750 static int
8751 do_vfp_nsyn_msr (void)
8752 {
8753 if (inst.operands[0].isvec)
8754 do_vfp_nsyn_opcode ("fmxr");
8755 else
8756 return FAIL;
8757
8758 return SUCCESS;
8759 }
8760
/* VMRS: move from a VFP system register to an ARM core register.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* APSR_ sets isvec.  All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
8784
/* VMSR: move from an ARM core register to a VFP system register.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
8803
/* MRS{cond} Rd, <psr or banked register>.  */
static void
do_mrs (void)
{
  unsigned br;

  /* VFP-register forms (vmrs) are handled separately.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* Banked-register form: presumably the parser supplies the
	 pre-encoded value here — verify against the operand parser.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
8832
8833 /* Two possible forms:
8834 "{C|S}PSR_<field>, Rm",
8835 "{C|S}PSR_f, #expression". */
8836
static void
do_msr (void)
{
  /* VFP-register forms (vmsr) are handled separately.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: the value is resolved by a later fixup.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
8853
8854 static void
8855 do_mul (void)
8856 {
8857 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
8858
8859 if (!inst.operands[2].present)
8860 inst.operands[2].reg = inst.operands[0].reg;
8861 inst.instruction |= inst.operands[0].reg << 16;
8862 inst.instruction |= inst.operands[1].reg;
8863 inst.instruction |= inst.operands[2].reg << 8;
8864
8865 if (inst.operands[0].reg == inst.operands[1].reg
8866 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8867 as_tsktsk (_("Rd and Rm should be different in mul"));
8868 }
8869
8870 /* Long Multiply Parser
8871 UMULL RdLo, RdHi, Rm, Rs
8872 SMULL RdLo, RdHi, Rm, Rs
8873 UMLAL RdLo, RdHi, Rm, Rs
8874 SMLAL RdLo, RdHi, Rm, Rs. */
8875
8876 static void
8877 do_mull (void)
8878 {
8879 inst.instruction |= inst.operands[0].reg << 12;
8880 inst.instruction |= inst.operands[1].reg << 16;
8881 inst.instruction |= inst.operands[2].reg;
8882 inst.instruction |= inst.operands[3].reg << 8;
8883
8884 /* rdhi and rdlo must be different. */
8885 if (inst.operands[0].reg == inst.operands[1].reg)
8886 as_tsktsk (_("rdhi and rdlo must be different"));
8887
8888 /* rdhi, rdlo and rm must all be different before armv6. */
8889 if ((inst.operands[0].reg == inst.operands[2].reg
8890 || inst.operands[1].reg == inst.operands[2].reg)
8891 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8892 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
8893 }
8894
8895 static void
8896 do_nop (void)
8897 {
8898 if (inst.operands[0].present
8899 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
8900 {
8901 /* Architectural NOP hints are CPSR sets with no bits selected. */
8902 inst.instruction &= 0xf0000000;
8903 inst.instruction |= 0x0320f000;
8904 if (inst.operands[0].present)
8905 inst.instruction |= inst.operands[0].imm;
8906 }
8907 }
8908
8909 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
8910 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
8911 Condition defaults to COND_ALWAYS.
8912 Error if Rd, Rn or Rm are R15. */
8913
8914 static void
8915 do_pkhbt (void)
8916 {
8917 inst.instruction |= inst.operands[0].reg << 12;
8918 inst.instruction |= inst.operands[1].reg << 16;
8919 inst.instruction |= inst.operands[2].reg;
8920 if (inst.operands[3].present)
8921 encode_arm_shift (3);
8922 }
8923
8924 /* ARM V6 PKHTB (Argument Parse). */
8925
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note the swapped Rn/Rm fields.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
8946
8947 /* ARMv5TE: Preload-Cache
8948 MP Extensions: Preload for write
8949
8950 PLD(W) <addr_mode>
8951
8952 Syntactically, like LDR with B=1, W=0, L=1. */
8953
static void
do_pld (void)
{
  /* The single operand must be a plain pre-indexed address.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
8967
8968 /* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* The single operand must be a plain pre-indexed address.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* Clear the P bit that encode_arm_addr_mode_2 set; PLI is encoded
     with that bit zero.  */
  inst.instruction &= ~PRE_INDEX;
}
8983
8984 static void
8985 do_push_pop (void)
8986 {
8987 inst.operands[1] = inst.operands[0];
8988 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
8989 inst.operands[0].isreg = 1;
8990 inst.operands[0].writeback = 1;
8991 inst.operands[0].reg = REG_SP;
8992 encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
8993 }
8994
8995 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
8996 word at the specified address and the following word
8997 respectively.
8998 Unconditionally executed.
8999 Error if Rn is R15. */
9000
9001 static void
9002 do_rfe (void)
9003 {
9004 inst.instruction |= inst.operands[0].reg << 16;
9005 if (inst.operands[0].writeback)
9006 inst.instruction |= WRITE_BACK;
9007 }
9008
9009 /* ARM V6 ssat (argument parse). */
9010
9011 static void
9012 do_ssat (void)
9013 {
9014 inst.instruction |= inst.operands[0].reg << 12;
9015 inst.instruction |= (inst.operands[1].imm - 1) << 16;
9016 inst.instruction |= inst.operands[2].reg;
9017
9018 if (inst.operands[3].present)
9019 encode_arm_shift (3);
9020 }
9021
9022 /* ARM V6 usat (argument parse). */
9023
9024 static void
9025 do_usat (void)
9026 {
9027 inst.instruction |= inst.operands[0].reg << 12;
9028 inst.instruction |= inst.operands[1].imm << 16;
9029 inst.instruction |= inst.operands[2].reg;
9030
9031 if (inst.operands[3].present)
9032 encode_arm_shift (3);
9033 }
9034
9035 /* ARM V6 ssat16 (argument parse). */
9036
9037 static void
9038 do_ssat16 (void)
9039 {
9040 inst.instruction |= inst.operands[0].reg << 12;
9041 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9042 inst.instruction |= inst.operands[2].reg;
9043 }
9044
9045 static void
9046 do_usat16 (void)
9047 {
9048 inst.instruction |= inst.operands[0].reg << 12;
9049 inst.instruction |= inst.operands[1].imm << 16;
9050 inst.instruction |= inst.operands[2].reg;
9051 }
9052
9053 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9054 preserving the other bits.
9055
9056 setend <endian_specifier>, where <endian_specifier> is either
9057 BE or LE. */
9058
9059 static void
9060 do_setend (void)
9061 {
9062 if (warn_on_deprecated
9063 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
9064 as_warn (_("setend use is deprecated for ARMv8"));
9065
9066 if (inst.operands[0].imm)
9067 inst.instruction |= 0x200;
9068 }
9069
9070 static void
9071 do_shift (void)
9072 {
9073 unsigned int Rm = (inst.operands[1].present
9074 ? inst.operands[1].reg
9075 : inst.operands[0].reg);
9076
9077 inst.instruction |= inst.operands[0].reg << 12;
9078 inst.instruction |= Rm;
9079 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
9080 {
9081 inst.instruction |= inst.operands[2].reg << 8;
9082 inst.instruction |= SHIFT_BY_REG;
9083 /* PR 12854: Error on extraneous shifts. */
9084 constraint (inst.operands[2].shifted,
9085 _("extraneous shift as part of operand to shift insn"));
9086 }
9087 else
9088 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
9089 }
9090
9091 static void
9092 do_smc (void)
9093 {
9094 inst.reloc.type = BFD_RELOC_ARM_SMC;
9095 inst.reloc.pc_rel = 0;
9096 }
9097
9098 static void
9099 do_hvc (void)
9100 {
9101 inst.reloc.type = BFD_RELOC_ARM_HVC;
9102 inst.reloc.pc_rel = 0;
9103 }
9104
9105 static void
9106 do_swi (void)
9107 {
9108 inst.reloc.type = BFD_RELOC_ARM_SWI;
9109 inst.reloc.pc_rel = 0;
9110 }
9111
9112 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9113 SMLAxy{cond} Rd,Rm,Rs,Rn
9114 SMLAWy{cond} Rd,Rm,Rs,Rn
9115 Error if any register is R15. */
9116
9117 static void
9118 do_smla (void)
9119 {
9120 inst.instruction |= inst.operands[0].reg << 16;
9121 inst.instruction |= inst.operands[1].reg;
9122 inst.instruction |= inst.operands[2].reg << 8;
9123 inst.instruction |= inst.operands[3].reg << 12;
9124 }
9125
9126 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9127 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9128 Error if any register is R15.
9129 Warning if Rdlo == Rdhi. */
9130
9131 static void
9132 do_smlal (void)
9133 {
9134 inst.instruction |= inst.operands[0].reg << 12;
9135 inst.instruction |= inst.operands[1].reg << 16;
9136 inst.instruction |= inst.operands[2].reg;
9137 inst.instruction |= inst.operands[3].reg << 8;
9138
9139 if (inst.operands[0].reg == inst.operands[1].reg)
9140 as_tsktsk (_("rdhi and rdlo must be different"));
9141 }
9142
9143 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9144 SMULxy{cond} Rd,Rm,Rs
9145 Error if any register is R15. */
9146
9147 static void
9148 do_smul (void)
9149 {
9150 inst.instruction |= inst.operands[0].reg << 16;
9151 inst.instruction |= inst.operands[1].reg;
9152 inst.instruction |= inst.operands[2].reg << 8;
9153 }
9154
9155 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9156 the same for both ARM and Thumb-2. */
9157
9158 static void
9159 do_srs (void)
9160 {
9161 int reg;
9162
9163 if (inst.operands[0].present)
9164 {
9165 reg = inst.operands[0].reg;
9166 constraint (reg != REG_SP, _("SRS base register must be r13"));
9167 }
9168 else
9169 reg = REG_SP;
9170
9171 inst.instruction |= reg << 16;
9172 inst.instruction |= inst.operands[1].imm;
9173 if (inst.operands[0].writeback || inst.operands[1].writeback)
9174 inst.instruction |= WRITE_BACK;
9175 }
9176
9177 /* ARM V6 strex (argument parse). */
9178
static void
do_strex (void)
{
  /* The address must be a plain [Rn]: no post-index, writeback,
     register offset, shift, or negative index, and Rn != PC.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register must not overlap the value or base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  /* Unlike Thumb-2, the ARM encoding has no offset field at all.  */
  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd (status) */
  inst.instruction |= inst.operands[1].reg;		/* Rt (value) */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rn (base) */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9202
/* Thumb STREXB/STREXH: same operand constraints as do_strex but the
   encoding is done by the shared do_rm_rd_rn helper.  */

static void
do_t_strexbh (void)
{
  /* The address must be a plain [Rn] with no addressing-mode extras.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  /* The status register must not overlap the value or base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9217
/* ARM STREXD: stores an even/odd register pair exclusively.  */

static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  /* The second source register is optional; when written it must be
     the odd partner of the first.  */
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register must not overlap either half of the pair
     or the base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd (status) */
  inst.instruction |= inst.operands[1].reg;		/* Rt (even) */
  inst.instruction |= inst.operands[3].reg << 16;	/* Rn (base) */
}
9239
9240 /* ARM V8 STRL. */
9241 static void
9242 do_stlex (void)
9243 {
9244 constraint (inst.operands[0].reg == inst.operands[1].reg
9245 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9246
9247 do_rd_rm_rn ();
9248 }
9249
9250 static void
9251 do_t_stlex (void)
9252 {
9253 constraint (inst.operands[0].reg == inst.operands[1].reg
9254 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9255
9256 do_rm_rd_rn ();
9257 }
9258
9259 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9260 extends it to 32-bits, and adds the result to a value in another
9261 register. You can specify a rotation by 0, 8, 16, or 24 bits
9262 before extracting the 16-bit value.
9263 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9264 Condition defaults to COND_ALWAYS.
9265 Error if any register uses R15. */
9266
9267 static void
9268 do_sxtah (void)
9269 {
9270 inst.instruction |= inst.operands[0].reg << 12;
9271 inst.instruction |= inst.operands[1].reg << 16;
9272 inst.instruction |= inst.operands[2].reg;
9273 inst.instruction |= inst.operands[3].imm << 10;
9274 }
9275
9276 /* ARM V6 SXTH.
9277
9278 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9279 Condition defaults to COND_ALWAYS.
9280 Error if any register uses R15. */
9281
9282 static void
9283 do_sxth (void)
9284 {
9285 inst.instruction |= inst.operands[0].reg << 12;
9286 inst.instruction |= inst.operands[1].reg;
9287 inst.instruction |= inst.operands[2].imm << 10;
9288 }
9289 \f
9290 /* VFP instructions. In a logical order: SP variant first, monad
9291 before dyad, arithmetic then move then load/store. */
9292
/* Single-precision monadic operation: Sd, Sm.  */

static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9299
/* Single-precision dyadic operation: Sd, Sn, Sm.  */

static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9307
/* Single-precision compare with zero: only Sd is encoded.  */

static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
9313
/* Conversion with double-precision destination: Dd, Sm.  */

static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9320
/* Conversion with single-precision destination: Sd, Dm.  */

static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9327
/* Core-register transfer from a VFP SP register: Rd, Sn.  */

static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
9334
/* Two core registers from a consecutive VFP SP register pair:
   Rt, Rt2, {Sm, Sm1}.  */

static void
do_vfp_reg2_from_sp2 (void)
{
  /* Operand 2 is a register list; exactly two SP registers allowed.  */
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9344
/* VFP SP register from a core register: Sn, Rd.  */

static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
9351
/* Consecutive VFP SP register pair from two core registers:
   {Sm, Sm1}, Rt, Rt2.  */

static void
do_vfp_sp2_from_reg2 (void)
{
  /* Operand 0 is a register list; exactly two SP registers allowed.  */
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9361
/* Single-precision load/store: Sd, [Rn, #offset].  */

static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9368
/* Double-precision load/store: Dd, [Rn, #offset].  */

static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9375
9376
/* Single-precision load/store multiple; LDSTM_TYPE selects the
   addressing mode.  Modes other than IA require base writeback.  */

static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;	/* Rn */
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;		/* Register count.  */
}
9389
/* Double-precision load/store multiple; LDSTM_TYPE selects the
   addressing mode.  Modes other than IA/IAX require base writeback.  */

static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;	/* Rn */
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* The offset field counts words: two per D register, plus one
     extra word for the FLDMX/FSTMX forms.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
9410
/* FLDMIAS/FSTMIAS.  */

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

/* FLDMDBS/FSTMDBS.  */

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

/* FLDMIAD/FSTMIAD.  */

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

/* FLDMDBD/FSTMDBD.  */

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

/* FLDMIAX/FSTMIAX (extended precision).  */

static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

/* FLDMDBX/FSTMDBX (extended precision).  */

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9446
/* Double-precision two-operand form: Dd, Dm.  */

static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9453
/* Double-precision two-operand form: Dn, Dd.  */

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
9460
/* Double-precision two-operand form: Dd, Dn.  */

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
9467
/* Double-precision three-operand form: Dd, Dn, Dm.  */

static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
9475
/* Double-precision single-operand form: only Dd is encoded.  */

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
9481
/* Double-precision three-operand form: Dm, Dd, Dn.  */

static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
9489
9490 /* VFPv3 instructions. */
9491 static void
9492 do_vfp_sp_const (void)
9493 {
9494 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9495 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9496 inst.instruction |= (inst.operands[1].imm & 0x0f);
9497 }
9498
9499 static void
9500 do_vfp_dp_const (void)
9501 {
9502 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9503 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9504 inst.instruction |= (inst.operands[1].imm & 0x0f);
9505 }
9506
/* Encode the #fbits operand of a VCVT fixed-point conversion.
   SRCSIZE is the width (16 or 32) of the fixed-point value; the
   encoded field holds SRCSIZE - fbits, split across bit 5 (LSB)
   and bits 0-3.  */

static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
9530
/* VCVT single-precision <-> 16-bit fixed point.  */

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

/* VCVT double-precision <-> 16-bit fixed point.  */

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

/* VCVT single-precision <-> 32-bit fixed point.  */

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

/* VCVT double-precision <-> 32-bit fixed point.  */

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
9558 \f
9559 /* FPA instructions. Also in a logical order. */
9560
/* FPA compare: Fn -> bits 16-19, Fm -> bits 0-3.  */

static void
do_fpa_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
9567
/* FPA LFM/SFM: encode the register count (1-4) and emulate the
   stack-style addressing modes the hardware does not provide.  */

static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* The count field uses a non-linear encoding (4 is encoded as 0).  */
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register occupies 12 bytes of transfer.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Descending stacks are emulated with post-indexed addressing.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9606 \f
9607 /* iWMMXt instructions: strictly in alphabetical order. */
9608
/* TANDC/TORC/TEXTRC family: the only legal destination is r15,
   and nothing beyond the base opcode needs encoding.  */

static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
9614
9615 static void
9616 do_iwmmxt_textrc (void)
9617 {
9618 inst.instruction |= inst.operands[0].reg << 12;
9619 inst.instruction |= inst.operands[1].imm;
9620 }
9621
9622 static void
9623 do_iwmmxt_textrm (void)
9624 {
9625 inst.instruction |= inst.operands[0].reg << 12;
9626 inst.instruction |= inst.operands[1].reg << 16;
9627 inst.instruction |= inst.operands[2].imm;
9628 }
9629
9630 static void
9631 do_iwmmxt_tinsr (void)
9632 {
9633 inst.instruction |= inst.operands[0].reg << 16;
9634 inst.instruction |= inst.operands[1].reg << 12;
9635 inst.instruction |= inst.operands[2].imm;
9636 }
9637
9638 static void
9639 do_iwmmxt_tmia (void)
9640 {
9641 inst.instruction |= inst.operands[0].reg << 5;
9642 inst.instruction |= inst.operands[1].reg;
9643 inst.instruction |= inst.operands[2].reg << 12;
9644 }
9645
9646 static void
9647 do_iwmmxt_waligni (void)
9648 {
9649 inst.instruction |= inst.operands[0].reg << 12;
9650 inst.instruction |= inst.operands[1].reg << 16;
9651 inst.instruction |= inst.operands[2].reg;
9652 inst.instruction |= inst.operands[3].imm << 20;
9653 }
9654
9655 static void
9656 do_iwmmxt_wmerge (void)
9657 {
9658 inst.instruction |= inst.operands[0].reg << 12;
9659 inst.instruction |= inst.operands[1].reg << 16;
9660 inst.instruction |= inst.operands[2].reg;
9661 inst.instruction |= inst.operands[3].imm << 21;
9662 }
9663
9664 static void
9665 do_iwmmxt_wmov (void)
9666 {
9667 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
9668 inst.instruction |= inst.operands[0].reg << 12;
9669 inst.instruction |= inst.operands[1].reg << 16;
9670 inst.instruction |= inst.operands[1].reg;
9671 }
9672
/* WLDRB/WLDRH/WSTRB/WSTRH: byte/halfword loads and stores need a
   scaled-by-2 coprocessor offset reloc, chosen per instruction set.  */

static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}
9684
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register transfers use the unconditional (0xf) form.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
9698
/* WLDRD/WSTRD: doubleword load/store.  iWMMXt2 additionally has a
   register-offset form, hand-encoded below.  */

static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Rebuild the encoding for the iWMMXt2 [Rn, +/-Rm] form:
	 clear the immediate-form fields, force the unconditional
	 (0xf) prefix, then place the addressing-mode bits.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xf << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
9721
9722 static void
9723 do_iwmmxt_wshufh (void)
9724 {
9725 inst.instruction |= inst.operands[0].reg << 12;
9726 inst.instruction |= inst.operands[1].reg << 16;
9727 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
9728 inst.instruction |= (inst.operands[2].imm & 0x0f);
9729 }
9730
9731 static void
9732 do_iwmmxt_wzero (void)
9733 {
9734 /* WZERO reg is an alias for WANDN reg, reg, reg. */
9735 inst.instruction |= inst.operands[0].reg;
9736 inst.instruction |= inst.operands[0].reg << 12;
9737 inst.instruction |= inst.operands[0].reg << 16;
9738 }
9739
/* iWMMXt shift/rotate: three-register form, or (iWMMXt2 only) a
   register-register-#imm5 form.  A zero shift amount is rewritten
   into an equivalent full-width rotate or a WOR self-move.  */

static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 select the operation and element size; rewrite
	   shift-by-zero into a canonical equivalent.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Unconditional form; imm5 split between bit 8 and bits 0-3.  */
    inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
9789 \f
9790 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
9791 operations first, then control, shift, and load/store. */
9792
9793 /* Insns like "foo X,Y,Z". */
9794
9795 static void
9796 do_mav_triple (void)
9797 {
9798 inst.instruction |= inst.operands[0].reg << 16;
9799 inst.instruction |= inst.operands[1].reg;
9800 inst.instruction |= inst.operands[2].reg << 12;
9801 }
9802
9803 /* Insns like "foo W,X,Y,Z".
9804 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
9805
9806 static void
9807 do_mav_quad (void)
9808 {
9809 inst.instruction |= inst.operands[0].reg << 5;
9810 inst.instruction |= inst.operands[1].reg << 12;
9811 inst.instruction |= inst.operands[2].reg << 16;
9812 inst.instruction |= inst.operands[3].reg;
9813 }
9814
9815 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
static void
do_mav_dspsc (void)
{
  /* The DSPSC destination is implicit; only the MVDX source register
     needs encoding.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
9821
9822 /* Maverick shift immediate instructions.
9823 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
9824 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
9825
static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;	/* Destination.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* Source.  */

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.  */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
9841 \f
9842 /* XScale instructions. Also sorted arithmetic before move. */
9843
9844 /* Xscale multiply-accumulate (argument parse)
9845 MIAcc acc0,Rm,Rs
9846 MIAPHcc acc0,Rm,Rs
9847 MIAxycc acc0,Rm,Rs. */
9848
9849 static void
9850 do_xsc_mia (void)
9851 {
9852 inst.instruction |= inst.operands[1].reg;
9853 inst.instruction |= inst.operands[2].reg << 12;
9854 }
9855
9856 /* Xscale move-accumulator-register (argument parse)
9857
9858 MARcc acc0,RdLo,RdHi. */
9859
9860 static void
9861 do_xsc_mar (void)
9862 {
9863 inst.instruction |= inst.operands[1].reg << 12;
9864 inst.instruction |= inst.operands[2].reg << 16;
9865 }
9866
9867 /* Xscale move-register-accumulator (argument parse)
9868
9869 MRAcc RdLo,RdHi,acc0. */
9870
9871 static void
9872 do_xsc_mra (void)
9873 {
9874 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
9875 inst.instruction |= inst.operands[0].reg << 12;
9876 inst.instruction |= inst.operands[1].reg << 16;
9877 }
9878 \f
9879 /* Encoding functions relevant only to Thumb. */
9880
9881 /* inst.operands[i] is a shifted-register operand; encode
9882 it into inst.instruction in the format used by Thumb32. */
9883
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* A zero amount canonicalizes to LSL #0; LSR/ASR #32 are
	 encoded with a zero amount field.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      /* The 5-bit amount is split: imm3 -> bits 12-14, imm2 -> bits 6-7.  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
9915
9916
9917 /* inst.operands[i] was set up by parse_address. Encode it into a
9918 Thumb32 format load or store instruction. Reject forms that cannot
9919 be used with such instructions. If is_t is true, reject forms that
9920 cannot be used with a T instruction; if is_d is true, reject forms
9921 that cannot be used with a D instruction. If it is a store insn,
9922 reject PC in Rn. */
9923
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #shift}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0..#3 is encodable (bits 4-5).  */
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Pre-indexed immediate form: [Rn, #imm]{!}.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      /* PC-relative addressing is only valid for loads.  */
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed form: [Rn], #imm — writeback is implied.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
9995
9996 /* Table of Thumb instructions which exist in both 16- and 32-bit
9997 encodings (the latter only in post-V6T2 cores). The index is the
9998 value used in the insns table below. When there is more than one
9999 possible 16-bit encoding for the instruction, this table always
10000 holds variant (1).
10001 Also contains several pseudo-instructions used during relaxation. */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* First expansion: the mnemonic enumerators T_MNEM_adc etc.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: the 16-bit opcode for each mnemonic.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: the 32-bit opcode; bit 20 is the S (flags) bit.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10101
10102 /* Thumb instruction encoders, in alphabetical order. */
10103
10104 /* ADDW or SUBW. */
10105
static void
do_t_add_sub_w (void)
{
  int Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
     is the SP-{plus,minus}-immediate form of the instruction.  */
  if (Rn == REG_SP)
    constraint (Rd == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rd);

  inst.instruction |= (Rn << 16) | (Rd << 8);
  /* The 12-bit immediate is applied by the fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
}
10124
10125 /* Parse an add or subtract instruction. We get here with inst.instruction
10126 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
10127
10128 static void
10129 do_t_add_sub (void)
10130 {
10131 int Rd, Rs, Rn;
10132
10133 Rd = inst.operands[0].reg;
10134 Rs = (inst.operands[1].present
10135 ? inst.operands[1].reg /* Rd, Rs, foo */
10136 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10137
10138 if (Rd == REG_PC)
10139 set_it_insn_type_last ();
10140
10141 if (unified_syntax)
10142 {
10143 bfd_boolean flags;
10144 bfd_boolean narrow;
10145 int opcode;
10146
10147 flags = (inst.instruction == T_MNEM_adds
10148 || inst.instruction == T_MNEM_subs);
10149 if (flags)
10150 narrow = !in_it_block ();
10151 else
10152 narrow = in_it_block ();
10153 if (!inst.operands[2].isreg)
10154 {
10155 int add;
10156
10157 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
10158
10159 add = (inst.instruction == T_MNEM_add
10160 || inst.instruction == T_MNEM_adds);
10161 opcode = 0;
10162 if (inst.size_req != 4)
10163 {
10164 /* Attempt to use a narrow opcode, with relaxation if
10165 appropriate. */
10166 if (Rd == REG_SP && Rs == REG_SP && !flags)
10167 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
10168 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
10169 opcode = T_MNEM_add_sp;
10170 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
10171 opcode = T_MNEM_add_pc;
10172 else if (Rd <= 7 && Rs <= 7 && narrow)
10173 {
10174 if (flags)
10175 opcode = add ? T_MNEM_addis : T_MNEM_subis;
10176 else
10177 opcode = add ? T_MNEM_addi : T_MNEM_subi;
10178 }
10179 if (opcode)
10180 {
10181 inst.instruction = THUMB_OP16(opcode);
10182 inst.instruction |= (Rd << 4) | Rs;
10183 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10184 if (inst.size_req != 2)
10185 inst.relax = opcode;
10186 }
10187 else
10188 constraint (inst.size_req == 2, BAD_HIREG);
10189 }
10190 if (inst.size_req == 4
10191 || (inst.size_req != 2 && !opcode))
10192 {
10193 if (Rd == REG_PC)
10194 {
10195 constraint (add, BAD_PC);
10196 constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
10197 _("only SUBS PC, LR, #const allowed"));
10198 constraint (inst.reloc.exp.X_op != O_constant,
10199 _("expression too complex"));
10200 constraint (inst.reloc.exp.X_add_number < 0
10201 || inst.reloc.exp.X_add_number > 0xff,
10202 _("immediate value out of range"));
10203 inst.instruction = T2_SUBS_PC_LR
10204 | inst.reloc.exp.X_add_number;
10205 inst.reloc.type = BFD_RELOC_UNUSED;
10206 return;
10207 }
10208 else if (Rs == REG_PC)
10209 {
10210 /* Always use addw/subw. */
10211 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
10212 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10213 }
10214 else
10215 {
10216 inst.instruction = THUMB_OP32 (inst.instruction);
10217 inst.instruction = (inst.instruction & 0xe1ffffff)
10218 | 0x10000000;
10219 if (flags)
10220 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10221 else
10222 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
10223 }
10224 inst.instruction |= Rd << 8;
10225 inst.instruction |= Rs << 16;
10226 }
10227 }
10228 else
10229 {
10230 unsigned int value = inst.reloc.exp.X_add_number;
10231 unsigned int shift = inst.operands[2].shift_kind;
10232
10233 Rn = inst.operands[2].reg;
10234 /* See if we can do this with a 16-bit instruction. */
10235 if (!inst.operands[2].shifted && inst.size_req != 4)
10236 {
10237 if (Rd > 7 || Rs > 7 || Rn > 7)
10238 narrow = FALSE;
10239
10240 if (narrow)
10241 {
10242 inst.instruction = ((inst.instruction == T_MNEM_adds
10243 || inst.instruction == T_MNEM_add)
10244 ? T_OPCODE_ADD_R3
10245 : T_OPCODE_SUB_R3);
10246 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
10247 return;
10248 }
10249
10250 if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
10251 {
10252 /* Thumb-1 cores (except v6-M) require at least one high
10253 register in a narrow non flag setting add. */
10254 if (Rd > 7 || Rn > 7
10255 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
10256 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
10257 {
10258 if (Rd == Rn)
10259 {
10260 Rn = Rs;
10261 Rs = Rd;
10262 }
10263 inst.instruction = T_OPCODE_ADD_HI;
10264 inst.instruction |= (Rd & 8) << 4;
10265 inst.instruction |= (Rd & 7);
10266 inst.instruction |= Rn << 3;
10267 return;
10268 }
10269 }
10270 }
10271
10272 constraint (Rd == REG_PC, BAD_PC);
10273 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
10274 constraint (Rs == REG_PC, BAD_PC);
10275 reject_bad_reg (Rn);
10276
10277 /* If we get here, it can't be done in 16 bits. */
10278 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
10279 _("shift must be constant"));
10280 inst.instruction = THUMB_OP32 (inst.instruction);
10281 inst.instruction |= Rd << 8;
10282 inst.instruction |= Rs << 16;
10283 constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
10284 _("shift value over 3 not allowed in thumb mode"));
10285 constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
10286 _("only LSL shift allowed in thumb mode"));
10287 encode_thumb32_shifted_operand (2);
10288 }
10289 }
10290 else
10291 {
10292 constraint (inst.instruction == T_MNEM_adds
10293 || inst.instruction == T_MNEM_subs,
10294 BAD_THUMB32);
10295
10296 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
10297 {
10298 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
10299 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
10300 BAD_HIREG);
10301
10302 inst.instruction = (inst.instruction == T_MNEM_add
10303 ? 0x0000 : 0x8000);
10304 inst.instruction |= (Rd << 4) | Rs;
10305 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10306 return;
10307 }
10308
10309 Rn = inst.operands[2].reg;
10310 constraint (inst.operands[2].shifted, _("unshifted register required"));
10311
10312 /* We now have Rd, Rs, and Rn set to registers. */
10313 if (Rd > 7 || Rs > 7 || Rn > 7)
10314 {
10315 /* Can't do this for SUB. */
10316 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
10317 inst.instruction = T_OPCODE_ADD_HI;
10318 inst.instruction |= (Rd & 8) << 4;
10319 inst.instruction |= (Rd & 7);
10320 if (Rs == Rd)
10321 inst.instruction |= Rn << 3;
10322 else if (Rn == Rd)
10323 inst.instruction |= Rs << 3;
10324 else
10325 constraint (1, _("dest must overlap one source register"));
10326 }
10327 else
10328 {
10329 inst.instruction = (inst.instruction == T_MNEM_add
10330 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
10331 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
10332 }
10333 }
10334 }
10335
10336 static void
10337 do_t_adr (void)
10338 {
10339 unsigned Rd;
10340
10341 Rd = inst.operands[0].reg;
10342 reject_bad_reg (Rd);
10343
10344 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
10345 {
10346 /* Defer to section relaxation. */
10347 inst.relax = inst.instruction;
10348 inst.instruction = THUMB_OP16 (inst.instruction);
10349 inst.instruction |= Rd << 4;
10350 }
10351 else if (unified_syntax && inst.size_req != 2)
10352 {
10353 /* Generate a 32-bit opcode. */
10354 inst.instruction = THUMB_OP32 (inst.instruction);
10355 inst.instruction |= Rd << 8;
10356 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
10357 inst.reloc.pc_rel = 1;
10358 }
10359 else
10360 {
10361 /* Generate a 16-bit opcode. */
10362 inst.instruction = THUMB_OP16 (inst.instruction);
10363 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10364 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
10365 inst.reloc.pc_rel = 1;
10366
10367 inst.instruction |= Rd << 4;
10368 }
10369 }
10370
10371 /* Arithmetic instructions for which there is just one 16-bit
10372 instruction encoding, and it allows only two low registers.
10373 For maximal compatibility with ARM syntax, we allow three register
10374 operands even when Thumb-32 instructions are not available, as long
10375 as the first two are identical. For instance, both "sbc r0,r1" and
10376 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Clear bits 25-28 and set bit 28 to select the immediate
	     form of the T32 encoding.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  The
	     16-bit form always sets flags, so it is only usable when
	     the flag-setting behaviour matches the IT-block context.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  /* The 16-bit form also requires low registers, no shifted
	     operand, and no explicit ".w" size request.  */
	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
10459
10460 /* Similarly, but for instructions where the arithmetic operation is
10461 commutative, so we can allow either of them to be different from
10462 the destination operand in a 16-bit instruction. For instance, all
10463 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10464 accepted. */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Clear bits 25-28 and set bit 28 to select the immediate
	     form of the T32 encoding.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  The
	     16-bit form always sets flags, so it is only usable when
	     the flag-setting behaviour matches the IT-block context.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  /* The 16-bit form also requires low registers, no shifted
	     operand, and no explicit ".w" size request.  */
	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* The operation is commutative, so either source may
		 coincide with the destination.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
10560
10561 static void
10562 do_t_bfc (void)
10563 {
10564 unsigned Rd;
10565 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10566 constraint (msb > 32, _("bit-field extends past end of register"));
10567 /* The instruction encoding stores the LSB and MSB,
10568 not the LSB and width. */
10569 Rd = inst.operands[0].reg;
10570 reject_bad_reg (Rd);
10571 inst.instruction |= Rd << 8;
10572 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10573 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10574 inst.instruction |= msb - 1;
10575 }
10576
10577 static void
10578 do_t_bfi (void)
10579 {
10580 int Rd, Rn;
10581 unsigned int msb;
10582
10583 Rd = inst.operands[0].reg;
10584 reject_bad_reg (Rd);
10585
10586 /* #0 in second position is alternative syntax for bfc, which is
10587 the same instruction but with REG_PC in the Rm field. */
10588 if (!inst.operands[1].isreg)
10589 Rn = REG_PC;
10590 else
10591 {
10592 Rn = inst.operands[1].reg;
10593 reject_bad_reg (Rn);
10594 }
10595
10596 msb = inst.operands[2].imm + inst.operands[3].imm;
10597 constraint (msb > 32, _("bit-field extends past end of register"));
10598 /* The instruction encoding stores the LSB and MSB,
10599 not the LSB and width. */
10600 inst.instruction |= Rd << 8;
10601 inst.instruction |= Rn << 16;
10602 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10603 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10604 inst.instruction |= msb - 1;
10605 }
10606
10607 static void
10608 do_t_bfx (void)
10609 {
10610 unsigned Rd, Rn;
10611
10612 Rd = inst.operands[0].reg;
10613 Rn = inst.operands[1].reg;
10614
10615 reject_bad_reg (Rd);
10616 reject_bad_reg (Rn);
10617
10618 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10619 _("bit-field extends past end of register"));
10620 inst.instruction |= Rd << 8;
10621 inst.instruction |= Rn << 16;
10622 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10623 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10624 inst.instruction |= inst.operands[3].imm - 1;
10625 }
10626
10627 /* ARM V5 Thumb BLX (argument parse)
10628 BLX <target_addr> which is BLX(1)
10629 BLX <Rm> which is BLX(2)
10630 Unfortunately, there are two different opcodes for this mnemonic.
10631 So, the insns[].value is not used, and the code here zaps values
10632 into inst.instruction.
10633
10634 ??? How to take advantage of the additional two bits of displacement
10635 available in Thumb32 mode? Need new relocation? */
10636
10637 static void
10638 do_t_blx (void)
10639 {
10640 set_it_insn_type_last ();
10641
10642 if (inst.operands[0].isreg)
10643 {
10644 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10645 /* We have a register, so this is BLX(2). */
10646 inst.instruction |= inst.operands[0].reg << 3;
10647 }
10648 else
10649 {
10650 /* No register. This must be BLX(1). */
10651 inst.instruction = 0xf000e800;
10652 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
10653 }
10654 }
10655
/* Thumb branch (B / conditional B).  Selects between the 16-bit and
   32-bit encodings and sets the matching PC-relative relocation.  */

static void
do_t_branch (void)
{
  int opcode;
  int cond;
  int reloc;

  cond = inst.cond;
  set_it_insn_type (IF_INSIDE_IT_LAST_INSN);

  if (in_it_block ())
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  /* Use the 32-bit encoding when it was explicitly requested, or when
     no 16-bit size was demanded and the target already carries a reloc
     or is a plain constant.  */
  if (unified_syntax
      && (inst.size_req == 4
	  || (inst.size_req != 2
	      && (inst.operands[0].hasreloc
		  || inst.reloc.exp.X_op == O_constant))))
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  gas_assert (cond != 0xF);
	  /* Condition field of the T32 conditional branch.  */
	  inst.instruction |= cond << 22;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }
  inst.reloc.type = reloc;
  inst.reloc.pc_rel = 1;
}
10713
10714 /* Actually do the work for Thumb state bkpt and hlt. The only difference
10715 between the two is the maximum immediate allowed - which is passed in
10716 RANGE. */
static void
do_t_bkpt_hlt1 (int range)
{
  constraint (inst.cond != COND_ALWAYS,
	      _("instruction is always unconditional"));
  if (inst.operands[0].present)
    {
      /* The immediate operand is optional; check it against the
	 mnemonic-specific maximum before merging it in.  */
      constraint (inst.operands[0].imm > range,
		  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }

  /* BKPT/HLT may appear anywhere relative to an IT block.  */
  set_it_insn_type (NEUTRAL_IT_INSN);
}
10731
static void
do_t_hlt (void)
{
  /* Thumb HLT takes a 6-bit immediate (0..63).  */
  do_t_bkpt_hlt1 (0x3f);
}
10737
static void
do_t_bkpt (void)
{
  /* Thumb BKPT takes an 8-bit immediate (0..255).  */
  do_t_bkpt_hlt1 (0xff);
}
10743
/* Thumb branch-with-link to an immediate target, encoded with the
   23-bit-range BRANCH23 relocation.  */

static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	 inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
10771
/* Thumb BX: branch and exchange instruction set.  */

static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
10781
10782 static void
10783 do_t_bxj (void)
10784 {
10785 int Rm;
10786
10787 set_it_insn_type_last ();
10788 Rm = inst.operands[0].reg;
10789 reject_bad_reg (Rm);
10790 inst.instruction |= Rm << 16;
10791 }
10792
10793 static void
10794 do_t_clz (void)
10795 {
10796 unsigned Rd;
10797 unsigned Rm;
10798
10799 Rd = inst.operands[0].reg;
10800 Rm = inst.operands[1].reg;
10801
10802 reject_bad_reg (Rd);
10803 reject_bad_reg (Rm);
10804
10805 inst.instruction |= Rd << 8;
10806 inst.instruction |= Rm << 16;
10807 inst.instruction |= Rm;
10808 }
10809
/* Thumb CPS: change processor state, simple 16-bit form.  */

static void
do_t_cps (void)
{
  /* CPS is not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
10816
/* Thumb CPS with interrupt-mask operand (CPSIE/CPSID forms).  */

static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* 32-bit form: carry the imod field (bits 5-4 of the 16-bit
	 opcode) over into bits 10-9 of the T32 encoding.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
10844
10845 /* THUMB CPY instruction (argument parse). */
10846
10847 static void
10848 do_t_cpy (void)
10849 {
10850 if (inst.size_req == 4)
10851 {
10852 inst.instruction = THUMB_OP32 (T_MNEM_mov);
10853 inst.instruction |= inst.operands[0].reg << 8;
10854 inst.instruction |= inst.operands[1].reg;
10855 }
10856 else
10857 {
10858 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
10859 inst.instruction |= (inst.operands[0].reg & 0x7);
10860 inst.instruction |= inst.operands[1].reg << 3;
10861 }
10862 }
10863
10864 static void
10865 do_t_cbz (void)
10866 {
10867 set_it_insn_type (OUTSIDE_IT_INSN);
10868 constraint (inst.operands[0].reg > 7, BAD_HIREG);
10869 inst.instruction |= inst.operands[0].reg;
10870 inst.reloc.pc_rel = 1;
10871 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
10872 }
10873
/* Thumb DBG hint: merge the immediate option field into the opcode.  */

static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
10879
10880 static void
10881 do_t_div (void)
10882 {
10883 unsigned Rd, Rn, Rm;
10884
10885 Rd = inst.operands[0].reg;
10886 Rn = (inst.operands[1].present
10887 ? inst.operands[1].reg : Rd);
10888 Rm = inst.operands[2].reg;
10889
10890 reject_bad_reg (Rd);
10891 reject_bad_reg (Rn);
10892 reject_bad_reg (Rm);
10893
10894 inst.instruction |= Rd << 8;
10895 inst.instruction |= Rn << 16;
10896 inst.instruction |= Rm;
10897 }
10898
10899 static void
10900 do_t_hint (void)
10901 {
10902 if (unified_syntax && inst.size_req == 4)
10903 inst.instruction = THUMB_OP32 (inst.instruction);
10904 else
10905 inst.instruction = THUMB_OP16 (inst.instruction);
10906 }
10907
/* Thumb-2 IT (If-Then).  Records the IT state for the following
   instructions and adjusts the mask encoding for negative conditions.  */

static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.
     The position of the lowest set bit in the mask fixes the block
     length; the bits above it select then/else for each slot.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
10950
/* Helper function used for both push/pop and ldm/stm.  BASE is the
   base register, MASK the register list, and WRITEBACK whether the
   base register should be updated.  Single-register lists are
   converted to plain str/ldr encodings.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the T32 encoding distinguishes loads from stores.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error =  _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch, so must be last in an IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Put the single register into the Rt field (bits 15-12).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11014
/* Thumb LDM/STM.  Attempts a 16-bit encoding (including push/pop and
   single-register str/ldr conversions) where the base register and
   register list permit, otherwise falls back to the 32-bit form via
   encode_thumb2_ldmstm.  */

static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* For stmia, the narrow form requires writeback; for
		 ldmia, writeback must match "base not in list".  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. ldmia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP base: convert to push/pop, or to an SP-relative
		 str/ldr for a single-register list.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11142
/* Thumb-2 LDREX.  Only a plain pre-indexed [Rn{, #imm}] address with
   no writeback, register offset, shift or negative offset is valid.  */

static void
do_t_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,
	      BAD_ADDR_MODE);

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
11158
11159 static void
11160 do_t_ldrexd (void)
11161 {
11162 if (!inst.operands[1].present)
11163 {
11164 constraint (inst.operands[0].reg == REG_LR,
11165 _("r14 not allowed as first register "
11166 "when second register is omitted"));
11167 inst.operands[1].reg = inst.operands[0].reg + 1;
11168 }
11169 constraint (inst.operands[0].reg == inst.operands[1].reg,
11170 BAD_OVERLAP);
11171
11172 inst.instruction |= inst.operands[0].reg << 12;
11173 inst.instruction |= inst.operands[1].reg << 8;
11174 inst.instruction |= inst.operands[2].reg << 16;
11175 }
11176
11177 static void
11178 do_t_ldst (void)
11179 {
11180 unsigned long opcode;
11181 int Rn;
11182
11183 if (inst.operands[0].isreg
11184 && !inst.operands[0].preind
11185 && inst.operands[0].reg == REG_PC)
11186 set_it_insn_type_last ();
11187
11188 opcode = inst.instruction;
11189 if (unified_syntax)
11190 {
11191 if (!inst.operands[1].isreg)
11192 {
11193 if (opcode <= 0xffff)
11194 inst.instruction = THUMB_OP32 (opcode);
11195 if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
11196 return;
11197 }
11198 if (inst.operands[1].isreg
11199 && !inst.operands[1].writeback
11200 && !inst.operands[1].shifted && !inst.operands[1].postind
11201 && !inst.operands[1].negative && inst.operands[0].reg <= 7
11202 && opcode <= 0xffff
11203 && inst.size_req != 4)
11204 {
11205 /* Insn may have a 16-bit form. */
11206 Rn = inst.operands[1].reg;
11207 if (inst.operands[1].immisreg)
11208 {
11209 inst.instruction = THUMB_OP16 (opcode);
11210 /* [Rn, Rik] */
11211 if (Rn <= 7 && inst.operands[1].imm <= 7)
11212 goto op16;
11213 else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
11214 reject_bad_reg (inst.operands[1].imm);
11215 }
11216 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
11217 && opcode != T_MNEM_ldrsb)
11218 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
11219 || (Rn == REG_SP && opcode == T_MNEM_str))
11220 {
11221 /* [Rn, #const] */
11222 if (Rn > 7)
11223 {
11224 if (Rn == REG_PC)
11225 {
11226 if (inst.reloc.pc_rel)
11227 opcode = T_MNEM_ldr_pc2;
11228 else
11229 opcode = T_MNEM_ldr_pc;
11230 }
11231 else
11232 {
11233 if (opcode == T_MNEM_ldr)
11234 opcode = T_MNEM_ldr_sp;
11235 else
11236 opcode = T_MNEM_str_sp;
11237 }
11238 inst.instruction = inst.operands[0].reg << 8;
11239 }
11240 else
11241 {
11242 inst.instruction = inst.operands[0].reg;
11243 inst.instruction |= inst.operands[1].reg << 3;
11244 }
11245 inst.instruction |= THUMB_OP16 (opcode);
11246 if (inst.size_req == 2)
11247 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11248 else
11249 inst.relax = opcode;
11250 return;
11251 }
11252 }
11253 /* Definitely a 32-bit variant. */
11254
11255 /* Warning for Erratum 752419. */
11256 if (opcode == T_MNEM_ldr
11257 && inst.operands[0].reg == REG_SP
11258 && inst.operands[1].writeback == 1
11259 && !inst.operands[1].immisreg)
11260 {
11261 if (no_cpu_selected ()
11262 || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
11263 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
11264 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
11265 as_warn (_("This instruction may be unpredictable "
11266 "if executed on M-profile cores "
11267 "with interrupts enabled."));
11268 }
11269
11270 /* Do some validations regarding addressing modes. */
11271 if (inst.operands[1].immisreg)
11272 reject_bad_reg (inst.operands[1].imm);
11273
11274 constraint (inst.operands[1].writeback == 1
11275 && inst.operands[0].reg == inst.operands[1].reg,
11276 BAD_OVERLAP);
11277
11278 inst.instruction = THUMB_OP32 (opcode);
11279 inst.instruction |= inst.operands[0].reg << 12;
11280 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
11281 check_ldr_r15_aligned ();
11282 return;
11283 }
11284
11285 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11286
11287 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
11288 {
11289 /* Only [Rn,Rm] is acceptable. */
11290 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
11291 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
11292 || inst.operands[1].postind || inst.operands[1].shifted
11293 || inst.operands[1].negative,
11294 _("Thumb does not support this addressing mode"));
11295 inst.instruction = THUMB_OP16 (inst.instruction);
11296 goto op16;
11297 }
11298
11299 inst.instruction = THUMB_OP16 (inst.instruction);
11300 if (!inst.operands[1].isreg)
11301 if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
11302 return;
11303
11304 constraint (!inst.operands[1].preind
11305 || inst.operands[1].shifted
11306 || inst.operands[1].writeback,
11307 _("Thumb does not support this addressing mode"));
11308 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
11309 {
11310 constraint (inst.instruction & 0x0600,
11311 _("byte or halfword not valid for base register"));
11312 constraint (inst.operands[1].reg == REG_PC
11313 && !(inst.instruction & THUMB_LOAD_BIT),
11314 _("r15 based store not allowed"));
11315 constraint (inst.operands[1].immisreg,
11316 _("invalid base register for register offset"));
11317
11318 if (inst.operands[1].reg == REG_PC)
11319 inst.instruction = T_OPCODE_LDR_PC;
11320 else if (inst.instruction & THUMB_LOAD_BIT)
11321 inst.instruction = T_OPCODE_LDR_SP;
11322 else
11323 inst.instruction = T_OPCODE_STR_SP;
11324
11325 inst.instruction |= inst.operands[0].reg << 8;
11326 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11327 return;
11328 }
11329
11330 constraint (inst.operands[1].reg > 7, BAD_HIREG);
11331 if (!inst.operands[1].immisreg)
11332 {
11333 /* Immediate offset. */
11334 inst.instruction |= inst.operands[0].reg;
11335 inst.instruction |= inst.operands[1].reg << 3;
11336 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11337 return;
11338 }
11339
11340 /* Register offset. */
11341 constraint (inst.operands[1].imm > 7, BAD_HIREG);
11342 constraint (inst.operands[1].negative,
11343 _("Thumb does not support this addressing mode"));
11344
11345 op16:
11346 switch (inst.instruction)
11347 {
11348 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
11349 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
11350 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
11351 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
11352 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
11353 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
11354 case 0x5600 /* ldrsb */:
11355 case 0x5e00 /* ldrsh */: break;
11356 default: abort ();
11357 }
11358
11359 inst.instruction |= inst.operands[0].reg;
11360 inst.instruction |= inst.operands[1].reg << 3;
11361 inst.instruction |= inst.operands[1].imm << 6;
11362 }
11363
11364 static void
11365 do_t_ldstd (void)
11366 {
11367 if (!inst.operands[1].present)
11368 {
11369 inst.operands[1].reg = inst.operands[0].reg + 1;
11370 constraint (inst.operands[0].reg == REG_LR,
11371 _("r14 not allowed here"));
11372 constraint (inst.operands[0].reg == REG_R12,
11373 _("r12 not allowed here"));
11374 }
11375
11376 if (inst.operands[2].writeback
11377 && (inst.operands[0].reg == inst.operands[2].reg
11378 || inst.operands[1].reg == inst.operands[2].reg))
11379 as_warn (_("base register written back, and overlaps "
11380 "one of transfer registers"));
11381
11382 inst.instruction |= inst.operands[0].reg << 12;
11383 inst.instruction |= inst.operands[1].reg << 8;
11384 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11385 }
11386
11387 static void
11388 do_t_ldstt (void)
11389 {
11390 inst.instruction |= inst.operands[0].reg << 12;
11391 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
11392 }
11393
11394 static void
11395 do_t_mla (void)
11396 {
11397 unsigned Rd, Rn, Rm, Ra;
11398
11399 Rd = inst.operands[0].reg;
11400 Rn = inst.operands[1].reg;
11401 Rm = inst.operands[2].reg;
11402 Ra = inst.operands[3].reg;
11403
11404 reject_bad_reg (Rd);
11405 reject_bad_reg (Rn);
11406 reject_bad_reg (Rm);
11407 reject_bad_reg (Ra);
11408
11409 inst.instruction |= Rd << 8;
11410 inst.instruction |= Rn << 16;
11411 inst.instruction |= Rm;
11412 inst.instruction |= Ra << 12;
11413 }
11414
11415 static void
11416 do_t_mlal (void)
11417 {
11418 unsigned RdLo, RdHi, Rn, Rm;
11419
11420 RdLo = inst.operands[0].reg;
11421 RdHi = inst.operands[1].reg;
11422 Rn = inst.operands[2].reg;
11423 Rm = inst.operands[3].reg;
11424
11425 reject_bad_reg (RdLo);
11426 reject_bad_reg (RdHi);
11427 reject_bad_reg (Rn);
11428 reject_bad_reg (Rm);
11429
11430 inst.instruction |= RdLo << 12;
11431 inst.instruction |= RdHi << 8;
11432 inst.instruction |= Rn << 16;
11433 inst.instruction |= Rm;
11434 }
11435
/* Encode Thumb MOV/MOVS/CMP.  Chooses between 16-bit and 32-bit
   encodings based on register choice, IT-block state, flag-setting,
   shifted operands and any explicit .n/.w size request.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* A write to PC must be the last instruction of an IT block.  */
  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      /* mov/movs put Rd at bit 8; cmp uses the Rn field at bit 16.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      /* Tentative narrow/wide choice; refined per-form below.  */
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_warn (_("Use of r%u as a source register is "
				 "deprecated when r%u is the destination "
				 "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      /* 16-bit MOV Rd, #imm8; leave relaxation open unless the
		 user explicitly asked for .n.  */
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
	      else
		inst.relax = opcode;
	    }
	  else
	    {
	      /* 32-bit modified-immediate encoding.  */
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  /* The narrow shift form shares Rd with the shifted source.  */
	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      /* Only LSL/LSR/ASR have 16-bit immediate-shift forms;
		 ROR falls back to wide.  */
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	/* Narrow, unshifted register forms.  */
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Pre-UAL (divided) syntax from here on: 16-bit encodings only.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  /* High-register forms share their encoding with CPY.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
11724
11725 static void
11726 do_t_mov16 (void)
11727 {
11728 unsigned Rd;
11729 bfd_vma imm;
11730 bfd_boolean top;
11731
11732 top = (inst.instruction & 0x00800000) != 0;
11733 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
11734 {
11735 constraint (top, _(":lower16: not allowed this instruction"));
11736 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
11737 }
11738 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
11739 {
11740 constraint (!top, _(":upper16: not allowed this instruction"));
11741 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
11742 }
11743
11744 Rd = inst.operands[0].reg;
11745 reject_bad_reg (Rd);
11746
11747 inst.instruction |= Rd << 8;
11748 if (inst.reloc.type == BFD_RELOC_UNUSED)
11749 {
11750 imm = inst.reloc.exp.X_add_number;
11751 inst.instruction |= (imm & 0xf000) << 4;
11752 inst.instruction |= (imm & 0x0800) << 15;
11753 inst.instruction |= (imm & 0x0700) << 4;
11754 inst.instruction |= (imm & 0x00ff);
11755 }
11756 }
11757
/* Encode Thumb MVN/MVNS/TST/CMN (and CMP routed here): flag-setting or
   inverting two-operand forms, choosing narrow vs wide encodings.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* cmp/cmn allow SP as the first operand but never PC; the others
     reject both SP and PC.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* mvn/mvns put Rd at bit 8; tst/cmn use the Rn field at bit 16.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      /* Narrow only with low regs, no shift, no .w request, and (for
	 mvn/mvns) agreement with the IT-block state.  */
      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      /* Wide register form, with an optional constant shift.  */
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: only the 16-bit register-register forms.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
11837
/* Encode Thumb-2 MRS: read a special-purpose or banked register
   into Rd.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* VFP system registers (FPSCR etc.) are handled elsewhere.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register form: the parsed value packs the SYSm/R fields
	 plus marker bits — check the markers before splitting it up.
	 NOTE(review): exact bit layout comes from the operand parser;
	 confirm against the banked-register table.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
	  constraint ((flags != 0) && m_profile, _("selected processor does "
		      "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
11884
/* Encode Thumb-2 MSR: write Rn into a special-purpose or banked
   register.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* VFP system registers are handled elsewhere.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* Banked-register destinations arrive as a packed reg value; plain
     PSR destinations as an immediate flag mask.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the mask/R/SYSm fields into the encoding.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
11930
/* Encode Thumb MUL/MULS.  If the optional third operand is omitted,
   the destination doubles as the first source (Rd = Rd * Rn).  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* The 16-bit form needs low registers, Rd overlapping a source,
	 and the flag-setting to match the IT-block state.  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      /* Pre-UAL syntax has only the 16-bit flag-setting form.  */
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
11993
11994 static void
11995 do_t_mull (void)
11996 {
11997 unsigned RdLo, RdHi, Rn, Rm;
11998
11999 RdLo = inst.operands[0].reg;
12000 RdHi = inst.operands[1].reg;
12001 Rn = inst.operands[2].reg;
12002 Rm = inst.operands[3].reg;
12003
12004 reject_bad_reg (RdLo);
12005 reject_bad_reg (RdHi);
12006 reject_bad_reg (Rn);
12007 reject_bad_reg (Rm);
12008
12009 inst.instruction |= RdLo << 12;
12010 inst.instruction |= RdHi << 8;
12011 inst.instruction |= Rn << 16;
12012 inst.instruction |= Rm;
12013
12014 if (RdLo == RdHi)
12015 as_tsktsk (_("rdhi and rdlo must be different"));
12016 }
12017
12018 static void
12019 do_t_nop (void)
12020 {
12021 set_it_insn_type (NEUTRAL_IT_INSN);
12022
12023 if (unified_syntax)
12024 {
12025 if (inst.size_req == 4 || inst.operands[0].imm > 15)
12026 {
12027 inst.instruction = THUMB_OP32 (inst.instruction);
12028 inst.instruction |= inst.operands[0].imm;
12029 }
12030 else
12031 {
12032 /* PR9722: Check for Thumb2 availability before
12033 generating a thumb2 nop instruction. */
12034 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
12035 {
12036 inst.instruction = THUMB_OP16 (inst.instruction);
12037 inst.instruction |= inst.operands[0].imm << 4;
12038 }
12039 else
12040 inst.instruction = 0x46c0;
12041 }
12042 }
12043 else
12044 {
12045 constraint (inst.operands[0].present,
12046 _("Thumb does not support NOP with hints"));
12047 inst.instruction = 0x46c0;
12048 }
12049 }
12050
12051 static void
12052 do_t_neg (void)
12053 {
12054 if (unified_syntax)
12055 {
12056 bfd_boolean narrow;
12057
12058 if (THUMB_SETS_FLAGS (inst.instruction))
12059 narrow = !in_it_block ();
12060 else
12061 narrow = in_it_block ();
12062 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12063 narrow = FALSE;
12064 if (inst.size_req == 4)
12065 narrow = FALSE;
12066
12067 if (!narrow)
12068 {
12069 inst.instruction = THUMB_OP32 (inst.instruction);
12070 inst.instruction |= inst.operands[0].reg << 8;
12071 inst.instruction |= inst.operands[1].reg << 16;
12072 }
12073 else
12074 {
12075 inst.instruction = THUMB_OP16 (inst.instruction);
12076 inst.instruction |= inst.operands[0].reg;
12077 inst.instruction |= inst.operands[1].reg << 3;
12078 }
12079 }
12080 else
12081 {
12082 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12083 BAD_HIREG);
12084 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12085
12086 inst.instruction = THUMB_OP16 (inst.instruction);
12087 inst.instruction |= inst.operands[0].reg;
12088 inst.instruction |= inst.operands[1].reg << 3;
12089 }
12090 }
12091
12092 static void
12093 do_t_orn (void)
12094 {
12095 unsigned Rd, Rn;
12096
12097 Rd = inst.operands[0].reg;
12098 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12099
12100 reject_bad_reg (Rd);
12101 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12102 reject_bad_reg (Rn);
12103
12104 inst.instruction |= Rd << 8;
12105 inst.instruction |= Rn << 16;
12106
12107 if (!inst.operands[2].isreg)
12108 {
12109 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12110 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12111 }
12112 else
12113 {
12114 unsigned Rm;
12115
12116 Rm = inst.operands[2].reg;
12117 reject_bad_reg (Rm);
12118
12119 constraint (inst.operands[2].shifted
12120 && inst.operands[2].immisreg,
12121 _("shift must be constant"));
12122 encode_thumb32_shifted_operand (2);
12123 }
12124 }
12125
12126 static void
12127 do_t_pkhbt (void)
12128 {
12129 unsigned Rd, Rn, Rm;
12130
12131 Rd = inst.operands[0].reg;
12132 Rn = inst.operands[1].reg;
12133 Rm = inst.operands[2].reg;
12134
12135 reject_bad_reg (Rd);
12136 reject_bad_reg (Rn);
12137 reject_bad_reg (Rm);
12138
12139 inst.instruction |= Rd << 8;
12140 inst.instruction |= Rn << 16;
12141 inst.instruction |= Rm;
12142 if (inst.operands[3].present)
12143 {
12144 unsigned int val = inst.reloc.exp.X_add_number;
12145 constraint (inst.reloc.exp.X_op != O_constant,
12146 _("expression too complex"));
12147 inst.instruction |= (val & 0x1c) << 10;
12148 inst.instruction |= (val & 0x03) << 6;
12149 }
12150 }
12151
12152 static void
12153 do_t_pkhtb (void)
12154 {
12155 if (!inst.operands[3].present)
12156 {
12157 unsigned Rtmp;
12158
12159 inst.instruction &= ~0x00000020;
12160
12161 /* PR 10168. Swap the Rm and Rn registers. */
12162 Rtmp = inst.operands[1].reg;
12163 inst.operands[1].reg = inst.operands[2].reg;
12164 inst.operands[2].reg = Rtmp;
12165 }
12166 do_t_pkhbt ();
12167 }
12168
12169 static void
12170 do_t_pld (void)
12171 {
12172 if (inst.operands[0].immisreg)
12173 reject_bad_reg (inst.operands[0].imm);
12174
12175 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
12176 }
12177
/* Encode Thumb PUSH/POP.  Prefer the 16-bit encodings — a plain
   low-register mask, or low registers plus LR (push) / PC (pop) —
   falling back to 32-bit LDM/STM with SP writeback.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    /* Only low registers: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1 << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus exactly LR/PC: 16-bit with the PC/LR bit.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else is a 32-bit LDM/STM on SP with writeback.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12210
12211 static void
12212 do_t_rbit (void)
12213 {
12214 unsigned Rd, Rm;
12215
12216 Rd = inst.operands[0].reg;
12217 Rm = inst.operands[1].reg;
12218
12219 reject_bad_reg (Rd);
12220 reject_bad_reg (Rm);
12221
12222 inst.instruction |= Rd << 8;
12223 inst.instruction |= Rm << 16;
12224 inst.instruction |= Rm;
12225 }
12226
12227 static void
12228 do_t_rev (void)
12229 {
12230 unsigned Rd, Rm;
12231
12232 Rd = inst.operands[0].reg;
12233 Rm = inst.operands[1].reg;
12234
12235 reject_bad_reg (Rd);
12236 reject_bad_reg (Rm);
12237
12238 if (Rd <= 7 && Rm <= 7
12239 && inst.size_req != 4)
12240 {
12241 inst.instruction = THUMB_OP16 (inst.instruction);
12242 inst.instruction |= Rd;
12243 inst.instruction |= Rm << 3;
12244 }
12245 else if (unified_syntax)
12246 {
12247 inst.instruction = THUMB_OP32 (inst.instruction);
12248 inst.instruction |= Rd << 8;
12249 inst.instruction |= Rm << 16;
12250 inst.instruction |= Rm;
12251 }
12252 else
12253 inst.error = BAD_HIREG;
12254 }
12255
12256 static void
12257 do_t_rrx (void)
12258 {
12259 unsigned Rd, Rm;
12260
12261 Rd = inst.operands[0].reg;
12262 Rm = inst.operands[1].reg;
12263
12264 reject_bad_reg (Rd);
12265 reject_bad_reg (Rm);
12266
12267 inst.instruction |= Rd << 8;
12268 inst.instruction |= Rm;
12269 }
12270
/* Encode Thumb RSB/RSBS.  A reverse-subtract of constant zero can be
   narrowed to the 16-bit NEG form; anything else uses the 32-bit
   encoding.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Flag-setting (S bit) narrows outside an IT block; the plain
	 form narrows inside one.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Only an immediate of exactly zero maps onto NEG.  */
      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12325
12326 static void
12327 do_t_setend (void)
12328 {
12329 if (warn_on_deprecated
12330 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12331 as_warn (_("setend use is deprecated for ARMv8"));
12332
12333 set_it_insn_type (OUTSIDE_IT_INSN);
12334 if (inst.operands[0].imm)
12335 inst.instruction |= 0x8;
12336 }
12337
12338 static void
12339 do_t_shift (void)
12340 {
12341 if (!inst.operands[1].present)
12342 inst.operands[1].reg = inst.operands[0].reg;
12343
12344 if (unified_syntax)
12345 {
12346 bfd_boolean narrow;
12347 int shift_kind;
12348
12349 switch (inst.instruction)
12350 {
12351 case T_MNEM_asr:
12352 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
12353 case T_MNEM_lsl:
12354 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
12355 case T_MNEM_lsr:
12356 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
12357 case T_MNEM_ror:
12358 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
12359 default: abort ();
12360 }
12361
12362 if (THUMB_SETS_FLAGS (inst.instruction))
12363 narrow = !in_it_block ();
12364 else
12365 narrow = in_it_block ();
12366 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12367 narrow = FALSE;
12368 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
12369 narrow = FALSE;
12370 if (inst.operands[2].isreg
12371 && (inst.operands[1].reg != inst.operands[0].reg
12372 || inst.operands[2].reg > 7))
12373 narrow = FALSE;
12374 if (inst.size_req == 4)
12375 narrow = FALSE;
12376
12377 reject_bad_reg (inst.operands[0].reg);
12378 reject_bad_reg (inst.operands[1].reg);
12379
12380 if (!narrow)
12381 {
12382 if (inst.operands[2].isreg)
12383 {
12384 reject_bad_reg (inst.operands[2].reg);
12385 inst.instruction = THUMB_OP32 (inst.instruction);
12386 inst.instruction |= inst.operands[0].reg << 8;
12387 inst.instruction |= inst.operands[1].reg << 16;
12388 inst.instruction |= inst.operands[2].reg;
12389
12390 /* PR 12854: Error on extraneous shifts. */
12391 constraint (inst.operands[2].shifted,
12392 _("extraneous shift as part of operand to shift insn"));
12393 }
12394 else
12395 {
12396 inst.operands[1].shifted = 1;
12397 inst.operands[1].shift_kind = shift_kind;
12398 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
12399 ? T_MNEM_movs : T_MNEM_mov);
12400 inst.instruction |= inst.operands[0].reg << 8;
12401 encode_thumb32_shifted_operand (1);
12402 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
12403 inst.reloc.type = BFD_RELOC_UNUSED;
12404 }
12405 }
12406 else
12407 {
12408 if (inst.operands[2].isreg)
12409 {
12410 switch (shift_kind)
12411 {
12412 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
12413 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
12414 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
12415 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
12416 default: abort ();
12417 }
12418
12419 inst.instruction |= inst.operands[0].reg;
12420 inst.instruction |= inst.operands[2].reg << 3;
12421
12422 /* PR 12854: Error on extraneous shifts. */
12423 constraint (inst.operands[2].shifted,
12424 _("extraneous shift as part of operand to shift insn"));
12425 }
12426 else
12427 {
12428 switch (shift_kind)
12429 {
12430 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
12431 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
12432 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
12433 default: abort ();
12434 }
12435 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
12436 inst.instruction |= inst.operands[0].reg;
12437 inst.instruction |= inst.operands[1].reg << 3;
12438 }
12439 }
12440 }
12441 else
12442 {
12443 constraint (inst.operands[0].reg > 7
12444 || inst.operands[1].reg > 7, BAD_HIREG);
12445 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12446
12447 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
12448 {
12449 constraint (inst.operands[2].reg > 7, BAD_HIREG);
12450 constraint (inst.operands[0].reg != inst.operands[1].reg,
12451 _("source1 and dest must be same register"));
12452
12453 switch (inst.instruction)
12454 {
12455 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
12456 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
12457 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
12458 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
12459 default: abort ();
12460 }
12461
12462 inst.instruction |= inst.operands[0].reg;
12463 inst.instruction |= inst.operands[2].reg << 3;
12464
12465 /* PR 12854: Error on extraneous shifts. */
12466 constraint (inst.operands[2].shifted,
12467 _("extraneous shift as part of operand to shift insn"));
12468 }
12469 else
12470 {
12471 switch (inst.instruction)
12472 {
12473 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
12474 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
12475 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
12476 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
12477 default: abort ();
12478 }
12479 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
12480 inst.instruction |= inst.operands[0].reg;
12481 inst.instruction |= inst.operands[1].reg << 3;
12482 }
12483 }
12484 }
12485
12486 static void
12487 do_t_simd (void)
12488 {
12489 unsigned Rd, Rn, Rm;
12490
12491 Rd = inst.operands[0].reg;
12492 Rn = inst.operands[1].reg;
12493 Rm = inst.operands[2].reg;
12494
12495 reject_bad_reg (Rd);
12496 reject_bad_reg (Rn);
12497 reject_bad_reg (Rm);
12498
12499 inst.instruction |= Rd << 8;
12500 inst.instruction |= Rn << 16;
12501 inst.instruction |= Rm;
12502 }
12503
12504 static void
12505 do_t_simd2 (void)
12506 {
12507 unsigned Rd, Rn, Rm;
12508
12509 Rd = inst.operands[0].reg;
12510 Rm = inst.operands[1].reg;
12511 Rn = inst.operands[2].reg;
12512
12513 reject_bad_reg (Rd);
12514 reject_bad_reg (Rn);
12515 reject_bad_reg (Rm);
12516
12517 inst.instruction |= Rd << 8;
12518 inst.instruction |= Rn << 16;
12519 inst.instruction |= Rm;
12520 }
12521
12522 static void
12523 do_t_smc (void)
12524 {
12525 unsigned int value = inst.reloc.exp.X_add_number;
12526 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
12527 _("SMC is not permitted on this architecture"));
12528 constraint (inst.reloc.exp.X_op != O_constant,
12529 _("expression too complex"));
12530 inst.reloc.type = BFD_RELOC_UNUSED;
12531 inst.instruction |= (value & 0xf000) >> 12;
12532 inst.instruction |= (value & 0x0ff0);
12533 inst.instruction |= (value & 0x000f) << 16;
12534 /* PR gas/15623: SMC instructions must be last in an IT block. */
12535 set_it_insn_type_last ();
12536 }
12537
12538 static void
12539 do_t_hvc (void)
12540 {
12541 unsigned int value = inst.reloc.exp.X_add_number;
12542
12543 inst.reloc.type = BFD_RELOC_UNUSED;
12544 inst.instruction |= (value & 0x0fff);
12545 inst.instruction |= (value & 0xf000) << 4;
12546 }
12547
/* Common encoder for Thumb-2 SSAT and USAT: Rd in bits 11:8, the
   saturation bound (adjusted by BIAS: 1 for SSAT, 0 for USAT) in the
   low bits, Rn in bits 19:16, plus an optional LSL/ASR shift taken
   from operand 3.  */

static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      /* The shift amount is encoded in the instruction; no fixup.  */
      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  /* sh bit selects ASR; LSL leaves it clear.  */
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000; /* sh bit. */

	  /* Split the amount into imm3 (bits 14:12) and imm2 (bits 7:6).  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
12585
/* Thumb-2 SSAT: the saturation bound is encoded with a bias of one
   (#1..#32 maps to field values 0..31).  */

static void
do_t_ssat (void)
{
  do_t_ssat_usat (1 /* bias */);
}
12591
12592 static void
12593 do_t_ssat16 (void)
12594 {
12595 unsigned Rd, Rn;
12596
12597 Rd = inst.operands[0].reg;
12598 Rn = inst.operands[2].reg;
12599
12600 reject_bad_reg (Rd);
12601 reject_bad_reg (Rn);
12602
12603 inst.instruction |= Rd << 8;
12604 inst.instruction |= inst.operands[1].imm - 1;
12605 inst.instruction |= Rn << 16;
12606 }
12607
/* Encode Thumb-2 STREX.  The address operand (operand 2) must be a
   simple pre-indexed form ([Rn] or [Rn, #imm]); any immediate offset
   is handled via the U8 offset relocation.  */

static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  /* The base register may not be the PC.  */
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;   /* Rd (status).  */
  inst.instruction |= inst.operands[1].reg << 12;  /* Rt (data).  */
  inst.instruction |= inst.operands[2].reg << 16;  /* Rn (base).  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
12624
/* Encode Thumb-2 STREXD.  If the second data register is omitted it
   defaults to the register following the first.  */

static void
do_t_strexd (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  /* The status register must not overlap either data register or the
     base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;        /* Rd (status).  */
  inst.instruction |= inst.operands[1].reg << 12;  /* Rt.  */
  inst.instruction |= inst.operands[2].reg << 8;   /* Rt2.  */
  inst.instruction |= inst.operands[3].reg << 16;  /* Rn (base).  */
}
12641
12642 static void
12643 do_t_sxtah (void)
12644 {
12645 unsigned Rd, Rn, Rm;
12646
12647 Rd = inst.operands[0].reg;
12648 Rn = inst.operands[1].reg;
12649 Rm = inst.operands[2].reg;
12650
12651 reject_bad_reg (Rd);
12652 reject_bad_reg (Rn);
12653 reject_bad_reg (Rm);
12654
12655 inst.instruction |= Rd << 8;
12656 inst.instruction |= Rn << 16;
12657 inst.instruction |= Rm;
12658 inst.instruction |= inst.operands[3].imm << 4;
12659 }
12660
/* Encode a Thumb extend instruction (SXTH and relatives).  Prefer the
   16-bit encoding when both registers are low and no rotation is
   requested; fall back to the 32-bit encoding (unified syntax only),
   which supports high registers and a rotate in bits 5:4.  */

static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  /* Use the 16-bit form if no wide encoding was selected or forced,
     both registers are low, and the optional rotation is absent or
     zero.  */
  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      /* Divided (pre-unified) syntax only has the narrow encoding.  */
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
12696
/* Encode SVC (SWI) in Thumb state, diagnosing architectures where it
   is unavailable.  */

static void
do_t_swi (void)
{
  /* We have to do the following check manually as ARM_EXT_OS only applies
     to ARM_EXT_V6M.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
	  /* This only applies to the v6m however, not later architectures.  */
	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
	as_bad (_("SVC is not permitted on this architecture"));
      /* Record that the OS extension was relied upon.  */
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
    }

  /* The immediate is applied by the SWI relocation.  */
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
12713
/* Encode TBB/TBH (table branch).  Bit 0x10 of the opcode template
   distinguishes TBH (halfword table) from TBB (byte table).  */

static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* Table branches must be the last instruction in an IT block.  */
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;   /* Table base.  */
  Rm = inst.operands[0].imm;   /* Index register.  */

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only TBH takes the LSL #1 index shift.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
12735
/* Encode UDF (permanently undefined).  The immediate defaults to zero
   when omitted; values above 255 (or an explicit .w) force the 32-bit
   encoding.  */

static void
do_t_udf (void)
{
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      /* 32-bit form: imm4 in bits 19:16, imm12 in bits 11:0.  */
      constraint (inst.size_req == 2,
                  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      /* 16-bit form: 8-bit immediate.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  /* UDF is neutral with respect to IT-block tracking.  */
  set_it_insn_type (NEUTRAL_IT_INSN);
}
12758
12759
/* Thumb-2 USAT: the saturation bound is encoded directly (no bias).  */

static void
do_t_usat (void)
{
  do_t_ssat_usat (0 /* bias */);
}
12765
12766 static void
12767 do_t_usat16 (void)
12768 {
12769 unsigned Rd, Rn;
12770
12771 Rd = inst.operands[0].reg;
12772 Rn = inst.operands[2].reg;
12773
12774 reject_bad_reg (Rd);
12775 reject_bad_reg (Rn);
12776
12777 inst.instruction |= Rd << 8;
12778 inst.instruction |= inst.operands[1].imm;
12779 inst.instruction |= Rn << 16;
12780 }
12781
12782 /* Neon instruction encoder helpers. */
12783
12784 /* Encodings for the different types for various Neon opcodes. */
12785
12786 /* An "invalid" code for the following tables. */
12787 #define N_INV -1u
12788
/* One row of the Neon opcode overload table: alternative encodings of
   the same mnemonic, selected via the NEON_ENC_xxx_ macros below.  */

struct neon_tab_entry
{
  unsigned integer;		/* Integer / plain-register variant.  */
  unsigned float_or_poly;	/* Float or polynomial variant.  */
  unsigned scalar_or_imm;	/* Scalar or immediate variant.  */
};
12795
12796 /* Map overloaded Neon opcodes to their respective encodings. */
12797 #define NEON_ENC_TAB \
12798 X(vabd, 0x0000700, 0x1200d00, N_INV), \
12799 X(vmax, 0x0000600, 0x0000f00, N_INV), \
12800 X(vmin, 0x0000610, 0x0200f00, N_INV), \
12801 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
12802 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
12803 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
12804 X(vadd, 0x0000800, 0x0000d00, N_INV), \
12805 X(vsub, 0x1000800, 0x0200d00, N_INV), \
12806 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
12807 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
12808 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
12809 /* Register variants of the following two instructions are encoded as
12810 vcge / vcgt with the operands reversed. */ \
12811 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
12812 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
12813 X(vfma, N_INV, 0x0000c10, N_INV), \
12814 X(vfms, N_INV, 0x0200c10, N_INV), \
12815 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
12816 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
12817 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
12818 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
12819 X(vmlal, 0x0800800, N_INV, 0x0800240), \
12820 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
12821 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
12822 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
12823 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
12824 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
12825 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
12826 X(vshl, 0x0000400, N_INV, 0x0800510), \
12827 X(vqshl, 0x0000410, N_INV, 0x0800710), \
12828 X(vand, 0x0000110, N_INV, 0x0800030), \
12829 X(vbic, 0x0100110, N_INV, 0x0800030), \
12830 X(veor, 0x1000110, N_INV, N_INV), \
12831 X(vorn, 0x0300110, N_INV, 0x0800010), \
12832 X(vorr, 0x0200110, N_INV, 0x0800010), \
12833 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
12834 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
12835 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
12836 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
12837 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
12838 X(vst1, 0x0000000, 0x0800000, N_INV), \
12839 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
12840 X(vst2, 0x0000100, 0x0800100, N_INV), \
12841 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
12842 X(vst3, 0x0000200, 0x0800200, N_INV), \
12843 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
12844 X(vst4, 0x0000300, 0x0800300, N_INV), \
12845 X(vmovn, 0x1b20200, N_INV, N_INV), \
12846 X(vtrn, 0x1b20080, N_INV, N_INV), \
12847 X(vqmovn, 0x1b20200, N_INV, N_INV), \
12848 X(vqmovun, 0x1b20240, N_INV, N_INV), \
12849 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
12850 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
12851 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
12852 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
12853 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
12854 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
12855 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
12856 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
12857 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
12858 X(vseleq, 0xe000a00, N_INV, N_INV), \
12859 X(vselvs, 0xe100a00, N_INV, N_INV), \
12860 X(vselge, 0xe200a00, N_INV, N_INV), \
12861 X(vselgt, 0xe300a00, N_INV, N_INV), \
12862 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
12863 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
12864 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
12865 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
12866 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
12867 X(aes, 0x3b00300, N_INV, N_INV), \
12868 X(sha3op, 0x2000c00, N_INV, N_INV), \
12869 X(sha1h, 0x3b902c0, N_INV, N_INV), \
12870 X(sha2op, 0x3ba0380, N_INV, N_INV)
12871
12872 enum neon_opc
12873 {
12874 #define X(OPC,I,F,S) N_MNEM_##OPC
12875 NEON_ENC_TAB
12876 #undef X
12877 };
12878
12879 static const struct neon_tab_entry neon_enc_tab[] =
12880 {
12881 #define X(OPC,I,F,S) { (I), (F), (S) }
12882 NEON_ENC_TAB
12883 #undef X
12884 };
12885
12886 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
12887 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12888 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12889 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12890 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12891 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12892 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12893 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12894 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12895 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12896 #define NEON_ENC_SINGLE_(X) \
12897 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
12898 #define NEON_ENC_DOUBLE_(X) \
12899 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
12900 #define NEON_ENC_FPV8_(X) \
12901 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
12902
/* Replace inst.instruction with the encoding selected by TYPE (one of
   the NEON_ENC_xxx_ selectors above) and mark the insn as Neon so that
   type suffixes are accepted.  */
#define NEON_ENCODE(type, inst) \
  do \
    { \
      inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
      inst.is_neon = 1; \
    } \
  while (0)

/* Diagnose a Neon type suffix attached to a non-Neon instruction.
   NOTE: expands to a statement that may `return' from the enclosing
   encoder function.  */
#define check_neon_suffixes \
  do \
    { \
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
	{ \
	  as_bad (_("invalid neon suffix for non neon instruction")); \
	  return; \
	} \
    } \
  while (0)
12921
12922 /* Define shapes for instruction operands. The following mnemonic characters
12923 are used in this table:
12924
12925 F - VFP S<n> register
12926 D - Neon D<n> register
12927 Q - Neon Q<n> register
12928 I - Immediate
12929 S - Scalar
12930 R - ARM register
12931 L - D<n> register list
12932
12933 This table is used to generate various data:
12934 - enumerations of the form NS_DDR to be used as arguments to
12935 neon_select_shape.
12936 - a table classifying shapes into single, double, quad, mixed.
12937 - a table used to drive neon_select_shape. */
12938
12939 #define NEON_SHAPE_DEF \
12940 X(3, (D, D, D), DOUBLE), \
12941 X(3, (Q, Q, Q), QUAD), \
12942 X(3, (D, D, I), DOUBLE), \
12943 X(3, (Q, Q, I), QUAD), \
12944 X(3, (D, D, S), DOUBLE), \
12945 X(3, (Q, Q, S), QUAD), \
12946 X(2, (D, D), DOUBLE), \
12947 X(2, (Q, Q), QUAD), \
12948 X(2, (D, S), DOUBLE), \
12949 X(2, (Q, S), QUAD), \
12950 X(2, (D, R), DOUBLE), \
12951 X(2, (Q, R), QUAD), \
12952 X(2, (D, I), DOUBLE), \
12953 X(2, (Q, I), QUAD), \
12954 X(3, (D, L, D), DOUBLE), \
12955 X(2, (D, Q), MIXED), \
12956 X(2, (Q, D), MIXED), \
12957 X(3, (D, Q, I), MIXED), \
12958 X(3, (Q, D, I), MIXED), \
12959 X(3, (Q, D, D), MIXED), \
12960 X(3, (D, Q, Q), MIXED), \
12961 X(3, (Q, Q, D), MIXED), \
12962 X(3, (Q, D, S), MIXED), \
12963 X(3, (D, Q, S), MIXED), \
12964 X(4, (D, D, D, I), DOUBLE), \
12965 X(4, (Q, Q, Q, I), QUAD), \
12966 X(2, (F, F), SINGLE), \
12967 X(3, (F, F, F), SINGLE), \
12968 X(2, (F, I), SINGLE), \
12969 X(2, (F, D), MIXED), \
12970 X(2, (D, F), MIXED), \
12971 X(3, (F, F, I), MIXED), \
12972 X(4, (R, R, F, F), SINGLE), \
12973 X(4, (F, F, R, R), SINGLE), \
12974 X(3, (D, R, R), DOUBLE), \
12975 X(3, (R, R, D), DOUBLE), \
12976 X(2, (S, R), SINGLE), \
12977 X(2, (R, S), SINGLE), \
12978 X(2, (F, R), SINGLE), \
12979 X(2, (R, F), SINGLE)
12980
12981 #define S2(A,B) NS_##A##B
12982 #define S3(A,B,C) NS_##A##B##C
12983 #define S4(A,B,C,D) NS_##A##B##C##D
12984
12985 #define X(N, L, C) S##N L
12986
12987 enum neon_shape
12988 {
12989 NEON_SHAPE_DEF,
12990 NS_NULL
12991 };
12992
12993 #undef X
12994 #undef S2
12995 #undef S3
12996 #undef S4
12997
/* Coarse classification of a neon_shape, parallel to the third column
   of NEON_SHAPE_DEF; see also neon_quad.  */

enum neon_shape_class
{
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};
13005
13006 #define X(N, L, C) SC_##C
13007
13008 static enum neon_shape_class neon_shape_class[] =
13009 {
13010 NEON_SHAPE_DEF
13011 };
13012
13013 #undef X
13014
/* The kind of a single operand within a shape; mnemonics match the
   characters used in NEON_SHAPE_DEF.  */

enum neon_shape_el
{
  SE_F,		/* VFP S<n> register.  */
  SE_D,		/* Neon D<n> register.  */
  SE_Q,		/* Neon Q<n> register.  */
  SE_I,		/* Immediate.  */
  SE_S,		/* Scalar.  */
  SE_R,		/* ARM core register.  */
  SE_L		/* D<n> register list.  */
};
13025
/* Register widths of above, in bits; zero for entries with no fixed
   width (immediates and register lists).  Indexed by enum neon_shape_el.  */
static unsigned neon_shape_el_size[] =
{
  32,	/* SE_F */
  64,	/* SE_D */
  128,	/* SE_Q */
  0,	/* SE_I */
  32,	/* SE_S */
  32,	/* SE_R */
  0	/* SE_L */
};
13037
/* A decoded shape: how many operands it has and the kind of each.  */

struct neon_shape_info
{
  unsigned els;				 /* Number of operands used.  */
  enum neon_shape_el el[NEON_MAX_TYPE_ELS]; /* Kind of each operand.  */
};
13043
13044 #define S2(A,B) { SE_##A, SE_##B }
13045 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13046 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13047
13048 #define X(N, L, C) { N, S##N L }
13049
13050 static struct neon_shape_info neon_shape_tab[] =
13051 {
13052 NEON_SHAPE_DEF
13053 };
13054
13055 #undef X
13056 #undef S2
13057 #undef S3
13058 #undef S4
13059
13060 /* Bit masks used in type checking given instructions.
13061 'N_EQK' means the type must be the same as (or based on in some way) the key
13062 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13063 set, various other bits can be set as well in order to modify the meaning of
13064 the type constraint. */
13065
13066 enum neon_type_mask
13067 {
13068 N_S8 = 0x0000001,
13069 N_S16 = 0x0000002,
13070 N_S32 = 0x0000004,
13071 N_S64 = 0x0000008,
13072 N_U8 = 0x0000010,
13073 N_U16 = 0x0000020,
13074 N_U32 = 0x0000040,
13075 N_U64 = 0x0000080,
13076 N_I8 = 0x0000100,
13077 N_I16 = 0x0000200,
13078 N_I32 = 0x0000400,
13079 N_I64 = 0x0000800,
13080 N_8 = 0x0001000,
13081 N_16 = 0x0002000,
13082 N_32 = 0x0004000,
13083 N_64 = 0x0008000,
13084 N_P8 = 0x0010000,
13085 N_P16 = 0x0020000,
13086 N_F16 = 0x0040000,
13087 N_F32 = 0x0080000,
13088 N_F64 = 0x0100000,
13089 N_P64 = 0x0200000,
13090 N_KEY = 0x1000000, /* Key element (main type specifier). */
13091 N_EQK = 0x2000000, /* Given operand has the same type & size as the key. */
13092 N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
13093 N_UNT = 0x8000000, /* Must be explicitly untyped. */
13094 N_DBL = 0x0000001, /* If N_EQK, this operand is twice the size. */
13095 N_HLF = 0x0000002, /* If N_EQK, this operand is half the size. */
13096 N_SGN = 0x0000004, /* If N_EQK, this operand is forced to be signed. */
13097 N_UNS = 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
13098 N_INT = 0x0000010, /* If N_EQK, this operand is forced to be integer. */
13099 N_FLT = 0x0000020, /* If N_EQK, this operand is forced to be float. */
13100 N_SIZ = 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
13101 N_UTYP = 0,
13102 N_MAX_NONSPECIAL = N_P64
13103 };
13104
13105 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
13106
13107 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
13108 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
13109 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
13110 #define N_SUF_32 (N_SU_32 | N_F32)
13111 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
13112 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
13113
13114 /* Pass this as the first type argument to neon_check_type to ignore types
13115 altogether. */
13116 #define N_IGNORE_TYPE (N_KEY | N_EQK)
13117
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.

   The variadic arguments are candidate shapes, terminated by NS_NULL;
   the first candidate whose every element matches the corresponding
   parsed operand (with no parsed operands left over) wins.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future. */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  /* Check the parsed operand's register class against the one
	     this shape element requires.  */
	  switch (neon_shape_tab[shape].el[j])
	    {
	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists are not checked here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched. */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13215
13216 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13217 means the Q bit should be set). */
13218
13219 static int
13220 neon_quad (enum neon_shape shape)
13221 {
13222 return neon_shape_class[shape] == SC_QUAD;
13223 }
13224
/* Apply the modifier bits in TYPEBITS to *G_TYPE/*G_SIZE: halve or
   double the size (N_HLF/N_DBL) and/or force the element type
   (N_SGN/N_UNS/N_INT/N_FLT/N_SIZ).  No effect unless N_EQK is set.  */

static void
neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
		       unsigned *g_size)
{
  /* Allow modification to be made to types which are constrained to be
     based on the key element, based on bits set alongside N_EQK. */
  if ((typebits & N_EQK) != 0)
    {
      if ((typebits & N_HLF) != 0)
	*g_size /= 2;
      else if ((typebits & N_DBL) != 0)
	*g_size *= 2;
      if ((typebits & N_SGN) != 0)
	*g_type = NT_signed;
      else if ((typebits & N_UNS) != 0)
	*g_type = NT_unsigned;
      else if ((typebits & N_INT) != 0)
	*g_type = NT_integer;
      else if ((typebits & N_FLT) != 0)
	*g_type = NT_float;
      else if ((typebits & N_SIZ) != 0)
	*g_type = NT_untyped;
    }
}
13249
13250 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13251 operand type, i.e. the single type specified in a Neon instruction when it
13252 is the only one given. */
13253
13254 static struct neon_type_el
13255 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13256 {
13257 struct neon_type_el dest = *key;
13258
13259 gas_assert ((thisarg & N_EQK) != 0);
13260
13261 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13262
13263 return dest;
13264 }
13265
/* Convert Neon type and size into compact bitmask representation
   (a single neon_type_mask bit).  Returns N_UTYP for combinations
   with no corresponding bit.  */

static enum neon_type_mask
type_chk_of_el_type (enum neon_el_type type, unsigned size)
{
  switch (type)
    {
    case NT_untyped:
      switch (size)
	{
	case 8:  return N_8;
	case 16: return N_16;
	case 32: return N_32;
	case 64: return N_64;
	default: ;
	}
      break;

    case NT_integer:
      switch (size)
	{
	case 8:  return N_I8;
	case 16: return N_I16;
	case 32: return N_I32;
	case 64: return N_I64;
	default: ;
	}
      break;

    case NT_float:
      switch (size)
	{
	case 16: return N_F16;
	case 32: return N_F32;
	case 64: return N_F64;
	default: ;
	}
      break;

    case NT_poly:
      switch (size)
	{
	case 8:  return N_P8;
	case 16: return N_P16;
	case 64: return N_P64;
	default: ;
	}
      break;

    case NT_signed:
      switch (size)
	{
	case 8:  return N_S8;
	case 16: return N_S16;
	case 32: return N_S32;
	case 64: return N_S64;
	default: ;
	}
      break;

    case NT_unsigned:
      switch (size)
	{
	case 8:  return N_U8;
	case 16: return N_U16;
	case 32: return N_U32;
	case 64: return N_U64;
	default: ;
	}
      break;

    default: ;
    }

  /* No single bit represents this type/size pair.  */
  return N_UTYP;
}
13342
/* Convert compact Neon bitmask type representation to a type and size. Only
   handles the case where a single bit is set in the mask.  Returns
   SUCCESS/FAIL; *TYPE and *SIZE are only valid on SUCCESS.  */

static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
		     enum neon_type_mask mask)
{
  /* N_EQK overlaps the size bits, so a mask containing it is ambiguous.  */
  if ((mask & N_EQK) != 0)
    return FAIL;

  /* Derive the element size from whichever size-specific bit is set.  */
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
    *size = 64;
  else
    return FAIL;

  /* Then the element type from the same bit.  */
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
    *type = NT_poly;
  else if ((mask & (N_F16 | N_F32 | N_F64)) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}
13381
13382 /* Modify a bitmask of allowed types. This is only needed for type
13383 relaxation. */
13384
13385 static unsigned
13386 modify_types_allowed (unsigned allowed, unsigned mods)
13387 {
13388 unsigned size;
13389 enum neon_el_type type;
13390 unsigned destmask;
13391 int i;
13392
13393 destmask = 0;
13394
13395 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13396 {
13397 if (el_type_of_type_chk (&type, &size,
13398 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13399 {
13400 neon_modify_type_size (mods, &type, &size);
13401 destmask |= type_chk_of_el_type (type, size);
13402 }
13403 }
13404
13405 return destmask;
13406 }
13407
13408 /* Check type and return type classification.
13409 The manual states (paraphrase): If one datatype is given, it indicates the
13410 type given in:
13411 - the second operand, if there is one
13412 - the operand, if there is no second operand
13413 - the result, if there are no operands.
13414 This isn't quite good enough though, so we use a concept of a "key" datatype
13415 which is set on a per-instruction basis, which is the one which matters when
13416 only one data type is written.
13417 Note: this function has side-effects (e.g. filling in missing operands). All
13418 Neon instructions should call it before performing bit encoding. */
13419
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Returned on every failure path, after reporting via first_error.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE means this instruction wants no type checking at all.  */
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      /* Remember which operand carries the authoritative ("key") type.  */
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type on the mnemonic and a type on an operand are mutually
     exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 records the key operand's type/size; pass 1 checks every operand
     against the (possibly modified) key.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  /* On pass 1, N_EQK operands get their allowed set derived from the
	     key operand's allowed set plus any modifier bits.  */
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* An N_EQK operand must equal the key type after applying
		     the size/sign modifiers carried alongside N_EQK.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
13596
13597 /* Neon-style VFP instruction forwarding. */
13598
13599 /* Thumb VFP instructions have 0xE in the condition field. */
13600
13601 static void
13602 do_vfp_cond_or_thumb (void)
13603 {
13604 inst.is_neon = 1;
13605
13606 if (thumb_mode)
13607 inst.instruction |= 0xe0000000;
13608 else
13609 inst.instruction |= inst.cond << 28;
13610 }
13611
13612 /* Look up and encode a simple mnemonic, for use as a helper function for the
13613 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13614 etc. It is assumed that operand parsing has already been done, and that the
13615 operands are in the form expected by the given opcode (this isn't necessarily
13616 the same as the form in which they were parsed, hence some massaging must
13617 take place before this function is called).
13618 Checks current arch version against that in the looked-up opcode. */
13619
13620 static void
13621 do_vfp_nsyn_opcode (const char *opname)
13622 {
13623 const struct asm_opcode *opcode;
13624
13625 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
13626
13627 if (!opcode)
13628 abort ();
13629
13630 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
13631 thumb_mode ? *opcode->tvariant : *opcode->avariant),
13632 _(BAD_FPU));
13633
13634 inst.is_neon = 1;
13635
13636 if (thumb_mode)
13637 {
13638 inst.instruction = opcode->tvalue;
13639 opcode->tencode ();
13640 }
13641 else
13642 {
13643 inst.instruction = (inst.cond << 28) | opcode->avalue;
13644 opcode->aencode ();
13645 }
13646 }
13647
13648 static void
13649 do_vfp_nsyn_add_sub (enum neon_shape rs)
13650 {
13651 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
13652
13653 if (rs == NS_FFF)
13654 {
13655 if (is_add)
13656 do_vfp_nsyn_opcode ("fadds");
13657 else
13658 do_vfp_nsyn_opcode ("fsubs");
13659 }
13660 else
13661 {
13662 if (is_add)
13663 do_vfp_nsyn_opcode ("faddd");
13664 else
13665 do_vfp_nsyn_opcode ("fsubd");
13666 }
13667 }
13668
13669 /* Check operand types to see if this is a VFP instruction, and if so call
13670 PFN (). */
13671
13672 static int
13673 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
13674 {
13675 enum neon_shape rs;
13676 struct neon_type_el et;
13677
13678 switch (args)
13679 {
13680 case 2:
13681 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13682 et = neon_check_type (2, rs,
13683 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13684 break;
13685
13686 case 3:
13687 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13688 et = neon_check_type (3, rs,
13689 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13690 break;
13691
13692 default:
13693 abort ();
13694 }
13695
13696 if (et.type != NT_invtype)
13697 {
13698 pfn (rs);
13699 return SUCCESS;
13700 }
13701
13702 inst.error = NULL;
13703 return FAIL;
13704 }
13705
13706 static void
13707 do_vfp_nsyn_mla_mls (enum neon_shape rs)
13708 {
13709 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
13710
13711 if (rs == NS_FFF)
13712 {
13713 if (is_mla)
13714 do_vfp_nsyn_opcode ("fmacs");
13715 else
13716 do_vfp_nsyn_opcode ("fnmacs");
13717 }
13718 else
13719 {
13720 if (is_mla)
13721 do_vfp_nsyn_opcode ("fmacd");
13722 else
13723 do_vfp_nsyn_opcode ("fnmacd");
13724 }
13725 }
13726
13727 static void
13728 do_vfp_nsyn_fma_fms (enum neon_shape rs)
13729 {
13730 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
13731
13732 if (rs == NS_FFF)
13733 {
13734 if (is_fma)
13735 do_vfp_nsyn_opcode ("ffmas");
13736 else
13737 do_vfp_nsyn_opcode ("ffnmas");
13738 }
13739 else
13740 {
13741 if (is_fma)
13742 do_vfp_nsyn_opcode ("ffmad");
13743 else
13744 do_vfp_nsyn_opcode ("ffnmad");
13745 }
13746 }
13747
13748 static void
13749 do_vfp_nsyn_mul (enum neon_shape rs)
13750 {
13751 if (rs == NS_FFF)
13752 do_vfp_nsyn_opcode ("fmuls");
13753 else
13754 do_vfp_nsyn_opcode ("fmuld");
13755 }
13756
13757 static void
13758 do_vfp_nsyn_abs_neg (enum neon_shape rs)
13759 {
13760 int is_neg = (inst.instruction & 0x80) != 0;
13761 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
13762
13763 if (rs == NS_FF)
13764 {
13765 if (is_neg)
13766 do_vfp_nsyn_opcode ("fnegs");
13767 else
13768 do_vfp_nsyn_opcode ("fabss");
13769 }
13770 else
13771 {
13772 if (is_neg)
13773 do_vfp_nsyn_opcode ("fnegd");
13774 else
13775 do_vfp_nsyn_opcode ("fabsd");
13776 }
13777 }
13778
13779 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
13780 insns belong to Neon, and are handled elsewhere. */
13781
13782 static void
13783 do_vfp_nsyn_ldm_stm (int is_dbmode)
13784 {
13785 int is_ldm = (inst.instruction & (1 << 20)) != 0;
13786 if (is_ldm)
13787 {
13788 if (is_dbmode)
13789 do_vfp_nsyn_opcode ("fldmdbs");
13790 else
13791 do_vfp_nsyn_opcode ("fldmias");
13792 }
13793 else
13794 {
13795 if (is_dbmode)
13796 do_vfp_nsyn_opcode ("fstmdbs");
13797 else
13798 do_vfp_nsyn_opcode ("fstmias");
13799 }
13800 }
13801
13802 static void
13803 do_vfp_nsyn_sqrt (void)
13804 {
13805 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13806 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13807
13808 if (rs == NS_FF)
13809 do_vfp_nsyn_opcode ("fsqrts");
13810 else
13811 do_vfp_nsyn_opcode ("fsqrtd");
13812 }
13813
13814 static void
13815 do_vfp_nsyn_div (void)
13816 {
13817 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13818 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13819 N_F32 | N_F64 | N_KEY | N_VFP);
13820
13821 if (rs == NS_FFF)
13822 do_vfp_nsyn_opcode ("fdivs");
13823 else
13824 do_vfp_nsyn_opcode ("fdivd");
13825 }
13826
13827 static void
13828 do_vfp_nsyn_nmul (void)
13829 {
13830 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13831 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13832 N_F32 | N_F64 | N_KEY | N_VFP);
13833
13834 if (rs == NS_FFF)
13835 {
13836 NEON_ENCODE (SINGLE, inst);
13837 do_vfp_sp_dyadic ();
13838 }
13839 else
13840 {
13841 NEON_ENCODE (DOUBLE, inst);
13842 do_vfp_dp_rd_rn_rm ();
13843 }
13844 do_vfp_cond_or_thumb ();
13845 }
13846
static void
do_vfp_nsyn_cmp (void)
{
  if (inst.operands[1].isreg)
    {
      /* Register-register compare.  */
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

      if (rs == NS_FF)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare against an immediate: rewrite the pseudo-mnemonic to its
	 "z" (compare-with-zero) counterpart before encoding.  */
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();
}
13896
13897 static void
13898 nsyn_insert_sp (void)
13899 {
13900 inst.operands[1] = inst.operands[0];
13901 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
13902 inst.operands[0].reg = REG_SP;
13903 inst.operands[0].isreg = 1;
13904 inst.operands[0].writeback = 1;
13905 inst.operands[0].present = 1;
13906 }
13907
13908 static void
13909 do_vfp_nsyn_push (void)
13910 {
13911 nsyn_insert_sp ();
13912 if (inst.operands[1].issingle)
13913 do_vfp_nsyn_opcode ("fstmdbs");
13914 else
13915 do_vfp_nsyn_opcode ("fstmdbd");
13916 }
13917
13918 static void
13919 do_vfp_nsyn_pop (void)
13920 {
13921 nsyn_insert_sp ();
13922 if (inst.operands[1].issingle)
13923 do_vfp_nsyn_opcode ("fldmias");
13924 else
13925 do_vfp_nsyn_opcode ("fldmiad");
13926 }
13927
13928 /* Fix up Neon data-processing instructions, ORing in the correct bits for
13929 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
13930
13931 static void
13932 neon_dp_fixup (struct arm_it* insn)
13933 {
13934 unsigned int i = insn->instruction;
13935 insn->is_neon = 1;
13936
13937 if (thumb_mode)
13938 {
13939 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
13940 if (i & (1 << 24))
13941 i |= 1 << 28;
13942
13943 i &= ~(1 << 24);
13944
13945 i |= 0xef000000;
13946 }
13947 else
13948 i |= 0xf2000000;
13949
13950 insn->instruction = i;
13951 }
13952
13953 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
13954 (0, 1, 2, 3). */
13955
static unsigned
neon_logbits (unsigned x)
{
  /* ffs gives the 1-based index of the lowest set bit, so a power-of-two
     size of 8/16/32/64 maps to 0/1/2/3.  */
  unsigned first_set = ffs (x);

  return first_set - 4;
}
13961
/* Split a Neon register number into its 4-bit field and its high bit.  */
#define LOW4(R) ((R) & 0xf)	/* Low four bits of register number R.  */
#define HI1(R) (((R) >> 4) & 1)	/* Bit 4 of register number R.  */
13964
13965 /* Encode insns with bit pattern:
13966
13967 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
13968 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
13969
13970 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
13971 different meaning for some instruction. */
13972
13973 static void
13974 neon_three_same (int isquad, int ubit, int size)
13975 {
13976 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13977 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13978 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13979 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13980 inst.instruction |= LOW4 (inst.operands[2].reg);
13981 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13982 inst.instruction |= (isquad != 0) << 6;
13983 inst.instruction |= (ubit != 0) << 24;
13984 if (size != -1)
13985 inst.instruction |= neon_logbits (size) << 20;
13986
13987 neon_dp_fixup (&inst);
13988 }
13989
13990 /* Encode instructions of the form:
13991
13992 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
13993 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
13994
13995 Don't write size if SIZE == -1. */
13996
13997 static void
13998 neon_two_same (int qbit, int ubit, int size)
13999 {
14000 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14001 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14002 inst.instruction |= LOW4 (inst.operands[1].reg);
14003 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14004 inst.instruction |= (qbit != 0) << 6;
14005 inst.instruction |= (ubit != 0) << 24;
14006
14007 if (size != -1)
14008 inst.instruction |= neon_logbits (size) << 18;
14009
14010 neon_dp_fixup (&inst);
14011 }
14012
14013 /* Neon instruction encoders, in approximate order of appearance. */
14014
14015 static void
14016 do_neon_dyadic_i_su (void)
14017 {
14018 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14019 struct neon_type_el et = neon_check_type (3, rs,
14020 N_EQK, N_EQK, N_SU_32 | N_KEY);
14021 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14022 }
14023
14024 static void
14025 do_neon_dyadic_i64_su (void)
14026 {
14027 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14028 struct neon_type_el et = neon_check_type (3, rs,
14029 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14030 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14031 }
14032
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  /* Element size in bytes (1, 2, 4 or 8).  */
  unsigned size = et.size >> 3;
  /* Rd in bits 12-15 with D at bit 22.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Rm in bits 0-3 with M at bit 5.  */
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  /* Shift amount in bits 16-21.  */
  inst.instruction |= immbits << 16;
  /* Bit 7 is set only for 8-byte (64-bit) elements; bits 19-21 carry the
     element-size marker for the smaller sizes.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
14051
14052 static void
14053 do_neon_shl_imm (void)
14054 {
14055 if (!inst.operands[2].isreg)
14056 {
14057 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14058 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
14059 NEON_ENCODE (IMMED, inst);
14060 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
14061 }
14062 else
14063 {
14064 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14065 struct neon_type_el et = neon_check_type (3, rs,
14066 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14067 unsigned int tmp;
14068
14069 /* VSHL/VQSHL 3-register variants have syntax such as:
14070 vshl.xx Dd, Dm, Dn
14071 whereas other 3-register operations encoded by neon_three_same have
14072 syntax like:
14073 vadd.xx Dd, Dn, Dm
14074 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14075 here. */
14076 tmp = inst.operands[2].reg;
14077 inst.operands[2].reg = inst.operands[1].reg;
14078 inst.operands[1].reg = tmp;
14079 NEON_ENCODE (INTEGER, inst);
14080 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14081 }
14082 }
14083
14084 static void
14085 do_neon_qshl_imm (void)
14086 {
14087 if (!inst.operands[2].isreg)
14088 {
14089 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14090 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
14091
14092 NEON_ENCODE (IMMED, inst);
14093 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
14094 inst.operands[2].imm);
14095 }
14096 else
14097 {
14098 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14099 struct neon_type_el et = neon_check_type (3, rs,
14100 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14101 unsigned int tmp;
14102
14103 /* See note in do_neon_shl_imm. */
14104 tmp = inst.operands[2].reg;
14105 inst.operands[2].reg = inst.operands[1].reg;
14106 inst.operands[1].reg = tmp;
14107 NEON_ENCODE (INTEGER, inst);
14108 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14109 }
14110 }
14111
14112 static void
14113 do_neon_rshl (void)
14114 {
14115 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14116 struct neon_type_el et = neon_check_type (3, rs,
14117 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14118 unsigned int tmp;
14119
14120 tmp = inst.operands[2].reg;
14121 inst.operands[2].reg = inst.operands[1].reg;
14122 inst.operands[1].reg = tmp;
14123 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14124 }
14125
14126 static int
14127 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
14128 {
14129 /* Handle .I8 pseudo-instructions. */
14130 if (size == 8)
14131 {
14132 /* Unfortunately, this will make everything apart from zero out-of-range.
14133 FIXME is this the intended semantics? There doesn't seem much point in
14134 accepting .I8 if so. */
14135 immediate |= immediate << 8;
14136 size = 16;
14137 }
14138
14139 if (size >= 32)
14140 {
14141 if (immediate == (immediate & 0x000000ff))
14142 {
14143 *immbits = immediate;
14144 return 0x1;
14145 }
14146 else if (immediate == (immediate & 0x0000ff00))
14147 {
14148 *immbits = immediate >> 8;
14149 return 0x3;
14150 }
14151 else if (immediate == (immediate & 0x00ff0000))
14152 {
14153 *immbits = immediate >> 16;
14154 return 0x5;
14155 }
14156 else if (immediate == (immediate & 0xff000000))
14157 {
14158 *immbits = immediate >> 24;
14159 return 0x7;
14160 }
14161 if ((immediate & 0xffff) != (immediate >> 16))
14162 goto bad_immediate;
14163 immediate &= 0xffff;
14164 }
14165
14166 if (immediate == (immediate & 0x000000ff))
14167 {
14168 *immbits = immediate;
14169 return 0x9;
14170 }
14171 else if (immediate == (immediate & 0x0000ff00))
14172 {
14173 *immbits = immediate >> 8;
14174 return 0xb;
14175 }
14176
14177 bad_immediate:
14178 first_error (_("immediate value out of range"));
14179 return FAIL;
14180 }
14181
static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form, either two operands (Vd, #imm) or three
	 (Vd, Vn, #imm) with Vd == Vn.  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      /* neon_cmode_for_logic_imm has already reported the error.  */
      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
14267
14268 static void
14269 do_neon_bitfield (void)
14270 {
14271 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14272 neon_check_type (3, rs, N_IGNORE_TYPE);
14273 neon_three_same (neon_quad (rs), 0, -1);
14274 }
14275
14276 static void
14277 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14278 unsigned destbits)
14279 {
14280 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14281 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14282 types | N_KEY);
14283 if (et.type == NT_float)
14284 {
14285 NEON_ENCODE (FLOAT, inst);
14286 neon_three_same (neon_quad (rs), 0, -1);
14287 }
14288 else
14289 {
14290 NEON_ENCODE (INTEGER, inst);
14291 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
14292 }
14293 }
14294
14295 static void
14296 do_neon_dyadic_if_su (void)
14297 {
14298 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14299 }
14300
14301 static void
14302 do_neon_dyadic_if_su_d (void)
14303 {
14304 /* This version only allow D registers, but that constraint is enforced during
14305 operand parsing so we don't need to do anything extra here. */
14306 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14307 }
14308
14309 static void
14310 do_neon_dyadic_if_i_d (void)
14311 {
14312 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14313 affected if we specify unsigned args. */
14314 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14315 }
14316
/* Bitmask of checks for vfp_or_neon_is_neon to perform.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Reject a condition in ARM mode.  */
  NEON_CHECK_ARCH = 2,	/* Require the Neon v1 extension.  */
  NEON_CHECK_ARCH8 = 4	/* Require the ARMv8 Neon extension.  */
};
14323
14324 /* Call this function if an instruction which may have belonged to the VFP or
14325 Neon instruction sets, but turned out to be a Neon instruction (due to the
14326 operand types involved, etc.). We have to check and/or fix-up a couple of
14327 things:
14328
14329 - Make sure the user hasn't attempted to make a Neon instruction
14330 conditional.
14331 - Alter the value in the condition code field if necessary.
14332 - Make sure that the arch supports Neon instructions.
14333
14334 Which of these operations take place depends on bits from enum
14335 vfp_or_neon_is_neon_bits.
14336
14337 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14338 current instruction's condition is COND_ALWAYS, the condition field is
14339 changed to inst.uncond_value. This is necessary because instructions shared
14340 between VFP and Neon may be conditional for the VFP variants only, and the
14341 unconditional Neon version must have, e.g., 0xF in the condition field. */
14342
14343 static int
14344 vfp_or_neon_is_neon (unsigned check)
14345 {
14346 /* Conditions are always legal in Thumb mode (IT blocks). */
14347 if (!thumb_mode && (check & NEON_CHECK_CC))
14348 {
14349 if (inst.cond != COND_ALWAYS)
14350 {
14351 first_error (_(BAD_COND));
14352 return FAIL;
14353 }
14354 if (inst.uncond_value != -1)
14355 inst.instruction |= inst.uncond_value << 28;
14356 }
14357
14358 if ((check & NEON_CHECK_ARCH)
14359 && !mark_feature_used (&fpu_neon_ext_v1))
14360 {
14361 first_error (_(BAD_FPU));
14362 return FAIL;
14363 }
14364
14365 if ((check & NEON_CHECK_ARCH8)
14366 && !mark_feature_used (&fpu_neon_ext_armv8))
14367 {
14368 first_error (_(BAD_FPU));
14369 return FAIL;
14370 }
14371
14372 return SUCCESS;
14373 }
14374
14375 static void
14376 do_neon_addsub_if_i (void)
14377 {
14378 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
14379 return;
14380
14381 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14382 return;
14383
14384 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14385 affected if we specify unsigned args. */
14386 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
14387 }
14388
14389 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14390 result to be:
14391 V<op> A,B (A is operand 0, B is operand 2)
14392 to mean:
14393 V<op> A,B,A
14394 not:
14395 V<op> A,B,B
14396 so handle that case specially. */
14397
14398 static void
14399 neon_exchange_operands (void)
14400 {
14401 void *scratch = alloca (sizeof (inst.operands[0]));
14402 if (inst.operands[1].present)
14403 {
14404 /* Swap operands[1] and operands[2]. */
14405 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14406 inst.operands[1] = inst.operands[2];
14407 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14408 }
14409 else
14410 {
14411 inst.operands[1] = inst.operands[2];
14412 inst.operands[2] = inst.operands[0];
14413 }
14414 }
14415
static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      /* Register variant: INVERT is realized by swapping the two source
	 operands before encoding.  */
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Immediate variant (comparison against zero).  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* F bit set for float element types.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
14443
14444 static void
14445 do_neon_cmp (void)
14446 {
14447 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
14448 }
14449
14450 static void
14451 do_neon_cmp_inv (void)
14452 {
14453 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
14454 }
14455
14456 static void
14457 do_neon_ceq (void)
14458 {
14459 neon_compare (N_IF_32, N_IF_32, FALSE);
14460 }
14461
14462 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14463 scalars, which are encoded in 5 bits, M : Rm.
14464 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14465 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14466 index in M. */
14467
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
14493
14494 /* Encode multiply / multiply-accumulate scalar instructions. */
14495
14496 static void
14497 neon_mul_mac (struct neon_type_el et, int ubit)
14498 {
14499 unsigned scalar;
14500
14501 /* Give a more helpful error message if we have an invalid type. */
14502 if (et.type == NT_invtype)
14503 return;
14504
14505 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
14506 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14507 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14508 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14509 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14510 inst.instruction |= LOW4 (scalar);
14511 inst.instruction |= HI1 (scalar) << 5;
14512 inst.instruction |= (et.type == NT_float) << 8;
14513 inst.instruction |= neon_logbits (et.size) << 20;
14514 inst.instruction |= (ubit != 0) << 24;
14515
14516 neon_dp_fixup (&inst);
14517 }
14518
14519 static void
14520 do_neon_mac_maybe_scalar (void)
14521 {
14522 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
14523 return;
14524
14525 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14526 return;
14527
14528 if (inst.operands[2].isscalar)
14529 {
14530 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14531 struct neon_type_el et = neon_check_type (3, rs,
14532 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
14533 NEON_ENCODE (SCALAR, inst);
14534 neon_mul_mac (et, neon_quad (rs));
14535 }
14536 else
14537 {
14538 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14539 affected if we specify unsigned args. */
14540 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14541 }
14542 }
14543
14544 static void
14545 do_neon_fmac (void)
14546 {
14547 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
14548 return;
14549
14550 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14551 return;
14552
14553 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14554 }
14555
14556 static void
14557 do_neon_tst (void)
14558 {
14559 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14560 struct neon_type_el et = neon_check_type (3, rs,
14561 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
14562 neon_three_same (neon_quad (rs), 0, et.size);
14563 }
14564
14565 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14566 same types as the MAC equivalents. The polynomial type for this instruction
14567 is encoded the same as the integer type. */
14568
14569 static void
14570 do_neon_mul (void)
14571 {
14572 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
14573 return;
14574
14575 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14576 return;
14577
14578 if (inst.operands[2].isscalar)
14579 do_neon_mac_maybe_scalar ();
14580 else
14581 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
14582 }
14583
/* Encode VQDMULH/VQRDMULH, in either the by-scalar or the
   three-register form.  Only signed 16/32-bit element types are
   accepted in both forms.  */

static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      /* By-scalar form: D/Q destination and first source, scalar second
	 source.  */
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* Three-register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
14605
14606 static void
14607 do_neon_fcmp_absolute (void)
14608 {
14609 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14610 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14611 /* Size field comes from bit mask. */
14612 neon_three_same (neon_quad (rs), 1, -1);
14613 }
14614
/* The inverted absolute comparisons are encoded as the non-inverted
   form with the two source operands swapped.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
14621
14622 static void
14623 do_neon_step (void)
14624 {
14625 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14626 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14627 neon_three_same (neon_quad (rs), 0, -1);
14628 }
14629
/* Encode VABS/VNEG.  Tries the VFP-syntax form first; otherwise emits
   the Neon two-register form for signed-integer or F32 elements.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);

  /* Destination register in bits 12-15/22, source in bits 0-3/5.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  /* Bit 10 selects the floating-point variant.  */
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
14655
14656 static void
14657 do_neon_sli (void)
14658 {
14659 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14660 struct neon_type_el et = neon_check_type (2, rs,
14661 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14662 int imm = inst.operands[2].imm;
14663 constraint (imm < 0 || (unsigned)imm >= et.size,
14664 _("immediate out of range for insert"));
14665 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14666 }
14667
14668 static void
14669 do_neon_sri (void)
14670 {
14671 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14672 struct neon_type_el et = neon_check_type (2, rs,
14673 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14674 int imm = inst.operands[2].imm;
14675 constraint (imm < 1 || (unsigned)imm > et.size,
14676 _("immediate out of range for insert"));
14677 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
14678 }
14679
14680 static void
14681 do_neon_qshlu_imm (void)
14682 {
14683 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14684 struct neon_type_el et = neon_check_type (2, rs,
14685 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
14686 int imm = inst.operands[2].imm;
14687 constraint (imm < 0 || (unsigned)imm >= et.size,
14688 _("immediate out of range for shift"));
14689 /* Only encodes the 'U present' variant of the instruction.
14690 In this case, signed types have OP (bit 8) set to 0.
14691 Unsigned types have OP set to 1. */
14692 inst.instruction |= (et.type == NT_unsigned) << 8;
14693 /* The rest of the bits are the same as other immediate shifts. */
14694 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14695 }
14696
14697 static void
14698 do_neon_qmovn (void)
14699 {
14700 struct neon_type_el et = neon_check_type (2, NS_DQ,
14701 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14702 /* Saturating move where operands can be signed or unsigned, and the
14703 destination has the same signedness. */
14704 NEON_ENCODE (INTEGER, inst);
14705 if (et.type == NT_unsigned)
14706 inst.instruction |= 0xc0;
14707 else
14708 inst.instruction |= 0x80;
14709 neon_two_same (0, 1, et.size / 2);
14710 }
14711
14712 static void
14713 do_neon_qmovun (void)
14714 {
14715 struct neon_type_el et = neon_check_type (2, NS_DQ,
14716 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14717 /* Saturating move with unsigned results. Operands must be signed. */
14718 NEON_ENCODE (INTEGER, inst);
14719 neon_two_same (0, 1, et.size / 2);
14720 }
14721
/* Encode VQSHRN/VQRSHRN (shift right by immediate, saturating narrow).
   A zero shift count is rewritten as the equivalent VQMOVN.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned. */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right. */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>. */
  if (imm == 0)
    {
      /* Drop the zero immediate and re-dispatch as VQMOVN.  */
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* The shift field encodes ET.SIZE - IMM.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
14748
/* Encode VQSHRUN/VQRSHRUN (shift right by immediate, saturating narrow
   with unsigned result of signed operands).  A zero shift count is
   rewritten as the equivalent VQMOVUN.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned. */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right. */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>. */
  if (imm == 0)
    {
      /* Drop the zero immediate and re-dispatch as VQMOVUN.  */
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1. */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
14778
14779 static void
14780 do_neon_movn (void)
14781 {
14782 struct neon_type_el et = neon_check_type (2, NS_DQ,
14783 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14784 NEON_ENCODE (INTEGER, inst);
14785 neon_two_same (0, 1, et.size / 2);
14786 }
14787
/* Encode VSHRN/VRSHRN (shift right by immediate, narrowing).  A zero
   shift count is rewritten as the equivalent VMOVN.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right. */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm> */
  if (imm == 0)
    {
      /* Drop the zero immediate and re-dispatch as VMOVN.  */
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  /* The shift field encodes ET.SIZE - IMM.  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
14812
/* Encode VSHLL (shift left long by immediate).  A shift equal to the
   element size has its own "maximum shift" encoding; other counts use
   the general immediate-shift form and get a stricter type check.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening. */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant. */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions. */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
14842
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

/* X-macro table of VCVT conversion flavours.  Each CVT_VAR row supplies:
   the enumerator suffix, the destination type bits, the source type bits,
   extra bits OR'd into the type masks ("whole_reg" and "key" are locals of
   get_neon_cvt_flavour, where this table is expanded), and the VFP
   mnemonics for the bitshift, plain, and round-to-zero forms (NULL where
   no such VFP form exists).  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

/* Expand the table into enumerator names only.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  The ordering must match
   CVT_FLAVOUR_VAR, since the enumerators index the per-flavour mnemonic
   tables built from the same X-macro.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* First flavour handled by VFP rather than Neon encodings.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
14883
/* Determine the conversion flavour of the current instruction for shape
   RS by trying each CVT_VAR type pair in turn until neon_check_type
   accepts one (clearing any error a failed probe left behind).  Returns
   neon_cvt_flavour_invalid if no pair matches.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width. Hack that
     here by making the size equal to the key (wider, in this case) operand. */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  /* Expands to one probe per flavour, in table order.  */
  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
14909
/* Rounding-mode selector for the VCVT family.  Each value corresponds
   to an instruction-suffix letter (see the do_neon_cvt* entry points);
   refer to the ARM ARM for the precise rounding semantics of each
   suffix.  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,	/* VCVTA.  */
  neon_cvt_mode_n,	/* VCVTN.  */
  neon_cvt_mode_p,	/* VCVTP.  */
  neon_cvt_mode_m,	/* VCVTM.  */
  neon_cvt_mode_z,	/* Plain VCVT (round towards zero).  */
  neon_cvt_mode_x,	/* VCVTR-style (FPSCR-directed rounding).  */
  neon_cvt_mode_r
};
14920
14921 /* Neon-syntax VFP conversions. */
14922
14923 static void
14924 do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
14925 {
14926 const char *opname = 0;
14927
14928 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
14929 {
14930 /* Conversions with immediate bitshift. */
14931 const char *enc[] =
14932 {
14933 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
14934 CVT_FLAVOUR_VAR
14935 NULL
14936 #undef CVT_VAR
14937 };
14938
14939 if (flavour < (int) ARRAY_SIZE (enc))
14940 {
14941 opname = enc[flavour];
14942 constraint (inst.operands[0].reg != inst.operands[1].reg,
14943 _("operands 0 and 1 must be the same register"));
14944 inst.operands[1] = inst.operands[2];
14945 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
14946 }
14947 }
14948 else
14949 {
14950 /* Conversions without bitshift. */
14951 const char *enc[] =
14952 {
14953 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
14954 CVT_FLAVOUR_VAR
14955 NULL
14956 #undef CVT_VAR
14957 };
14958
14959 if (flavour < (int) ARRAY_SIZE (enc))
14960 opname = enc[flavour];
14961 }
14962
14963 if (opname)
14964 do_vfp_nsyn_opcode (opname);
14965 }
14966
14967 static void
14968 do_vfp_nsyn_cvtz (void)
14969 {
14970 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
14971 enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
14972 const char *enc[] =
14973 {
14974 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
14975 CVT_FLAVOUR_VAR
14976 NULL
14977 #undef CVT_VAR
14978 };
14979
14980 if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
14981 do_vfp_nsyn_opcode (enc[flavour]);
14982 }
14983
/* Encode an ARMv8 VFP conversion with directed rounding
   (VCVT{A,N,P,M}).  Only the f32/f64 -> s32/u32 flavours exist.  SZ
   selects a double-precision source, OP a signed result, RM the
   rounding mode.  The encoding is unconditional, hence the outside-IT
   constraint.  */

static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  set_it_insn_type (OUTSIDE_IT_INSN);

  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  /* Map the mode to the 2-bit RM field; Z/X/R have no FPV8 form.  */
  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  /* The source is a D register only for the double-precision flavours.  */
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* 0xF in the condition nibble: unconditional encoding.  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
15034
/* Common worker for the whole VCVT family.  MODE distinguishes plain
   VCVT (Z), VCVTR (X) and the ARMv8 directed-rounding variants
   (A/N/P/M).  Dispatches on the operand shape between fixed-point,
   integer, half-precision and pure-VFP encodings.  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
    NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  /* PR11109: Handle round-to-zero for VCVT conversions. */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* VFP rather than Neon conversions. */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	/* Neon fixed-point conversion (immediate fraction bits).  */
	unsigned immbits;
	/* Indexed by flavour: s32_f32, u32_f32, f32_s32, f32_u32.  */
	unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion. */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	/* The immediate field holds 32 minus the fraction-bit count.  */
	immbits = 32 - inst.operands[2].imm;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	inst.instruction |= immbits << 16;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* ARMv8 Neon VCVT{A,N,P,M}: unconditional encoding.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Neon integer conversion.  Indexed by flavour, as above.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon. */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      /* Narrowing (DQ) and widening (QD) use distinct base opcodes.  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
15183
/* VCVTR: conversion using mode X (FPSCR-directed rounding).  */
static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
15189
/* Plain VCVT: conversion using mode Z (round towards zero).  */
static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
15195
/* VCVTA: ARMv8 directed-rounding conversion, mode A.  */
static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
15201
/* VCVTN: ARMv8 directed-rounding conversion, mode N.  */
static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
15207
/* VCVTP: ARMv8 directed-rounding conversion, mode P.  */
static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
15213
/* VCVTM: ARMv8 directed-rounding conversion, mode M.  */
static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15219
/* Shared encoder for VCVTB/VCVTT.  T distinguishes the T (bit 7 set)
   from the B form, TO is TRUE when converting towards half precision,
   and IS_DOUBLE selects the ARMv8 F64<->F16 variants (which require
   the ARMv8 VFP extension and a D register on the full-width side).  */

static void
do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
{
  if (is_double)
    mark_feature_used (&fpu_vfp_ext_armv8);

  /* Only the full-width side of a double conversion uses a D register;
     all other operands are S registers.  */
  encode_arm_vfp_reg (inst.operands[0].reg,
		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg,
		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= to ? 0x10000 : 0;
  inst.instruction |= t ? 0x80 : 0;
  inst.instruction |= is_double ? 0x100 : 0;
  do_vfp_cond_or_thumb ();
}
15235
/* Shared worker for VCVTB/VCVTT.  Probes each accepted type pairing in
   turn (F16<->F32, then the ARMv8 F16<->F64 forms), clearing the error
   a failed probe left in inst.error before dispatching.  T selects the
   VCVTT form.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_DF, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    /* No pairing matched: leave the last neon_check_type error set.  */
    return;
}
15266
/* VCVTB: half-precision conversion using the bottom half.  */
static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
15272
15273
/* VCVTT: half-precision conversion using the top half.  */
static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
15279
/* Encode the immediate form of VMOV/VMVN.  Finds a cmode/op encoding
   for the (possibly 64-bit) immediate; if the value is not directly
   expressible, retries with the bit-inverted immediate under the
   opposite mnemonic before giving up.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    /* High 32 bits of a 64-bit immediate live in the "reg" field.  */
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only. */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm. */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Write back the possibly-flipped op bit.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
15331
/* Encode VMVN.  The register form is a straightforward two-register
   Neon operation; the immediate form is delegated to
   neon_move_immediate.  */

static void
do_neon_mvn (void)
{
  if (inst.operands[1].isreg)
    {
      /* RS is only needed for the Q bit.  */
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);
}
15354
/* Encode instructions of form:

   |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
   | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |

   Shared by the long/wide/narrow three-register operations: Rd/Rn/Rm
   come from operands 0/1/2, U is set for unsigned element types, and
   the size field is derived from SIZE via neon_logbits.  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
15374
15375 static void
15376 do_neon_dyadic_long (void)
15377 {
15378 /* FIXME: Type checking for lengthening op. */
15379 struct neon_type_el et = neon_check_type (3, NS_QDD,
15380 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
15381 neon_mixed_length (et, et.size);
15382 }
15383
15384 static void
15385 do_neon_abal (void)
15386 {
15387 struct neon_type_el et = neon_check_type (3, NS_QDD,
15388 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
15389 neon_mixed_length (et, et.size);
15390 }
15391
/* Common worker for the long multiply(-accumulate) instructions that
   have both a by-scalar and a three-register form.
   NOTE(review): the parameter names look transposed relative to their
   use -- REGTYPES constrains the by-scalar branch and SCALARTYPES the
   three-register branch.  Confirm against the callers before renaming.  */

static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      /* By-scalar form: Q destination, D first source, scalar second.  */
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      /* Three-register form.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
15410
/* Long multiply-accumulate with optional by-scalar second source;
   signed and unsigned 16/32-bit element types.  */
static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
15416
15417 static void
15418 do_neon_dyadic_wide (void)
15419 {
15420 struct neon_type_el et = neon_check_type (3, NS_QQD,
15421 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
15422 neon_mixed_length (et, et.size);
15423 }
15424
15425 static void
15426 do_neon_dyadic_narrow (void)
15427 {
15428 struct neon_type_el et = neon_check_type (3, NS_QDD,
15429 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
15430 /* Operand sign is unimportant, and the U bit is part of the opcode,
15431 so force the operand type to integer. */
15432 et.type = NT_integer;
15433 neon_mixed_length (et, et.size / 2);
15434 }
15435
/* Saturating doubling long multiply(-accumulate): signed 16/32-bit
   types only, with optional by-scalar second source.  */
static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
15441
/* Encode VMULL.  The by-scalar form shares the MAC encoding; the
   three-register form additionally allows polynomial types P8 and
   (with the ARMv8 crypto extension) P64.  */

static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10). */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture. */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Feed 32 to neon_mixed_length so the size field becomes 0b10.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
15473
/* Encode VEXT.  The element index in operand 3 is scaled to a byte
   offset, which must fit in the register (8 bytes for D, 16 for Q).  */

static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Convert the element index into a byte offset.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
15495
/* Encode the VREV family (reverse elements within regions).  */

static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  /* The variant is pre-encoded in bits 7-8 of the base opcode.  */
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction. */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
15512
/* Encode VDUP, either from a scalar (Neon data-processing encoding) or
   from an ARM core register (shared ARM/Thumb encoding).  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* The scalar index is shifted above the size bits.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector. */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8: inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field. */
      do_vfp_cond_or_thumb ();
    }
}
15563
15564 /* VMOV has particularly many variations. It can be one of:
15565 0. VMOV<c><q> <Qd>, <Qm>
15566 1. VMOV<c><q> <Dd>, <Dm>
15567 (Register operations, which are VORR with Rm = Rn.)
15568 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15569 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15570 (Immediate loads.)
15571 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15572 (ARM register to scalar.)
15573 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15574 (Two ARM registers to vector.)
15575 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15576 (Scalar to ARM register.)
15577 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15578 (Vector to two ARM registers.)
15579 8. VMOV.F32 <Sd>, <Sm>
15580 9. VMOV.F64 <Dd>, <Dm>
15581 (VFP register moves.)
15582 10. VMOV.F32 <Sd>, #imm
15583 11. VMOV.F64 <Dd>, #imm
15584 (VFP float immediate load.)
15585 12. VMOV <Rd>, <Sm>
15586 (VFP single to ARM reg.)
15587 13. VMOV <Sd>, <Rm>
15588 (ARM reg to VFP single.)
15589 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15590 (Two ARM regs to two VFP singles.)
15591 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15592 (Two VFP singles to two ARM regs.)
15593
15594 These cases can be disambiguated using neon_select_shape, except cases 1/9
15595 and 3/11 which depend on the operand type too.
15596
15597 All the encoded bits are hardcoded by this function.
15598
15599 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15600 Cases 5, 7 may be used with VFPv2 and above.
15601
15602 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15603 can specify a type where it doesn't make sense to, and is ignored). */
15604
static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  /* Mnemonic of the pseudo-opcode used for the float-immediate cases
     (10/11); set by the NS_DI/NS_FI cases below.  */
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 9: VMOV.F64 <Dd>, <Dm> is the VFP fcpyd.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	/* Encoded as VORR with Rm = Rn, so the source register appears in
	   both the Rm and Rn fields.  */
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* As in the NS_DD case, no type is not an error.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	/* 8/16-bit transfers need full Neon; 32-bit works from VFPv1 up.  */
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* bcdebits combines the size selector with the lane index; its low
	   two bits land in instruction bits [6:5], the rest in [22:21].  */
	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* abcdebits combines signedness, size and lane index; low two bits
	   go to instruction bits [6:5], the rest to [22:21].  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      /* Only immediates representable in the 8-bit VFP/Neon float format
	 can be encoded; anything else is rejected.  */
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
15839
15840 static void
15841 do_neon_rshift_round_imm (void)
15842 {
15843 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15844 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
15845 int imm = inst.operands[2].imm;
15846
15847 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
15848 if (imm == 0)
15849 {
15850 inst.operands[2].present = 0;
15851 do_neon_mov ();
15852 return;
15853 }
15854
15855 constraint (imm < 1 || (unsigned)imm > et.size,
15856 _("immediate out of range for shift"));
15857 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
15858 et.size - imm);
15859 }
15860
15861 static void
15862 do_neon_movl (void)
15863 {
15864 struct neon_type_el et = neon_check_type (2, NS_QD,
15865 N_EQK | N_DBL, N_SU_32 | N_KEY);
15866 unsigned sizebits = et.size >> 3;
15867 inst.instruction |= sizebits << 19;
15868 neon_two_same (0, et.type == NT_unsigned, -1);
15869 }
15870
15871 static void
15872 do_neon_trn (void)
15873 {
15874 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15875 struct neon_type_el et = neon_check_type (2, rs,
15876 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15877 NEON_ENCODE (INTEGER, inst);
15878 neon_two_same (neon_quad (rs), 1, et.size);
15879 }
15880
15881 static void
15882 do_neon_zip_uzp (void)
15883 {
15884 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15885 struct neon_type_el et = neon_check_type (2, rs,
15886 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15887 if (rs == NS_DD && et.size == 32)
15888 {
15889 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
15890 inst.instruction = N_MNEM_vtrn;
15891 do_neon_trn ();
15892 return;
15893 }
15894 neon_two_same (neon_quad (rs), 1, et.size);
15895 }
15896
15897 static void
15898 do_neon_sat_abs_neg (void)
15899 {
15900 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15901 struct neon_type_el et = neon_check_type (2, rs,
15902 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15903 neon_two_same (neon_quad (rs), 1, et.size);
15904 }
15905
15906 static void
15907 do_neon_pair_long (void)
15908 {
15909 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15910 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
15911 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
15912 inst.instruction |= (et.type == NT_unsigned) << 7;
15913 neon_two_same (neon_quad (rs), 1, et.size);
15914 }
15915
15916 static void
15917 do_neon_recip_est (void)
15918 {
15919 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15920 struct neon_type_el et = neon_check_type (2, rs,
15921 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
15922 inst.instruction |= (et.type == NT_float) << 8;
15923 neon_two_same (neon_quad (rs), 1, et.size);
15924 }
15925
15926 static void
15927 do_neon_cls (void)
15928 {
15929 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15930 struct neon_type_el et = neon_check_type (2, rs,
15931 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15932 neon_two_same (neon_quad (rs), 1, et.size);
15933 }
15934
15935 static void
15936 do_neon_clz (void)
15937 {
15938 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15939 struct neon_type_el et = neon_check_type (2, rs,
15940 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
15941 neon_two_same (neon_quad (rs), 1, et.size);
15942 }
15943
15944 static void
15945 do_neon_cnt (void)
15946 {
15947 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15948 struct neon_type_el et = neon_check_type (2, rs,
15949 N_EQK | N_INT, N_8 | N_KEY);
15950 neon_two_same (neon_quad (rs), 1, et.size);
15951 }
15952
15953 static void
15954 do_neon_swp (void)
15955 {
15956 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15957 neon_two_same (neon_quad (rs), 1, -1);
15958 }
15959
/* Encode VTBL / VTBX: table lookup through a list of 1-4 D registers.  */

static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  /* List length is encoded as (length - 1) in bits [9:8].  */
  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
15983
/* Encode VLDM/VSTM of a D-register list; single-precision lists are
   handed off to the VFP encoder.  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words in the offset field.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
16013
16014 static void
16015 do_neon_ldr_str (void)
16016 {
16017 int is_ldr = (inst.instruction & (1 << 20)) != 0;
16018
16019 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16020 And is UNPREDICTABLE in thumb mode. */
16021 if (!is_ldr
16022 && inst.operands[1].reg == REG_PC
16023 && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
16024 {
16025 if (thumb_mode)
16026 inst.error = _("Use of PC here is UNPREDICTABLE");
16027 else if (warn_on_deprecated)
16028 as_warn (_("Use of PC here is deprecated"));
16029 }
16030
16031 if (inst.operands[0].issingle)
16032 {
16033 if (is_ldr)
16034 do_vfp_nsyn_opcode ("flds");
16035 else
16036 do_vfp_nsyn_opcode ("fsts");
16037 }
16038 else
16039 {
16040 if (is_ldr)
16041 do_vfp_nsyn_opcode ("fldd");
16042 else
16043 do_vfp_nsyn_opcode ("fstd");
16044 }
16045 }
16046
16047 /* "interleave" version also handles non-interleaving register VLD1/VST1
16048 instructions. */
16049
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
       -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
       -1, -1, -1, -1, 0x4, 0x5, -1, -1,  /* VLD3 / VST3.  */
       -1, -1, -1, -1, -1, -1, 0x0, 0x1   /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Map the alignment value (held in the upper bits of the operand
     immediate) to the two-bit align field; 128 and 256 are only valid
     for certain list lengths.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  /* 64-bit elements are only accepted when <n> is 1 (bits [9:8] zero).  */
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
16115
16116 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16117 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16118 otherwise. The variable arguments are a list of pairs of legal (size, align)
16119 values, terminated with -1. */
16120
static int
neon_alignment_bit (int size, int align, int *do_align, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No alignment specified: nothing to validate, and no bit to set.  */
  if (!inst.operands[1].immisalign)
    {
      *do_align = 0;
      return SUCCESS;
    }

  va_start (ap, do_align);

  /* Walk the (size, align) pairs until a match or the -1 terminator.
     Note each iteration pulls two arguments, so the list must always be
     terminated on a size position.  */
  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
	break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
	result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_align = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
16156
/* Encode the single-lane forms of VLD<n>/VST<n> (one element per register
   to/from a specific lane).  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  /* <n> of VLD<n>/VST<n>, minus one, from bits [9:8] of the bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* The legal (size, alignment) pairs and the encoding of the alignment
     bits differ per <n>.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
16241
16242 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16243
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  /* Dispatch on <n> of VLD<n>, held in bits [9:8] of the bitmask; each
     form has its own list-length and alignment constraints.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Bit 5 encodes a register stride of two.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use the special size
	   encoding 0x3.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* The alignment bit itself, as computed by neon_alignment_bit.  */
  inst.instruction |= do_align << 4;
}
16315
16316 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
16317 apart from bits [11:4]. */
16318
16319 static void
16320 do_neon_ldx_stx (void)
16321 {
16322 if (inst.operands[1].isreg)
16323 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
16324
16325 switch (NEON_LANE (inst.operands[0].imm))
16326 {
16327 case NEON_INTERLEAVE_LANES:
16328 NEON_ENCODE (INTERLV, inst);
16329 do_neon_ld_st_interleave ();
16330 break;
16331
16332 case NEON_ALL_LANES:
16333 NEON_ENCODE (DUP, inst);
16334 if (inst.instruction == N_INV)
16335 {
16336 first_error ("only loads support such operands");
16337 break;
16338 }
16339 do_neon_ld_dup ();
16340 break;
16341
16342 default:
16343 NEON_ENCODE (LANE, inst);
16344 do_neon_ld_st_lane ();
16345 }
16346
16347 /* L bit comes from bit mask. */
16348 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16349 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16350 inst.instruction |= inst.operands[1].reg << 16;
16351
16352 if (inst.operands[1].postind)
16353 {
16354 int postreg = inst.operands[1].imm & 0xf;
16355 constraint (!inst.operands[1].immisreg,
16356 _("post-index must be a register"));
16357 constraint (postreg == 0xd || postreg == 0xf,
16358 _("bad register for post-index"));
16359 inst.instruction |= postreg;
16360 }
16361 else
16362 {
16363 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
16364 constraint (inst.reloc.exp.X_op != O_constant
16365 || inst.reloc.exp.X_add_number != 0,
16366 BAD_ADDR_MODE);
16367
16368 if (inst.operands[1].writeback)
16369 {
16370 inst.instruction |= 0xd;
16371 }
16372 else
16373 inst.instruction |= 0xf;
16374 }
16375
16376 if (thumb_mode)
16377 inst.instruction |= 0xf9000000;
16378 else
16379 inst.instruction |= 0xf4000000;
16380 }
16381
16382 /* FP v8. */
16383 static void
16384 do_vfp_nsyn_fpv8 (enum neon_shape rs)
16385 {
16386 NEON_ENCODE (FPV8, inst);
16387
16388 if (rs == NS_FFF)
16389 do_vfp_sp_dyadic ();
16390 else
16391 do_vfp_dp_rd_rn_rm ();
16392
16393 if (rs == NS_DDD)
16394 inst.instruction |= 0x100;
16395
16396 inst.instruction |= 0xf0000000;
16397 }
16398
16399 static void
16400 do_vsel (void)
16401 {
16402 set_it_insn_type (OUTSIDE_IT_INSN);
16403
16404 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
16405 first_error (_("invalid instruction shape"));
16406 }
16407
16408 static void
16409 do_vmaxnm (void)
16410 {
16411 set_it_insn_type (OUTSIDE_IT_INSN);
16412
16413 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
16414 return;
16415
16416 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
16417 return;
16418
16419 neon_dyadic_misc (NT_untyped, N_F32, 0);
16420 }
16421
/* Shared encoder for the VRINT family; MODE selects the rounding mode.
   Handles both the VFP scalar (F32/F64) and the Neon vector (F32)
   encodings.  */

static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The directed-rounding variants (A/N/P/M) are unconditional and so
	 must not appear inside an IT block.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 selects double precision.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Rounding mode field, starting at bit 7; mode R has no Neon
	 encoding.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
16498
/* VRINTX (rounding mode X).  */
static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

/* VRINTZ (rounding mode Z).  */
static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

/* VRINTR (rounding mode R).  */
static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

/* VRINTA (rounding mode A).  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

/* VRINTN (rounding mode N).  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

/* VRINTP (rounding mode P).  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

/* VRINTM (rounding mode M).  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
16540
16541 /* Crypto v1 instructions. */
/* Shared encoder for two-register crypto instructions.  ELTTYPE is the
   required element type; OP goes into bits [7:6], or is omitted when -1.
   Not permitted inside an IT block.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
16566
/* Shared encoder for three-register crypto instructions; U and OP
   together select the particular instruction.  Not permitted inside an
   IT block.  */
static void
do_crypto_3op_1 (int u, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
16581
/* AESE (2-register crypto, op 0).  */
static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

/* AESD (2-register crypto, op 1).  */
static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

/* AESMC (2-register crypto, op 2).  */
static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

/* AESIMC (2-register crypto, op 3).  */
static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}

/* SHA1C (3-register crypto, u 0, op 0).  */
static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

/* SHA1P (3-register crypto, u 0, op 1).  */
static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

/* SHA1M (3-register crypto, u 0, op 2).  */
static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

/* SHA1SU0 (3-register crypto, u 0, op 3).  */
static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

/* SHA256H (3-register crypto, u 1, op 0).  */
static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

/* SHA256H2 (3-register crypto, u 1, op 1).  */
static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

/* SHA256SU1 (3-register crypto, u 1, op 2).  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

/* SHA1H (2-register crypto, no op field).  */
static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

/* SHA1SU1 (2-register crypto, op 0).  */
static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

/* SHA256SU0 (2-register crypto, op 1).  */
static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
16665
/* Shared encoder for the CRC32 instructions.  POLY selects the CRC32C
   variant; SZ is the operand-size code (0 = byte, 1 = halfword,
   2 = word, per the callers below).  The field positions differ between
   the ARM and Thumb encodings.  */
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* PC is unpredictable everywhere; SP additionally so in Thumb.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
  if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
    as_warn (UNPRED_REG ("r13"));
}
16685
/* CRC32 wrappers: the first argument selects the polynomial, the
   second the operand size (see do_crc32_1).  */

/* CRC32B: CRC-32 over a byte.  */
static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

/* CRC32H: CRC-32 over a halfword.  */
static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

/* CRC32W: CRC-32 over a word.  */
static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

/* CRC32CB: CRC-32C over a byte.  */
static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

/* CRC32CH: CRC-32C over a halfword.  */
static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

/* CRC32CW: CRC-32C over a word.  */
static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
16721
16722 \f
16723 /* Overall per-instruction processing. */
16724
16725 /* We need to be able to fix up arbitrary expressions in some statements.
16726 This is so that we can handle symbols that are an arbitrary distance from
16727 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
16728 which returns part of an address in a form which will be valid for
16729 a data instruction. We do this by pushing the expression into a symbol
16730 in the expr_section, and creating a fix for that. */
16731
/* Create a fix-up for EXP, SIZE bytes at offset WHERE in FRAG, using
   relocation number RELOC (PC-relative if PC_REL is set).  Constant
   and complex expressions are first wrapped in a symbol so the fix
   always has a concrete target to refer to.  */
static void
fix_new_arm (fragS * frag,
	     int where,
	     short int size,
	     expressionS * exp,
	     int pc_rel,
	     int reloc)
{
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  /* Buffer holds "*ABS*0x" (7 chars) + up to 16 hex digits
	     + NUL = 24 bytes, which is exactly 16 + 8.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  /* Rewrite the expression as symbol + 0 and fall through to
	     the symbolic case below.  */
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Any other expression shape: reduce it to an expression symbol
	 first, then fix against that symbol.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
16785
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Reduce the relocation expression to the symbol + offset pair that
     frag_var wants.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  /* Emit the narrow (Thumb-sized) form now; the variable part of the
     frag allows it to grow to a full-size instruction at relax time.  */
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
16817
16818 /* Write a 32-bit thumb instruction to buf. */
16819 static void
16820 put_thumb32_insn (char * buf, unsigned long insn)
16821 {
16822 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
16823 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
16824 }
16825
/* Emit the instruction currently held in INST into the output frag,
   together with any relocation and line-number information.  STR is
   the original source text, used only for diagnostics.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  /* A recorded parse/encode error suppresses output entirely.  */
  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  /* Relaxable instructions are emitted through a variable frag instead.  */
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  /* Size zero means nothing to emit (e.g. a pseudo handled elsewhere).  */
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb instruction: emitted as two halfwords, high first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-word ARM case: the same 32-bit pattern written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
16872
16873 static char *
16874 output_it_inst (int cond, int mask, char * to)
16875 {
16876 unsigned long instruction = 0xbf00;
16877
16878 mask &= 0xf;
16879 instruction |= mask;
16880 instruction |= cond << 4;
16881
16882 if (to == NULL)
16883 {
16884 to = frag_more (2);
16885 #ifdef OBJ_ELF
16886 dwarf2_emit_insn (2);
16887 #endif
16888 }
16889
16890 md_number_to_chars (to, instruction, 2);
16891
16892 return to;
16893 }
16894
/* Tag values used in struct asm_opcode's tag field.  These describe
   where (if anywhere) a mnemonic accepts its conditional affix: as a
   suffix, as an infix at some character position, or not at all.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
16928
16929 /* Subroutine of md_assemble, responsible for looking up the primary
16930 opcode from the mnemonic the user wrote. STR points to the
16931 beginning of the mnemonic.
16932
16933 This is not simply a hash table lookup, because of conditional
16934 variants. Most instructions have conditional variants, which are
16935 expressed with a _conditional affix_ to the mnemonic. If we were
16936 to encode each conditional variant as a literal string in the opcode
16937 table, it would have approximately 20,000 entries.
16938
16939 Most mnemonics take this affix as a suffix, and in unified syntax,
16940 'most' is upgraded to 'all'. However, in the divided syntax, some
16941 instructions take the affix as an infix, notably the s-variants of
16942 the arithmetic instructions. Of those instructions, all but six
16943 have the infix appear after the third character of the mnemonic.
16944
16945 Accordingly, the algorithm for looking up primary opcodes given
16946 an identifier is:
16947
16948 1. Look up the identifier in the opcode table.
16949 If we find a match, go to step U.
16950
16951 2. Look up the last two characters of the identifier in the
16952 conditions table. If we find a match, look up the first N-2
16953 characters of the identifier in the opcode table. If we
16954 find a match, go to step CE.
16955
16956 3. Look up the fourth and fifth characters of the identifier in
16957 the conditions table. If we find a match, extract those
16958 characters from the identifier, and look up the remaining
16959 characters in the opcode table. If we find a match, go
16960 to step CM.
16961
16962 4. Fail.
16963
16964 U. Examine the tag field of the opcode structure, in case this is
16965 one of the six instructions with its conditional infix in an
16966 unusual place. If it is, the tag tells us where to find the
16967 infix; look it up in the conditions table and set inst.cond
16968 accordingly. Otherwise, this is an unconditional instruction.
16969 Again set inst.cond accordingly. Return the opcode structure.
16970
16971 CE. Examine the tag field to make sure this is an instruction that
16972 should receive a conditional suffix. If it is not, fail.
16973 Otherwise, set inst.cond from the suffix we already looked up,
16974 and return the opcode structure.
16975
16976 CM. Examine the tag field to make sure this is an instruction that
16977 should receive a conditional infix after the third character.
16978 If it is not, fail. Otherwise, undo the edits to the current
16979 line of input and proceed as for case CE. */
16980
/* Look up the primary opcode for the mnemonic at *STR, handling
   conditional suffixes and infixes as described in the algorithm
   comment above.  On success *STR is advanced past the mnemonic (and
   any width/type suffix) and inst.cond/inst.size_req/inst.vectype may
   be updated; returns NULL on failure.  */
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      /* Odd-position infix: the tag encodes where the condition sits.  */
      if (warn_on_deprecated && unified_syntax)
	as_warn (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of fewer than three
     characters (two for the condition plus at least one for the base
     mnemonic).  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return 0;
	  /* else fall through */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily remove the two infix characters from the buffer, look
     the contracted mnemonic up, then restore the buffer in place.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_warn (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
17137
17138 /* This function generates an initial IT instruction, leaving its block
17139 virtually open for the new instructions. Eventually,
17140 the mask will be updated by now_it_add_mask () each time
17141 a new instruction needs to be included in the IT block.
17142 Finally, the block is closed with close_automatic_it_block ().
17143 The block closure can be requested either from md_assemble (),
17144 a tencode (), or due to a label hook. */
17145
/* Open a new automatic IT block for condition COND and emit the IT
   instruction; now_it.insn records its location so the mask can be
   patched later by now_it_add_mask ().  See the comment above.  */
static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  /* Initial mask for a one-instruction block; updated as instructions
     are added.  The mask must be set before output_it_inst uses it.  */
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  mapping_state (MAP_THUMB);
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
  now_it.warn_deprecated = FALSE;
  now_it.insn_cond = TRUE;
}
17158
/* Close an automatic IT block.
   See comments in new_automatic_it_block ().  Only the mask and the
   length are reset here; the state transition itself is handled by
   the FSM (see it_fsm_post_encode / force_automatic_it_block_close).  */
static void
close_automatic_it_block (void)
{
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
17168
/* Update the mask of the current automatically-generated IT
   instruction.  See comments in new_automatic_it_block ().
   COND is the condition of the instruction being appended; only its
   low bit matters (then/else relative to the block condition).  */
static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
					  | ((bitvalue) << (nbit)))
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  /* Set the then/else bit for this instruction at the position
     determined by how many instructions the block already holds...  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       resulting_bit,
			      (5 - now_it.block_length));
  /* ...and move the terminating 1 bit one position further down.  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       1,
			       ((5 - now_it.block_length) - 1) );
  /* Re-emit the IT instruction in place with the updated mask.  */
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
17192
17193 /* The IT blocks handling machinery is accessed through the these functions:
17194 it_fsm_pre_encode () from md_assemble ()
17195 set_it_insn_type () optional, from the tencode functions
17196 set_it_insn_type_last () ditto
17197 in_it_block () ditto
17198 it_fsm_post_encode () from md_assemble ()
     force_automatic_it_block_close () from label handling functions
17200
17201 Rationale:
17202 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17203 initializing the IT insn type with a generic initial value depending
17204 on the inst.condition.
17205 2) During the tencode function, two things may happen:
17206 a) The tencode function overrides the IT insn type by
17207 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17208 b) The tencode function queries the IT block state by
17209 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17210
17211 Both set_it_insn_type and in_it_block run the internal FSM state
17212 handling function (handle_it_state), because: a) setting the IT insn
17213 type may incur in an invalid state (exiting the function),
17214 and b) querying the state requires the FSM to be updated.
17215 Specifically we want to avoid creating an IT block for conditional
17216 branches, so it_fsm_pre_encode is actually a guess and we can't
17217 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
17219 Because of this, if set_it_insn_type and in_it_block have to be used,
17220 set_it_insn_type has to be called first.
17221
17222 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17223 determines the insn IT type depending on the inst.cond code.
17224 When a tencode () routine encodes an instruction that can be
17225 either outside an IT block, or, in the case of being inside, has to be
17226 the last one, set_it_insn_type_last () will determine the proper
17227 IT instruction type based on the inst.cond code. Otherwise,
17228 set_it_insn_type can be called for overriding that logic or
17229 for covering other cases.
17230
17231 Calling handle_it_state () may not transition the IT block state to
   OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17233 still queried. Instead, if the FSM determines that the state should
17234 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17235 after the tencode () function: that's what it_fsm_post_encode () does.
17236
17237 Since in_it_block () calls the state handling function to get an
17238 updated state, an error may occur (due to invalid insns combination).
17239 In that case, inst.error is set.
17240 Therefore, inst.error has to be checked after the execution of
17241 the tencode () routine.
17242
17243 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17244 any pending state change (if any) that didn't take place in
17245 handle_it_state () as explained above. */
17246
17247 static void
17248 it_fsm_pre_encode (void)
17249 {
17250 if (inst.cond != COND_ALWAYS)
17251 inst.it_insn_type = INSIDE_IT_INSN;
17252 else
17253 inst.it_insn_type = OUTSIDE_IT_INSN;
17254
17255 now_it.state_handled = 0;
17256 }
17257
17258 /* IT state FSM handling function. */
17259
/* Run one step of the IT-block FSM for the instruction being
   assembled, based on now_it.state and inst.it_insn_type.  Returns
   SUCCESS, or FAIL with inst.error set for invalid combinations.  */
static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      /* ARM state: conditional execution needs no IT block, but
		 warn in unified syntax unless implicit-IT allows ARM.  */
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  /* Conditional insn outside an IT block and no way to
		     create one: hard error.  */
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      /* Block full or condition mismatch: close it, and (unless
		 this insn only joins a block opportunistically) open a
		 new one for this condition.  */
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  /* A "last" instruction always terminates the block.  */
	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		/* Suffix must match the then/else slot of the IT mask.  */
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		/* e.g. a branch may only be the final insn of a block.  */
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
17421
/* A pattern/mask pair identifying a class of 16-bit Thumb encodings,
   with a human-readable description for the deprecation warning.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Required values of the bits in MASK.  */
  unsigned long mask;		/* Which bits of the opcode to test.  */
  const char* description;	/* Text inserted into the warning.  */
};
17428
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Terminated by an all-zero entry; scanned by
   it_fsm_post_encode ().  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode.  'tvalue' is used at the stage this check happens.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
17443
/* Commit any pending IT-state change after the encoder has run, and
   issue the ARMv8 deprecation warnings for conditional instructions
   inside IT blocks.  Called from md_assemble () after tencode/aencode.  */
static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_it.state_handled)
    handle_it_state ();

  /* Warn (once per block) about IT usages that ARMv8 deprecates.  */
  if (now_it.insn_cond
      && !now_it.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    {
      if (inst.instruction >= 0x10000)
	{
	  /* Any 32-bit Thumb instruction in an IT block is deprecated.  */
	  as_warn (_("IT blocks containing 32-bit Thumb instructions are "
		     "deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
      else
	{
	  /* 16-bit instructions: check against the deprecated classes.  */
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_warn (_("IT blocks containing 16-bit Thumb instructions "
			     "of the following class are deprecated in ARMv8: "
			     "%s"), p->description);
		  now_it.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      if (now_it.block_length > 1)
	{
	  as_warn (_("IT blocks containing more than one conditional "
		     "instruction are deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
    }

  /* Mask 0x10 means the block's last slot was consumed: leave it.  */
  is_last = (now_it.mask == 0x10);
  if (is_last)
    {
      now_it.state = OUTSIDE_IT_BLOCK;
      now_it.mask = 0;
    }
}
17497
17498 static void
17499 force_automatic_it_block_close (void)
17500 {
17501 if (now_it.state == AUTOMATIC_IT_BLOCK)
17502 {
17503 close_automatic_it_block ();
17504 now_it.state = OUTSIDE_IT_BLOCK;
17505 now_it.mask = 0;
17506 }
17507 }
17508
17509 static int
17510 in_it_block (void)
17511 {
17512 if (!now_it.state_handled)
17513 handle_it_state ();
17514
17515 return now_it.state != OUTSIDE_IT_BLOCK;
17516 }
17517
/* Assemble the instruction in STR.  This is gas's per-line assembly
   hook: look up the mnemonic, validate it against the selected CPU in
   the current (ARM or Thumb) mode, parse the operands, run the
   mode-specific encoder and the IT-block FSM, then emit the result
   via output_inst ().  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Start each instruction from a clean slate.  */
  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_warn (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  as_bad (_("selected processor does not support Thumb mode `%s'"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
	{
	  if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
	      && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
		   || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
	    {
	      /* Two things are addressed here.
		 1) Implicit require narrow instructions on Thumb-1.
		    This avoids relaxation accidentally introducing Thumb-2
		     instructions.
		 2) Reject wide instructions in non Thumb-2 cores.  */
	      if (inst.size_req == 0)
		inst.size_req = 2;
	      else if (inst.size_req == 4)
		{
		  as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str);
		  return;
		}
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 16-bit opcodes in [0xe800, 0xffff] would be indistinguishable
	     from the leading halfword of a 32-bit Thumb-2 encoding.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  ie.
	 anything other than bl/blx and v6-M instructions.
	 This is overly pessimistic for relaxable instructions.  */
      if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
	   || inst.relax)
	  && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
	       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support ARM mode `%s'"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xF << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
17702
17703 static void
17704 check_it_blocks_finished (void)
17705 {
17706 #ifdef OBJ_ELF
17707 asection *sect;
17708
17709 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
17710 if (seg_info (sect)->tc_segment_info_data.current_it.state
17711 == MANUAL_IT_BLOCK)
17712 {
17713 as_warn (_("section '%s' finished with an open IT block."),
17714 sect->name);
17715 }
17716 #else
17717 if (now_it.state == MANUAL_IT_BLOCK)
17718 as_warn (_("file finished with an open IT block."));
17719 #endif
17720 }
17721
17722 /* Various frobbings of labels and their addresses. */
17723
/* Hook called by the read loop at the start of each input line.
   Forget any label remembered from the previous line so that
   label-adjacency logic (last_label_seen) only ever refers to a
   label on the current line.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
17729
/* Hook called whenever a label SYM is defined.  Records the label,
   tags it with the current ARM/Thumb and interworking state, closes
   any automatic IT block, optionally marks it as a Thumb function
   (see the long comment below for why local .L labels are excluded),
   and finally tells the DWARF2 machinery about it.  */
void
arm_frob_label (symbolS * sym)
{
  /* Remember this label for per-line bookkeeping.  */
  last_label_seen = sym;

  /* Tag the symbol with the mode (ARM/Thumb) in force at its
     definition point.  */
  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any IT block that was opened implicitly.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      /* The .thumb_func marker applies to exactly one label.  */
      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
17788
17789 bfd_boolean
17790 arm_data_in_code (void)
17791 {
17792 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
17793 {
17794 *input_line_pointer = '/';
17795 input_line_pointer += 5;
17796 *input_line_pointer = 0;
17797 return TRUE;
17798 }
17799
17800 return FALSE;
17801 }
17802
17803 char *
17804 arm_canonicalize_symbol_name (char * name)
17805 {
17806 int len;
17807
17808 if (thumb_mode && (len = strlen (name)) > 5
17809 && streq (name + len - 5, "/data"))
17810 *(name + len - 5) = 0;
17811
17812 return name;
17813 }
17814 \f
17815 /* Table of all register names defined by default. The user can
17816 define additional names with .req. Note that all register names
17817 should appear in both upper and lowercase variants. Some registers
17818 also have mixed-case names. */
17819
17820 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
17821 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
17822 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
17823 #define REGSET(p,t) \
17824 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
17825 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
17826 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
17827 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
17828 #define REGSETH(p,t) \
17829 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
17830 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
17831 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
17832 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
17833 #define REGSET2(p,t) \
17834 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
17835 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
17836 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
17837 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
17838 #define SPLRBANK(base,bank,t) \
17839 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
17840 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
17841 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
17842 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
17843 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
17844 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
17845
17846 static const struct reg_entry reg_names[] =
17847 {
17848 /* ARM integer registers. */
17849 REGSET(r, RN), REGSET(R, RN),
17850
17851 /* ATPCS synonyms. */
17852 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
17853 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
17854 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
17855
17856 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
17857 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
17858 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
17859
17860 /* Well-known aliases. */
17861 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
17862 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
17863
17864 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
17865 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
17866
17867 /* Coprocessor numbers. */
17868 REGSET(p, CP), REGSET(P, CP),
17869
17870 /* Coprocessor register numbers. The "cr" variants are for backward
17871 compatibility. */
17872 REGSET(c, CN), REGSET(C, CN),
17873 REGSET(cr, CN), REGSET(CR, CN),
17874
17875 /* ARM banked registers. */
17876 REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
17877 REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
17878 REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
17879 REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
17880 REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
17881 REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
17882 REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
17883
17884 REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
17885 REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
17886 REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
17887 REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
17888 REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
17889 REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
17890 REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
17891 REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
17892
17893 SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
17894 SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
17895 SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
17896 SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
17897 SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
17898 REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
17899 REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
17900 REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
17901 REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
17902
17903 /* FPA registers. */
17904 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
17905 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
17906
17907 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
17908 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
17909
17910 /* VFP SP registers. */
17911 REGSET(s,VFS), REGSET(S,VFS),
17912 REGSETH(s,VFS), REGSETH(S,VFS),
17913
17914 /* VFP DP Registers. */
17915 REGSET(d,VFD), REGSET(D,VFD),
17916 /* Extra Neon DP registers. */
17917 REGSETH(d,VFD), REGSETH(D,VFD),
17918
17919 /* Neon QP registers. */
17920 REGSET2(q,NQ), REGSET2(Q,NQ),
17921
17922 /* VFP control registers. */
17923 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
17924 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
17925 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
17926 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
17927 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
17928 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
17929
17930 /* Maverick DSP coprocessor registers. */
17931 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
17932 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
17933
17934 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
17935 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
17936 REGDEF(dspsc,0,DSPSC),
17937
17938 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
17939 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
17940 REGDEF(DSPSC,0,DSPSC),
17941
17942 /* iWMMXt data registers - p0, c0-15. */
17943 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
17944
17945 /* iWMMXt control registers - p1, c0-3. */
17946 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
17947 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
17948 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
17949 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
17950
17951 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
17952 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
17953 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
17954 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
17955 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
17956
17957 /* XScale accumulator registers. */
17958 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
17959 };
17960 #undef REGDEF
17961 #undef REGNUM
17962 #undef REGSET
17963
/* Table of all PSR suffixes (the part after '_' in e.g. CPSR_fsxc).
   Bare "CPSR" and "SPSR" are handled within psr_required_here.
   Every permutation of every non-empty subset of the four field
   letters {f,s,x,c} is listed explicitly, so a suffix can be matched
   by a plain string lookup regardless of the order the user wrote
   the letters in.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.  */
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
18042
/* Table of V7M psr names, mapping each special-register name (in both
   lower and upper case) to its numeric encoding.  Note that several
   names share an encoding (e.g. "psr"/"xpsr" both map to 3).  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	  0 }, {"APSR",		0 },
  {"iapsr",	  1 }, {"IAPSR",	1 },
  {"eapsr",	  2 }, {"EAPSR",	2 },
  {"psr",	  3 }, {"PSR",		3 },
  {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
  {"ipsr",	  5 }, {"IPSR",		5 },
  {"epsr",	  6 }, {"EPSR",		6 },
  {"iepsr",	  7 }, {"IEPSR",	7 },
  {"msp",	  8 }, {"MSP",		8 },
  {"psp",	  9 }, {"PSP",		9 },
  {"primask",	  16}, {"PRIMASK",	16},
  {"basepri",	  17}, {"BASEPRI",	17},
  {"basepri_max", 18}, {"BASEPRI_MAX",	18},
  {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility.  */
  {"faultmask",	  19}, {"FAULTMASK",	19},
  {"control",	  20}, {"CONTROL",	20}
};
18063
/* Table of all shift-in-operand names, mapping each spelling (lower
   and upper case) to its internal SHIFT_* code.  "asl" is accepted as
   a synonym for "lsl".  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
18074
/* Table of all explicit relocation names (the NAME in operand syntax
   such as "ldr r0, =sym(NAME)"), mapping each spelling to the BFD
   relocation it selects.  ELF-only.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
  { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
  { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
#endif
18099
/* Table of all conditional affixes, mapping each two-letter condition
   spelling to its 4-bit condition-code value.  0xF is not defined as
   a condition code.  Several values have multiple spellings
   (cs/hs = 0x2, cc/ul/lo = 0x3).  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
18119
/* UL_BARRIER expands one barrier-option entry in both lower and upper
   case, tagging each with the architecture feature that introduces
   it.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE (FEAT, 0) }, \
  { U, CODE, ARM_FEATURE (FEAT, 0) }

/* Table of barrier options for DMB/DSB/ISB-style instructions.
   Options gated on ARM_EXT_V8 (ld/ishld/nshld/oshld) are ARMv8-only;
   the rest require the barrier extension.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
18145
18146 /* Table of ARM-format instructions. */
18147
/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* As TxC3, but the infix form is deprecated (warns on use).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  The Thumb opcode and encode
   function slots are 0/NULL.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* One entry of a conditionally-infixed mnemonic: m1 + condition m2 +
   m3.  The tag records where the infix sits so it can be stripped
   during lookup.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand xCM_ once for the bare mnemonic and once per condition
   spelling, covering every entry in the conds[] table above.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

/* ARM-only unconditional (UE) and unconditional-with-0xF-cond (UF)
   entries; no Thumb variant.  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Placeholder expanded by do_##ae when an entry has no ARM or Thumb
   encode function.  */
#define do_0 0
18322
18323 static const struct asm_opcode insns[] =
18324 {
18325 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
18326 #define THUMB_VARIANT & arm_ext_v4t
18327 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
18328 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
18329 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
18330 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
18331 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
18332 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
18333 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
18334 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
18335 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
18336 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
18337 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
18338 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
18339 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
18340 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
18341 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
18342 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
18343
18344 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18345 for setting PSR flag bits. They are obsolete in V6 and do not
18346 have Thumb equivalents. */
18347 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18348 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18349 CL("tstp", 110f000, 2, (RR, SH), cmp),
18350 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18351 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18352 CL("cmpp", 150f000, 2, (RR, SH), cmp),
18353 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18354 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18355 CL("cmnp", 170f000, 2, (RR, SH), cmp),
18356
18357 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
18358 tC3("movs", 1b00000, _movs, 2, (RR, SH), mov, t_mov_cmp),
18359 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
18360 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
18361
18362 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
18363 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18364 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
18365 OP_RRnpc),
18366 OP_ADDRGLDR),ldst, t_ldst),
18367 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18368
18369 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18370 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18371 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18372 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18373 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18374 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18375
18376 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
18377 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
18378 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
18379 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
18380
18381 /* Pseudo ops. */
18382 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
18383 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
18384 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
18385 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
18386
18387 /* Thumb-compatibility pseudo ops. */
18388 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
18389 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
18390 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
18391 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
18392 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
18393 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
18394 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
18395 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
18396 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
18397 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
18398 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
18399 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
18400
18401 /* These may simplify to neg. */
18402 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
18403 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
18404
18405 #undef THUMB_VARIANT
18406 #define THUMB_VARIANT & arm_ext_v6
18407
18408 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
18409
18410 /* V1 instructions with no Thumb analogue prior to V6T2. */
18411 #undef THUMB_VARIANT
18412 #define THUMB_VARIANT & arm_ext_v6t2
18413
18414 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18415 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18416 CL("teqp", 130f000, 2, (RR, SH), cmp),
18417
18418 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18419 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18420 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
18421 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18422
18423 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18424 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18425
18426 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18427 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18428
18429 /* V1 instructions with no Thumb analogue at all. */
18430 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
18431 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
18432
18433 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
18434 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
18435 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
18436 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
18437 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
18438 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
18439 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
18440 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
18441
18442 #undef ARM_VARIANT
18443 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
18444 #undef THUMB_VARIANT
18445 #define THUMB_VARIANT & arm_ext_v4t
18446
18447 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18448 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18449
18450 #undef THUMB_VARIANT
18451 #define THUMB_VARIANT & arm_ext_v6t2
18452
18453 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18454 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
18455
18456 /* Generic coprocessor instructions. */
18457 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18458 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18459 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18460 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18461 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18462 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18463 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
18464
18465 #undef ARM_VARIANT
18466 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
18467
18468 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18469 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18470
18471 #undef ARM_VARIANT
18472 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
18473 #undef THUMB_VARIANT
18474 #define THUMB_VARIANT & arm_ext_msr
18475
18476 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
18477 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
18478
18479 #undef ARM_VARIANT
18480 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
18481 #undef THUMB_VARIANT
18482 #define THUMB_VARIANT & arm_ext_v6t2
18483
18484 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18485 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18486 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18487 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18488 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18489 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18490 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18491 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18492
18493 #undef ARM_VARIANT
18494 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
18495 #undef THUMB_VARIANT
18496 #define THUMB_VARIANT & arm_ext_v4t
18497
18498 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18499 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18500 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18501 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18502 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18503 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18504
18505 #undef ARM_VARIANT
18506 #define ARM_VARIANT & arm_ext_v4t_5
18507
18508 /* ARM Architecture 4T. */
18509 /* Note: bx (and blx) are required on V5, even if the processor does
18510 not support Thumb. */
18511 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
18512
18513 #undef ARM_VARIANT
18514 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
18515 #undef THUMB_VARIANT
18516 #define THUMB_VARIANT & arm_ext_v5t
18517
18518 /* Note: blx has 2 variants; the .value coded here is for
18519 BLX(2). Only this variant has conditional execution. */
18520 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
18521 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
18522
18523 #undef THUMB_VARIANT
18524 #define THUMB_VARIANT & arm_ext_v6t2
18525
18526 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
18527 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18528 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18529 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18530 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18531 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18532 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18533 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18534
18535 #undef ARM_VARIANT
18536 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
18537 #undef THUMB_VARIANT
18538 #define THUMB_VARIANT & arm_ext_v5exp
18539
18540 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18541 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18542 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18543 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18544
18545 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18546 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18547
18548 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18549 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18550 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18551 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18552
18553 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18554 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18555 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18556 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18557
18558 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18559 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18560
18561 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18562 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18563 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18564 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18565
18566 #undef ARM_VARIANT
18567 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
18568 #undef THUMB_VARIANT
18569 #define THUMB_VARIANT & arm_ext_v6t2
18570
18571 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
18572 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
18573 ldrd, t_ldstd),
18574 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
18575 ADDRGLDRS), ldrd, t_ldstd),
18576
18577 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18578 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18579
18580 #undef ARM_VARIANT
18581 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
18582
18583 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
18584
18585 #undef ARM_VARIANT
18586 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
18587 #undef THUMB_VARIANT
18588 #define THUMB_VARIANT & arm_ext_v6
18589
18590 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
18591 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
18592 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18593 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18594 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18595 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18596 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18597 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18598 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18599 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
18600
18601 #undef THUMB_VARIANT
18602 #define THUMB_VARIANT & arm_ext_v6t2
18603
18604 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
18605 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18606 strex, t_strex),
18607 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18608 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18609
18610 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
18611 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
18612
18613 /* ARM V6 not included in V7M. */
18614 #undef THUMB_VARIANT
18615 #define THUMB_VARIANT & arm_ext_v6_notm
18616 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18617 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18618 UF(rfeib, 9900a00, 1, (RRw), rfe),
18619 UF(rfeda, 8100a00, 1, (RRw), rfe),
18620 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18621 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18622 UF(rfefa, 8100a00, 1, (RRw), rfe),
18623 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18624 UF(rfeed, 9900a00, 1, (RRw), rfe),
18625 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18626 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18627 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18628 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
18629 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
18630 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
18631 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
18632 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
18633 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
18634
18635 /* ARM V6 not included in V7M (eg. integer SIMD). */
18636 #undef THUMB_VARIANT
18637 #define THUMB_VARIANT & arm_ext_v6_dsp
18638 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
18639 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
18640 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
18641 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18642 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18643 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18644 /* Old name for QASX. */
18645 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18646 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18647 /* Old name for QSAX. */
18648 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18649 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18650 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18651 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18652 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18653 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18654 /* Old name for SASX. */
18655 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18656 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18657 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18658 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18659 /* Old name for SHASX. */
18660 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18661 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18662 /* Old name for SHSAX. */
18663 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18664 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18665 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18666 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18667 /* Old name for SSAX. */
18668 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18669 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18670 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18671 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18672 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18673 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18674 /* Old name for UASX. */
18675 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18676 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18677 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18678 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18679 /* Old name for UHASX. */
18680 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18681 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18682 /* Old name for UHSAX. */
18683 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18684 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18685 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18686 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18687 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18688 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18689 /* Old name for UQASX. */
18690 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18691 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18692 /* Old name for UQSAX. */
18693 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18694 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18695 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18696 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18697 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18698 /* Old name for USAX. */
18699 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18700 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18701 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18702 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18703 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18704 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18705 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18706 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18707 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18708 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18709 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18710 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18711 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18712 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18713 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18714 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18715 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18716 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18717 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18718 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18719 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18720 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18721 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18722 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18723 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18724 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18725 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18726 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18727 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18728 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
18729 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
18730 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18731 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18732 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
18733
18734 #undef ARM_VARIANT
18735 #define ARM_VARIANT & arm_ext_v6k
18736 #undef THUMB_VARIANT
18737 #define THUMB_VARIANT & arm_ext_v6k
18738
18739 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
18740 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
18741 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
18742 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
18743
18744 #undef THUMB_VARIANT
18745 #define THUMB_VARIANT & arm_ext_v6_notm
18746 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
18747 ldrexd, t_ldrexd),
18748 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
18749 RRnpcb), strexd, t_strexd),
18750
18751 #undef THUMB_VARIANT
18752 #define THUMB_VARIANT & arm_ext_v6t2
18753 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
18754 rd_rn, rd_rn),
18755 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
18756 rd_rn, rd_rn),
18757 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18758 strex, t_strexbh),
18759 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18760 strex, t_strexbh),
18761 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
18762
18763 #undef ARM_VARIANT
18764 #define ARM_VARIANT & arm_ext_sec
18765 #undef THUMB_VARIANT
18766 #define THUMB_VARIANT & arm_ext_sec
18767
18768 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
18769
18770 #undef ARM_VARIANT
18771 #define ARM_VARIANT & arm_ext_virt
18772 #undef THUMB_VARIANT
18773 #define THUMB_VARIANT & arm_ext_virt
18774
18775 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
18776 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
18777
18778 #undef ARM_VARIANT
18779 #define ARM_VARIANT & arm_ext_v6t2
18780 #undef THUMB_VARIANT
18781 #define THUMB_VARIANT & arm_ext_v6t2
18782
18783 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
18784 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
18785 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
18786 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
18787
18788 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18789 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
18790 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
18791 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
18792
18793 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18794 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18795 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18796 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18797
18798 /* Thumb-only instructions. */
18799 #undef ARM_VARIANT
18800 #define ARM_VARIANT NULL
18801 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
18802 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
18803
18804 /* ARM does not really have an IT instruction, so always allow it.
18805 The opcode is copied from Thumb in order to allow warnings in
18806 -mimplicit-it=[never | arm] modes. */
18807 #undef ARM_VARIANT
18808 #define ARM_VARIANT & arm_ext_v1
18809
18810 TUE("it", bf08, bf08, 1, (COND), it, t_it),
18811 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
18812 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
18813 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
18814 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
18815 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
18816 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
18817 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
18818 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
18819 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
18820 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
18821 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
18822 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
18823 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
18824 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
18825 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
18826 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
18827 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
18828
18829 /* Thumb2 only instructions. */
18830 #undef ARM_VARIANT
18831 #define ARM_VARIANT NULL
18832
18833 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18834 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18835 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
18836 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
18837 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
18838 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
18839
18840 /* Hardware division instructions. */
18841 #undef ARM_VARIANT
18842 #define ARM_VARIANT & arm_ext_adiv
18843 #undef THUMB_VARIANT
18844 #define THUMB_VARIANT & arm_ext_div
18845
18846 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
18847 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
18848
18849 /* ARM V6M/V7 instructions. */
18850 #undef ARM_VARIANT
18851 #define ARM_VARIANT & arm_ext_barrier
18852 #undef THUMB_VARIANT
18853 #define THUMB_VARIANT & arm_ext_barrier
18854
18855 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
18856 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
18857 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
18858
18859 /* ARM V7 instructions. */
18860 #undef ARM_VARIANT
18861 #define ARM_VARIANT & arm_ext_v7
18862 #undef THUMB_VARIANT
18863 #define THUMB_VARIANT & arm_ext_v7
18864
18865 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
18866 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
18867
18868 #undef ARM_VARIANT
18869 #define ARM_VARIANT & arm_ext_mp
18870 #undef THUMB_VARIANT
18871 #define THUMB_VARIANT & arm_ext_mp
18872
18873 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
18874
18875  /* ARMv8 instructions.  */
18876 #undef ARM_VARIANT
18877 #define ARM_VARIANT & arm_ext_v8
18878 #undef THUMB_VARIANT
18879 #define THUMB_VARIANT & arm_ext_v8
18880
18881 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
18882 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
18883 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18884 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
18885 ldrexd, t_ldrexd),
18886 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
18887 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18888 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
18889 stlex, t_stlex),
18890 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
18891 strexd, t_strexd),
18892 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
18893 stlex, t_stlex),
18894 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
18895 stlex, t_stlex),
18896 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18897 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18898 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18899 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18900 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18901 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18902
18903 /* ARMv8 T32 only. */
18904 #undef ARM_VARIANT
18905 #define ARM_VARIANT NULL
18906 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
18907 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
18908 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
18909
18910 /* FP for ARMv8. */
18911 #undef ARM_VARIANT
18912 #define ARM_VARIANT & fpu_vfp_ext_armv8
18913 #undef THUMB_VARIANT
18914 #define THUMB_VARIANT & fpu_vfp_ext_armv8
18915
18916 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
18917 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
18918 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
18919 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
18920 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
18921 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
18922 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
18923 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
18924 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
18925 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
18926 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
18927 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
18928 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
18929 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
18930 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
18931 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
18932 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
18933
18934 /* Crypto v1 extensions. */
18935 #undef ARM_VARIANT
18936 #define ARM_VARIANT & fpu_crypto_ext_armv8
18937 #undef THUMB_VARIANT
18938 #define THUMB_VARIANT & fpu_crypto_ext_armv8
18939
18940 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
18941 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
18942 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
18943 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
18944 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
18945 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
18946 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
18947 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
18948 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
18949 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
18950 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
18951 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
18952 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
18953 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
18954
18955 #undef ARM_VARIANT
18956 #define ARM_VARIANT & crc_ext_armv8
18957 #undef THUMB_VARIANT
18958 #define THUMB_VARIANT & crc_ext_armv8
18959 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
18960 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
18961 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
18962 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
18963 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
18964 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
18965
18966 #undef ARM_VARIANT
18967 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
18968 #undef THUMB_VARIANT
18969 #define THUMB_VARIANT NULL
18970
18971 cCE("wfs", e200110, 1, (RR), rd),
18972 cCE("rfs", e300110, 1, (RR), rd),
18973 cCE("wfc", e400110, 1, (RR), rd),
18974 cCE("rfc", e500110, 1, (RR), rd),
18975
18976 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
18977 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
18978 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
18979 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
18980
18981 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
18982 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
18983 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
18984 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
18985
18986 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
18987 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
18988 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
18989 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
18990 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
18991 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
18992 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
18993 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
18994 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
18995 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
18996 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
18997 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
18998
18999 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
19000 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
19001 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
19002 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
19003 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
19004 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
19005 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
19006 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
19007 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
19008 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
19009 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
19010 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
19011
19012 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
19013 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
19014 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
19015 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
19016 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
19017 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
19018 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
19019 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
19020 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
19021 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
19022 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
19023 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
19024
19025 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
19026 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
19027 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
19028 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
19029 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
19030 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
19031 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
19032 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
19033 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
19034 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
19035 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
19036 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
19037
19038 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
19039 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
19040 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
19041 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
19042 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
19043 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
19044 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
19045 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
19046 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
19047 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
19048 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
19049 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
19050
19051 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
19052 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
19053 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
19054 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
19055 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
19056 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
19057 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
19058 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
19059 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
19060 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
19061 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
19062 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
19063
19064 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
19065 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
19066 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
19067 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
19068 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
19069 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
19070 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
19071 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
19072 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
19073 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
19074 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
19075 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
19076
19077 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
19078 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
19079 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
19080 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
19081 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
19082 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
19083 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
19084 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
19085 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
19086 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
19087 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
19088 cCL("expez", e788160, 2, (RF, RF_IF), rd_rm),
19089
19090 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
19091 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
19092 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
19093 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
19094 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
19095 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
19096 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
19097 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
19098 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
19099 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
19100 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
19101 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
19102
19103 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
19104 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
19105 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
19106 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
19107 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
19108 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
19109 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
19110 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
19111 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
19112 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
19113 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
19114 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
19115
19116 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
19117 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
19118 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
19119 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
19120 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
19121 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
19122 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
19123 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
19124 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
19125 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
19126 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
19127 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
19128
19129 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
19130 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
19131 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
19132 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
19133 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
19134 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
19135 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
19136 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
19137 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
19138 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
19139 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
19140 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
19141
19142 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
19143 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
19144 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
19145 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
19146 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
19147 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
19148 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
19149 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
19150 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
19151 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
19152 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
19153 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
19154
19155 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
19156 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
19157 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
19158 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
19159 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
19160 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
19161 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
19162 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
19163 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
19164 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
19165 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
19166 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
19167
19168 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
19169 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
19170 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
19171 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
19172 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
19173 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
19174 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
19175 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
19176 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
19177 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
19178 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
19179 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
19180
19181 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
19182 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
19183 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
19184 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
19185 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
19186 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
19187 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
19188 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
19189 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
19190 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
19191 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
19192 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
19193
19194 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
19195 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
19196 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
19197 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
19198 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
19199 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19200 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19201 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19202 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
19203 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
19204 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
19205 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
19206
19207 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
19208 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
19209 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
19210 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
19211 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
19212 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19213 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19214 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19215 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
19216 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
19217 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
19218 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
19219
19220 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
19221 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
19222 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
19223 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
19224 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
19225 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19226 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19227 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19228 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
19229 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
19230 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
19231 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
19232
19233 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
19234 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
19235 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
19236 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
19237 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
19238 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19239 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19240 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19241 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
19242 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
19243 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
19244 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
19245
19246 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
19247 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
19248 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
19249 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
19250 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
19251 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19252 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19253 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19254 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
19255 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
19256 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
19257 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
19258
19259 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
19260 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
19261 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
19262 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
19263 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
19264 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19265 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19266 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19267 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
19268 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
19269 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
19270 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
19271
19272 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
19273 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
19274 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
19275 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
19276 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
19277 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19278 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19279 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19280 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
19281 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
19282 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
19283 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
19284
19285 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
19286 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
19287 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
19288 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
19289 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
19290 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19291 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19292 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19293 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
19294 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
19295 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
19296 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
19297
19298 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
19299 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
19300 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
19301 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
19302 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
19303 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19304 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19305 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19306 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
19307 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
19308 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
19309 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
19310
19311 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
19312 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
19313 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
19314 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
19315 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
19316 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19317 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19318 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19319 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
19320 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
19321 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
19322 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
19323
19324 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19325 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19326 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19327 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19328 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19329 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19330 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19331 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19332 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19333 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19334 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19335 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19336
19337 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19338 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19339 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19340 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19341 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19342 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19343 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19344 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19345 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19346 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19347 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19348 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19349
19350 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19351 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19352 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19353 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19354 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19355 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19356 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19357 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19358 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19359 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19360 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19361 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19362
19363 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
19364 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
19365 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
19366 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
19367
19368 cCL("flts", e000110, 2, (RF, RR), rn_rd),
19369 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
19370 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
19371 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
19372 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
19373 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
19374 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
19375 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
19376 cCL("flte", e080110, 2, (RF, RR), rn_rd),
19377 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
19378 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
19379 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
19380
19381 /* The implementation of the FIX instruction is broken on some
19382 assemblers, in that it accepts a precision specifier as well as a
19383 rounding specifier, despite the fact that this is meaningless.
19384 To be more compatible, we accept it as well, though of course it
19385 does not set any bits. */
19386 cCE("fix", e100110, 2, (RR, RF), rd_rm),
19387 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
19388 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
19389 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
19390 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
19391 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
19392 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
19393 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
19394 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
19395 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
19396 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
19397 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
19398 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
19399
19400 /* Instructions that were new with the real FPA, call them V2. */
19401 #undef ARM_VARIANT
19402 #define ARM_VARIANT & fpu_fpa_ext_v2
19403
19404 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19405 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19406 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19407 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19408 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19409 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19410
19411 #undef ARM_VARIANT
19412 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
19413
19414 /* Moves and type conversions. */
19415 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
19416 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
19417 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
19418 cCE("fmstat", ef1fa10, 0, (), noargs),
19419 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
19420 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
19421 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
19422 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
19423 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
19424 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19425 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
19426 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19427 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
19428 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
19429
19430 /* Memory operations. */
19431 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19432 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19433 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19434 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19435 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19436 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19437 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19438 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19439 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19440 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19441 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19442 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19443 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19444 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19445 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19446 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19447 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19448 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19449
19450 /* Monadic operations. */
19451 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
19452 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
19453 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
19454
19455 /* Dyadic operations. */
19456 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19457 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19458 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19459 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19460 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19461 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19462 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19463 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19464 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19465
19466 /* Comparisons. */
19467 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
19468 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
19469 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
19470 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
19471
19472 /* Double precision load/store are still present on single precision
19473 implementations. */
19474 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19475 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19476 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19477 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19478 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19479 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19480 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19481 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19482 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19483 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19484
19485 #undef ARM_VARIANT
19486 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
19487
19488 /* Moves and type conversions. */
19489 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19490 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19491 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19492 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
19493 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
19494 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
19495 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
19496 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19497 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
19498 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19499 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19500 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19501 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19502
19503 /* Monadic operations. */
19504 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19505 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19506 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19507
19508 /* Dyadic operations. */
19509 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19510 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19511 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19512 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19513 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19514 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19515 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19516 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19517 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19518
19519 /* Comparisons. */
19520 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19521 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
19522 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19523 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
19524
19525 #undef ARM_VARIANT
19526 #define ARM_VARIANT & fpu_vfp_ext_v2
19527
19528 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
19529 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
19530 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
19531 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
19532
19533 /* Instructions which may belong to either the Neon or VFP instruction sets.
19534 Individual encoder functions perform additional architecture checks. */
19535 #undef ARM_VARIANT
19536 #define ARM_VARIANT & fpu_vfp_ext_v1xd
19537 #undef THUMB_VARIANT
19538 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
19539
19540 /* These mnemonics are unique to VFP. */
19541 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
19542 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
19543 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19544 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19545 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19546 nCE(vcmp, _vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
19547 nCE(vcmpe, _vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
19548 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
19549 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
19550 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
19551
19552 /* Mnemonics shared by Neon and VFP. */
19553 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
19554 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19555 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19556
19557 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19558 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19559
19560 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19561 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19562
19563 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19564 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19565 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19566 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19567 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19568 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19569 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19570 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19571
19572 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
19573 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
19574 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
19575 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
19576
19577
19578 /* NOTE: All VMOV encoding is special-cased! */
19579 NCE(vmov, 0, 1, (VMOV), neon_mov),
19580 NCE(vmovq, 0, 1, (VMOV), neon_mov),
19581
19582 #undef THUMB_VARIANT
19583 #define THUMB_VARIANT & fpu_neon_ext_v1
19584 #undef ARM_VARIANT
19585 #define ARM_VARIANT & fpu_neon_ext_v1
19586
19587 /* Data processing with three registers of the same length. */
19588 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
19589 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
19590 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
19591 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19592 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19593 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19594 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19595 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19596 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19597 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
19598 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19599 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19600 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19601 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19602 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19603 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19604 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19605 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19606 /* If not immediate, fall back to neon_dyadic_i64_su.
19607 shl_imm should accept I8 I16 I32 I64,
19608 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
19609 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
19610 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
19611 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
19612 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
19613 /* Logic ops, types optional & ignored. */
19614 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19615 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19616 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19617 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19618 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19619 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19620 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19621 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19622 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
19623 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
19624 /* Bitfield ops, untyped. */
19625 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19626 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19627 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19628 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19629 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19630 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19631 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
19632 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19633 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19634 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19635 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19636 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19637 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19638 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
19639 back to neon_dyadic_if_su. */
19640 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19641 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
19642 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19643 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
19644 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19645 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
19646 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19647 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
19648 /* Comparison. Type I8 I16 I32 F32. */
19649 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
19650 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
19651 /* As above, D registers only. */
19652 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
19653 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
19654 /* Int and float variants, signedness unimportant. */
19655 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
19656 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
19657 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
19658 /* Add/sub take types I8 I16 I32 I64 F32. */
19659 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
19660 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
19661 /* vtst takes sizes 8, 16, 32. */
19662 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
19663 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
19664 /* VMUL takes I8 I16 I32 F32 P8. */
19665 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
19666 /* VQD{R}MULH takes S16 S32. */
19667 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19668 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
19669 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19670 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
19671 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19672 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
19673 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19674 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
19675 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19676 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
19677 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19678 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
19679 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
19680 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
19681 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
19682 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
19683
19684 /* Two address, int/float. Types S8 S16 S32 F32. */
19685 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
19686 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
19687
19688 /* Data processing with two registers and a shift amount. */
19689 /* Right shifts, and variants with rounding.
19690 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
19691 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19692 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
19693 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19694 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
19695 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
19696 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
19697 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
19698 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
19699 /* Shift and insert. Sizes accepted 8 16 32 64. */
19700 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
19701 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
19702 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
19703 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
19704 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
19705 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
19706 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
19707 /* Right shift immediate, saturating & narrowing, with rounding variants.
19708 Types accepted S16 S32 S64 U16 U32 U64. */
19709 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19710 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19711 /* As above, unsigned. Types accepted S16 S32 S64. */
19712 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19713 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19714 /* Right shift narrowing. Types accepted I16 I32 I64. */
19715 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19716 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19717 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
19718 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
19719 /* CVT with optional immediate for fixed-point variant. */
19720 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
19721
19722 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
19723 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
19724
19725 /* Data processing, three registers of different lengths. */
19726 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
19727 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
19728 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
19729 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
19730 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
19731 /* If not scalar, fall back to neon_dyadic_long.
19732 Vector types as above, scalar types S16 S32 U16 U32. */
19733 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
19734 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
19735 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
19736 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19737 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19738 /* Dyadic, narrowing insns. Types I16 I32 I64. */
19739 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19740 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19741 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19742 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19743 /* Saturating doubling multiplies. Types S16 S32. */
19744 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19745 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19746 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19747 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
19748 S16 S32 U16 U32. */
19749 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
19750
19751 /* Extract. Size 8. */
19752 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
19753 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
19754
19755 /* Two registers, miscellaneous. */
19756 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
19757 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
19758 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
19759 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
19760 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
19761 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
19762 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
19763 /* Vector replicate. Sizes 8 16 32. */
19764 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
19765 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
19766 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
19767 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
19768 /* VMOVN. Types I16 I32 I64. */
19769 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
19770 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
19771 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
19772 /* VQMOVUN. Types S16 S32 S64. */
19773 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
19774 /* VZIP / VUZP. Sizes 8 16 32. */
19775 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
19776 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
19777 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
19778 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
19779 /* VQABS / VQNEG. Types S8 S16 S32. */
19780 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
19781 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
19782 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
19783 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
19784 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
19785 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
19786 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
19787 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
19788 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
19789 /* Reciprocal estimates. Types U32 F32. */
19790 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
19791 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
19792 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
19793 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
19794 /* VCLS. Types S8 S16 S32. */
19795 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
19796 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
19797 /* VCLZ. Types I8 I16 I32. */
19798 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
19799 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
19800 /* VCNT. Size 8. */
19801 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
19802 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
19803 /* Two address, untyped. */
19804 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
19805 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
19806 /* VTRN. Sizes 8 16 32. */
19807 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
19808 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
19809
19810 /* Table lookup. Size 8. */
19811 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19812 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19813
19814 #undef THUMB_VARIANT
19815 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
19816 #undef ARM_VARIANT
19817 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
19818
19819 /* Neon element/structure load/store. */
19820 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
19821 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
19822 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
19823 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
19824 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
19825 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
19826 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
19827 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
19828
19829 #undef THUMB_VARIANT
19830 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
19831 #undef ARM_VARIANT
19832 #define ARM_VARIANT & fpu_vfp_ext_v3xd
19833 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
19834 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19835 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19836 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19837 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19838 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19839 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19840 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19841 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19842
19843 #undef THUMB_VARIANT
19844 #define THUMB_VARIANT & fpu_vfp_ext_v3
19845 #undef ARM_VARIANT
19846 #define ARM_VARIANT & fpu_vfp_ext_v3
19847
19848 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
19849 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19850 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19851 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19852 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19853 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19854 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19855 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19856 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19857
19858 #undef ARM_VARIANT
19859 #define ARM_VARIANT & fpu_vfp_ext_fma
19860 #undef THUMB_VARIANT
19861 #define THUMB_VARIANT & fpu_vfp_ext_fma
19862 /* Mnemonics shared by Neon and VFP. These are included in the
19863 VFP FMA variant; NEON and VFP FMA always includes the NEON
19864 FMA instructions. */
19865 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19866 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19867 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
19868 the v form should always be used. */
19869 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19870 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19871 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19872 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19873 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19874 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19875
19876 #undef THUMB_VARIANT
19877 #undef ARM_VARIANT
19878 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
19879
19880 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19881 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19882 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19883 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19884 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19885 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19886 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
19887 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
19888
19889 #undef ARM_VARIANT
19890 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
19891
19892 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
19893 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
19894 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
19895 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
19896 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
19897 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
19898 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
19899 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
19900 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
19901 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19902 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19903 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19904 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19905 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19906 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19907 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19908 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19909 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19910 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
19911 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
19912 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19913 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19914 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19915 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19916 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19917 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19918 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
19919 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
19920 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
19921 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
19922 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
19923 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
19924 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
19925 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
19926 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
19927 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
19928 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
19929 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19930 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19931 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19932 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19933 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19934 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19935 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19936 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19937 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19938 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
19939 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19940 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19941 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19942 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19943 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19944 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19945 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19946 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19947 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19948 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19949 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19950 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19951 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19952 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19953 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19954 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19955 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19956 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19957 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19958 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
19959 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
19960 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
19961 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
19962 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19963 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19964 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19965 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19966 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19967 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19968 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19969 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19970 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19971 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19972 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19973 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19974 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19975 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19976 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19977 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19978 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19979 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19980 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
19981 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19982 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19983 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19984 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19985 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19986 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19987 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19988 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19989 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19990 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19991 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19992 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19993 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19994 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19995 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19996 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19997 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19998 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19999 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20000 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20001 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20002 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
20003 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20004 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20005 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20006 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20007 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20008 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20009 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20010 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20011 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20012 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20013 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20014 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20015 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20016 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20017 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20018 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20019 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20020 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20021 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20022 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20023 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20024 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20025 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20026 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20027 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20028 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20029 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20030 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20031 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20032 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20033 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20034 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
20035 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
20036 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
20037 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
20038 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
20039 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
20040 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20041 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20042 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20043 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
20044 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
20045 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
20046 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
20047 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
20048 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
20049 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20050 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20051 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20052 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20053 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
20054
20055 #undef ARM_VARIANT
20056 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20057
20058 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
20059 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
20060 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
20061 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
20062 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
20063 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
20064 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20065 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20066 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20067 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20068 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20069 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20070 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20071 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20072 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20073 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20074 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20075 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20076 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20077 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20078 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
20079 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20080 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20081 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20082 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20083 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20084 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20085 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20086 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20087 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20088 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20089 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20090 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20091 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20092 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20093 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20094 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20095 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20096 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20097 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20098 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20099 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20100 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20101 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20102 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20103 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20104 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20105 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20106 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20107 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20108 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20109 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20110 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20111 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20112 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20113 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20114 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20115
20116 #undef ARM_VARIANT
20117 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20118
20119 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20120 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20121 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20122 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20123 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20124 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20125 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20126 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20127 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
20128 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
20129 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
20130 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
20131 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
20132 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
20133 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
20134 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
20135 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
20136 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
20137 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
20138 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
20139 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
20140 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
20141 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
20142 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
20143 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
20144 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
20145 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
20146 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
20147 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
20148 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
20149 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
20150 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
20151 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
20152 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
20153 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
20154 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
20155 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
20156 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
20157 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
20158 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
20159 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
20160 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
20161 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
20162 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
20163 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
20164 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
20165 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
20166 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
20167 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
20168 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
20169 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
20170 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
20171 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
20172 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
20173 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
20174 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
20175 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
20176 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
20177 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
20178 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
20179 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
20180 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
20181 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
20182 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
20183 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20184 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20185 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20186 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20187 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20188 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20189 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20190 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20191 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20192 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20193 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20194 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20195 };
20196 #undef ARM_VARIANT
20197 #undef THUMB_VARIANT
20198 #undef TCE
20199 #undef TUE
20200 #undef TUF
20201 #undef TCC
20202 #undef cCE
20203 #undef cCL
20204 #undef C3E
20205 #undef CE
20206 #undef CM
20207 #undef UE
20208 #undef UF
20209 #undef UT
20210 #undef NUF
20211 #undef nUF
20212 #undef NCE
20213 #undef nCE
20214 #undef OPS0
20215 #undef OPS1
20216 #undef OPS2
20217 #undef OPS3
20218 #undef OPS4
20219 #undef OPS5
20220 #undef OPS6
20221 #undef do_0
20222 \f
20223 /* MD interface: bits in the object file. */
20224
20225 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20226 for use in the a.out file, and stores them in the array pointed to by buf.
20227 This knows about the endian-ness of the target machine and does
20228 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
20229 2 (short) and 4 (long) Floating numbers are put out as a series of
20230 LITTLENUMS (shorts, here at least). */
20231
20232 void
20233 md_number_to_chars (char * buf, valueT val, int n)
20234 {
20235 if (target_big_endian)
20236 number_to_chars_bigendian (buf, val, n);
20237 else
20238 number_to_chars_littleendian (buf, val, n);
20239 }
20240
20241 static valueT
20242 md_chars_to_number (char * buf, int n)
20243 {
20244 valueT result = 0;
20245 unsigned char * where = (unsigned char *) buf;
20246
20247 if (target_big_endian)
20248 {
20249 while (n--)
20250 {
20251 result <<= 8;
20252 result |= (*where++ & 255);
20253 }
20254 }
20255 else
20256 {
20257 while (n--)
20258 {
20259 result <<= 8;
20260 result |= (where[n] & 255);
20261 }
20262 }
20263
20264 return result;
20265 }
20266
20267 /* MD interface: Sections. */
20268
/* Calculate the maximum variable size (i.e., excluding fr_fix)
   that an rs_machine_dependent frag may reach.  */

unsigned int
arm_frag_max_var (fragS *fragp)
{
  /* We only use rs_machine_dependent for variable-size Thumb instructions,
     which are either THUMB_SIZE (2) or INSN_SIZE (4).

     Note that we generate relaxable instructions even for cases that don't
     really need it, like an immediate that's a trivial constant.  So we're
     overestimating the instruction size for some of those cases.  Rather
     than putting more intelligence here, it would probably be better to
     avoid generating a relaxation frag in the first place when it can be
     determined up front that a short instruction will suffice.  */

  gas_assert (fragp->fr_type == rs_machine_dependent);
  /* INSN_SIZE (4 bytes) is the widest any relaxable Thumb insn can grow.  */
  return INSN_SIZE;
}
20288
/* Estimate the size of a frag before relaxing.  Assume everything fits in
   2 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp,
			       segT segtype ATTRIBUTE_UNUSED)
{
  /* Start optimistic: assume the narrow (2-byte) Thumb encoding will do.
     arm_relax_frag will widen fr_var to 4 if relaxation proves otherwise.  */
  fragp->fr_var = 2;
  return 2;
}
20299
/* Convert a machine dependent frag.  Replaces the relaxable Thumb
   instruction at the start of FRAGP's variable part with its final
   2- or 4-byte encoding (fr_var tells us which was chosen) and emits
   the fixup that will later fill in the immediate/offset field.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  buf = fragp->fr_literal + fragp->fr_fix;

  /* The narrow (16-bit) form was written when the frag was created;
     fetch it back so its register fields can be copied into the wide
     encoding if that was selected.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    /* Loads and stores with an immediate offset.  */
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Top nibble 4 or 9 marks the SP/PC-relative narrow forms,
	     whose register field lives in bits 8-10.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      /* Copy Rd (bits 0-2) and Rn (bits 3-5) into the wide form.  */
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the PC-relative literal load form is actually pc-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* The narrow form works from the aligned PC; compensate here.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs carry Rd in bits 8-10 already; cmp/cmn need the
	     register shifted up by a further 8 bits in the wide form.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Carry the condition code into the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 distinguishes the flag-setting variants, which need a
	     different immediate relocation.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  /* The variable part is now fixed.  */
  fragp->fr_fix += fragp->fr_var;
}
20468
20469 /* Return the size of a relaxable immediate operand instruction.
20470 SHIFT and SIZE specify the form of the allowable immediate. */
20471 static int
20472 relax_immediate (fragS *fragp, int size, int shift)
20473 {
20474 offsetT offset;
20475 offsetT mask;
20476 offsetT low;
20477
20478 /* ??? Should be able to do better than this. */
20479 if (fragp->fr_symbol)
20480 return 4;
20481
20482 low = (1 << shift) - 1;
20483 mask = (1 << (shift + size)) - (1 << shift);
20484 offset = fragp->fr_offset;
20485 /* Force misaligned offsets to 32-bit variant. */
20486 if (offset & low)
20487 return 4;
20488 if (offset & ~mask)
20489 return 4;
20490 return 2;
20491 }
20492
/* Get the address of a symbol during relaxation.  Returns the value of
   FRAGP's symbol plus fr_offset, compensated by STRETCH when the symbol's
   frag has not yet been processed on this relaxation pass.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the accumulated stretch towards zero to the
		 alignment boundary this frag enforces.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Once an alignment frag absorbs all remaining stretch the
		 symbol cannot have moved.  */
	      if (stretch == 0)
		break;
	    }
	}
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
20542
/* Return the size (2 or 4 bytes) of a relaxable adr pseudo-instruction
   or PC-relative load.  */
static int
relax_adr (fragS *fragp, asection *sec, long stretch)
{
  addressT addr;
  offsetT val;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (fragp->fr_symbol == NULL
      || !S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix;
  /* The base is the instruction address plus 4 with the low two bits
     cleared (the word-aligned PC).  */
  addr = (addr + 4) & ~3;
  /* Force misaligned targets to 32-bit variant.  */
  if (val & 3)
    return 4;
  val -= addr;
  /* The narrow encoding holds an unsigned word offset of 0..1020.  */
  if (val < 0 || val > 1020)
    return 4;
  return 2;
}
20569
20570 /* Return the size of a relaxable add/sub immediate instruction. */
20571 static int
20572 relax_addsub (fragS *fragp, asection *sec)
20573 {
20574 char *buf;
20575 int op;
20576
20577 buf = fragp->fr_literal + fragp->fr_fix;
20578 op = bfd_get_16(sec->owner, buf);
20579 if ((op & 0xf) == ((op >> 4) & 0xf))
20580 return relax_immediate (fragp, 8, 0);
20581 else
20582 return relax_immediate (fragp, 3, 0);
20583 }
20584
20585 /* Return TRUE iff the definition of symbol S could be pre-empted
20586 (overridden) at link or load time. */
20587 static bfd_boolean
20588 symbol_preemptible (symbolS *s)
20589 {
20590 /* Weak symbols can always be pre-empted. */
20591 if (S_IS_WEAK (s))
20592 return TRUE;
20593
20594 /* Non-global symbols cannot be pre-empted. */
20595 if (! S_IS_EXTERNAL (s))
20596 return FALSE;
20597
20598 #ifdef OBJ_ELF
20599 /* In ELF, a global symbol can be marked protected, or private. In that
20600 case it can't be pre-empted (other definitions in the same link unit
20601 would violate the ODR). */
20602 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
20603 return FALSE;
20604 #endif
20605
20606 /* Other global symbols might be pre-empted. */
20607 return TRUE;
20608 }
20609
/* Return the size (2 or 4 bytes) of a relaxable branch instruction.
   BITS is the size of the offset field in the narrow instruction.  */

static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

#ifdef OBJ_ELF
  /* A branch to a function in ARM state will require interworking.  */
  if (S_IS_DEFINED (fragp->fr_symbol)
      && ARM_IS_FUNC (fragp->fr_symbol))
    return 4;
#endif

  /* A pre-emptible symbol may be resolved by the linker to a different
     definition, so its distance cannot be trusted at assembly time.  */
  if (symbol_preemptible (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  /* Thumb branches are relative to the instruction address plus 4.  */
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2 */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;
  return 2;
}
20646
20647
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* Dispatch on the mnemonic recorded in fr_subtype; each helper decides
     whether this insn needs the narrow (2) or wide (4) encoding, given
     the immediate field widths of each narrow form.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit immediate, scaled by 4.  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      /* 5-bit immediate, scaled by 4.  */
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* 5-bit immediate, scaled by 2.  */
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* 5-bit immediate, unscaled.  */
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
20726
/* Round up a section size to the appropriate boundary.  For ELF and COFF
   targets this is the identity; only a.out needs the extra alignment.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED,
		  valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
	 this, BFD will align it for us, but it will not write out the
	 final bytes of the section.  This may be a bug in BFD, but it is
	 easier to fix it here since that is how the other a.out targets
	 work.  */
      int align;

      align = bfd_get_section_alignment (stdoutput, segment);
      size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
    }
#endif

  return size;
}
20750
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: pad with the appropriate (ARM or Thumb,
   little- or big-endian) no-op sequence, zero-filling any leading bytes
   needed to reach no-op alignment.  */

void
arm_handle_align (fragS * fragP)
{
  static char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const char * noop;
  const char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  /* Choose the no-op pattern matching the frag's recorded ISA mode.  */
  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	{
	  /* Thumb-2: pad mostly with wide no-ops, with at most one
	     narrow no-op to fix up a 2-byte remainder.  */
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Zero-fill any leading bytes needed to reach no-op alignment; these
     are data, so emit a mapping symbol for them on ELF.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
20867
20868 /* Called from md_do_align. Used to create an alignment
20869 frag in a code section. */
20870
20871 void
20872 arm_frag_align_code (int n, int max)
20873 {
20874 char * p;
20875
20876 /* We assume that there will never be a requirement
20877 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
20878 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
20879 {
20880 char err_msg[128];
20881
20882 sprintf (err_msg,
20883 _("alignments greater than %d bytes not supported in .text sections."),
20884 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
20885 as_fatal ("%s", err_msg);
20886 }
20887
20888 p = frag_var (rs_align_code,
20889 MAX_MEM_FOR_RS_ALIGN_CODE,
20890 1,
20891 (relax_substateT) max,
20892 (symbolS *) NULL,
20893 (offsetT) n,
20894 (char *) NULL);
20895 *p = 0;
20896 }
20897
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
20911
#else /* OBJ_ELF is defined.  */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    {
      fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

      /* Record a mapping symbol for alignment frags.  We will delete this
	 later if the alignment ends up empty.  */
      switch (fragP->fr_type)
	{
	case rs_align:
	case rs_align_test:
	case rs_fill:
	  /* The fill bytes of these frags are data, not code.  */
	  mapping_state_2 (MAP_DATA, max_chars);
	  break;
	case rs_align_code:
	  /* Code alignment is padded with no-ops in the current ISA.  */
	  mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
	  break;
	default:
	  break;
	}
    }
}
20939
/* When we change sections we need to issue a new mapping symbol.
   NOTE(review): the mapping-symbol emission itself appears to be handled
   elsewhere; the only work visible here is the unwind-index fix-up.  */

void
arm_elf_change_section (void)
{
  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;
}
20950
20951 int
20952 arm_elf_section_type (const char * str, size_t len)
20953 {
20954 if (len == 5 && strncmp (str, "exidx", 5) == 0)
20955 return SHT_ARM_EXIDX;
20956
20957 return -1;
20958 }
20959 \f
20960 /* Code to deal with unwinding tables. */
20961
20962 static void add_unwind_adjustsp (offsetT);
20963
20964 /* Generate any deferred unwind frame offset. */
20965
20966 static void
20967 flush_pending_unwind (void)
20968 {
20969 offsetT offset;
20970
20971 offset = unwind.pending_offset;
20972 unwind.pending_offset = 0;
20973 if (offset != 0)
20974 add_unwind_adjustsp (offset);
20975 }
20976
20977 /* Add an opcode to this list for this function. Two-byte opcodes should
20978 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
20979 order. */
20980
20981 static void
20982 add_unwind_opcode (valueT op, int length)
20983 {
20984 /* Add any deferred stack adjustment. */
20985 if (unwind.pending_offset)
20986 flush_pending_unwind ();
20987
20988 unwind.sp_restored = 0;
20989
20990 if (unwind.opcode_count + length > unwind.opcode_alloc)
20991 {
20992 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
20993 if (unwind.opcodes)
20994 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
20995 unwind.opcode_alloc);
20996 else
20997 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
20998 }
20999 while (length > 0)
21000 {
21001 length--;
21002 unwind.opcodes[unwind.opcode_count] = op & 0xff;
21003 op >>= 8;
21004 unwind.opcode_count++;
21005 }
21006 }
21007
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes
   (positive means "vsp = vsp + offset").  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      /* A zero uleb128 still needs one byte emitted.  */
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  /* Continuation bit on every byte except the last.  */
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  The uleb128 bytes go in first (reversed), then the
	 0xb2 opcode byte, since the list itself is stored in reverse.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit 0x7f ("vsp -= 0x100") chunks until the
	 remainder fits in one short 0x4x opcode.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
21069
21070 /* Finish the list of unwind opcodes for this function. */
21071 static void
21072 finish_unwind_opcodes (void)
21073 {
21074 valueT op;
21075
21076 if (unwind.fp_used)
21077 {
21078 /* Adjust sp as necessary. */
21079 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
21080 flush_pending_unwind ();
21081
21082 /* After restoring sp from the frame pointer. */
21083 op = 0x90 | unwind.fp_reg;
21084 add_unwind_opcode (op, 1);
21085 }
21086 else
21087 flush_pending_unwind ();
21088 }
21089
21090
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry.  Switches the current section to the unwind (or unwind index)
   section corresponding to TEXT_SEG, creating it if necessary.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  size_t prefix_len;
  size_t text_len;
  char * sec_name;
  size_t sec_name_len;
  int type;
  int flags;
  int linkonce;

  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* The unwind section name is the prefix plus the text section's name,
     with plain ".text" mapping to the bare prefix.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  prefix_len = strlen (prefix);
  text_len = strlen (text_name);
  sec_name_len = prefix_len + text_len;
  sec_name = (char *) xmalloc (sec_name_len + 1);
  memcpy (sec_name, prefix, prefix_len);
  memcpy (sec_name + prefix_len, text_name, text_len);
  sec_name[prefix_len + text_len] = '\0';

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
21166
21167
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.	*/
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.	 */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  /* Routine 0 can describe at most 3 opcodes inline.  */
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      /* Pack the (reversed) opcode list MSB-first into the word.  */
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.	 */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Convert the byte count to a word count, capped at 255 extra words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.	*/
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.	*/
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  /* Current word is full; flush it and start the next.  */
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.	*/
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
21336
21337
/* Initialize the DWARF-2 unwind information for this procedure:
   the CFA starts at SP with zero offset.  */

void
tc_arm_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
21345 #endif /* OBJ_ELF */
21346
21347 /* Convert REGNAME to a DWARF-2 register number. */
21348
21349 int
21350 tc_arm_regname_to_dw2regnum (char *regname)
21351 {
21352 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
21353 if (reg != FAIL)
21354 return reg;
21355
21356 /* PR 16694: Allow VFP registers as well. */
21357 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
21358 if (reg != FAIL)
21359 return 64 + reg;
21360
21361 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
21362 if (reg != FAIL)
21363 return reg + 256;
21364
21365 return -1;
21366 }
21367
21368 #ifdef TE_PE
21369 void
21370 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
21371 {
21372 expressionS exp;
21373
21374 exp.X_op = O_secrel;
21375 exp.X_add_symbol = symbol;
21376 exp.X_add_number = 0;
21377 emit_expr (&exp, size);
21378 }
21379 #endif
21380
21381 /* MD interface: Symbol and relocation handling. */
21382
21383 /* Return the address within the segment that a PC-relative fixup is
21384 relative to. For ARM, PC-relative fixups applied to instructions
21385 are generally relative to the location of the fixup plus 8 bytes.
21386 Thumb branches are offset by 4, and Thumb loads relative to PC
21387 require special handling. */
21388
21389 long
21390 md_pcrel_from_section (fixS * fixP, segT seg)
21391 {
21392 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
21393
21394 /* If this is pc-relative and we are going to emit a relocation
21395 then we just want to put out any pipeline compensation that the linker
21396 will need. Otherwise we want to use the calculated base.
21397 For WinCE we skip the bias for externals as well, since this
21398 is how the MS ARM-CE assembler behaves and we want to be compatible. */
21399 if (fixP->fx_pcrel
21400 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
21401 || (arm_force_relocation (fixP)
21402 #ifdef TE_WINCE
21403 && !S_IS_EXTERNAL (fixP->fx_addsy)
21404 #endif
21405 )))
21406 base = 0;
21407
21408
21409 switch (fixP->fx_r_type)
21410 {
21411 /* PC relative addressing on the Thumb is slightly odd as the
21412 bottom two bits of the PC are forced to zero for the
21413 calculation. This happens *after* application of the
21414 pipeline offset. However, Thumb adrl already adjusts for
21415 this, so we need not do it again. */
21416 case BFD_RELOC_ARM_THUMB_ADD:
21417 return base & ~3;
21418
21419 case BFD_RELOC_ARM_THUMB_OFFSET:
21420 case BFD_RELOC_ARM_T32_OFFSET_IMM:
21421 case BFD_RELOC_ARM_T32_ADD_PC12:
21422 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
21423 return (base + 4) & ~3;
21424
21425 /* Thumb branches are simply offset by +4. */
21426 case BFD_RELOC_THUMB_PCREL_BRANCH7:
21427 case BFD_RELOC_THUMB_PCREL_BRANCH9:
21428 case BFD_RELOC_THUMB_PCREL_BRANCH12:
21429 case BFD_RELOC_THUMB_PCREL_BRANCH20:
21430 case BFD_RELOC_THUMB_PCREL_BRANCH25:
21431 return base + 4;
21432
21433 case BFD_RELOC_THUMB_PCREL_BRANCH23:
21434 if (fixP->fx_addsy
21435 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21436 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21437 && ARM_IS_FUNC (fixP->fx_addsy)
21438 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21439 base = fixP->fx_where + fixP->fx_frag->fr_address;
21440 return base + 4;
21441
21442 /* BLX is like branches above, but forces the low two bits of PC to
21443 zero. */
21444 case BFD_RELOC_THUMB_PCREL_BLX:
21445 if (fixP->fx_addsy
21446 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21447 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21448 && THUMB_IS_FUNC (fixP->fx_addsy)
21449 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21450 base = fixP->fx_where + fixP->fx_frag->fr_address;
21451 return (base + 4) & ~3;
21452
21453 /* ARM mode branches are offset by +8. However, the Windows CE
21454 loader expects the relocation not to take this into account. */
21455 case BFD_RELOC_ARM_PCREL_BLX:
21456 if (fixP->fx_addsy
21457 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21458 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21459 && ARM_IS_FUNC (fixP->fx_addsy)
21460 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21461 base = fixP->fx_where + fixP->fx_frag->fr_address;
21462 return base + 8;
21463
21464 case BFD_RELOC_ARM_PCREL_CALL:
21465 if (fixP->fx_addsy
21466 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21467 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21468 && THUMB_IS_FUNC (fixP->fx_addsy)
21469 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21470 base = fixP->fx_where + fixP->fx_frag->fr_address;
21471 return base + 8;
21472
21473 case BFD_RELOC_ARM_PCREL_BRANCH:
21474 case BFD_RELOC_ARM_PCREL_JUMP:
21475 case BFD_RELOC_ARM_PLT32:
21476 #ifdef TE_WINCE
21477 /* When handling fixups immediately, because we have already
21478 discovered the value of a symbol, or the address of the frag involved
21479 we must account for the offset by +8, as the OS loader will never see the reloc.
21480 see fixup_segment() in write.c
21481 The S_IS_EXTERNAL test handles the case of global symbols.
21482 Those need the calculated base, not just the pipe compensation the linker will need. */
21483 if (fixP->fx_pcrel
21484 && fixP->fx_addsy != NULL
21485 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21486 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
21487 return base + 8;
21488 return base;
21489 #else
21490 return base + 8;
21491 #endif
21492
21493
21494 /* ARM mode loads relative to PC are also offset by +8. Unlike
21495 branches, the Windows CE loader *does* expect the relocation
21496 to take this into account. */
21497 case BFD_RELOC_ARM_OFFSET_IMM:
21498 case BFD_RELOC_ARM_OFFSET_IMM8:
21499 case BFD_RELOC_ARM_HWLITERAL:
21500 case BFD_RELOC_ARM_LITERAL:
21501 case BFD_RELOC_ARM_CP_OFF_IMM:
21502 return base + 8;
21503
21504
21505 /* Other PC-relative relocations are un-offset. */
21506 default:
21507 return base;
21508 }
21509 }
21510
21511 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
21512 Otherwise we have no need to default values of symbols. */
21513
21514 symbolS *
21515 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
21516 {
21517 #ifdef OBJ_ELF
21518 if (name[0] == '_' && name[1] == 'G'
21519 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
21520 {
21521 if (!GOT_symbol)
21522 {
21523 if (symbol_find (name))
21524 as_bad (_("GOT already in the symbol table"));
21525
21526 GOT_symbol = symbol_new (name, undefined_section,
21527 (valueT) 0, & zero_address_frag);
21528 }
21529
21530 return GOT_symbol;
21531 }
21532 #endif
21533
21534 return NULL;
21535 }
21536
21537 /* Subroutine of md_apply_fix. Check to see if an immediate can be
21538 computed as two separate immediate values, added together. We
21539 already know that this value cannot be computed by just one ARM
21540 instruction. */
21541
21542 static unsigned int
21543 validate_immediate_twopart (unsigned int val,
21544 unsigned int * highpart)
21545 {
21546 unsigned int a;
21547 unsigned int i;
21548
21549 for (i = 0; i < 32; i += 2)
21550 if (((a = rotate_left (val, i)) & 0xff) != 0)
21551 {
21552 if (a & 0xff00)
21553 {
21554 if (a & ~ 0xffff)
21555 continue;
21556 * highpart = (a >> 8) | ((i + 24) << 7);
21557 }
21558 else if (a & 0xff0000)
21559 {
21560 if (a & 0xff000000)
21561 continue;
21562 * highpart = (a >> 16) | ((i + 16) << 7);
21563 }
21564 else
21565 {
21566 gas_assert (a & 0xff000000);
21567 * highpart = (a >> 24) | ((i + 8) << 7);
21568 }
21569
21570 return (a & 0xff) | (i << 7);
21571 }
21572
21573 return FAIL;
21574 }
21575
21576 static int
21577 validate_offset_imm (unsigned int val, int hwse)
21578 {
21579 if ((hwse && val > 255) || val > 4095)
21580 return FAIL;
21581 return val;
21582 }
21583
21584 /* Subroutine of md_apply_fix. Do those data_ops which can take a
21585 negative immediate constant by altering the instruction. A bit of
21586 a hack really.
21587 MOV <-> MVN
21588 AND <-> BIC
21589 ADC <-> SBC
21590 by inverting the second operand, and
21591 ADD <-> SUB
21592 CMP <-> CMN
21593 by negating the second operand. */
21594
21595 static int
21596 negate_data_op (unsigned long * instruction,
21597 unsigned long value)
21598 {
21599 int op, new_inst;
21600 unsigned long negated, inverted;
21601
21602 negated = encode_arm_immediate (-value);
21603 inverted = encode_arm_immediate (~value);
21604
21605 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
21606 switch (op)
21607 {
21608 /* First negates. */
21609 case OPCODE_SUB: /* ADD <-> SUB */
21610 new_inst = OPCODE_ADD;
21611 value = negated;
21612 break;
21613
21614 case OPCODE_ADD:
21615 new_inst = OPCODE_SUB;
21616 value = negated;
21617 break;
21618
21619 case OPCODE_CMP: /* CMP <-> CMN */
21620 new_inst = OPCODE_CMN;
21621 value = negated;
21622 break;
21623
21624 case OPCODE_CMN:
21625 new_inst = OPCODE_CMP;
21626 value = negated;
21627 break;
21628
21629 /* Now Inverted ops. */
21630 case OPCODE_MOV: /* MOV <-> MVN */
21631 new_inst = OPCODE_MVN;
21632 value = inverted;
21633 break;
21634
21635 case OPCODE_MVN:
21636 new_inst = OPCODE_MOV;
21637 value = inverted;
21638 break;
21639
21640 case OPCODE_AND: /* AND <-> BIC */
21641 new_inst = OPCODE_BIC;
21642 value = inverted;
21643 break;
21644
21645 case OPCODE_BIC:
21646 new_inst = OPCODE_AND;
21647 value = inverted;
21648 break;
21649
21650 case OPCODE_ADC: /* ADC <-> SBC */
21651 new_inst = OPCODE_SBC;
21652 value = inverted;
21653 break;
21654
21655 case OPCODE_SBC:
21656 new_inst = OPCODE_ADC;
21657 value = inverted;
21658 break;
21659
21660 /* We cannot do anything. */
21661 default:
21662 return FAIL;
21663 }
21664
21665 if (value == (unsigned) FAIL)
21666 return FAIL;
21667
21668 *instruction &= OPCODE_MASK;
21669 *instruction |= new_inst << DATA_OP_SHIFT;
21670 return value;
21671 }
21672
21673 /* Like negate_data_op, but for Thumb-2. */
21674
21675 static unsigned int
21676 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
21677 {
21678 int op, new_inst;
21679 int rd;
21680 unsigned int negated, inverted;
21681
21682 negated = encode_thumb32_immediate (-value);
21683 inverted = encode_thumb32_immediate (~value);
21684
21685 rd = (*instruction >> 8) & 0xf;
21686 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
21687 switch (op)
21688 {
21689 /* ADD <-> SUB. Includes CMP <-> CMN. */
21690 case T2_OPCODE_SUB:
21691 new_inst = T2_OPCODE_ADD;
21692 value = negated;
21693 break;
21694
21695 case T2_OPCODE_ADD:
21696 new_inst = T2_OPCODE_SUB;
21697 value = negated;
21698 break;
21699
21700 /* ORR <-> ORN. Includes MOV <-> MVN. */
21701 case T2_OPCODE_ORR:
21702 new_inst = T2_OPCODE_ORN;
21703 value = inverted;
21704 break;
21705
21706 case T2_OPCODE_ORN:
21707 new_inst = T2_OPCODE_ORR;
21708 value = inverted;
21709 break;
21710
21711 /* AND <-> BIC. TST has no inverted equivalent. */
21712 case T2_OPCODE_AND:
21713 new_inst = T2_OPCODE_BIC;
21714 if (rd == 15)
21715 value = FAIL;
21716 else
21717 value = inverted;
21718 break;
21719
21720 case T2_OPCODE_BIC:
21721 new_inst = T2_OPCODE_AND;
21722 value = inverted;
21723 break;
21724
21725 /* ADC <-> SBC */
21726 case T2_OPCODE_ADC:
21727 new_inst = T2_OPCODE_SBC;
21728 value = inverted;
21729 break;
21730
21731 case T2_OPCODE_SBC:
21732 new_inst = T2_OPCODE_ADC;
21733 value = inverted;
21734 break;
21735
21736 /* We cannot do anything. */
21737 default:
21738 return FAIL;
21739 }
21740
21741 if (value == (unsigned int)FAIL)
21742 return FAIL;
21743
21744 *instruction &= T2_OPCODE_MASK;
21745 *instruction |= new_inst << T2_DATA_OP_SHIFT;
21746 return value;
21747 }
21748
21749 /* Read a 32-bit thumb instruction from buf. */
21750 static unsigned long
21751 get_thumb32_insn (char * buf)
21752 {
21753 unsigned long insn;
21754 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
21755 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21756
21757 return insn;
21758 }
21759
21760
21761 /* We usually want to set the low bit on the address of thumb function
21762 symbols. In particular .word foo - . should have the low bit set.
21763 Generic code tries to fold the difference of two symbols to
21764 a constant. Prevent this and force a relocation when the first symbols
21765 is a thumb function. */
21766
21767 bfd_boolean
21768 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
21769 {
21770 if (op == O_subtract
21771 && l->X_op == O_symbol
21772 && r->X_op == O_symbol
21773 && THUMB_IS_FUNC (l->X_add_symbol))
21774 {
21775 l->X_op = O_subtract;
21776 l->X_op_symbol = r->X_add_symbol;
21777 l->X_add_number -= r->X_add_number;
21778 return TRUE;
21779 }
21780
21781 /* Process as normal. */
21782 return FALSE;
21783 }
21784
21785 /* Encode Thumb2 unconditional branches and calls. The encoding
21786 for the 2 are identical for the immediate values. */
21787
21788 static void
21789 encode_thumb2_b_bl_offset (char * buf, offsetT value)
21790 {
21791 #define T2I1I2MASK ((1 << 13) | (1 << 11))
21792 offsetT newval;
21793 offsetT newval2;
21794 addressT S, I1, I2, lo, hi;
21795
21796 S = (value >> 24) & 0x01;
21797 I1 = (value >> 23) & 0x01;
21798 I2 = (value >> 22) & 0x01;
21799 hi = (value >> 12) & 0x3ff;
21800 lo = (value >> 1) & 0x7ff;
21801 newval = md_chars_to_number (buf, THUMB_SIZE);
21802 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21803 newval |= (S << 10) | hi;
21804 newval2 &= ~T2I1I2MASK;
21805 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
21806 md_number_to_chars (buf, newval, THUMB_SIZE);
21807 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
21808 }
21809
21810 void
21811 md_apply_fix (fixS * fixP,
21812 valueT * valP,
21813 segT seg)
21814 {
21815 offsetT value = * valP;
21816 offsetT newval;
21817 unsigned int newimm;
21818 unsigned long temp;
21819 int sign;
21820 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
21821
21822 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
21823
21824 /* Note whether this will delete the relocation. */
21825
21826 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
21827 fixP->fx_done = 1;
21828
21829 /* On a 64-bit host, silently truncate 'value' to 32 bits for
21830 consistency with the behaviour on 32-bit hosts. Remember value
21831 for emit_reloc. */
21832 value &= 0xffffffff;
21833 value ^= 0x80000000;
21834 value -= 0x80000000;
21835
21836 *valP = value;
21837 fixP->fx_addnumber = value;
21838
21839 /* Same treatment for fixP->fx_offset. */
21840 fixP->fx_offset &= 0xffffffff;
21841 fixP->fx_offset ^= 0x80000000;
21842 fixP->fx_offset -= 0x80000000;
21843
21844 switch (fixP->fx_r_type)
21845 {
21846 case BFD_RELOC_NONE:
21847 /* This will need to go in the object file. */
21848 fixP->fx_done = 0;
21849 break;
21850
21851 case BFD_RELOC_ARM_IMMEDIATE:
21852 /* We claim that this fixup has been processed here,
21853 even if in fact we generate an error because we do
21854 not have a reloc for it, so tc_gen_reloc will reject it. */
21855 fixP->fx_done = 1;
21856
21857 if (fixP->fx_addsy)
21858 {
21859 const char *msg = 0;
21860
21861 if (! S_IS_DEFINED (fixP->fx_addsy))
21862 msg = _("undefined symbol %s used as an immediate value");
21863 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
21864 msg = _("symbol %s is in a different section");
21865 else if (S_IS_WEAK (fixP->fx_addsy))
21866 msg = _("symbol %s is weak and may be overridden later");
21867
21868 if (msg)
21869 {
21870 as_bad_where (fixP->fx_file, fixP->fx_line,
21871 msg, S_GET_NAME (fixP->fx_addsy));
21872 break;
21873 }
21874 }
21875
21876 temp = md_chars_to_number (buf, INSN_SIZE);
21877
21878 /* If the offset is negative, we should use encoding A2 for ADR. */
21879 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
21880 newimm = negate_data_op (&temp, value);
21881 else
21882 {
21883 newimm = encode_arm_immediate (value);
21884
21885 /* If the instruction will fail, see if we can fix things up by
21886 changing the opcode. */
21887 if (newimm == (unsigned int) FAIL)
21888 newimm = negate_data_op (&temp, value);
21889 }
21890
21891 if (newimm == (unsigned int) FAIL)
21892 {
21893 as_bad_where (fixP->fx_file, fixP->fx_line,
21894 _("invalid constant (%lx) after fixup"),
21895 (unsigned long) value);
21896 break;
21897 }
21898
21899 newimm |= (temp & 0xfffff000);
21900 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
21901 break;
21902
21903 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
21904 {
21905 unsigned int highpart = 0;
21906 unsigned int newinsn = 0xe1a00000; /* nop. */
21907
21908 if (fixP->fx_addsy)
21909 {
21910 const char *msg = 0;
21911
21912 if (! S_IS_DEFINED (fixP->fx_addsy))
21913 msg = _("undefined symbol %s used as an immediate value");
21914 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
21915 msg = _("symbol %s is in a different section");
21916 else if (S_IS_WEAK (fixP->fx_addsy))
21917 msg = _("symbol %s is weak and may be overridden later");
21918
21919 if (msg)
21920 {
21921 as_bad_where (fixP->fx_file, fixP->fx_line,
21922 msg, S_GET_NAME (fixP->fx_addsy));
21923 break;
21924 }
21925 }
21926
21927 newimm = encode_arm_immediate (value);
21928 temp = md_chars_to_number (buf, INSN_SIZE);
21929
21930 /* If the instruction will fail, see if we can fix things up by
21931 changing the opcode. */
21932 if (newimm == (unsigned int) FAIL
21933 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
21934 {
21935 /* No ? OK - try using two ADD instructions to generate
21936 the value. */
21937 newimm = validate_immediate_twopart (value, & highpart);
21938
21939 /* Yes - then make sure that the second instruction is
21940 also an add. */
21941 if (newimm != (unsigned int) FAIL)
21942 newinsn = temp;
21943 /* Still No ? Try using a negated value. */
21944 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
21945 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
21946 /* Otherwise - give up. */
21947 else
21948 {
21949 as_bad_where (fixP->fx_file, fixP->fx_line,
21950 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
21951 (long) value);
21952 break;
21953 }
21954
21955 /* Replace the first operand in the 2nd instruction (which
21956 is the PC) with the destination register. We have
21957 already added in the PC in the first instruction and we
21958 do not want to do it again. */
21959 newinsn &= ~ 0xf0000;
21960 newinsn |= ((newinsn & 0x0f000) << 4);
21961 }
21962
21963 newimm |= (temp & 0xfffff000);
21964 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
21965
21966 highpart |= (newinsn & 0xfffff000);
21967 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
21968 }
21969 break;
21970
21971 case BFD_RELOC_ARM_OFFSET_IMM:
21972 if (!fixP->fx_done && seg->use_rela_p)
21973 value = 0;
21974
21975 case BFD_RELOC_ARM_LITERAL:
21976 sign = value > 0;
21977
21978 if (value < 0)
21979 value = - value;
21980
21981 if (validate_offset_imm (value, 0) == FAIL)
21982 {
21983 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
21984 as_bad_where (fixP->fx_file, fixP->fx_line,
21985 _("invalid literal constant: pool needs to be closer"));
21986 else
21987 as_bad_where (fixP->fx_file, fixP->fx_line,
21988 _("bad immediate value for offset (%ld)"),
21989 (long) value);
21990 break;
21991 }
21992
21993 newval = md_chars_to_number (buf, INSN_SIZE);
21994 if (value == 0)
21995 newval &= 0xfffff000;
21996 else
21997 {
21998 newval &= 0xff7ff000;
21999 newval |= value | (sign ? INDEX_UP : 0);
22000 }
22001 md_number_to_chars (buf, newval, INSN_SIZE);
22002 break;
22003
22004 case BFD_RELOC_ARM_OFFSET_IMM8:
22005 case BFD_RELOC_ARM_HWLITERAL:
22006 sign = value > 0;
22007
22008 if (value < 0)
22009 value = - value;
22010
22011 if (validate_offset_imm (value, 1) == FAIL)
22012 {
22013 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
22014 as_bad_where (fixP->fx_file, fixP->fx_line,
22015 _("invalid literal constant: pool needs to be closer"));
22016 else
22017 as_bad_where (fixP->fx_file, fixP->fx_line,
22018 _("bad immediate value for 8-bit offset (%ld)"),
22019 (long) value);
22020 break;
22021 }
22022
22023 newval = md_chars_to_number (buf, INSN_SIZE);
22024 if (value == 0)
22025 newval &= 0xfffff0f0;
22026 else
22027 {
22028 newval &= 0xff7ff0f0;
22029 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
22030 }
22031 md_number_to_chars (buf, newval, INSN_SIZE);
22032 break;
22033
22034 case BFD_RELOC_ARM_T32_OFFSET_U8:
22035 if (value < 0 || value > 1020 || value % 4 != 0)
22036 as_bad_where (fixP->fx_file, fixP->fx_line,
22037 _("bad immediate value for offset (%ld)"), (long) value);
22038 value /= 4;
22039
22040 newval = md_chars_to_number (buf+2, THUMB_SIZE);
22041 newval |= value;
22042 md_number_to_chars (buf+2, newval, THUMB_SIZE);
22043 break;
22044
22045 case BFD_RELOC_ARM_T32_OFFSET_IMM:
22046 /* This is a complicated relocation used for all varieties of Thumb32
22047 load/store instruction with immediate offset:
22048
22049 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22050 *4, optional writeback(W)
22051 (doubleword load/store)
22052
22053 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22054 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22055 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22056 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22057 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22058
22059 Uppercase letters indicate bits that are already encoded at
22060 this point. Lowercase letters are our problem. For the
22061 second block of instructions, the secondary opcode nybble
22062 (bits 8..11) is present, and bit 23 is zero, even if this is
22063 a PC-relative operation. */
22064 newval = md_chars_to_number (buf, THUMB_SIZE);
22065 newval <<= 16;
22066 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
22067
22068 if ((newval & 0xf0000000) == 0xe0000000)
22069 {
22070 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22071 if (value >= 0)
22072 newval |= (1 << 23);
22073 else
22074 value = -value;
22075 if (value % 4 != 0)
22076 {
22077 as_bad_where (fixP->fx_file, fixP->fx_line,
22078 _("offset not a multiple of 4"));
22079 break;
22080 }
22081 value /= 4;
22082 if (value > 0xff)
22083 {
22084 as_bad_where (fixP->fx_file, fixP->fx_line,
22085 _("offset out of range"));
22086 break;
22087 }
22088 newval &= ~0xff;
22089 }
22090 else if ((newval & 0x000f0000) == 0x000f0000)
22091 {
22092 /* PC-relative, 12-bit offset. */
22093 if (value >= 0)
22094 newval |= (1 << 23);
22095 else
22096 value = -value;
22097 if (value > 0xfff)
22098 {
22099 as_bad_where (fixP->fx_file, fixP->fx_line,
22100 _("offset out of range"));
22101 break;
22102 }
22103 newval &= ~0xfff;
22104 }
22105 else if ((newval & 0x00000100) == 0x00000100)
22106 {
22107 /* Writeback: 8-bit, +/- offset. */
22108 if (value >= 0)
22109 newval |= (1 << 9);
22110 else
22111 value = -value;
22112 if (value > 0xff)
22113 {
22114 as_bad_where (fixP->fx_file, fixP->fx_line,
22115 _("offset out of range"));
22116 break;
22117 }
22118 newval &= ~0xff;
22119 }
22120 else if ((newval & 0x00000f00) == 0x00000e00)
22121 {
22122 /* T-instruction: positive 8-bit offset. */
22123 if (value < 0 || value > 0xff)
22124 {
22125 as_bad_where (fixP->fx_file, fixP->fx_line,
22126 _("offset out of range"));
22127 break;
22128 }
22129 newval &= ~0xff;
22130 newval |= value;
22131 }
22132 else
22133 {
22134 /* Positive 12-bit or negative 8-bit offset. */
22135 int limit;
22136 if (value >= 0)
22137 {
22138 newval |= (1 << 23);
22139 limit = 0xfff;
22140 }
22141 else
22142 {
22143 value = -value;
22144 limit = 0xff;
22145 }
22146 if (value > limit)
22147 {
22148 as_bad_where (fixP->fx_file, fixP->fx_line,
22149 _("offset out of range"));
22150 break;
22151 }
22152 newval &= ~limit;
22153 }
22154
22155 newval |= value;
22156 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
22157 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
22158 break;
22159
22160 case BFD_RELOC_ARM_SHIFT_IMM:
22161 newval = md_chars_to_number (buf, INSN_SIZE);
22162 if (((unsigned long) value) > 32
22163 || (value == 32
22164 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
22165 {
22166 as_bad_where (fixP->fx_file, fixP->fx_line,
22167 _("shift expression is too large"));
22168 break;
22169 }
22170
22171 if (value == 0)
22172 /* Shifts of zero must be done as lsl. */
22173 newval &= ~0x60;
22174 else if (value == 32)
22175 value = 0;
22176 newval &= 0xfffff07f;
22177 newval |= (value & 0x1f) << 7;
22178 md_number_to_chars (buf, newval, INSN_SIZE);
22179 break;
22180
22181 case BFD_RELOC_ARM_T32_IMMEDIATE:
22182 case BFD_RELOC_ARM_T32_ADD_IMM:
22183 case BFD_RELOC_ARM_T32_IMM12:
22184 case BFD_RELOC_ARM_T32_ADD_PC12:
22185 /* We claim that this fixup has been processed here,
22186 even if in fact we generate an error because we do
22187 not have a reloc for it, so tc_gen_reloc will reject it. */
22188 fixP->fx_done = 1;
22189
22190 if (fixP->fx_addsy
22191 && ! S_IS_DEFINED (fixP->fx_addsy))
22192 {
22193 as_bad_where (fixP->fx_file, fixP->fx_line,
22194 _("undefined symbol %s used as an immediate value"),
22195 S_GET_NAME (fixP->fx_addsy));
22196 break;
22197 }
22198
22199 newval = md_chars_to_number (buf, THUMB_SIZE);
22200 newval <<= 16;
22201 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
22202
22203 newimm = FAIL;
22204 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
22205 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22206 {
22207 newimm = encode_thumb32_immediate (value);
22208 if (newimm == (unsigned int) FAIL)
22209 newimm = thumb32_negate_data_op (&newval, value);
22210 }
22211 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
22212 && newimm == (unsigned int) FAIL)
22213 {
22214 /* Turn add/sum into addw/subw. */
22215 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22216 newval = (newval & 0xfeffffff) | 0x02000000;
22217 /* No flat 12-bit imm encoding for addsw/subsw. */
22218 if ((newval & 0x00100000) == 0)
22219 {
22220 /* 12 bit immediate for addw/subw. */
22221 if (value < 0)
22222 {
22223 value = -value;
22224 newval ^= 0x00a00000;
22225 }
22226 if (value > 0xfff)
22227 newimm = (unsigned int) FAIL;
22228 else
22229 newimm = value;
22230 }
22231 }
22232
22233 if (newimm == (unsigned int)FAIL)
22234 {
22235 as_bad_where (fixP->fx_file, fixP->fx_line,
22236 _("invalid constant (%lx) after fixup"),
22237 (unsigned long) value);
22238 break;
22239 }
22240
22241 newval |= (newimm & 0x800) << 15;
22242 newval |= (newimm & 0x700) << 4;
22243 newval |= (newimm & 0x0ff);
22244
22245 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
22246 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
22247 break;
22248
22249 case BFD_RELOC_ARM_SMC:
22250 if (((unsigned long) value) > 0xffff)
22251 as_bad_where (fixP->fx_file, fixP->fx_line,
22252 _("invalid smc expression"));
22253 newval = md_chars_to_number (buf, INSN_SIZE);
22254 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22255 md_number_to_chars (buf, newval, INSN_SIZE);
22256 break;
22257
22258 case BFD_RELOC_ARM_HVC:
22259 if (((unsigned long) value) > 0xffff)
22260 as_bad_where (fixP->fx_file, fixP->fx_line,
22261 _("invalid hvc expression"));
22262 newval = md_chars_to_number (buf, INSN_SIZE);
22263 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22264 md_number_to_chars (buf, newval, INSN_SIZE);
22265 break;
22266
22267 case BFD_RELOC_ARM_SWI:
22268 if (fixP->tc_fix_data != 0)
22269 {
22270 if (((unsigned long) value) > 0xff)
22271 as_bad_where (fixP->fx_file, fixP->fx_line,
22272 _("invalid swi expression"));
22273 newval = md_chars_to_number (buf, THUMB_SIZE);
22274 newval |= value;
22275 md_number_to_chars (buf, newval, THUMB_SIZE);
22276 }
22277 else
22278 {
22279 if (((unsigned long) value) > 0x00ffffff)
22280 as_bad_where (fixP->fx_file, fixP->fx_line,
22281 _("invalid swi expression"));
22282 newval = md_chars_to_number (buf, INSN_SIZE);
22283 newval |= value;
22284 md_number_to_chars (buf, newval, INSN_SIZE);
22285 }
22286 break;
22287
22288 case BFD_RELOC_ARM_MULTI:
22289 if (((unsigned long) value) > 0xffff)
22290 as_bad_where (fixP->fx_file, fixP->fx_line,
22291 _("invalid expression in load/store multiple"));
22292 newval = value | md_chars_to_number (buf, INSN_SIZE);
22293 md_number_to_chars (buf, newval, INSN_SIZE);
22294 break;
22295
22296 #ifdef OBJ_ELF
22297 case BFD_RELOC_ARM_PCREL_CALL:
22298
22299 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22300 && fixP->fx_addsy
22301 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22302 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22303 && THUMB_IS_FUNC (fixP->fx_addsy))
22304 /* Flip the bl to blx. This is a simple flip
22305 bit here because we generate PCREL_CALL for
22306 unconditional bls. */
22307 {
22308 newval = md_chars_to_number (buf, INSN_SIZE);
22309 newval = newval | 0x10000000;
22310 md_number_to_chars (buf, newval, INSN_SIZE);
22311 temp = 1;
22312 fixP->fx_done = 1;
22313 }
22314 else
22315 temp = 3;
22316 goto arm_branch_common;
22317
22318 case BFD_RELOC_ARM_PCREL_JUMP:
22319 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22320 && fixP->fx_addsy
22321 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22322 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22323 && THUMB_IS_FUNC (fixP->fx_addsy))
22324 {
22325 /* This would map to a bl<cond>, b<cond>,
22326 b<always> to a Thumb function. We
22327 need to force a relocation for this particular
22328 case. */
22329 newval = md_chars_to_number (buf, INSN_SIZE);
22330 fixP->fx_done = 0;
22331 }
22332
22333 case BFD_RELOC_ARM_PLT32:
22334 #endif
22335 case BFD_RELOC_ARM_PCREL_BRANCH:
22336 temp = 3;
22337 goto arm_branch_common;
22338
22339 case BFD_RELOC_ARM_PCREL_BLX:
22340
22341 temp = 1;
22342 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22343 && fixP->fx_addsy
22344 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22345 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22346 && ARM_IS_FUNC (fixP->fx_addsy))
22347 {
22348 /* Flip the blx to a bl and warn. */
22349 const char *name = S_GET_NAME (fixP->fx_addsy);
22350 newval = 0xeb000000;
22351 as_warn_where (fixP->fx_file, fixP->fx_line,
22352 _("blx to '%s' an ARM ISA state function changed to bl"),
22353 name);
22354 md_number_to_chars (buf, newval, INSN_SIZE);
22355 temp = 3;
22356 fixP->fx_done = 1;
22357 }
22358
22359 #ifdef OBJ_ELF
22360 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
22361 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
22362 #endif
22363
22364 arm_branch_common:
22365 /* We are going to store value (shifted right by two) in the
22366 instruction, in a 24 bit, signed field. Bits 26 through 32 either
22367 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
22368 also be be clear. */
22369 if (value & temp)
22370 as_bad_where (fixP->fx_file, fixP->fx_line,
22371 _("misaligned branch destination"));
22372 if ((value & (offsetT)0xfe000000) != (offsetT)0
22373 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
22374 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22375
22376 if (fixP->fx_done || !seg->use_rela_p)
22377 {
22378 newval = md_chars_to_number (buf, INSN_SIZE);
22379 newval |= (value >> 2) & 0x00ffffff;
22380 /* Set the H bit on BLX instructions. */
22381 if (temp == 1)
22382 {
22383 if (value & 2)
22384 newval |= 0x01000000;
22385 else
22386 newval &= ~0x01000000;
22387 }
22388 md_number_to_chars (buf, newval, INSN_SIZE);
22389 }
22390 break;
22391
22392 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
22393 /* CBZ can only branch forward. */
22394
22395 /* Attempts to use CBZ to branch to the next instruction
22396 (which, strictly speaking, are prohibited) will be turned into
22397 no-ops.
22398
22399 FIXME: It may be better to remove the instruction completely and
22400 perform relaxation. */
22401 if (value == -2)
22402 {
22403 newval = md_chars_to_number (buf, THUMB_SIZE);
22404 newval = 0xbf00; /* NOP encoding T1 */
22405 md_number_to_chars (buf, newval, THUMB_SIZE);
22406 }
22407 else
22408 {
22409 if (value & ~0x7e)
22410 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22411
22412 if (fixP->fx_done || !seg->use_rela_p)
22413 {
22414 newval = md_chars_to_number (buf, THUMB_SIZE);
22415 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
22416 md_number_to_chars (buf, newval, THUMB_SIZE);
22417 }
22418 }
22419 break;
22420
22421 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
22422 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
22423 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22424
22425 if (fixP->fx_done || !seg->use_rela_p)
22426 {
22427 newval = md_chars_to_number (buf, THUMB_SIZE);
22428 newval |= (value & 0x1ff) >> 1;
22429 md_number_to_chars (buf, newval, THUMB_SIZE);
22430 }
22431 break;
22432
22433 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
22434 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
22435 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22436
22437 if (fixP->fx_done || !seg->use_rela_p)
22438 {
22439 newval = md_chars_to_number (buf, THUMB_SIZE);
22440 newval |= (value & 0xfff) >> 1;
22441 md_number_to_chars (buf, newval, THUMB_SIZE);
22442 }
22443 break;
22444
22445 case BFD_RELOC_THUMB_PCREL_BRANCH20:
22446 if (fixP->fx_addsy
22447 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22448 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22449 && ARM_IS_FUNC (fixP->fx_addsy)
22450 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22451 {
22452 /* Force a relocation for a branch 20 bits wide. */
22453 fixP->fx_done = 0;
22454 }
22455 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
22456 as_bad_where (fixP->fx_file, fixP->fx_line,
22457 _("conditional branch out of range"));
22458
22459 if (fixP->fx_done || !seg->use_rela_p)
22460 {
22461 offsetT newval2;
22462 addressT S, J1, J2, lo, hi;
22463
22464 S = (value & 0x00100000) >> 20;
22465 J2 = (value & 0x00080000) >> 19;
22466 J1 = (value & 0x00040000) >> 18;
22467 hi = (value & 0x0003f000) >> 12;
22468 lo = (value & 0x00000ffe) >> 1;
22469
22470 newval = md_chars_to_number (buf, THUMB_SIZE);
22471 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22472 newval |= (S << 10) | hi;
22473 newval2 |= (J1 << 13) | (J2 << 11) | lo;
22474 md_number_to_chars (buf, newval, THUMB_SIZE);
22475 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
22476 }
22477 break;
22478
22479 case BFD_RELOC_THUMB_PCREL_BLX:
22480 /* If there is a blx from a thumb state function to
22481 another thumb function flip this to a bl and warn
22482 about it. */
22483
22484 if (fixP->fx_addsy
22485 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22486 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22487 && THUMB_IS_FUNC (fixP->fx_addsy))
22488 {
22489 const char *name = S_GET_NAME (fixP->fx_addsy);
22490 as_warn_where (fixP->fx_file, fixP->fx_line,
22491 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
22492 name);
22493 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22494 newval = newval | 0x1000;
22495 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22496 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22497 fixP->fx_done = 1;
22498 }
22499
22500
22501 goto thumb_bl_common;
22502
22503 case BFD_RELOC_THUMB_PCREL_BRANCH23:
22504 /* A bl from Thumb state ISA to an internal ARM state function
22505 is converted to a blx. */
22506 if (fixP->fx_addsy
22507 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22508 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22509 && ARM_IS_FUNC (fixP->fx_addsy)
22510 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22511 {
22512 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22513 newval = newval & ~0x1000;
22514 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22515 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
22516 fixP->fx_done = 1;
22517 }
22518
22519 thumb_bl_common:
22520
22521 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22522 /* For a BLX instruction, make sure that the relocation is rounded up
22523 to a word boundary. This follows the semantics of the instruction
22524 which specifies that bit 1 of the target address will come from bit
22525 1 of the base address. */
22526 value = (value + 3) & ~ 3;
22527
22528 #ifdef OBJ_ELF
22529 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
22530 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22531 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22532 #endif
22533
22534 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
22535 {
22536 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
22537 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22538 else if ((value & ~0x1ffffff)
22539 && ((value & ~0x1ffffff) != ~0x1ffffff))
22540 as_bad_where (fixP->fx_file, fixP->fx_line,
22541 _("Thumb2 branch out of range"));
22542 }
22543
22544 if (fixP->fx_done || !seg->use_rela_p)
22545 encode_thumb2_b_bl_offset (buf, value);
22546
22547 break;
22548
22549 case BFD_RELOC_THUMB_PCREL_BRANCH25:
22550 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
22551 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22552
22553 if (fixP->fx_done || !seg->use_rela_p)
22554 encode_thumb2_b_bl_offset (buf, value);
22555
22556 break;
22557
22558 case BFD_RELOC_8:
22559 if (fixP->fx_done || !seg->use_rela_p)
22560 *buf = value;
22561 break;
22562
22563 case BFD_RELOC_16:
22564 if (fixP->fx_done || !seg->use_rela_p)
22565 md_number_to_chars (buf, value, 2);
22566 break;
22567
22568 #ifdef OBJ_ELF
22569 case BFD_RELOC_ARM_TLS_CALL:
22570 case BFD_RELOC_ARM_THM_TLS_CALL:
22571 case BFD_RELOC_ARM_TLS_DESCSEQ:
22572 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
22573 case BFD_RELOC_ARM_TLS_GOTDESC:
22574 case BFD_RELOC_ARM_TLS_GD32:
22575 case BFD_RELOC_ARM_TLS_LE32:
22576 case BFD_RELOC_ARM_TLS_IE32:
22577 case BFD_RELOC_ARM_TLS_LDM32:
22578 case BFD_RELOC_ARM_TLS_LDO32:
22579 S_SET_THREAD_LOCAL (fixP->fx_addsy);
22580 break;
22581
22582 case BFD_RELOC_ARM_GOT32:
22583 case BFD_RELOC_ARM_GOTOFF:
22584 break;
22585
22586 case BFD_RELOC_ARM_GOT_PREL:
22587 if (fixP->fx_done || !seg->use_rela_p)
22588 md_number_to_chars (buf, value, 4);
22589 break;
22590
22591 case BFD_RELOC_ARM_TARGET2:
22592 /* TARGET2 is not partial-inplace, so we need to write the
22593 addend here for REL targets, because it won't be written out
22594 during reloc processing later. */
22595 if (fixP->fx_done || !seg->use_rela_p)
22596 md_number_to_chars (buf, fixP->fx_offset, 4);
22597 break;
22598 #endif
22599
22600 case BFD_RELOC_RVA:
22601 case BFD_RELOC_32:
22602 case BFD_RELOC_ARM_TARGET1:
22603 case BFD_RELOC_ARM_ROSEGREL32:
22604 case BFD_RELOC_ARM_SBREL32:
22605 case BFD_RELOC_32_PCREL:
22606 #ifdef TE_PE
22607 case BFD_RELOC_32_SECREL:
22608 #endif
22609 if (fixP->fx_done || !seg->use_rela_p)
22610 #ifdef TE_WINCE
22611 /* For WinCE we only do this for pcrel fixups. */
22612 if (fixP->fx_done || fixP->fx_pcrel)
22613 #endif
22614 md_number_to_chars (buf, value, 4);
22615 break;
22616
22617 #ifdef OBJ_ELF
22618 case BFD_RELOC_ARM_PREL31:
22619 if (fixP->fx_done || !seg->use_rela_p)
22620 {
22621 newval = md_chars_to_number (buf, 4) & 0x80000000;
22622 if ((value ^ (value >> 1)) & 0x40000000)
22623 {
22624 as_bad_where (fixP->fx_file, fixP->fx_line,
22625 _("rel31 relocation overflow"));
22626 }
22627 newval |= value & 0x7fffffff;
22628 md_number_to_chars (buf, newval, 4);
22629 }
22630 break;
22631 #endif
22632
22633 case BFD_RELOC_ARM_CP_OFF_IMM:
22634 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
22635 if (value < -1023 || value > 1023 || (value & 3))
22636 as_bad_where (fixP->fx_file, fixP->fx_line,
22637 _("co-processor offset out of range"));
22638 cp_off_common:
22639 sign = value > 0;
22640 if (value < 0)
22641 value = -value;
22642 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22643 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22644 newval = md_chars_to_number (buf, INSN_SIZE);
22645 else
22646 newval = get_thumb32_insn (buf);
22647 if (value == 0)
22648 newval &= 0xffffff00;
22649 else
22650 {
22651 newval &= 0xff7fff00;
22652 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
22653 }
22654 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22655 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22656 md_number_to_chars (buf, newval, INSN_SIZE);
22657 else
22658 put_thumb32_insn (buf, newval);
22659 break;
22660
22661 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
22662 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
22663 if (value < -255 || value > 255)
22664 as_bad_where (fixP->fx_file, fixP->fx_line,
22665 _("co-processor offset out of range"));
22666 value *= 4;
22667 goto cp_off_common;
22668
22669 case BFD_RELOC_ARM_THUMB_OFFSET:
22670 newval = md_chars_to_number (buf, THUMB_SIZE);
22671 /* Exactly what ranges, and where the offset is inserted depends
22672 on the type of instruction, we can establish this from the
22673 top 4 bits. */
22674 switch (newval >> 12)
22675 {
22676 case 4: /* PC load. */
22677 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
22678 forced to zero for these loads; md_pcrel_from has already
22679 compensated for this. */
22680 if (value & 3)
22681 as_bad_where (fixP->fx_file, fixP->fx_line,
22682 _("invalid offset, target not word aligned (0x%08lX)"),
22683 (((unsigned long) fixP->fx_frag->fr_address
22684 + (unsigned long) fixP->fx_where) & ~3)
22685 + (unsigned long) value);
22686
22687 if (value & ~0x3fc)
22688 as_bad_where (fixP->fx_file, fixP->fx_line,
22689 _("invalid offset, value too big (0x%08lX)"),
22690 (long) value);
22691
22692 newval |= value >> 2;
22693 break;
22694
22695 case 9: /* SP load/store. */
22696 if (value & ~0x3fc)
22697 as_bad_where (fixP->fx_file, fixP->fx_line,
22698 _("invalid offset, value too big (0x%08lX)"),
22699 (long) value);
22700 newval |= value >> 2;
22701 break;
22702
22703 case 6: /* Word load/store. */
22704 if (value & ~0x7c)
22705 as_bad_where (fixP->fx_file, fixP->fx_line,
22706 _("invalid offset, value too big (0x%08lX)"),
22707 (long) value);
22708 newval |= value << 4; /* 6 - 2. */
22709 break;
22710
22711 case 7: /* Byte load/store. */
22712 if (value & ~0x1f)
22713 as_bad_where (fixP->fx_file, fixP->fx_line,
22714 _("invalid offset, value too big (0x%08lX)"),
22715 (long) value);
22716 newval |= value << 6;
22717 break;
22718
22719 case 8: /* Halfword load/store. */
22720 if (value & ~0x3e)
22721 as_bad_where (fixP->fx_file, fixP->fx_line,
22722 _("invalid offset, value too big (0x%08lX)"),
22723 (long) value);
22724 newval |= value << 5; /* 6 - 1. */
22725 break;
22726
22727 default:
22728 as_bad_where (fixP->fx_file, fixP->fx_line,
22729 "Unable to process relocation for thumb opcode: %lx",
22730 (unsigned long) newval);
22731 break;
22732 }
22733 md_number_to_chars (buf, newval, THUMB_SIZE);
22734 break;
22735
22736 case BFD_RELOC_ARM_THUMB_ADD:
22737 /* This is a complicated relocation, since we use it for all of
22738 the following immediate relocations:
22739
22740 3bit ADD/SUB
22741 8bit ADD/SUB
22742 9bit ADD/SUB SP word-aligned
22743 10bit ADD PC/SP word-aligned
22744
22745 The type of instruction being processed is encoded in the
22746 instruction field:
22747
22748 0x8000 SUB
22749 0x00F0 Rd
22750 0x000F Rs
22751 */
22752 newval = md_chars_to_number (buf, THUMB_SIZE);
22753 {
22754 int rd = (newval >> 4) & 0xf;
22755 int rs = newval & 0xf;
22756 int subtract = !!(newval & 0x8000);
22757
22758 /* Check for HI regs, only very restricted cases allowed:
22759 Adjusting SP, and using PC or SP to get an address. */
22760 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
22761 || (rs > 7 && rs != REG_SP && rs != REG_PC))
22762 as_bad_where (fixP->fx_file, fixP->fx_line,
22763 _("invalid Hi register with immediate"));
22764
22765 /* If value is negative, choose the opposite instruction. */
22766 if (value < 0)
22767 {
22768 value = -value;
22769 subtract = !subtract;
22770 if (value < 0)
22771 as_bad_where (fixP->fx_file, fixP->fx_line,
22772 _("immediate value out of range"));
22773 }
22774
22775 if (rd == REG_SP)
22776 {
22777 if (value & ~0x1fc)
22778 as_bad_where (fixP->fx_file, fixP->fx_line,
22779 _("invalid immediate for stack address calculation"));
22780 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
22781 newval |= value >> 2;
22782 }
22783 else if (rs == REG_PC || rs == REG_SP)
22784 {
22785 if (subtract || value & ~0x3fc)
22786 as_bad_where (fixP->fx_file, fixP->fx_line,
22787 _("invalid immediate for address calculation (value = 0x%08lX)"),
22788 (unsigned long) value);
22789 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
22790 newval |= rd << 8;
22791 newval |= value >> 2;
22792 }
22793 else if (rs == rd)
22794 {
22795 if (value & ~0xff)
22796 as_bad_where (fixP->fx_file, fixP->fx_line,
22797 _("immediate value out of range"));
22798 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
22799 newval |= (rd << 8) | value;
22800 }
22801 else
22802 {
22803 if (value & ~0x7)
22804 as_bad_where (fixP->fx_file, fixP->fx_line,
22805 _("immediate value out of range"));
22806 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
22807 newval |= rd | (rs << 3) | (value << 6);
22808 }
22809 }
22810 md_number_to_chars (buf, newval, THUMB_SIZE);
22811 break;
22812
22813 case BFD_RELOC_ARM_THUMB_IMM:
22814 newval = md_chars_to_number (buf, THUMB_SIZE);
22815 if (value < 0 || value > 255)
22816 as_bad_where (fixP->fx_file, fixP->fx_line,
22817 _("invalid immediate: %ld is out of range"),
22818 (long) value);
22819 newval |= value;
22820 md_number_to_chars (buf, newval, THUMB_SIZE);
22821 break;
22822
22823 case BFD_RELOC_ARM_THUMB_SHIFT:
22824 /* 5bit shift value (0..32). LSL cannot take 32. */
22825 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
22826 temp = newval & 0xf800;
22827 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
22828 as_bad_where (fixP->fx_file, fixP->fx_line,
22829 _("invalid shift value: %ld"), (long) value);
22830 /* Shifts of zero must be encoded as LSL. */
22831 if (value == 0)
22832 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
22833 /* Shifts of 32 are encoded as zero. */
22834 else if (value == 32)
22835 value = 0;
22836 newval |= value << 6;
22837 md_number_to_chars (buf, newval, THUMB_SIZE);
22838 break;
22839
22840 case BFD_RELOC_VTABLE_INHERIT:
22841 case BFD_RELOC_VTABLE_ENTRY:
22842 fixP->fx_done = 0;
22843 return;
22844
22845 case BFD_RELOC_ARM_MOVW:
22846 case BFD_RELOC_ARM_MOVT:
22847 case BFD_RELOC_ARM_THUMB_MOVW:
22848 case BFD_RELOC_ARM_THUMB_MOVT:
22849 if (fixP->fx_done || !seg->use_rela_p)
22850 {
22851 /* REL format relocations are limited to a 16-bit addend. */
22852 if (!fixP->fx_done)
22853 {
22854 if (value < -0x8000 || value > 0x7fff)
22855 as_bad_where (fixP->fx_file, fixP->fx_line,
22856 _("offset out of range"));
22857 }
22858 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
22859 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22860 {
22861 value >>= 16;
22862 }
22863
22864 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
22865 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22866 {
22867 newval = get_thumb32_insn (buf);
22868 newval &= 0xfbf08f00;
22869 newval |= (value & 0xf000) << 4;
22870 newval |= (value & 0x0800) << 15;
22871 newval |= (value & 0x0700) << 4;
22872 newval |= (value & 0x00ff);
22873 put_thumb32_insn (buf, newval);
22874 }
22875 else
22876 {
22877 newval = md_chars_to_number (buf, 4);
22878 newval &= 0xfff0f000;
22879 newval |= value & 0x0fff;
22880 newval |= (value & 0xf000) << 4;
22881 md_number_to_chars (buf, newval, 4);
22882 }
22883 }
22884 return;
22885
22886 case BFD_RELOC_ARM_ALU_PC_G0_NC:
22887 case BFD_RELOC_ARM_ALU_PC_G0:
22888 case BFD_RELOC_ARM_ALU_PC_G1_NC:
22889 case BFD_RELOC_ARM_ALU_PC_G1:
22890 case BFD_RELOC_ARM_ALU_PC_G2:
22891 case BFD_RELOC_ARM_ALU_SB_G0_NC:
22892 case BFD_RELOC_ARM_ALU_SB_G0:
22893 case BFD_RELOC_ARM_ALU_SB_G1_NC:
22894 case BFD_RELOC_ARM_ALU_SB_G1:
22895 case BFD_RELOC_ARM_ALU_SB_G2:
22896 gas_assert (!fixP->fx_done);
22897 if (!seg->use_rela_p)
22898 {
22899 bfd_vma insn;
22900 bfd_vma encoded_addend;
22901 bfd_vma addend_abs = abs (value);
22902
22903 /* Check that the absolute value of the addend can be
22904 expressed as an 8-bit constant plus a rotation. */
22905 encoded_addend = encode_arm_immediate (addend_abs);
22906 if (encoded_addend == (unsigned int) FAIL)
22907 as_bad_where (fixP->fx_file, fixP->fx_line,
22908 _("the offset 0x%08lX is not representable"),
22909 (unsigned long) addend_abs);
22910
22911 /* Extract the instruction. */
22912 insn = md_chars_to_number (buf, INSN_SIZE);
22913
22914 /* If the addend is positive, use an ADD instruction.
22915 Otherwise use a SUB. Take care not to destroy the S bit. */
22916 insn &= 0xff1fffff;
22917 if (value < 0)
22918 insn |= 1 << 22;
22919 else
22920 insn |= 1 << 23;
22921
22922 /* Place the encoded addend into the first 12 bits of the
22923 instruction. */
22924 insn &= 0xfffff000;
22925 insn |= encoded_addend;
22926
22927 /* Update the instruction. */
22928 md_number_to_chars (buf, insn, INSN_SIZE);
22929 }
22930 break;
22931
22932 case BFD_RELOC_ARM_LDR_PC_G0:
22933 case BFD_RELOC_ARM_LDR_PC_G1:
22934 case BFD_RELOC_ARM_LDR_PC_G2:
22935 case BFD_RELOC_ARM_LDR_SB_G0:
22936 case BFD_RELOC_ARM_LDR_SB_G1:
22937 case BFD_RELOC_ARM_LDR_SB_G2:
22938 gas_assert (!fixP->fx_done);
22939 if (!seg->use_rela_p)
22940 {
22941 bfd_vma insn;
22942 bfd_vma addend_abs = abs (value);
22943
22944 /* Check that the absolute value of the addend can be
22945 encoded in 12 bits. */
22946 if (addend_abs >= 0x1000)
22947 as_bad_where (fixP->fx_file, fixP->fx_line,
22948 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
22949 (unsigned long) addend_abs);
22950
22951 /* Extract the instruction. */
22952 insn = md_chars_to_number (buf, INSN_SIZE);
22953
22954 /* If the addend is negative, clear bit 23 of the instruction.
22955 Otherwise set it. */
22956 if (value < 0)
22957 insn &= ~(1 << 23);
22958 else
22959 insn |= 1 << 23;
22960
22961 /* Place the absolute value of the addend into the first 12 bits
22962 of the instruction. */
22963 insn &= 0xfffff000;
22964 insn |= addend_abs;
22965
22966 /* Update the instruction. */
22967 md_number_to_chars (buf, insn, INSN_SIZE);
22968 }
22969 break;
22970
22971 case BFD_RELOC_ARM_LDRS_PC_G0:
22972 case BFD_RELOC_ARM_LDRS_PC_G1:
22973 case BFD_RELOC_ARM_LDRS_PC_G2:
22974 case BFD_RELOC_ARM_LDRS_SB_G0:
22975 case BFD_RELOC_ARM_LDRS_SB_G1:
22976 case BFD_RELOC_ARM_LDRS_SB_G2:
22977 gas_assert (!fixP->fx_done);
22978 if (!seg->use_rela_p)
22979 {
22980 bfd_vma insn;
22981 bfd_vma addend_abs = abs (value);
22982
22983 /* Check that the absolute value of the addend can be
22984 encoded in 8 bits. */
22985 if (addend_abs >= 0x100)
22986 as_bad_where (fixP->fx_file, fixP->fx_line,
22987 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
22988 (unsigned long) addend_abs);
22989
22990 /* Extract the instruction. */
22991 insn = md_chars_to_number (buf, INSN_SIZE);
22992
22993 /* If the addend is negative, clear bit 23 of the instruction.
22994 Otherwise set it. */
22995 if (value < 0)
22996 insn &= ~(1 << 23);
22997 else
22998 insn |= 1 << 23;
22999
23000 /* Place the first four bits of the absolute value of the addend
23001 into the first 4 bits of the instruction, and the remaining
23002 four into bits 8 .. 11. */
23003 insn &= 0xfffff0f0;
23004 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
23005
23006 /* Update the instruction. */
23007 md_number_to_chars (buf, insn, INSN_SIZE);
23008 }
23009 break;
23010
23011 case BFD_RELOC_ARM_LDC_PC_G0:
23012 case BFD_RELOC_ARM_LDC_PC_G1:
23013 case BFD_RELOC_ARM_LDC_PC_G2:
23014 case BFD_RELOC_ARM_LDC_SB_G0:
23015 case BFD_RELOC_ARM_LDC_SB_G1:
23016 case BFD_RELOC_ARM_LDC_SB_G2:
23017 gas_assert (!fixP->fx_done);
23018 if (!seg->use_rela_p)
23019 {
23020 bfd_vma insn;
23021 bfd_vma addend_abs = abs (value);
23022
23023 /* Check that the absolute value of the addend is a multiple of
23024 four and, when divided by four, fits in 8 bits. */
23025 if (addend_abs & 0x3)
23026 as_bad_where (fixP->fx_file, fixP->fx_line,
23027 _("bad offset 0x%08lX (must be word-aligned)"),
23028 (unsigned long) addend_abs);
23029
23030 if ((addend_abs >> 2) > 0xff)
23031 as_bad_where (fixP->fx_file, fixP->fx_line,
23032 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23033 (unsigned long) addend_abs);
23034
23035 /* Extract the instruction. */
23036 insn = md_chars_to_number (buf, INSN_SIZE);
23037
23038 /* If the addend is negative, clear bit 23 of the instruction.
23039 Otherwise set it. */
23040 if (value < 0)
23041 insn &= ~(1 << 23);
23042 else
23043 insn |= 1 << 23;
23044
23045 /* Place the addend (divided by four) into the first eight
23046 bits of the instruction. */
23047 insn &= 0xfffffff0;
23048 insn |= addend_abs >> 2;
23049
23050 /* Update the instruction. */
23051 md_number_to_chars (buf, insn, INSN_SIZE);
23052 }
23053 break;
23054
23055 case BFD_RELOC_ARM_V4BX:
23056 /* This will need to go in the object file. */
23057 fixP->fx_done = 0;
23058 break;
23059
23060 case BFD_RELOC_UNUSED:
23061 default:
23062 as_bad_where (fixP->fx_file, fixP->fx_line,
23063 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
23064 }
23065 }
23066
23067 /* Translate internal representation of relocation info to BFD target
23068 format. */
23069
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = (arelent *) xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For a PC-relative fix the addend depends on the relocation format:
     RELA targets carry the PC adjustment in the addend, while REL
     targets store the fix address itself.  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* NOTE: the first six cases below deliberately fall through when the
     fix is NOT pc-relative, cascading into the catch-all list further
     down that simply copies fx_r_type into CODE.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These relocations map one-to-one onto BFD reloc codes.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI >= 4 encodes Thumb BLX as a BRANCH23 relocation.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then the a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      /* On REL targets this should have been resolved in md_apply_fix;
	 reaching here means the target is not representable.  */
      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Any remaining type is a purely internal relocation that has no
	   object-file representation; report it by name.  */
	char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to _GLOBAL_OFFSET_TABLE_ becomes a GOTPC reloc
     whose addend is the fix address.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
23325
23326 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
23327
23328 void
23329 cons_fix_new_arm (fragS * frag,
23330 int where,
23331 int size,
23332 expressionS * exp,
23333 bfd_reloc_code_real_type reloc)
23334 {
23335 int pcrel = 0;
23336
23337 /* Pick a reloc.
23338 FIXME: @@ Should look at CPU word size. */
23339 switch (size)
23340 {
23341 case 1:
23342 reloc = BFD_RELOC_8;
23343 break;
23344 case 2:
23345 reloc = BFD_RELOC_16;
23346 break;
23347 case 4:
23348 default:
23349 reloc = BFD_RELOC_32;
23350 break;
23351 case 8:
23352 reloc = BFD_RELOC_64;
23353 break;
23354 }
23355
23356 #ifdef TE_PE
23357 if (exp->X_op == O_secrel)
23358 {
23359 exp->X_op = O_symbol;
23360 reloc = BFD_RELOC_32_SECREL;
23361 }
23362 #endif
23363
23364 fix_new_exp (frag, where, size, exp, pcrel, reloc);
23365 }
23366
23367 #if defined (OBJ_COFF)
23368 void
23369 arm_validate_fix (fixS * fixP)
23370 {
23371 /* If the destination of the branch is a defined symbol which does not have
23372 the THUMB_FUNC attribute, then we must be calling a function which has
23373 the (interfacearm) attribute. We look for the Thumb entry point to that
23374 function and change the branch to refer to that function instead. */
23375 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
23376 && fixP->fx_addsy != NULL
23377 && S_IS_DEFINED (fixP->fx_addsy)
23378 && ! THUMB_IS_FUNC (fixP->fx_addsy))
23379 {
23380 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
23381 }
23382 }
23383 #endif
23384
23385
23386 int
23387 arm_force_relocation (struct fix * fixp)
23388 {
23389 #if defined (OBJ_COFF) && defined (TE_PE)
23390 if (fixp->fx_r_type == BFD_RELOC_RVA)
23391 return 1;
23392 #endif
23393
23394 /* In case we have a call or a branch to a function in ARM ISA mode from
23395 a thumb function or vice-versa force the relocation. These relocations
23396 are cleared off for some cores that might have blx and simple transformations
23397 are possible. */
23398
23399 #ifdef OBJ_ELF
23400 switch (fixp->fx_r_type)
23401 {
23402 case BFD_RELOC_ARM_PCREL_JUMP:
23403 case BFD_RELOC_ARM_PCREL_CALL:
23404 case BFD_RELOC_THUMB_PCREL_BLX:
23405 if (THUMB_IS_FUNC (fixp->fx_addsy))
23406 return 1;
23407 break;
23408
23409 case BFD_RELOC_ARM_PCREL_BLX:
23410 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23411 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23412 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23413 if (ARM_IS_FUNC (fixp->fx_addsy))
23414 return 1;
23415 break;
23416
23417 default:
23418 break;
23419 }
23420 #endif
23421
23422 /* Resolve these relocations even if the symbol is extern or weak.
23423 Technically this is probably wrong due to symbol preemption.
23424 In practice these relocations do not have enough range to be useful
23425 at dynamic link time, and some code (e.g. in the Linux kernel)
23426 expects these references to be resolved. */
23427 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
23428 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
23429 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
23430 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
23431 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23432 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
23433 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
23434 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
23435 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23436 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
23437 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
23438 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
23439 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
23440 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
23441 return 0;
23442
23443 /* Always leave these relocations for the linker. */
23444 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
23445 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
23446 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
23447 return 1;
23448
23449 /* Always generate relocations against function symbols. */
23450 if (fixp->fx_r_type == BFD_RELOC_32
23451 && fixp->fx_addsy
23452 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
23453 return 1;
23454
23455 return generic_force_reloc (fixp);
23456 }
23457
23458 #if defined (OBJ_ELF) || defined (OBJ_COFF)
23459 /* Relocations against function names must be left unadjusted,
23460 so that the linker can use this information to generate interworking
23461 stubs. The MIPS version of this function
23462 also prevents relocations that are mips-16 specific, but I do not
23463 know why it does this.
23464
23465 FIXME:
23466 There is one other problem that ought to be addressed here, but
23467 which currently is not: Taking the address of a label (rather
23468 than a function) and then later jumping to that address. Such
23469 addresses also ought to have their bottom bit set (assuming that
23470 they reside in Thumb code), but at the moment they will not. */
23471
23472 bfd_boolean
23473 arm_fix_adjustable (fixS * fixP)
23474 {
23475 if (fixP->fx_addsy == NULL)
23476 return 1;
23477
23478 /* Preserve relocations against symbols with function type. */
23479 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
23480 return FALSE;
23481
23482 if (THUMB_IS_FUNC (fixP->fx_addsy)
23483 && fixP->fx_subsy == NULL)
23484 return FALSE;
23485
23486 /* We need the symbol name for the VTABLE entries. */
23487 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
23488 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
23489 return FALSE;
23490
23491 /* Don't allow symbols to be discarded on GOT related relocs. */
23492 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
23493 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
23494 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
23495 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
23496 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
23497 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
23498 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
23499 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
23500 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
23501 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
23502 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
23503 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
23504 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
23505 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
23506 return FALSE;
23507
23508 /* Similarly for group relocations. */
23509 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
23510 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
23511 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
23512 return FALSE;
23513
23514 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
23515 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
23516 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23517 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
23518 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
23519 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23520 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
23521 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
23522 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
23523 return FALSE;
23524
23525 return TRUE;
23526 }
23527 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
23528
23529 #ifdef OBJ_ELF
23530
23531 const char *
23532 elf32_arm_target_format (void)
23533 {
23534 #ifdef TE_SYMBIAN
23535 return (target_big_endian
23536 ? "elf32-bigarm-symbian"
23537 : "elf32-littlearm-symbian");
23538 #elif defined (TE_VXWORKS)
23539 return (target_big_endian
23540 ? "elf32-bigarm-vxworks"
23541 : "elf32-littlearm-vxworks");
23542 #elif defined (TE_NACL)
23543 return (target_big_endian
23544 ? "elf32-bigarm-nacl"
23545 : "elf32-littlearm-nacl");
23546 #else
23547 if (target_big_endian)
23548 return "elf32-bigarm";
23549 else
23550 return "elf32-littlearm";
23551 #endif
23552 }
23553
23554 void
23555 armelf_frob_symbol (symbolS * symp,
23556 int * puntp)
23557 {
23558 elf_frob_symbol (symp, puntp);
23559 }
23560 #endif
23561
23562 /* MD interface: Finalization. */
23563
23564 void
23565 arm_cleanup (void)
23566 {
23567 literal_pool * pool;
23568
23569 /* Ensure that all the IT blocks are properly closed. */
23570 check_it_blocks_finished ();
23571
23572 for (pool = list_of_pools; pool; pool = pool->next)
23573 {
23574 /* Put it at the end of the relevant section. */
23575 subseg_set (pool->section, pool->sub_section);
23576 #ifdef OBJ_ELF
23577 arm_elf_change_section ();
23578 #endif
23579 s_ltorg (0);
23580 }
23581 }
23582
23583 #ifdef OBJ_ELF
23584 /* Remove any excess mapping symbols generated for alignment frags in
23585 SEC. We may have created a mapping symbol before a zero byte
23586 alignment; remove it if there's a mapping symbol after the
23587 alignment. */
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections from arm_adjust_symtab;
   ABFD and DUMMY are unused.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Section with no frag chain (e.g. no contents): nothing to do.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward over empty frags to decide whether SYM is made
	 redundant by a following mapping symbol or by end-of-section.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
23648 #endif
23649
23650 /* Adjust the symbol table. This marks Thumb symbols as distinct from
23651 ARM ones. */
23652
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones: for COFF this rewrites storage classes to the C_THUMB*
   variants, for ELF it sets the branch-target / STT_ARM_16BIT info.  */
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (	 S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		/* Any other storage class on a Thumb function is a bug.  */
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Flag interworking-capable symbols in the COFF aux data.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  /* Remember the original binding so it can be re-combined with
	     the new type below.  */
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a/$t/$d etc.) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_target_internal
		  = ST_BRANCH_TO_THUMB;
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
23731
23732 /* MD interface: Initialization. */
23733
23734 static void
23735 set_constant_flonums (void)
23736 {
23737 int i;
23738
23739 for (i = 0; i < NUM_FLOAT_VALS; i++)
23740 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
23741 abort ();
23742 }
23743
23744 /* Auto-select Thumb mode if it's the only available instruction set for the
23745 given architecture. */
23746
23747 static void
23748 autoselect_thumb_from_cpu_variant (void)
23749 {
23750 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
23751 opcode_select (16);
23752 }
23753
/* MD interface: one-time target initialization.  Builds the opcode /
   operand lookup hashes, resolves the CPU/FPU selection from the
   command-line options, sets the object-file private flags, and records
   the BFD machine type.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Allocate all lookup tables up front; any failure is fatal.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate the hashes from the static tables.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  /* Still no FPU selection: fall back to a sensible default.  */
  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  /* The working feature set is the union of CPU and FPU features.  */
  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Most specific extension first.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
23978
23979 /* Command line processing. */
23980
23981 /* md_parse_option
23982 Invocation line includes a switch not recognized by the base assembler.
23983 See if it's a processor-specific option.
23984
23985 This routine is somewhat complicated by the need for backwards
23986 compatibility (since older releases of gcc can't be changed).
23987 The new options try to make the interface as compatible as
23988 possible with GCC.
23989
23990 New options (supported) are:
23991
23992 -mcpu=<cpu name> Assemble for selected processor
23993 -march=<architecture name> Assemble for selected architecture
23994 -mfpu=<fpu architecture> Assemble for selected FPU.
23995 -EB/-mbig-endian Big-endian
23996 -EL/-mlittle-endian Little-endian
23997 -k Generate PIC code
23998 -mthumb Start in Thumb mode
23999 -mthumb-interwork Code supports ARM/Thumb interworking
24000
24001 -m[no-]warn-deprecated Warn about deprecated features
24002
24003 For now we will also provide support for:
24004
24005 -mapcs-32 32-bit Program counter
24006 -mapcs-26 26-bit Program counter
24007    -mapcs-float		 Floats passed in FP registers
24008 -mapcs-reentrant Reentrant code
24009 -matpcs
24010 (sometime these will probably be replaced with -mapcs=<list of options>
24011 and -matpcs=<list of options>)
24012
24013    The remaining options are only supported for backwards compatibility.
24014 Cpu variants, the arm part is optional:
24015 -m[arm]1 Currently not supported.
24016 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
24017 -m[arm]3 Arm 3 processor
24018 -m[arm]6[xx], Arm 6 processors
24019 -m[arm]7[xx][t][[d]m] Arm 7 processors
24020 -m[arm]8[10] Arm 8 processors
24021 -m[arm]9[20][tdmi] Arm 9 processors
24022 -mstrongarm[110[0]] StrongARM processors
24023 -mxscale XScale processors
24024 -m[arm]v[2345[t[e]]] Arm architectures
24025 -mall All (except the ARM1)
24026 FP variants:
24027 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
24028 -mfpe-old (No float load/store multiples)
24029 -mvfpxd VFP Single precision
24030 -mvfp All VFP
24031 -mno-fpu Disable all floating point instructions
24032
24033 The following CPU names are recognized:
24034 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24035 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
24036 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
24037 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24038 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
24039 arm10t arm10e, arm1020t, arm1020e, arm10200e,
24040 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
24041
24042 */
24043
24044 const char * md_shortopts = "m:k";
24045
24046 #ifdef ARM_BI_ENDIAN
24047 #define OPTION_EB (OPTION_MD_BASE + 0)
24048 #define OPTION_EL (OPTION_MD_BASE + 1)
24049 #else
24050 #if TARGET_BYTES_BIG_ENDIAN
24051 #define OPTION_EB (OPTION_MD_BASE + 0)
24052 #else
24053 #define OPTION_EL (OPTION_MD_BASE + 1)
24054 #endif
24055 #endif
24056 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
24057
/* Long options recognised by getopt; the OPTION_* codes are defined
   above relative to OPTION_MD_BASE.  Endianness entries are only
   present when the target supports that endianness.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
24071
/* Describes a simple boolean/integer command-line option: matching
   OPTION sets *VAR to VALUE (see arm_opts below).  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int  *var;		/* Variable to change.	*/
  int	value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};
24080
/* Simple flag-style options; terminated by an all-NULL entry.  */
struct arm_option_table arm_opts[] =
{
  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code,	1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs,	1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.	 */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},

  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
   &warn_on_deprecated, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
24106
/* Describes a deprecated CPU/FPU option: matching OPTION sets *VAR to
   the feature set VALUE, and DEPRECATED names the replacement to
   suggest (see arm_legacy_opts below).  */
struct arm_legacy_option_table
{
  char *option;				/* Option name to match.  */
  const arm_feature_set	**var;		/* Variable to change.	*/
  const arm_feature_set	value;		/* What to change it to.  */
  char *deprecated;			/* If non-null, print this message.  */
};
24114
/* Legacy -m<cpu>/-m<arch>/-m<fpu> spellings kept only for backwards
   compatibility; each entry points the user at the modern -mcpu=/
   -march=/-mfpu= replacement.  Terminated by an all-NULL entry.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",  &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
24227
/* Describes one -mcpu= argument: its feature set, implied default FPU,
   and display name (see the arm_cpus table below).  */
struct arm_cpu_option_table
{
  char *name;
  size_t name_len;		/* Length of NAME, precomputed by ARM_CPU_OPT.	*/
  const arm_feature_set	value;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
24240
24241 /* This list should, at a minimum, contain all the cpu names
24242 recognized by GCC. */
24243 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
24244 static const struct arm_cpu_option_table arm_cpus[] =
24245 {
24246 ARM_CPU_OPT ("all", ARM_ANY, FPU_ARCH_FPA, NULL),
24247 ARM_CPU_OPT ("arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL),
24248 ARM_CPU_OPT ("arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL),
24249 ARM_CPU_OPT ("arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
24250 ARM_CPU_OPT ("arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
24251 ARM_CPU_OPT ("arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24252 ARM_CPU_OPT ("arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24253 ARM_CPU_OPT ("arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24254 ARM_CPU_OPT ("arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24255 ARM_CPU_OPT ("arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24256 ARM_CPU_OPT ("arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24257 ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
24258 ARM_CPU_OPT ("arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24259 ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
24260 ARM_CPU_OPT ("arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24261 ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
24262 ARM_CPU_OPT ("arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24263 ARM_CPU_OPT ("arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24264 ARM_CPU_OPT ("arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24265 ARM_CPU_OPT ("arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24266 ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24267 ARM_CPU_OPT ("arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24268 ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24269 ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24270 ARM_CPU_OPT ("arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24271 ARM_CPU_OPT ("arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24272 ARM_CPU_OPT ("arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24273 ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24274 ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24275 ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24276 ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24277 ARM_CPU_OPT ("arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24278 ARM_CPU_OPT ("arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24279 ARM_CPU_OPT ("strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24280 ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24281 ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24282 ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24283 ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24284 ARM_CPU_OPT ("arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24285 ARM_CPU_OPT ("arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"),
24286 ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24287 ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24288 ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24289 ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24290 ARM_CPU_OPT ("fa526", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24291 ARM_CPU_OPT ("fa626", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24292 /* For V5 or later processors we default to using VFP; but the user
24293 should really set the FPU type explicitly. */
24294 ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
24295 ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24296 ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
24297 ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
24298 ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
24299 ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
24300 ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"),
24301 ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24302 ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
24303 ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"),
24304 ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24305 ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24306 ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
24307 ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
24308 ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24309 ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"),
24310 ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
24311 ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24312 ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24313 ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2,
24314 "ARM1026EJ-S"),
24315 ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
24316 ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24317 ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24318 ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24319 ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24320 ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24321 ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"),
24322 ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL),
24323 ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2,
24324 "ARM1136JF-S"),
24325 ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL),
24326 ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, "MPCore"),
24327 ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, "MPCore"),
24328 ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL),
24329 ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL),
24330 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL),
24331 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL),
24332 ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC,
24333 FPU_NONE, "Cortex-A5"),
24334 ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
24335 "Cortex-A7"),
24336 ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC,
24337 ARM_FEATURE (0, FPU_VFP_V3
24338 | FPU_NEON_EXT_V1),
24339 "Cortex-A8"),
24340 ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC,
24341 ARM_FEATURE (0, FPU_VFP_V3
24342 | FPU_NEON_EXT_V1),
24343 "Cortex-A9"),
24344 ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
24345 "Cortex-A12"),
24346 ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
24347 "Cortex-A15"),
24348 ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24349 "Cortex-A53"),
24350 ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24351 "Cortex-A57"),
24352 ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R, FPU_NONE, "Cortex-R4"),
24353 ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R, FPU_ARCH_VFP_V3D16,
24354 "Cortex-R4F"),
24355 ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV,
24356 FPU_NONE, "Cortex-R5"),
24357 ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV,
24358 FPU_ARCH_VFP_V3D16,
24359 "Cortex-R7"),
24360 ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM, FPU_NONE, "Cortex-M4"),
24361 ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M, FPU_NONE, "Cortex-M3"),
24362 ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M1"),
24363 ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0"),
24364 ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0+"),
24365 /* ??? XSCALE is really an architecture. */
24366 ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
24367 /* ??? iwmmxt is not a processor. */
24368 ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
24369 ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
24370 ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
24371 /* Maverick */
24372 ARM_CPU_OPT ("ep9312", ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
24373 FPU_ARCH_MAVERICK, "ARM920T"),
24374 /* Marvell processors. */
24375 ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE (ARM_AEXT_V7A | ARM_EXT_MP | ARM_EXT_SEC, 0),
24376 FPU_ARCH_VFP_V3D16, NULL),
24377
24378 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
24379 };
24380 #undef ARM_CPU_OPT
24381
/* An entry in the -march= option table: maps an architecture name to
   its feature bits and to the FPU assumed when no -mfpu= is given.  */
struct arm_arch_option_table
{
  char *name;			/* Architecture name, e.g. "armv7-a".  */
  size_t name_len;		/* Length of NAME, excluding the NUL.  */
  const arm_feature_set value;	/* Feature bits for this architecture.  */
  const arm_feature_set default_fpu; /* Default FPU for this architecture.  */
};
24389
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
  /* From ARMv5 on the default FPU is VFP rather than FPA.  */
  ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6ZKT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  /* Sentinel: a NULL name terminates the scan.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_ARCH_OPT
24441
/* ISA extensions in the co-processor and main instruction set space.
   One entry in the -mcpu=/-march= "+extension" table.  */
struct arm_option_extension_value_table
{
  char *name;			/* Extension name, e.g. "crypto".  */
  size_t name_len;		/* Length of NAME, excluding the NUL.  */
  const arm_feature_set value;	/* Feature bits added/removed by it.  */
  const arm_feature_set allowed_archs; /* Architectures it applies to.  */
};
24450
24451 /* The following table must be in alphabetical order with a NULL last entry.
24452 */
24453 #define ARM_EXT_OPT(N, V, AA) { N, sizeof (N) - 1, V, AA }
24454 static const struct arm_option_extension_value_table arm_extensions[] =
24455 {
24456 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE (ARM_EXT_V8, 0)),
24457 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24458 ARM_FEATURE (ARM_EXT_V8, 0)),
24459 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8,
24460 ARM_FEATURE (ARM_EXT_V8, 0)),
24461 ARM_EXT_OPT ("idiv", ARM_FEATURE (ARM_EXT_ADIV | ARM_EXT_DIV, 0),
24462 ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
24463 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE (0, ARM_CEXT_IWMMXT), ARM_ANY),
24464 ARM_EXT_OPT ("iwmmxt2",
24465 ARM_FEATURE (0, ARM_CEXT_IWMMXT2), ARM_ANY),
24466 ARM_EXT_OPT ("maverick",
24467 ARM_FEATURE (0, ARM_CEXT_MAVERICK), ARM_ANY),
24468 ARM_EXT_OPT ("mp", ARM_FEATURE (ARM_EXT_MP, 0),
24469 ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
24470 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
24471 ARM_FEATURE (ARM_EXT_V8, 0)),
24472 ARM_EXT_OPT ("os", ARM_FEATURE (ARM_EXT_OS, 0),
24473 ARM_FEATURE (ARM_EXT_V6M, 0)),
24474 ARM_EXT_OPT ("sec", ARM_FEATURE (ARM_EXT_SEC, 0),
24475 ARM_FEATURE (ARM_EXT_V6K | ARM_EXT_V7A, 0)),
24476 ARM_EXT_OPT ("virt", ARM_FEATURE (ARM_EXT_VIRT | ARM_EXT_ADIV
24477 | ARM_EXT_DIV, 0),
24478 ARM_FEATURE (ARM_EXT_V7A, 0)),
24479 ARM_EXT_OPT ("xscale",ARM_FEATURE (0, ARM_CEXT_XSCALE), ARM_ANY),
24480 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
24481 };
24482 #undef ARM_EXT_OPT
24483
/* ISA floating-point and Advanced SIMD extensions.
   One entry in the -mfpu= option table.  */
struct arm_option_fpu_value_table
{
  char *name;			/* FPU name, e.g. "vfpv3-d16".  */
  const arm_feature_set value;	/* Corresponding FPU feature bits.  */
};
24490
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  /* Aliases naming the CPU the FPU shipped with.  */
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {NULL,		ARM_ARCH_NONE}
};
24535
/* Generic name -> integer value mapping, used for the simpler option
   tables (-mfloat-abi=, -meabi=).  */
struct arm_option_value_table
{
  char *name;			/* Option argument to match.  */
  long value;			/* Value it selects.  */
};
24541
/* Arguments accepted by -mfloat-abi=.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
24549
#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.
   Arguments accepted by -meabi=.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
24560
/* One entry in the table of long (string-valued) command-line options;
   matched by prefix in md_parse_option.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
24568
/* Parse a string STR of "+extension" / "+noextension" suffixes (as given
   after a CPU or architecture name on the command line) and apply them to
   the feature set *OPT_P points at.  On entry *OPT_P typically points at a
   shared read-only feature set; a heap copy is made (intentionally never
   freed: it must outlive option parsing) and *OPT_P is redirected to it.
   Returns TRUE on success, FALSE after reporting an error with as_bad.  */

static bfd_boolean
arm_parse_extension (char *str, const arm_feature_set **opt_p)
{
  arm_feature_set *ext_set = (arm_feature_set *)
      xmalloc (sizeof (arm_feature_set));

  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  int adding_value = -1;

  /* Copy the feature set, so that we can modify it.  */
  *ext_set = **opt_p;
  *opt_p = ext_set;

  while (str != NULL && *str != 0)
    {
      char *ext;
      size_t len;

      /* Every extension must be introduced by '+'.  */
      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      /* A "no" prefix switches (permanently) to removal mode.  */
      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      /* Restart the table scan: removals form a fresh
		 alphabetical sequence.  */
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* Scan over the options table trying to find an exact match.
	 The scan resumes where the previous match left off, which is
	 what enforces alphabetical ordering.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    /* Check we can apply the extension to this architecture.  */
	    if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);

	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  A full rescan
	     from the start of the table distinguishes the two cases.  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return TRUE;
}
24691
24692 static bfd_boolean
24693 arm_parse_cpu (char *str)
24694 {
24695 const struct arm_cpu_option_table *opt;
24696 char *ext = strchr (str, '+');
24697 size_t len;
24698
24699 if (ext != NULL)
24700 len = ext - str;
24701 else
24702 len = strlen (str);
24703
24704 if (len == 0)
24705 {
24706 as_bad (_("missing cpu name `%s'"), str);
24707 return FALSE;
24708 }
24709
24710 for (opt = arm_cpus; opt->name != NULL; opt++)
24711 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24712 {
24713 mcpu_cpu_opt = &opt->value;
24714 mcpu_fpu_opt = &opt->default_fpu;
24715 if (opt->canonical_name)
24716 strcpy (selected_cpu_name, opt->canonical_name);
24717 else
24718 {
24719 size_t i;
24720
24721 for (i = 0; i < len; i++)
24722 selected_cpu_name[i] = TOUPPER (opt->name[i]);
24723 selected_cpu_name[i] = 0;
24724 }
24725
24726 if (ext != NULL)
24727 return arm_parse_extension (ext, &mcpu_cpu_opt);
24728
24729 return TRUE;
24730 }
24731
24732 as_bad (_("unknown cpu `%s'"), str);
24733 return FALSE;
24734 }
24735
24736 static bfd_boolean
24737 arm_parse_arch (char *str)
24738 {
24739 const struct arm_arch_option_table *opt;
24740 char *ext = strchr (str, '+');
24741 size_t len;
24742
24743 if (ext != NULL)
24744 len = ext - str;
24745 else
24746 len = strlen (str);
24747
24748 if (len == 0)
24749 {
24750 as_bad (_("missing architecture name `%s'"), str);
24751 return FALSE;
24752 }
24753
24754 for (opt = arm_archs; opt->name != NULL; opt++)
24755 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24756 {
24757 march_cpu_opt = &opt->value;
24758 march_fpu_opt = &opt->default_fpu;
24759 strcpy (selected_cpu_name, opt->name);
24760
24761 if (ext != NULL)
24762 return arm_parse_extension (ext, &march_cpu_opt);
24763
24764 return TRUE;
24765 }
24766
24767 as_bad (_("unknown architecture `%s'\n"), str);
24768 return FALSE;
24769 }
24770
24771 static bfd_boolean
24772 arm_parse_fpu (char * str)
24773 {
24774 const struct arm_option_fpu_value_table * opt;
24775
24776 for (opt = arm_fpus; opt->name != NULL; opt++)
24777 if (streq (opt->name, str))
24778 {
24779 mfpu_opt = &opt->value;
24780 return TRUE;
24781 }
24782
24783 as_bad (_("unknown floating point format `%s'\n"), str);
24784 return FALSE;
24785 }
24786
24787 static bfd_boolean
24788 arm_parse_float_abi (char * str)
24789 {
24790 const struct arm_option_value_table * opt;
24791
24792 for (opt = arm_float_abis; opt->name != NULL; opt++)
24793 if (streq (opt->name, str))
24794 {
24795 mfloat_abi_opt = opt->value;
24796 return TRUE;
24797 }
24798
24799 as_bad (_("unknown floating point abi `%s'\n"), str);
24800 return FALSE;
24801 }
24802
#ifdef OBJ_ELF
/* Parse the argument to -meabi= (gnu / 4 / 5).  On success record the
   ELF header EABI flags in meabi_flags and return TRUE; otherwise
   report an error and return FALSE.

   Fix: drop the stray "\n" from the as_bad message — as_bad supplies
   its own line termination.  */

static bfd_boolean
arm_parse_eabi (char * str)
{
  const struct arm_option_value_table *opt;

  for (opt = arm_eabis; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	meabi_flags = opt->value;
	return TRUE;
      }
  as_bad (_("unknown EABI `%s'"), str);
  return FALSE;
}
#endif
24819
24820 static bfd_boolean
24821 arm_parse_it_mode (char * str)
24822 {
24823 bfd_boolean ret = TRUE;
24824
24825 if (streq ("arm", str))
24826 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
24827 else if (streq ("thumb", str))
24828 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
24829 else if (streq ("always", str))
24830 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
24831 else if (streq ("never", str))
24832 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
24833 else
24834 {
24835 as_bad (_("unknown implicit IT mode `%s', should be "\
24836 "arm, thumb, always, or never."), str);
24837 ret = FALSE;
24838 }
24839
24840 return ret;
24841 }
24842
24843 static bfd_boolean
24844 arm_ccs_mode (char * unused ATTRIBUTE_UNUSED)
24845 {
24846 codecomposer_syntax = TRUE;
24847 arm_comment_chars[0] = ';';
24848 arm_line_separator_chars[0] = 0;
24849 return TRUE;
24850 }
24851
/* Long command-line options, matched by prefix in md_parse_option;
   the remainder of the argument is handed to the entry's func.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
24872
/* GAS target hook: handle one command-line option.  C is the option
   character and ARG its argument (or NULL).  Returns 1 if the option
   was consumed, 0 if it is not an ARM option.  Unrecognized characters
   fall through to three tables in turn: arm_opts (simple flags),
   arm_legacy_opts (deprecated flags) and arm_long_opts (string-valued
   options such as -mcpu=).  */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      /* Legacy options store a pointer to the selected value.  */
	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with the text that follows
		 the matched prefix (ARG excludes the leading '-%c').  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
24963
/* GAS target hook: print the ARM-specific option summary to FP,
   driven by the same tables md_parse_option consults, so new options
   added to the tables are documented automatically.  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
 -EB assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
 -EL assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
 --fix-v4bx Allow BX in ARMv4 code\n"));
}
24993
24994
24995 #ifdef OBJ_ELF
/* Pairs an EABI Tag_CPU_arch value with the architecture feature set
   that implies it.  */
typedef struct
{
  int val;			/* EABI Tag_CPU_arch attribute value.  */
  arm_feature_set flags;	/* Features of that architecture.  */
} cpu_arch_ver_table;
25001
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  Note that the ordering is by feature subset, not
   by VAL: e.g. {9, V6K} precedes {7, V6Z} because v6K's features are a
   subset of v6Z's — aeabi_set_public_attributes keeps the VAL of the last
   entry whose features are present, so later (larger) feature sets must
   come later.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {3, ARM_ARCH_V5T},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {9, ARM_ARCH_V6K},
    {7, ARM_ARCH_V6Z},
    {11, ARM_ARCH_V6M},
    {12, ARM_ARCH_V6SM},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7VE},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {14, ARM_ARCH_V8A},
    {0, ARM_ARCH_NONE}
};
25024
25025 /* Set an attribute if it has not already been set by the user. */
25026 static void
25027 aeabi_set_attribute_int (int tag, int value)
25028 {
25029 if (tag < 1
25030 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25031 || !attributes_set_explicitly[tag])
25032 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
25033 }
25034
25035 static void
25036 aeabi_set_attribute_string (int tag, const char *value)
25037 {
25038 if (tag < 1
25039 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25040 || !attributes_set_explicitly[tag])
25041 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
25042 }
25043
/* Set the public EABI object attributes (the .ARM.attributes section):
   derive the architecture, profile, ISA usage, FPU/SIMD level, divide
   and virtualization tags from the features actually used plus any
   requested CPU, deferring to attributes the user set explicitly.  */
static void
aeabi_set_public_attributes (void)
{
  int arch;
  char profile;
  int virt_sec = 0;
  int fp16_optional = 0;
  arm_feature_set flags;
  arm_feature_set tmp;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);

  /* Any ARM instruction at all implies at least ARMv1.  */
  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

  /* Any Thumb instruction at all implies at least ARMv4T.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* We need to make sure that the attributes do not identify us as v6S-M
     when the only v6S-M feature in use is the Operating System Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
    if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
      ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);

  /* Walk the version table, keeping the VAL of the last entry whose
     features are present; the table is sorted least features first.  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == 10
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    arch = 13;

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* Report "armvN..." names as "VN..." in upper case.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
    profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    profile = 'M';
  else
    profile = '\0';

  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use: 2 for Thumb-2, 1 for original Thumb.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    aeabi_set_attribute_int (Tag_THUMB_ISA_use,
	ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8))
    aeabi_set_attribute_int (Tag_VFP_arch, 7);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use: single precision only.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8 we set the tag to 0 as integer divide is implied by the base
     architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use: bit 0 = Security Extensions,
     bit 1 = Virtualization Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
25235
25236 /* Add the default contents for the .ARM.attributes section. */
25237 void
25238 arm_md_end (void)
25239 {
25240 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
25241 return;
25242
25243 aeabi_set_public_attributes ();
25244 }
25245 #endif /* OBJ_ELF */
25246
25247
/* Parse a .cpu directive: switch the selected CPU (and hence the
   permitted instruction set) mid-assembly.  The CPU name is taken from
   the input line, looked up in arm_cpus, and cpu_variant is recomputed
   from the new CPU plus the current FPU selection.  */

static void
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_cpu_option_table *opt;
  char *name;
  char saved_char;

  /* NUL-terminate the name in the input buffer, remembering the byte
     we overwrite so it can be restored before returning.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	selected_cpu = opt->value;
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    /* No canonical form: record the name upper-cased.  */
	    int i;
	    for (i = 0; opt->name[i]; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);

	    selected_cpu_name[i] = 0;
	  }
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
25288
25289
25290 /* Parse a .arch directive. */
25291
25292 static void
25293 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
25294 {
25295 const struct arm_arch_option_table *opt;
25296 char saved_char;
25297 char *name;
25298
25299 name = input_line_pointer;
25300 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25301 input_line_pointer++;
25302 saved_char = *input_line_pointer;
25303 *input_line_pointer = 0;
25304
25305 /* Skip the first "all" entry. */
25306 for (opt = arm_archs + 1; opt->name != NULL; opt++)
25307 if (streq (opt->name, name))
25308 {
25309 mcpu_cpu_opt = &opt->value;
25310 selected_cpu = opt->value;
25311 strcpy (selected_cpu_name, opt->name);
25312 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25313 *input_line_pointer = saved_char;
25314 demand_empty_rest_of_line ();
25315 return;
25316 }
25317
25318 as_bad (_("unknown architecture `%s'\n"), name);
25319 *input_line_pointer = saved_char;
25320 ignore_rest_of_line ();
25321 }
25322
25323
25324 /* Parse a .object_arch directive. */
25325
25326 static void
25327 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
25328 {
25329 const struct arm_arch_option_table *opt;
25330 char saved_char;
25331 char *name;
25332
25333 name = input_line_pointer;
25334 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25335 input_line_pointer++;
25336 saved_char = *input_line_pointer;
25337 *input_line_pointer = 0;
25338
25339 /* Skip the first "all" entry. */
25340 for (opt = arm_archs + 1; opt->name != NULL; opt++)
25341 if (streq (opt->name, name))
25342 {
25343 object_arch = &opt->value;
25344 *input_line_pointer = saved_char;
25345 demand_empty_rest_of_line ();
25346 return;
25347 }
25348
25349 as_bad (_("unknown architecture `%s'\n"), name);
25350 *input_line_pointer = saved_char;
25351 ignore_rest_of_line ();
25352 }
25353
25354 /* Parse a .arch_extension directive. */
25355
25356 static void
25357 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
25358 {
25359 const struct arm_option_extension_value_table *opt;
25360 char saved_char;
25361 char *name;
25362 int adding_value = 1;
25363
25364 name = input_line_pointer;
25365 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25366 input_line_pointer++;
25367 saved_char = *input_line_pointer;
25368 *input_line_pointer = 0;
25369
25370 if (strlen (name) >= 2
25371 && strncmp (name, "no", 2) == 0)
25372 {
25373 adding_value = 0;
25374 name += 2;
25375 }
25376
25377 for (opt = arm_extensions; opt->name != NULL; opt++)
25378 if (streq (opt->name, name))
25379 {
25380 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
25381 {
25382 as_bad (_("architectural extension `%s' is not allowed for the "
25383 "current base architecture"), name);
25384 break;
25385 }
25386
25387 if (adding_value)
25388 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu, opt->value);
25389 else
25390 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->value);
25391
25392 mcpu_cpu_opt = &selected_cpu;
25393 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25394 *input_line_pointer = saved_char;
25395 demand_empty_rest_of_line ();
25396 return;
25397 }
25398
25399 if (opt->name == NULL)
25400 as_bad (_("unknown architecture extension `%s'\n"), name);
25401
25402 *input_line_pointer = saved_char;
25403 ignore_rest_of_line ();
25404 }
25405
25406 /* Parse a .fpu directive. */
25407
25408 static void
25409 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
25410 {
25411 const struct arm_option_fpu_value_table *opt;
25412 char saved_char;
25413 char *name;
25414
25415 name = input_line_pointer;
25416 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25417 input_line_pointer++;
25418 saved_char = *input_line_pointer;
25419 *input_line_pointer = 0;
25420
25421 for (opt = arm_fpus; opt->name != NULL; opt++)
25422 if (streq (opt->name, name))
25423 {
25424 mfpu_opt = &opt->value;
25425 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25426 *input_line_pointer = saved_char;
25427 demand_empty_rest_of_line ();
25428 return;
25429 }
25430
25431 as_bad (_("unknown floating point format `%s'\n"), name);
25432 *input_line_pointer = saved_char;
25433 ignore_rest_of_line ();
25434 }
25435
25436 /* Copy symbol information. */
25437
void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  /* Copy the ARM target-private flag word from SRC to DEST so that
     DEST inherits SRC's target-specific symbol properties.  */
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
25443
25444 #ifdef OBJ_ELF
25445 /* Given a symbolic attribute NAME, return the proper integer value.
25446 Returns -1 if the attribute is not known. */
25447
25448 int
25449 arm_convert_symbolic_attribute (const char *name)
25450 {
25451 static const struct
25452 {
25453 const char * name;
25454 const int tag;
25455 }
25456 attribute_table[] =
25457 {
25458 /* When you modify this table you should
25459 also modify the list in doc/c-arm.texi. */
25460 #define T(tag) {#tag, tag}
25461 T (Tag_CPU_raw_name),
25462 T (Tag_CPU_name),
25463 T (Tag_CPU_arch),
25464 T (Tag_CPU_arch_profile),
25465 T (Tag_ARM_ISA_use),
25466 T (Tag_THUMB_ISA_use),
25467 T (Tag_FP_arch),
25468 T (Tag_VFP_arch),
25469 T (Tag_WMMX_arch),
25470 T (Tag_Advanced_SIMD_arch),
25471 T (Tag_PCS_config),
25472 T (Tag_ABI_PCS_R9_use),
25473 T (Tag_ABI_PCS_RW_data),
25474 T (Tag_ABI_PCS_RO_data),
25475 T (Tag_ABI_PCS_GOT_use),
25476 T (Tag_ABI_PCS_wchar_t),
25477 T (Tag_ABI_FP_rounding),
25478 T (Tag_ABI_FP_denormal),
25479 T (Tag_ABI_FP_exceptions),
25480 T (Tag_ABI_FP_user_exceptions),
25481 T (Tag_ABI_FP_number_model),
25482 T (Tag_ABI_align_needed),
25483 T (Tag_ABI_align8_needed),
25484 T (Tag_ABI_align_preserved),
25485 T (Tag_ABI_align8_preserved),
25486 T (Tag_ABI_enum_size),
25487 T (Tag_ABI_HardFP_use),
25488 T (Tag_ABI_VFP_args),
25489 T (Tag_ABI_WMMX_args),
25490 T (Tag_ABI_optimization_goals),
25491 T (Tag_ABI_FP_optimization_goals),
25492 T (Tag_compatibility),
25493 T (Tag_CPU_unaligned_access),
25494 T (Tag_FP_HP_extension),
25495 T (Tag_VFP_HP_extension),
25496 T (Tag_ABI_FP_16bit_format),
25497 T (Tag_MPextension_use),
25498 T (Tag_DIV_use),
25499 T (Tag_nodefaults),
25500 T (Tag_also_compatible_with),
25501 T (Tag_conformance),
25502 T (Tag_T2EE_use),
25503 T (Tag_Virtualization_use),
25504 /* We deliberately do not include Tag_MPextension_use_legacy. */
25505 #undef T
25506 };
25507 unsigned int i;
25508
25509 if (name == NULL)
25510 return -1;
25511
25512 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
25513 if (streq (name, attribute_table[i].name))
25514 return attribute_table[i].tag;
25515
25516 return -1;
25517 }
25518
25519
25520 /* Apply sym value for relocations only in the case that
25521 they are for local symbols and you have the respective
25522 architectural feature for blx and simple switches. */
25523 int
25524 arm_apply_sym_value (struct fix * fixP)
25525 {
25526 if (fixP->fx_addsy
25527 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25528 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
25529 {
25530 switch (fixP->fx_r_type)
25531 {
25532 case BFD_RELOC_ARM_PCREL_BLX:
25533 case BFD_RELOC_THUMB_PCREL_BRANCH23:
25534 if (ARM_IS_FUNC (fixP->fx_addsy))
25535 return 1;
25536 break;
25537
25538 case BFD_RELOC_ARM_PCREL_CALL:
25539 case BFD_RELOC_THUMB_PCREL_BLX:
25540 if (THUMB_IS_FUNC (fixP->fx_addsy))
25541 return 1;
25542 break;
25543
25544 default:
25545 break;
25546 }
25547
25548 }
25549 return 0;
25550 }
25551 #endif /* OBJ_ELF */