]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-arm.c
2010-01-04 Daniel Gutson <dgutson@codesourcery.com>
[thirdparty/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 3, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include "as.h"
29 #include <limits.h>
30 #include <stdarg.h>
31 #define NO_RELOC 0
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
35
36 #include "opcode/arm.h"
37
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dw2gencfi.h"
41 #endif
42
43 #include "dwarf2dbg.h"
44
45 #ifdef OBJ_ELF
46 /* Must be at least the size of the largest unwind opcode (currently two). */
47 #define ARM_OPCODE_CHUNK_SIZE 8
48
49 /* This structure holds the unwinding state. */
50
51 static struct
52 {
53 symbolS * proc_start;
54 symbolS * table_entry;
55 symbolS * personality_routine;
56 int personality_index;
57 /* The segment containing the function. */
58 segT saved_seg;
59 subsegT saved_subseg;
60 /* Opcodes generated from this function. */
61 unsigned char * opcodes;
62 int opcode_count;
63 int opcode_alloc;
64 /* The number of bytes pushed to the stack. */
65 offsetT frame_size;
66 /* We don't add stack adjustment opcodes immediately so that we can merge
67 multiple adjustments. We can also omit the final adjustment
68 when using a frame pointer. */
69 offsetT pending_offset;
70 /* These two fields are set by both unwind_movsp and unwind_setfp. They
71 hold the reg+offset to use when restoring sp from a frame pointer. */
72 offsetT fp_offset;
73 int fp_reg;
74 /* Nonzero if an unwind_setfp directive has been seen. */
75 unsigned fp_used:1;
76 /* Nonzero if the last opcode restores sp from fp_reg. */
77 unsigned sp_restored:1;
78 } unwind;
79
80 #endif /* OBJ_ELF */
81
82 /* Results from operand parsing worker functions. */
83
84 typedef enum
85 {
86 PARSE_OPERAND_SUCCESS,
87 PARSE_OPERAND_FAIL,
88 PARSE_OPERAND_FAIL_NO_BACKTRACK
89 } parse_operand_result;
90
91 enum arm_float_abi
92 {
93 ARM_FLOAT_ABI_HARD,
94 ARM_FLOAT_ABI_SOFTFP,
95 ARM_FLOAT_ABI_SOFT
96 };
97
98 /* Types of processor to assemble for. */
99 #ifndef CPU_DEFAULT
100 #if defined __XSCALE__
101 #define CPU_DEFAULT ARM_ARCH_XSCALE
102 #else
103 #if defined __thumb__
104 #define CPU_DEFAULT ARM_ARCH_V5T
105 #endif
106 #endif
107 #endif
108
109 #ifndef FPU_DEFAULT
110 # ifdef TE_LINUX
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
113 # ifdef OBJ_ELF
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
115 # else
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
118 # endif
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
121 # else
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
124 # endif
125 #endif /* ifndef FPU_DEFAULT */
126
127 #define streq(a, b) (strcmp (a, b) == 0)
128
129 static arm_feature_set cpu_variant;
130 static arm_feature_set arm_arch_used;
131 static arm_feature_set thumb_arch_used;
132
133 /* Flags stored in private area of BFD structure. */
134 static int uses_apcs_26 = FALSE;
135 static int atpcs = FALSE;
136 static int support_interwork = FALSE;
137 static int uses_apcs_float = FALSE;
138 static int pic_code = FALSE;
139 static int fix_v4bx = FALSE;
140 /* Warn on using deprecated features. */
141 static int warn_on_deprecated = TRUE;
142
143
144 /* Variables that we set while parsing command-line options. Once all
145 options have been read we re-process these values to set the real
146 assembly flags. */
147 static const arm_feature_set *legacy_cpu = NULL;
148 static const arm_feature_set *legacy_fpu = NULL;
149
150 static const arm_feature_set *mcpu_cpu_opt = NULL;
151 static const arm_feature_set *mcpu_fpu_opt = NULL;
152 static const arm_feature_set *march_cpu_opt = NULL;
153 static const arm_feature_set *march_fpu_opt = NULL;
154 static const arm_feature_set *mfpu_opt = NULL;
155 static const arm_feature_set *object_arch = NULL;
156
157 /* Constants for known architecture features. */
158 static const arm_feature_set fpu_default = FPU_DEFAULT;
159 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
160 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
161 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
162 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
163 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
164 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
165 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
166 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
167
168 #ifdef CPU_DEFAULT
169 static const arm_feature_set cpu_default = CPU_DEFAULT;
170 #endif
171
172 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
173 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
174 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
175 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
176 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
177 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
178 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
179 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
180 static const arm_feature_set arm_ext_v4t_5 =
181 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
182 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
183 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
184 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
185 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
186 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
187 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
188 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
189 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
190 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
191 static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
192 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
193 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
194 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
195 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
196 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
197 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
198 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
199 static const arm_feature_set arm_ext_m =
200 ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_V7M, 0);
201
202 static const arm_feature_set arm_arch_any = ARM_ANY;
203 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
204 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
205 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
206
207 static const arm_feature_set arm_cext_iwmmxt2 =
208 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
209 static const arm_feature_set arm_cext_iwmmxt =
210 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
211 static const arm_feature_set arm_cext_xscale =
212 ARM_FEATURE (0, ARM_CEXT_XSCALE);
213 static const arm_feature_set arm_cext_maverick =
214 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
215 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
216 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
217 static const arm_feature_set fpu_vfp_ext_v1xd =
218 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
219 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
220 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
221 static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
222 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
223 static const arm_feature_set fpu_vfp_ext_d32 =
224 ARM_FEATURE (0, FPU_VFP_EXT_D32);
225 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
226 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
227 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
228 static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
229 static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
230 static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
231
232 static int mfloat_abi_opt = -1;
233 /* Record user cpu selection for object attributes. */
234 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
235 /* Must be long enough to hold any of the names in arm_cpus. */
236 static char selected_cpu_name[16];
237 #ifdef OBJ_ELF
238 # ifdef EABI_DEFAULT
239 static int meabi_flags = EABI_DEFAULT;
240 # else
241 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
242 # endif
243
244 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
245
246 bfd_boolean
247 arm_is_eabi (void)
248 {
249 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
250 }
251 #endif
252
253 #ifdef OBJ_ELF
254 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
255 symbolS * GOT_symbol;
256 #endif
257
258 /* 0: assemble for ARM,
259 1: assemble for Thumb,
260 2: assemble for Thumb even though target CPU does not support thumb
261 instructions. */
262 static int thumb_mode = 0;
263 /* A value distinct from the possible values for thumb_mode that we
264 can use to record whether thumb_mode has been copied into the
265 tc_frag_data field of a frag. */
266 #define MODE_RECORDED (1 << 4)
267
268 /* Specifies the intrinsic IT insn behavior mode. */
269 enum implicit_it_mode
270 {
271 IMPLICIT_IT_MODE_NEVER = 0x00,
272 IMPLICIT_IT_MODE_ARM = 0x01,
273 IMPLICIT_IT_MODE_THUMB = 0x02,
274 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
275 };
276 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
277
278 /* If unified_syntax is true, we are processing the new unified
279 ARM/Thumb syntax. Important differences from the old ARM mode:
280
281 - Immediate operands do not require a # prefix.
282 - Conditional affixes always appear at the end of the
283 instruction. (For backward compatibility, those instructions
284 that formerly had them in the middle, continue to accept them
285 there.)
286 - The IT instruction may appear, and if it does is validated
287 against subsequent conditional affixes. It does not generate
288 machine code.
289
290 Important differences from the old Thumb mode:
291
292 - Immediate operands do not require a # prefix.
293 - Most of the V6T2 instructions are only available in unified mode.
294 - The .N and .W suffixes are recognized and honored (it is an error
295 if they cannot be honored).
296 - All instructions set the flags if and only if they have an 's' affix.
297 - Conditional affixes may be used. They are validated against
298 preceding IT instructions. Unlike ARM mode, you cannot use a
299 conditional affix except in the scope of an IT instruction. */
300
301 static bfd_boolean unified_syntax = FALSE;
302
303 enum neon_el_type
304 {
305 NT_invtype,
306 NT_untyped,
307 NT_integer,
308 NT_float,
309 NT_poly,
310 NT_signed,
311 NT_unsigned
312 };
313
314 struct neon_type_el
315 {
316 enum neon_el_type type;
317 unsigned size;
318 };
319
320 #define NEON_MAX_TYPE_ELS 4
321
322 struct neon_type
323 {
324 struct neon_type_el el[NEON_MAX_TYPE_ELS];
325 unsigned elems;
326 };
327
328 enum it_instruction_type
329 {
330 OUTSIDE_IT_INSN,
331 INSIDE_IT_INSN,
332 INSIDE_IT_LAST_INSN,
333 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
334 if inside, should be the last one. */
335 NEUTRAL_IT_INSN, /* This could be either inside or outside,
336 i.e. BKPT and NOP. */
337 IT_INSN /* The IT insn has been parsed. */
338 };
339
340 struct arm_it
341 {
342 const char * error;
343 unsigned long instruction;
344 int size;
345 int size_req;
346 int cond;
347 /* "uncond_value" is set to the value in place of the conditional field in
348 unconditional versions of the instruction, or -1 if nothing is
349 appropriate. */
350 int uncond_value;
351 struct neon_type vectype;
352 /* This does not indicate an actual NEON instruction, only that
353 the mnemonic accepts neon-style type suffixes. */
354 int is_neon;
355 /* Set to the opcode if the instruction needs relaxation.
356 Zero if the instruction is not relaxed. */
357 unsigned long relax;
358 struct
359 {
360 bfd_reloc_code_real_type type;
361 expressionS exp;
362 int pc_rel;
363 } reloc;
364
365 enum it_instruction_type it_insn_type;
366
367 struct
368 {
369 unsigned reg;
370 signed int imm;
371 struct neon_type_el vectype;
372 unsigned present : 1; /* Operand present. */
373 unsigned isreg : 1; /* Operand was a register. */
374 unsigned immisreg : 1; /* .imm field is a second register. */
375 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
376 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
377 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
378 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
379 instructions. This allows us to disambiguate ARM <-> vector insns. */
380 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
381 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
382 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
383 unsigned issingle : 1; /* Operand is VFP single-precision register. */
384 unsigned hasreloc : 1; /* Operand has relocation suffix. */
385 unsigned writeback : 1; /* Operand has trailing ! */
386 unsigned preind : 1; /* Preindexed address. */
387 unsigned postind : 1; /* Postindexed address. */
388 unsigned negative : 1; /* Index register was negated. */
389 unsigned shifted : 1; /* Shift applied to operation. */
390 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
391 } operands[6];
392 };
393
394 static struct arm_it inst;
395
396 #define NUM_FLOAT_VALS 8
397
398 const char * fp_const[] =
399 {
400 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
401 };
402
403 /* Number of littlenums required to hold an extended precision number. */
404 #define MAX_LITTLENUMS 6
405
406 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
407
408 #define FAIL (-1)
409 #define SUCCESS (0)
410
411 #define SUFF_S 1
412 #define SUFF_D 2
413 #define SUFF_E 3
414 #define SUFF_P 4
415
416 #define CP_T_X 0x00008000
417 #define CP_T_Y 0x00400000
418
419 #define CONDS_BIT 0x00100000
420 #define LOAD_BIT 0x00100000
421
422 #define DOUBLE_LOAD_FLAG 0x00000001
423
424 struct asm_cond
425 {
426 const char * template_name;
427 unsigned long value;
428 };
429
430 #define COND_ALWAYS 0xE
431
432 struct asm_psr
433 {
434 const char * template_name;
435 unsigned long field;
436 };
437
438 struct asm_barrier_opt
439 {
440 const char * template_name;
441 unsigned long value;
442 };
443
444 /* The bit that distinguishes CPSR and SPSR. */
445 #define SPSR_BIT (1 << 22)
446
447 /* The individual PSR flag bits. */
448 #define PSR_c (1 << 16)
449 #define PSR_x (1 << 17)
450 #define PSR_s (1 << 18)
451 #define PSR_f (1 << 19)
452
453 struct reloc_entry
454 {
455 char * name;
456 bfd_reloc_code_real_type reloc;
457 };
458
459 enum vfp_reg_pos
460 {
461 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
462 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
463 };
464
465 enum vfp_ldstm_type
466 {
467 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
468 };
469
470 /* Bits for DEFINED field in neon_typed_alias. */
471 #define NTA_HASTYPE 1
472 #define NTA_HASINDEX 2
473
474 struct neon_typed_alias
475 {
476 unsigned char defined;
477 unsigned char index;
478 struct neon_type_el eltype;
479 };
480
481 /* ARM register categories. This includes coprocessor numbers and various
482 architecture extensions' registers. */
483 enum arm_reg_type
484 {
485 REG_TYPE_RN,
486 REG_TYPE_CP,
487 REG_TYPE_CN,
488 REG_TYPE_FN,
489 REG_TYPE_VFS,
490 REG_TYPE_VFD,
491 REG_TYPE_NQ,
492 REG_TYPE_VFSD,
493 REG_TYPE_NDQ,
494 REG_TYPE_NSDQ,
495 REG_TYPE_VFC,
496 REG_TYPE_MVF,
497 REG_TYPE_MVD,
498 REG_TYPE_MVFX,
499 REG_TYPE_MVDX,
500 REG_TYPE_MVAX,
501 REG_TYPE_DSPSC,
502 REG_TYPE_MMXWR,
503 REG_TYPE_MMXWC,
504 REG_TYPE_MMXWCG,
505 REG_TYPE_XSCALE,
506 };
507
508 /* Structure for a hash table entry for a register.
509 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
510 information which states whether a vector type or index is specified (for a
511 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
512 struct reg_entry
513 {
514 const char * name;
515 unsigned char number;
516 unsigned char type;
517 unsigned char builtin;
518 struct neon_typed_alias * neon;
519 };
520
521 /* Diagnostics used when we don't get a register of the expected type. */
522 const char * const reg_expected_msgs[] =
523 {
524 N_("ARM register expected"),
525 N_("bad or missing co-processor number"),
526 N_("co-processor register expected"),
527 N_("FPA register expected"),
528 N_("VFP single precision register expected"),
529 N_("VFP/Neon double precision register expected"),
530 N_("Neon quad precision register expected"),
531 N_("VFP single or double precision register expected"),
532 N_("Neon double or quad precision register expected"),
533 N_("VFP single, double or Neon quad precision register expected"),
534 N_("VFP system register expected"),
535 N_("Maverick MVF register expected"),
536 N_("Maverick MVD register expected"),
537 N_("Maverick MVFX register expected"),
538 N_("Maverick MVDX register expected"),
539 N_("Maverick MVAX register expected"),
540 N_("Maverick DSPSC register expected"),
541 N_("iWMMXt data register expected"),
542 N_("iWMMXt control register expected"),
543 N_("iWMMXt scalar register expected"),
544 N_("XScale accumulator register expected"),
545 };
546
547 /* Some well known registers that we refer to directly elsewhere. */
548 #define REG_SP 13
549 #define REG_LR 14
550 #define REG_PC 15
551
552 /* ARM instructions take 4bytes in the object file, Thumb instructions
553 take 2: */
554 #define INSN_SIZE 4
555
556 struct asm_opcode
557 {
558 /* Basic string to match. */
559 const char * template_name;
560
561 /* Parameters to instruction. */
562 unsigned char operands[8];
563
564 /* Conditional tag - see opcode_lookup. */
565 unsigned int tag : 4;
566
567 /* Basic instruction code. */
568 unsigned int avalue : 28;
569
570 /* Thumb-format instruction code. */
571 unsigned int tvalue;
572
573 /* Which architecture variant provides this instruction. */
574 const arm_feature_set * avariant;
575 const arm_feature_set * tvariant;
576
577 /* Function to call to encode instruction in ARM format. */
578 void (* aencode) (void);
579
580 /* Function to call to encode instruction in Thumb format. */
581 void (* tencode) (void);
582 };
583
584 /* Defines for various bits that we will want to toggle. */
585 #define INST_IMMEDIATE 0x02000000
586 #define OFFSET_REG 0x02000000
587 #define HWOFFSET_IMM 0x00400000
588 #define SHIFT_BY_REG 0x00000010
589 #define PRE_INDEX 0x01000000
590 #define INDEX_UP 0x00800000
591 #define WRITE_BACK 0x00200000
592 #define LDM_TYPE_2_OR_3 0x00400000
593 #define CPSI_MMOD 0x00020000
594
595 #define LITERAL_MASK 0xf000f000
596 #define OPCODE_MASK 0xfe1fffff
597 #define V4_STR_BIT 0x00000020
598
599 #define T2_SUBS_PC_LR 0xf3de8f00
600
601 #define DATA_OP_SHIFT 21
602
603 #define T2_OPCODE_MASK 0xfe1fffff
604 #define T2_DATA_OP_SHIFT 21
605
606 /* Codes to distinguish the arithmetic instructions. */
607 #define OPCODE_AND 0
608 #define OPCODE_EOR 1
609 #define OPCODE_SUB 2
610 #define OPCODE_RSB 3
611 #define OPCODE_ADD 4
612 #define OPCODE_ADC 5
613 #define OPCODE_SBC 6
614 #define OPCODE_RSC 7
615 #define OPCODE_TST 8
616 #define OPCODE_TEQ 9
617 #define OPCODE_CMP 10
618 #define OPCODE_CMN 11
619 #define OPCODE_ORR 12
620 #define OPCODE_MOV 13
621 #define OPCODE_BIC 14
622 #define OPCODE_MVN 15
623
624 #define T2_OPCODE_AND 0
625 #define T2_OPCODE_BIC 1
626 #define T2_OPCODE_ORR 2
627 #define T2_OPCODE_ORN 3
628 #define T2_OPCODE_EOR 4
629 #define T2_OPCODE_ADD 8
630 #define T2_OPCODE_ADC 10
631 #define T2_OPCODE_SBC 11
632 #define T2_OPCODE_SUB 13
633 #define T2_OPCODE_RSB 14
634
635 #define T_OPCODE_MUL 0x4340
636 #define T_OPCODE_TST 0x4200
637 #define T_OPCODE_CMN 0x42c0
638 #define T_OPCODE_NEG 0x4240
639 #define T_OPCODE_MVN 0x43c0
640
641 #define T_OPCODE_ADD_R3 0x1800
642 #define T_OPCODE_SUB_R3 0x1a00
643 #define T_OPCODE_ADD_HI 0x4400
644 #define T_OPCODE_ADD_ST 0xb000
645 #define T_OPCODE_SUB_ST 0xb080
646 #define T_OPCODE_ADD_SP 0xa800
647 #define T_OPCODE_ADD_PC 0xa000
648 #define T_OPCODE_ADD_I8 0x3000
649 #define T_OPCODE_SUB_I8 0x3800
650 #define T_OPCODE_ADD_I3 0x1c00
651 #define T_OPCODE_SUB_I3 0x1e00
652
653 #define T_OPCODE_ASR_R 0x4100
654 #define T_OPCODE_LSL_R 0x4080
655 #define T_OPCODE_LSR_R 0x40c0
656 #define T_OPCODE_ROR_R 0x41c0
657 #define T_OPCODE_ASR_I 0x1000
658 #define T_OPCODE_LSL_I 0x0000
659 #define T_OPCODE_LSR_I 0x0800
660
661 #define T_OPCODE_MOV_I8 0x2000
662 #define T_OPCODE_CMP_I8 0x2800
663 #define T_OPCODE_CMP_LR 0x4280
664 #define T_OPCODE_MOV_HR 0x4600
665 #define T_OPCODE_CMP_HR 0x4500
666
667 #define T_OPCODE_LDR_PC 0x4800
668 #define T_OPCODE_LDR_SP 0x9800
669 #define T_OPCODE_STR_SP 0x9000
670 #define T_OPCODE_LDR_IW 0x6800
671 #define T_OPCODE_STR_IW 0x6000
672 #define T_OPCODE_LDR_IH 0x8800
673 #define T_OPCODE_STR_IH 0x8000
674 #define T_OPCODE_LDR_IB 0x7800
675 #define T_OPCODE_STR_IB 0x7000
676 #define T_OPCODE_LDR_RW 0x5800
677 #define T_OPCODE_STR_RW 0x5000
678 #define T_OPCODE_LDR_RH 0x5a00
679 #define T_OPCODE_STR_RH 0x5200
680 #define T_OPCODE_LDR_RB 0x5c00
681 #define T_OPCODE_STR_RB 0x5400
682
683 #define T_OPCODE_PUSH 0xb400
684 #define T_OPCODE_POP 0xbc00
685
686 #define T_OPCODE_BRANCH 0xe000
687
688 #define THUMB_SIZE 2 /* Size of thumb instruction. */
689 #define THUMB_PP_PC_LR 0x0100
690 #define THUMB_LOAD_BIT 0x0800
691 #define THUMB2_LOAD_BIT 0x00100000
692
693 #define BAD_ARGS _("bad arguments to instruction")
694 #define BAD_SP _("r13 not allowed here")
695 #define BAD_PC _("r15 not allowed here")
696 #define BAD_COND _("instruction cannot be conditional")
697 #define BAD_OVERLAP _("registers may not be the same")
698 #define BAD_HIREG _("lo register required")
699 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
/* No trailing semicolon here: the expansion is used as an expression
   (e.g. "inst.error = BAD_ADDR_MODE;"), and an embedded ';' would make
   such uses expand into a double statement, breaking expression
   contexts like conditional operators.  */
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
701 #define BAD_BRANCH _("branch must be last instruction in IT block")
702 #define BAD_NOT_IT _("instruction not allowed in IT block")
703 #define BAD_FPU _("selected FPU does not support instruction")
704 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
705 #define BAD_IT_COND _("incorrect condition in IT block")
706 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
707 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
708
709 static struct hash_control * arm_ops_hsh;
710 static struct hash_control * arm_cond_hsh;
711 static struct hash_control * arm_shift_hsh;
712 static struct hash_control * arm_psr_hsh;
713 static struct hash_control * arm_v7m_psr_hsh;
714 static struct hash_control * arm_reg_hsh;
715 static struct hash_control * arm_reloc_hsh;
716 static struct hash_control * arm_barrier_opt_hsh;
717
718 /* Stuff needed to resolve the label ambiguity
719 As:
720 ...
721 label: <insn>
722 may differ from:
723 ...
724 label:
725 <insn> */
726
727 symbolS * last_label_seen;
728 static int label_is_thumb_function_name = FALSE;
729
730 /* Literal pool structure. Held on a per-section
731 and per-sub-section basis. */
732
733 #define MAX_LITERAL_POOL_SIZE 1024
734 typedef struct literal_pool
735 {
736 expressionS literals [MAX_LITERAL_POOL_SIZE];
737 unsigned int next_free_entry;
738 unsigned int id;
739 symbolS * symbol;
740 segT section;
741 subsegT sub_section;
742 struct literal_pool * next;
743 } literal_pool;
744
745 /* Pointer to a linked list of literal pools. */
746 literal_pool * list_of_pools = NULL;
747
748 #ifdef OBJ_ELF
749 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
750 #else
751 static struct current_it now_it;
752 #endif
753
754 static inline int
755 now_it_compatible (int cond)
756 {
757 return (cond & ~1) == (now_it.cc & ~1);
758 }
759
760 static inline int
761 conditional_insn (void)
762 {
763 return inst.cond != COND_ALWAYS;
764 }
765
766 static int in_it_block (void);
767
768 static int handle_it_state (void);
769
770 static void force_automatic_it_block_close (void);
771
772 static void it_fsm_post_encode (void);
773
774 #define set_it_insn_type(type) \
775 do \
776 { \
777 inst.it_insn_type = type; \
778 if (handle_it_state () == FAIL) \
779 return; \
780 } \
781 while (0)
782
783 #define set_it_insn_type_nonvoid(type, failret) \
784 do \
785 { \
786 inst.it_insn_type = type; \
787 if (handle_it_state () == FAIL) \
788 return failret; \
789 } \
790 while(0)
791
792 #define set_it_insn_type_last() \
793 do \
794 { \
795 if (inst.cond == COND_ALWAYS) \
796 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
797 else \
798 set_it_insn_type (INSIDE_IT_LAST_INSN); \
799 } \
800 while (0)
801
802 /* Pure syntax. */
803
804 /* This array holds the chars that always start a comment. If the
805 pre-processor is disabled, these aren't very useful. */
806 const char comment_chars[] = "@";
807
808 /* This array holds the chars that only start a comment at the beginning of
809 a line. If the line seems to have the form '# 123 filename'
810 .line and .file directives will appear in the pre-processed output. */
811 /* Note that input_file.c hand checks for '#' at the beginning of the
812 first line of the input file. This is because the compiler outputs
813 #NO_APP at the beginning of its output. */
814 /* Also note that comments like this one will always work. */
815 const char line_comment_chars[] = "#";
816
817 const char line_separator_chars[] = ";";
818
819 /* Chars that can be used to separate mant
820 from exp in floating point numbers. */
821 const char EXP_CHARS[] = "eE";
822
823 /* Chars that mean this number is a floating point constant. */
824 /* As in 0f12.456 */
825 /* or 0d1.2345e12 */
826
827 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
828
829 /* Prefix characters that indicate the start of an immediate
830 value. */
831 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
832
833 /* Separator character handling. */
834
835 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
836
837 static inline int
838 skip_past_char (char ** str, char c)
839 {
840 if (**str == c)
841 {
842 (*str)++;
843 return SUCCESS;
844 }
845 else
846 return FAIL;
847 }
848
849 #define skip_past_comma(str) skip_past_char (str, ',')
850
851 /* Arithmetic expressions (possibly involving symbols). */
852
853 /* Return TRUE if anything in the expression is a bignum. */
854
855 static int
856 walk_no_bignums (symbolS * sp)
857 {
858 if (symbol_get_value_expression (sp)->X_op == O_big)
859 return 1;
860
861 if (symbol_get_value_expression (sp)->X_add_symbol)
862 {
863 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
864 || (symbol_get_value_expression (sp)->X_op_symbol
865 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
866 }
867
868 return 0;
869 }
870
871 static int in_my_get_expression = 0;
872
873 /* Third argument to my_get_expression. */
874 #define GE_NO_PREFIX 0
875 #define GE_IMM_PREFIX 1
876 #define GE_OPT_PREFIX 2
877 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
878 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
879 #define GE_OPT_PREFIX_BIG 3
880
881 static int
882 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
883 {
884 char * save_in;
885 segT seg;
886
887 /* In unified syntax, all prefixes are optional. */
888 if (unified_syntax)
889 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
890 : GE_OPT_PREFIX;
891
892 switch (prefix_mode)
893 {
894 case GE_NO_PREFIX: break;
895 case GE_IMM_PREFIX:
896 if (!is_immediate_prefix (**str))
897 {
898 inst.error = _("immediate expression requires a # prefix");
899 return FAIL;
900 }
901 (*str)++;
902 break;
903 case GE_OPT_PREFIX:
904 case GE_OPT_PREFIX_BIG:
905 if (is_immediate_prefix (**str))
906 (*str)++;
907 break;
908 default: abort ();
909 }
910
911 memset (ep, 0, sizeof (expressionS));
912
913 save_in = input_line_pointer;
914 input_line_pointer = *str;
915 in_my_get_expression = 1;
916 seg = expression (ep);
917 in_my_get_expression = 0;
918
919 if (ep->X_op == O_illegal || ep->X_op == O_absent)
920 {
921 /* We found a bad or missing expression in md_operand(). */
922 *str = input_line_pointer;
923 input_line_pointer = save_in;
924 if (inst.error == NULL)
925 inst.error = (ep->X_op == O_absent
926 ? _("missing expression") :_("bad expression"));
927 return 1;
928 }
929
930 #ifdef OBJ_AOUT
931 if (seg != absolute_section
932 && seg != text_section
933 && seg != data_section
934 && seg != bss_section
935 && seg != undefined_section)
936 {
937 inst.error = _("bad segment");
938 *str = input_line_pointer;
939 input_line_pointer = save_in;
940 return 1;
941 }
942 #endif
943
944 /* Get rid of any bignums now, so that we don't generate an error for which
945 we can't establish a line number later on. Big numbers are never valid
946 in instructions, which is where this routine is always called. */
947 if (prefix_mode != GE_OPT_PREFIX_BIG
948 && (ep->X_op == O_big
949 || (ep->X_add_symbol
950 && (walk_no_bignums (ep->X_add_symbol)
951 || (ep->X_op_symbol
952 && walk_no_bignums (ep->X_op_symbol))))))
953 {
954 inst.error = _("invalid constant");
955 *str = input_line_pointer;
956 input_line_pointer = save_in;
957 return 1;
958 }
959
960 *str = input_line_pointer;
961 input_line_pointer = save_in;
962 return 0;
963 }
964
965 /* Turn a string in input_line_pointer into a floating point constant
966 of type TYPE, and store the appropriate bytes in *LITP. The number
967 of LITTLENUMS emitted is stored in *SIZEP. An error message is
968 returned, or NULL on OK.
969
970 Note that fp constants aren't represent in the normal way on the ARM.
971 In big endian mode, things are as expected. However, in little endian
972 mode fp constants are big-endian word-wise, and little-endian byte-wise
973 within the words. For example, (double) 1.1 in big endian mode is
974 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
975 the byte sequence 99 99 f1 3f 9a 99 99 99.
976
977 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
978
char *
md_atof (int type, char * litP, int * sizeP)
{
  /* Number of 16-bit LITTLENUMs making up the constant.  */
  int prec;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  switch (type)
    {
    /* Single precision: two littlenums (4 bytes).  */
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    /* Double precision: four littlenums (8 bytes).  */
    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    /* Extended precision: five littlenums (10 bytes).  */
    case 'x':
    case 'X':
      prec = 5;
      break;

    /* Packed decimal: also five littlenums.  */
    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  /* Parse the text at input_line_pointer; atof_ieee returns a pointer
     past the consumed characters, or NULL on failure.  */
  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
    {
      /* Big-endian target: emit littlenums in natural (big-endian) order.  */
      for (i = 0; i < prec; i++)
	{
	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	  litP += sizeof (LITTLENUM_TYPE);
	}
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	/* Pure-endian FPU: emit littlenums fully reversed, i.e. plain
	   little-endian byte order.  */
	for (i = prec - 1; i >= 0; i--)
	  {
	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += sizeof (LITTLENUM_TYPE);
	  }
      else
	/* For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2.
	   NOTE(review): prec is odd (5) for 'x'/'p', so this pairwise loop
	   also emits words[5] — 12 bytes, while *sizeP says 10.  Confirm
	   intended behavior for 12-byte floats (see the ??? in the comment
	   above this function).  */
	for (i = 0; i < prec; i += 2)
	  {
	    md_number_to_chars (litP, (valueT) words[i + 1],
				sizeof (LITTLENUM_TYPE));
	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
				(valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += 2 * sizeof (LITTLENUM_TYPE);
	  }
    }

  return NULL;
}
1054
1055 /* We handle all bad expressions here, so that we can report the faulty
1056 instruction in the error message. */
1057 void
1058 md_operand (expressionS * exp)
1059 {
1060 if (in_my_get_expression)
1061 exp->X_op = O_illegal;
1062 }
1063
1064 /* Immediate values. */
1065
1066 /* Generic immediate-value read function for use in directives.
1067 Accepts anything that 'expression' can fold to a constant.
1068 *val receives the number. */
1069 #ifdef OBJ_ELF
1070 static int
1071 immediate_for_directive (int *val)
1072 {
1073 expressionS exp;
1074 exp.X_op = O_illegal;
1075
1076 if (is_immediate_prefix (*input_line_pointer))
1077 {
1078 input_line_pointer++;
1079 expression (&exp);
1080 }
1081
1082 if (exp.X_op != O_constant)
1083 {
1084 as_bad (_("expected #constant"));
1085 ignore_rest_of_line ();
1086 return FAIL;
1087 }
1088 *val = exp.X_add_number;
1089 return SUCCESS;
1090 }
1091 #endif
1092
1093 /* Register parsing. */
1094
1095 /* Generic register parser. CCP points to what should be the
1096 beginning of a register name. If it is indeed a valid register
1097 name, advance CCP over it and return the reg_entry structure;
1098 otherwise return NULL. Does not issue diagnostics. */
1099
1100 static struct reg_entry *
1101 arm_reg_parse_multi (char **ccp)
1102 {
1103 char *start = *ccp;
1104 char *p;
1105 struct reg_entry *reg;
1106
1107 #ifdef REGISTER_PREFIX
1108 if (*start != REGISTER_PREFIX)
1109 return NULL;
1110 start++;
1111 #endif
1112 #ifdef OPTIONAL_REGISTER_PREFIX
1113 if (*start == OPTIONAL_REGISTER_PREFIX)
1114 start++;
1115 #endif
1116
1117 p = start;
1118 if (!ISALPHA (*p) || !is_name_beginner (*p))
1119 return NULL;
1120
1121 do
1122 p++;
1123 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1124
1125 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1126
1127 if (!reg)
1128 return NULL;
1129
1130 *ccp = p;
1131 return reg;
1132 }
1133
/* Accept alternative (non-canonical) spellings for a few register
   classes.  START points at the beginning of the operand text, *CCP at
   the current parse position, REG is the entry found by the generic
   parser (may be NULL).  Returns a register number, or FAIL.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG. ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1171
1172 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1173 return value is the register number or FAIL. */
1174
1175 static int
1176 arm_reg_parse (char **ccp, enum arm_reg_type type)
1177 {
1178 char *start = *ccp;
1179 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1180 int ret;
1181
1182 /* Do not allow a scalar (reg+index) to parse as a register. */
1183 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1184 return FAIL;
1185
1186 if (reg && reg->type == type)
1187 return reg->number;
1188
1189 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1190 return ret;
1191
1192 *ccp = start;
1193 return FAIL;
1194 }
1195
1196 /* Parse a Neon type specifier. *STR should point at the leading '.'
1197 character. Does no verification at this stage that the type fits the opcode
1198 properly. E.g.,
1199
1200 .i32.i32.s16
1201 .s32.f32
1202 .u16
1203
1204 Can all be legally parsed by this function.
1205
1206 Fills in neon_type struct pointer with parsed information, and updates STR
1207 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1208 type, FAIL if not. */
1209
1210 static int
1211 parse_neon_type (struct neon_type *type, char **str)
1212 {
1213 char *ptr = *str;
1214
1215 if (type)
1216 type->elems = 0;
1217
1218 while (type->elems < NEON_MAX_TYPE_ELS)
1219 {
1220 enum neon_el_type thistype = NT_untyped;
1221 unsigned thissize = -1u;
1222
1223 if (*ptr != '.')
1224 break;
1225
1226 ptr++;
1227
1228 /* Just a size without an explicit type. */
1229 if (ISDIGIT (*ptr))
1230 goto parsesize;
1231
1232 switch (TOLOWER (*ptr))
1233 {
1234 case 'i': thistype = NT_integer; break;
1235 case 'f': thistype = NT_float; break;
1236 case 'p': thistype = NT_poly; break;
1237 case 's': thistype = NT_signed; break;
1238 case 'u': thistype = NT_unsigned; break;
1239 case 'd':
1240 thistype = NT_float;
1241 thissize = 64;
1242 ptr++;
1243 goto done;
1244 default:
1245 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1246 return FAIL;
1247 }
1248
1249 ptr++;
1250
1251 /* .f is an abbreviation for .f32. */
1252 if (thistype == NT_float && !ISDIGIT (*ptr))
1253 thissize = 32;
1254 else
1255 {
1256 parsesize:
1257 thissize = strtoul (ptr, &ptr, 10);
1258
1259 if (thissize != 8 && thissize != 16 && thissize != 32
1260 && thissize != 64)
1261 {
1262 as_bad (_("bad size %d in type specifier"), thissize);
1263 return FAIL;
1264 }
1265 }
1266
1267 done:
1268 if (type)
1269 {
1270 type->el[type->elems].type = thistype;
1271 type->el[type->elems].size = thissize;
1272 type->elems++;
1273 }
1274 }
1275
1276 /* Empty/missing type is not a successful parse. */
1277 if (type->elems == 0)
1278 return FAIL;
1279
1280 *str = ptr;
1281
1282 return SUCCESS;
1283 }
1284
1285 /* Errors may be set multiple times during parsing or bit encoding
1286 (particularly in the Neon bits), but usually the earliest error which is set
1287 will be the most meaningful. Avoid overwriting it with later (cascading)
1288 errors by calling this function. */
1289
1290 static void
1291 first_error (const char *err)
1292 {
1293 if (!inst.error)
1294 inst.error = err;
1295 }
1296
1297 /* Parse a single type, e.g. ".s32", leading period included. */
1298 static int
1299 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1300 {
1301 char *str = *ccp;
1302 struct neon_type optype;
1303
1304 if (*str == '.')
1305 {
1306 if (parse_neon_type (&optype, &str) == SUCCESS)
1307 {
1308 if (optype.elems == 1)
1309 *vectype = optype.el[0];
1310 else
1311 {
1312 first_error (_("only one type should be specified for operand"));
1313 return FAIL;
1314 }
1315 }
1316 else
1317 {
1318 first_error (_("vector type expected"));
1319 return FAIL;
1320 }
1321 }
1322 else
1323 return FAIL;
1324
1325 *ccp = str;
1326
1327 return SUCCESS;
1328 }
1329
1330 /* Special meanings for indices (which have a range of 0-7), which will fit into
1331 a 4-bit integer. */
1332
#define NEON_ALL_LANES 15		/* "[]": operate on all lanes.  */
#define NEON_INTERLEAVE_LANES 14	/* No index given: interleaving structures.  */
1335
1336 /* Parse either a register or a scalar, with an optional type. Return the
1337 register number, and optionally fill in the actual type of the register
1338 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1339 type/index information in *TYPEINFO. */
1340
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with no type or index information.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      /* Only advance the caller's pointer on a successful alternate
	 parse; the default-initialized ATYPE is reported either way.  */
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* Inherit any type/index information attached to the register by a
     .dn/.qn alias.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix may only add a type where none exists;
     redefining an alias's type is an error.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional scalar index: "[n]" selects a lane, "[]" means all lanes.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1444
/* Like arm_reg_parse, but allow the following extra features:
1446 - If RTYPE is non-zero, return the (possibly restricted) type of the
1447 register (e.g. Neon double or quad reg when either has been requested).
1448 - If this is a Neon vector type with additional type information, fill
1449 in the struct pointed to by VECTYPE (if non-NULL).
1450 This function will fault on encountering a scalar. */
1451
1452 static int
1453 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1454 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1455 {
1456 struct neon_typed_alias atype;
1457 char *str = *ccp;
1458 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1459
1460 if (reg == FAIL)
1461 return FAIL;
1462
1463 /* Do not allow a scalar (reg+index) to parse as a register. */
1464 if ((atype.defined & NTA_HASINDEX) != 0)
1465 {
1466 first_error (_("register operand expected, but got scalar"));
1467 return FAIL;
1468 }
1469
1470 if (vectype)
1471 *vectype = atype.eltype;
1472
1473 *ccp = str;
1474
1475 return reg;
1476 }
1477
#define NEON_SCALAR_REG(X)	((X) >> 4)	/* D register number of a scalar.  */
#define NEON_SCALAR_INDEX(X)	((X) & 15)	/* Lane index of a scalar.  */
1480
1481 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1482 have enough information to be able to do a good job bounds-checking. So, we
1483 just do easy checks here, and do further checks later. */
1484
1485 static int
1486 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1487 {
1488 int reg;
1489 char *str = *ccp;
1490 struct neon_typed_alias atype;
1491
1492 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1493
1494 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1495 return FAIL;
1496
1497 if (atype.index == NEON_ALL_LANES)
1498 {
1499 first_error (_("scalar must have an index"));
1500 return FAIL;
1501 }
1502 else if (atype.index >= 64 / elsize)
1503 {
1504 first_error (_("scalar index out of range"));
1505 return FAIL;
1506 }
1507
1508 if (type)
1509 *type = atype.eltype;
1510
1511 *ccp = str;
1512
1513 return reg * 16 + atype.index;
1514 }
1515
1516 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1517
1518 static long
1519 parse_reg_list (char ** strp)
1520 {
1521 char * str = * strp;
1522 long range = 0;
1523 int another_range;
1524
1525 /* We come back here if we get ranges concatenated by '+' or '|'. */
1526 do
1527 {
1528 another_range = 0;
1529
1530 if (*str == '{')
1531 {
1532 int in_range = 0;
1533 int cur_reg = -1;
1534
1535 str++;
1536 do
1537 {
1538 int reg;
1539
1540 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1541 {
1542 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1543 return FAIL;
1544 }
1545
1546 if (in_range)
1547 {
1548 int i;
1549
1550 if (reg <= cur_reg)
1551 {
1552 first_error (_("bad range in register list"));
1553 return FAIL;
1554 }
1555
1556 for (i = cur_reg + 1; i < reg; i++)
1557 {
1558 if (range & (1 << i))
1559 as_tsktsk
1560 (_("Warning: duplicated register (r%d) in register list"),
1561 i);
1562 else
1563 range |= 1 << i;
1564 }
1565 in_range = 0;
1566 }
1567
1568 if (range & (1 << reg))
1569 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1570 reg);
1571 else if (reg <= cur_reg)
1572 as_tsktsk (_("Warning: register range not in ascending order"));
1573
1574 range |= 1 << reg;
1575 cur_reg = reg;
1576 }
1577 while (skip_past_comma (&str) != FAIL
1578 || (in_range = 1, *str++ == '-'));
1579 str--;
1580
1581 if (*str++ != '}')
1582 {
1583 first_error (_("missing `}'"));
1584 return FAIL;
1585 }
1586 }
1587 else
1588 {
1589 expressionS exp;
1590
1591 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1592 return FAIL;
1593
1594 if (exp.X_op == O_constant)
1595 {
1596 if (exp.X_add_number
1597 != (exp.X_add_number & 0x0000ffff))
1598 {
1599 inst.error = _("invalid register mask");
1600 return FAIL;
1601 }
1602
1603 if ((range & exp.X_add_number) != 0)
1604 {
1605 int regno = range & exp.X_add_number;
1606
1607 regno &= -regno;
1608 regno = (1 << regno) - 1;
1609 as_tsktsk
1610 (_("Warning: duplicated register (r%d) in register list"),
1611 regno);
1612 }
1613
1614 range |= exp.X_add_number;
1615 }
1616 else
1617 {
1618 if (inst.reloc.type != 0)
1619 {
1620 inst.error = _("expression too complex");
1621 return FAIL;
1622 }
1623
1624 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1625 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1626 inst.reloc.pc_rel = 0;
1627 }
1628 }
1629
1630 if (*str == '|' || *str == '+')
1631 {
1632 str++;
1633 another_range = 1;
1634 }
1635 }
1636 while (another_range);
1637
1638 *strp = str;
1639 return range;
1640 }
1641
1642 /* Types of registers in a list. */
1643
enum reg_list_els
{
  REGLIST_VFP_S,	/* VFP single-precision registers (s0-s31).  */
  REGLIST_VFP_D,	/* VFP double-precision registers.  */
  REGLIST_NEON_D	/* Neon D registers; Q registers stand for D pairs.  */
};
1650
1651 /* Parse a VFP register list. If the string is invalid return FAIL.
1652 Otherwise return the number of registers, and set PBASE to the first
1653 register. Parses registers of type ETYPE.
1654 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1655 - Q registers can be used to specify pairs of D registers
1656 - { } can be omitted from around a singleton register list
1657 FIXME: This is not implemented, as it would require backtracking in
1658 some cases, e.g.:
1659 vtbl.8 d3,d4,d5
1660 This could be done (the meaning isn't really ambiguous), but doesn't
1661 fit in well with the current parsing framework.
1662 - 32 D registers may be used (also true for VFPv3).
1663 FIXME: Types are ignored in these register lists, which is probably a
1664 bug. */
1665
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (*str != '{')
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  str++;

  /* Select the register class to parse; D-register limits are decided
     below based on the available FPU features.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record actual use of the D32 extension in the per-mode
	     architecture-used feature sets.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* BASE_REG tracks the lowest register seen; start above the valid
     range so the first register always lowers it.  */
  base_reg = max_regs;

  do
    {
      /* SETMASK/ADDREGS are 1 for an S/D register and cover two D
	 registers when a Q register is given.  */
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* A Q register counts as a pair of D registers.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Mark every register in the range, stepping a pair at a time
	     for Q registers.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): the closing '}' is assumed rather than verified here;
     a missing brace silently consumes whatever character follows the
     final register -- confirm whether callers catch this.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1831
1832 /* True if two alias types are the same. */
1833
1834 static bfd_boolean
1835 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1836 {
1837 if (!a && !b)
1838 return TRUE;
1839
1840 if (!a || !b)
1841 return FALSE;
1842
1843 if (a->defined != b->defined)
1844 return FALSE;
1845
1846 if ((a->defined & NTA_HASTYPE) != 0
1847 && (a->eltype.type != b->eltype.type
1848 || a->eltype.size != b->eltype.size))
1849 return FALSE;
1850
1851 if ((a->defined & NTA_HASINDEX) != 0
1852 && (a->index != b->index))
1853 return FALSE;
1854
1855 return TRUE;
1856 }
1857
1858 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1859 The base register is put in *PBASE.
1860 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1861 the return value.
1862 The register stride (minus one) is put in bit 4 of the return value.
1863 Bits [6:5] encode the list length (minus one).
1864 The type of the list elements is put in *ELTYPE, if non-NULL. */
1865
#define NEON_LANE(X)		((X) & 0xf)		/* Lane, or NEON_*_LANES.  */
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)	/* Register stride: 1 or 2.  */
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)	/* List length: 1 to 4.  */
1869
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;	/* First register in the list, -1 until seen.  */
  int reg_incr = -1;	/* Stride between registers, -1 until known.  */
  int count = 0;	/* D registers accumulated so far.  */
  int lane = -1;	/* Common lane index, or NEON_*_LANES.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  int addregs = 1;	/* D registers contributed per list element.  */
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* The first register fixes the base, the D-width (ADDREGS)
	     and the reference type for the rest of the list.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	      addregs = 2;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* The second register fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register in the range, inclusive.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* Every indexed element must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  /* A singleton list has implicit unit stride.  */
  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Encode lane in [3:0], stride-1 in bit 4, length-1 in [6:5].  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2024
2025 /* Parse an explicit relocation suffix on an expression. This is
2026 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2027 arm_reloc_hsh contains no entries, so this function can only
2028 succeed if there is no () after the word. Returns -1 on error,
2029 BFD_RELOC_UNUSED if there wasn't any suffix. */
2030 static int
2031 parse_reloc (char **str)
2032 {
2033 struct reloc_entry *r;
2034 char *p, *q;
2035
2036 if (**str != '(')
2037 return BFD_RELOC_UNUSED;
2038
2039 p = *str + 1;
2040 q = p;
2041
2042 while (*q && *q != ')' && *q != ',')
2043 q++;
2044 if (*q != ')')
2045 return -1;
2046
2047 if ((r = (struct reloc_entry *)
2048 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2049 return -1;
2050
2051 *str = q + 1;
2052 return r->reloc;
2053 }
2054
2055 /* Directives: register aliases. */
2056
2057 static struct reg_entry *
2058 insert_reg_alias (char *str, int number, int type)
2059 {
2060 struct reg_entry *new_reg;
2061 const char *name;
2062
2063 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2064 {
2065 if (new_reg->builtin)
2066 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2067
2068 /* Only warn about a redefinition if it's not defined as the
2069 same register. */
2070 else if (new_reg->number != number || new_reg->type != type)
2071 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2072
2073 return NULL;
2074 }
2075
2076 name = xstrdup (str);
2077 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2078
2079 new_reg->name = name;
2080 new_reg->number = number;
2081 new_reg->type = type;
2082 new_reg->builtin = FALSE;
2083 new_reg->neon = NULL;
2084
2085 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2086 abort ();
2087
2088 return new_reg;
2089 }
2090
2091 static void
2092 insert_neon_reg_alias (char *str, int number, int type,
2093 struct neon_typed_alias *atype)
2094 {
2095 struct reg_entry *reg = insert_reg_alias (str, number, type);
2096
2097 if (!reg)
2098 {
2099 first_error (_("attempt to redefine typed alias"));
2100 return;
2101 }
2102
2103 if (atype)
2104 {
2105 reg->neon = (struct neon_typed_alias *)
2106 xmalloc (sizeof (struct neon_typed_alias));
2107 *reg->neon = *atype;
2108 }
2109 }
2110
2111 /* Look for the .req directive. This is of the form:
2112
2113 new_register_name .req existing_register_name
2114
2115 If we find one, or if it looks sufficiently like one that we want to
2116 handle any error here, return TRUE. Otherwise return FALSE. */
2117
2118 static bfd_boolean
2119 create_register_alias (char * newname, char *p)
2120 {
2121 struct reg_entry *old;
2122 char *oldname, *nbuf;
2123 size_t nlen;
2124
2125 /* The input scrubber ensures that whitespace after the mnemonic is
2126 collapsed to single spaces. */
2127 oldname = p;
2128 if (strncmp (oldname, " .req ", 6) != 0)
2129 return FALSE;
2130
2131 oldname += 6;
2132 if (*oldname == '\0')
2133 return FALSE;
2134
2135 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2136 if (!old)
2137 {
2138 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2139 return TRUE;
2140 }
2141
2142 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2143 the desired alias name, and p points to its end. If not, then
2144 the desired alias name is in the global original_case_string. */
2145 #ifdef TC_CASE_SENSITIVE
2146 nlen = p - newname;
2147 #else
2148 newname = original_case_string;
2149 nlen = strlen (newname);
2150 #endif
2151
2152 nbuf = (char *) alloca (nlen + 1);
2153 memcpy (nbuf, newname, nlen);
2154 nbuf[nlen] = '\0';
2155
2156 /* Create aliases under the new name as stated; an all-lowercase
2157 version of the new name; and an all-uppercase version of the new
2158 name. */
2159 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2160 {
2161 for (p = nbuf; *p; p++)
2162 *p = TOUPPER (*p);
2163
2164 if (strncmp (nbuf, newname, nlen))
2165 {
2166 /* If this attempt to create an additional alias fails, do not bother
2167 trying to create the all-lower case alias. We will fail and issue
2168 a second, duplicate error message. This situation arises when the
2169 programmer does something like:
2170 foo .req r0
2171 Foo .req r1
2172 The second .req creates the "Foo" alias but then fails to create
2173 the artificial FOO alias because it has already been created by the
2174 first .req. */
2175 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2176 return TRUE;
2177 }
2178
2179 for (p = nbuf; *p; p++)
2180 *p = TOLOWER (*p);
2181
2182 if (strncmp (nbuf, newname, nlen))
2183 insert_reg_alias (nbuf, old->number, old->type);
2184 }
2185
2186 return TRUE;
2187 }
2188
2189 /* Create a Neon typed/indexed register alias using directives, e.g.:
2190 X .dn d5.s32[1]
2191 Y .qn 6.s16
2192 Z .dn d7
2193 T .dn Z[0]
2194 These typed registers can be used instead of the types specified after the
2195 Neon mnemonic, so long as all operands given have types. Types can also be
2196 specified directly, e.g.:
2197 vadd d0.s32, d1.s32, d2.s32 */
2198
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend;
  int namelen;

  /* Start with no type or index information.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* ".dn" defines a D-register alias, ".qn" a Q-register alias.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* Q<n> is represented internally as the pair of D registers
	 starting at register 2n.  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit type/index information attached to the base register.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* NOTE(review): NAMELEN comes from source input; alloca of an
     unbounded user-controlled length risks stack overflow -- consider
     a heap buffer (xmalloc/free) instead.  */
  namelen = nameend - newname;
  namebuf = (char *) alloca (namelen + 1);
  strncpy (namebuf, newname, namelen);
  namebuf[namelen] = '\0';

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  return TRUE;
}
2329
2330 /* Should never be called, as .req goes between the alias and the
2331 register name, not at the beginning of the line. */
2332
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* .req must follow the alias name on the same line; a line-initial
     .req has no alias to define and is always an error.  */
  as_bad (_("invalid syntax for .req directive"));
}
2338
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  /* Like .req, .dn is only meaningful after an alias name; reaching
     this handler means it appeared at the start of a statement.  */
  as_bad (_("invalid syntax for .dn directive"));
}
2344
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  /* Like .req, .qn is only meaningful after an alias name; reaching
     this handler means it appeared at the start of a statement.  */
  as_bad (_("invalid syntax for .qn directive"));
}
2350
2351 /* The .unreq directive deletes an alias which was previously defined
2352 by .req. For example:
2353
2354 my_alias .req r11
2355 .unreq my_alias */
2356
2357 static void
2358 s_unreq (int a ATTRIBUTE_UNUSED)
2359 {
2360 char * name;
2361 char saved_char;
2362
2363 name = input_line_pointer;
2364
2365 while (*input_line_pointer != 0
2366 && *input_line_pointer != ' '
2367 && *input_line_pointer != '\n')
2368 ++input_line_pointer;
2369
2370 saved_char = *input_line_pointer;
2371 *input_line_pointer = 0;
2372
2373 if (!*name)
2374 as_bad (_("invalid syntax for .unreq directive"));
2375 else
2376 {
2377 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2378 name);
2379
2380 if (!reg)
2381 as_bad (_("unknown register alias '%s'"), name);
2382 else if (reg->builtin)
2383 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2384 name);
2385 else
2386 {
2387 char * p;
2388 char * nbuf;
2389
2390 hash_delete (arm_reg_hsh, name, FALSE);
2391 free ((char *) reg->name);
2392 if (reg->neon)
2393 free (reg->neon);
2394 free (reg);
2395
2396 /* Also locate the all upper case and all lower case versions.
2397 Do not complain if we cannot find one or the other as it
2398 was probably deleted above. */
2399
2400 nbuf = strdup (name);
2401 for (p = nbuf; *p; p++)
2402 *p = TOUPPER (*p);
2403 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2404 if (reg)
2405 {
2406 hash_delete (arm_reg_hsh, nbuf, FALSE);
2407 free ((char *) reg->name);
2408 if (reg->neon)
2409 free (reg->neon);
2410 free (reg);
2411 }
2412
2413 for (p = nbuf; *p; p++)
2414 *p = TOLOWER (*p);
2415 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2416 if (reg)
2417 {
2418 hash_delete (arm_reg_hsh, nbuf, FALSE);
2419 free ((char *) reg->name);
2420 if (reg->neon)
2421 free (reg->neon);
2422 free (reg);
2423 }
2424
2425 free (nbuf);
2426 }
2427 }
2428
2429 *input_line_pointer = saved_char;
2430 demand_empty_rest_of_line ();
2431 }
2432
2433 /* Directives: Instruction set selection. */
2434
2435 #ifdef OBJ_ELF
2436 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2437 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2438 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2439 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2440
2441 /* Create a new mapping symbol for the transition to STATE. */
2442
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the AAELF mapping-symbol name; all three kinds are untyped
     (see the comment preceding this function).  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Annotate $a/$t symbols with the corresponding ARM/Thumb and
     interworking attributes; $d symbols need none.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.  */
  if (value == 0)
    {
      know (frag->tc_frag_data.first_map == NULL);
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    know (S_GET_VALUE (frag->tc_frag_data.last_map) < S_GET_VALUE (symbolP));
  frag->tc_frag_data.last_map = symbolP;
}
2503
2504 /* We must sometimes convert a region marked as code to data during
2505 code alignment, if an odd number of bytes have to be padded. The
2506 code mapping symbol is pushed to an aligned address. */
2507
2508 static void
2509 insert_data_mapping_symbol (enum mstate state,
2510 valueT value, fragS *frag, offsetT bytes)
2511 {
2512 /* If there was already a mapping symbol, remove it. */
2513 if (frag->tc_frag_data.last_map != NULL
2514 && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2515 {
2516 symbolS *symp = frag->tc_frag_data.last_map;
2517
2518 if (value == 0)
2519 {
2520 know (frag->tc_frag_data.first_map == symp);
2521 frag->tc_frag_data.first_map = NULL;
2522 }
2523 frag->tc_frag_data.last_map = NULL;
2524 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2525 }
2526
2527 make_mapping_symbol (MAP_DATA, value, frag);
2528 make_mapping_symbol (state, value + bytes, frag);
2529 }
2530
2531 static void mapping_state_2 (enum mstate state, int max_chars);
2532
2533 /* Set the mapping state to STATE. Only call this when about to
2534 emit some STATE bytes to the file. */
2535
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
	   || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	/* Bytes were emitted before the first code transition:
	   retroactively mark the section start as data ($d).  */
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  /* Record the new state and emit its mapping symbol here.  */
  mapping_state_2 (state, 0);
#undef TRANSITION
}
2567
2568 /* Same as mapping_state, but MAX_CHARS bytes have already been
2569 allocated. Put the mapping symbol that far back. */
2570
2571 static void
2572 mapping_state_2 (enum mstate state, int max_chars)
2573 {
2574 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2575
2576 if (!SEG_NORMAL (now_seg))
2577 return;
2578
2579 if (mapstate == state)
2580 /* The mapping symbol has already been emitted.
2581 There is nothing else to do. */
2582 return;
2583
2584 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2585 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2586 }
2587 #else
2588 #define mapping_state(x) ((void)0)
2589 #define mapping_state_2(x, y) ((void)0)
2590 #endif
2591
2592 /* Find the real, Thumb encoded start of a Thumb function. */
2593
2594 #ifdef OBJ_COFF
2595 static symbolS *
2596 find_real_start (symbolS * symbolP)
2597 {
2598 char * real_start;
2599 const char * name = S_GET_NAME (symbolP);
2600 symbolS * new_target;
2601
2602 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2603 #define STUB_NAME ".real_start_of"
2604
2605 if (name == NULL)
2606 abort ();
2607
2608 /* The compiler may generate BL instructions to local labels because
2609 it needs to perform a branch to a far away location. These labels
2610 do not have a corresponding ".real_start_of" label. We check
2611 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2612 the ".real_start_of" convention for nonlocal branches. */
2613 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2614 return symbolP;
2615
2616 real_start = ACONCAT ((STUB_NAME, name, NULL));
2617 new_target = symbol_find (real_start);
2618
2619 if (new_target == NULL)
2620 {
2621 as_warn (_("Failed to find real start of function: %s\n"), name);
2622 new_target = symbolP;
2623 }
2624
2625 return new_target;
2626 }
2627 #endif
2628
2629 static void
2630 opcode_select (int width)
2631 {
2632 switch (width)
2633 {
2634 case 16:
2635 if (! thumb_mode)
2636 {
2637 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2638 as_bad (_("selected processor does not support THUMB opcodes"));
2639
2640 thumb_mode = 1;
2641 /* No need to force the alignment, since we will have been
2642 coming from ARM mode, which is word-aligned. */
2643 record_alignment (now_seg, 1);
2644 }
2645 break;
2646
2647 case 32:
2648 if (thumb_mode)
2649 {
2650 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2651 as_bad (_("selected processor does not support ARM opcodes"));
2652
2653 thumb_mode = 0;
2654
2655 if (!need_pass_2)
2656 frag_align (2, 0, 0);
2657
2658 record_alignment (now_seg, 1);
2659 }
2660 break;
2661
2662 default:
2663 as_bad (_("invalid instruction size selected (%d)"), width);
2664 }
2665 }
2666
/* Implement the .arm directive: switch to 32-bit ARM encoding.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2673
/* Implement the .thumb directive: switch to 16-bit Thumb encoding.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2680
2681 static void
2682 s_code (int unused ATTRIBUTE_UNUSED)
2683 {
2684 int temp;
2685
2686 temp = get_absolute_expression ();
2687 switch (temp)
2688 {
2689 case 16:
2690 case 32:
2691 opcode_select (temp);
2692 break;
2693
2694 default:
2695 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2696 }
2697 }
2698
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* thumb_mode == 2 marks "forced" Thumb, distinct from the
	 normal value 1 set by opcode_select.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2715
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  /* .thumb_func implies .thumb for the code that follows.  */
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2725
2726 /* Perform a .set directive, but also mark the alias as
2727 being a thumb function. */
2728
/* EQUIV non-zero makes redefinition of an already-defined symbol an
   error (presumably the .thumb_equiv flavour -- confirm against the
   directive table).  */

static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  name = input_line_pointer;
  delim = get_symbol_end ();
  end_name = input_line_pointer;
  *end_name = delim;

  if (*input_line_pointer != ',')
    {
      /* Re-terminate NAME just for the error message, then restore.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Put the delimiter back so the value expression can be parsed.  */
  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2815
2816 /* Directives: Mode selection. */
2817
2818 /* .syntax [unified|divided] - choose the new unified syntax
2819 (same for Arm and Thumb encoding, modulo slight differences in what
2820 can be represented) or the old divergent syntax for each mode. */
2821 static void
2822 s_syntax (int unused ATTRIBUTE_UNUSED)
2823 {
2824 char *name, delim;
2825
2826 name = input_line_pointer;
2827 delim = get_symbol_end ();
2828
2829 if (!strcasecmp (name, "unified"))
2830 unified_syntax = TRUE;
2831 else if (!strcasecmp (name, "divided"))
2832 unified_syntax = FALSE;
2833 else
2834 {
2835 as_bad (_("unrecognized syntax mode \"%s\""), name);
2836 return;
2837 }
2838 *input_line_pointer = delim;
2839 demand_empty_rest_of_line ();
2840 }
2841
2842 /* Directives: sectioning and alignment. */
2843
2844 /* Same as s_align_ptwo but align 0 => align 2. */
2845
2846 static void
2847 s_align (int unused ATTRIBUTE_UNUSED)
2848 {
2849 int temp;
2850 bfd_boolean fill_p;
2851 long temp_fill;
2852 long max_alignment = 15;
2853
2854 temp = get_absolute_expression ();
2855 if (temp > max_alignment)
2856 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2857 else if (temp < 0)
2858 {
2859 as_bad (_("alignment negative. 0 assumed."));
2860 temp = 0;
2861 }
2862
2863 if (*input_line_pointer == ',')
2864 {
2865 input_line_pointer++;
2866 temp_fill = get_absolute_expression ();
2867 fill_p = TRUE;
2868 }
2869 else
2870 {
2871 fill_p = FALSE;
2872 temp_fill = 0;
2873 }
2874
2875 if (!temp)
2876 temp = 2;
2877
2878 /* Only make a frag if we HAVE to. */
2879 if (temp && !need_pass_2)
2880 {
2881 if (!fill_p && subseg_text_p (now_seg))
2882 frag_align_code (temp, 0);
2883 else
2884 frag_align (temp, (int) temp_fill, 0);
2885 }
2886 demand_empty_rest_of_line ();
2887
2888 record_alignment (now_seg, temp);
2889 }
2890
/* Implement the .bss directive: switch to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
2903
/* Implement the .even directive: align to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
2915
2916 /* Directives: Literal pools. */
2917
2918 static literal_pool *
2919 find_literal_pool (void)
2920 {
2921 literal_pool * pool;
2922
2923 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2924 {
2925 if (pool->section == now_seg
2926 && pool->sub_section == now_subseg)
2927 break;
2928 }
2929
2930 return pool;
2931 }
2932
2933 static literal_pool *
2934 find_or_make_literal_pool (void)
2935 {
2936 /* Next literal pool ID number. */
2937 static unsigned int latest_pool_num = 1;
2938 literal_pool * pool;
2939
2940 pool = find_literal_pool ();
2941
2942 if (pool == NULL)
2943 {
2944 /* Create a new pool. */
2945 pool = (literal_pool *) xmalloc (sizeof (* pool));
2946 if (! pool)
2947 return NULL;
2948
2949 pool->next_free_entry = 0;
2950 pool->section = now_seg;
2951 pool->sub_section = now_subseg;
2952 pool->next = list_of_pools;
2953 pool->symbol = NULL;
2954
2955 /* Add it to the list. */
2956 list_of_pools = pool;
2957 }
2958
2959 /* New pools, and emptied pools, will have a NULL symbol. */
2960 if (pool->symbol == NULL)
2961 {
2962 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2963 (valueT) 0, &zero_address_frag);
2964 pool->id = latest_pool_num ++;
2965 }
2966
2967 /* Done. */
2968 return pool;
2969 }
2970
2971 /* Add the literal in the global 'inst'
2972 structure to the relevant literal pool. */
2973
static int
add_to_lit_pool (void)
{
  literal_pool * pool;
  unsigned int entry;

  /* NOTE(review): find_or_make_literal_pool can in principle return
     NULL (xmalloc failure path); the result is dereferenced
     unchecked.  */
  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      /* Reuse an existing entry that is the same constant...  */
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_constant)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_unsigned
	      == inst.reloc.exp.X_unsigned))
	break;

      /* ... or the same symbol + addend combination.  */
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_symbol)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_add_symbol
	      == inst.reloc.exp.X_add_symbol)
	  && (pool->literals[entry].X_op_symbol
	      == inst.reloc.exp.X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      pool->literals[entry] = inst.reloc.exp;
      pool->next_free_entry += 1;
    }

  /* Redirect the instruction's relocation to address the pool slot:
     the pool's fake symbol plus 4 bytes per entry.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = ((int) entry) * 4;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3023
3024 /* Can't use symbol_new here, so have to create a symbol and then at
3025 a later date assign it a value. Thats what these functions do. */
3026
static void
symbol_locate (symbolS * symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT segment,		/* Segment identifier (SEG_<something>).  */
	       valueT valu,		/* Symbol value.  */
	       fragS * frag)		/* Associated fragment.  */
{
  unsigned int name_length;
  char * preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its name.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3074
3075
/* Implement the .ltorg directive: dump the current literal pool at
   the present location, then mark the pool empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  mapping_state (MAP_DATA);

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (2, 0, 0);

  record_alignment (now_seg, 2);

  /* The name embeds a control character (\002), so it cannot collide
     with any user-written symbol.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Pin the pool's fake symbol to the spot where the pool is dumped.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    /* First output the expression in the instruction to the pool.  */
    emit_expr (&(pool->literals[entry]), 4); /* .word  */

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3118
3119 #ifdef OBJ_ELF
3120 /* Forward declarations for functions below, in the MD interface
3121 section. */
3122 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3123 static valueT create_unwind_entry (int);
3124 static void start_unwind_section (const segT, int);
3125 static void add_unwind_opcode (valueT, int);
3126 static void flush_pending_unwind (void);
3127
3128 /* Directives: Data. */
3129
/* Implement .word/.short style data directives with optional
   relocation suffixes (e.g. "(got)") on symbolic operands.  NBYTES is
   the size of each emitted value.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix present: plain symbolic value.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (_("%s relocations do not fit in %d bytes"),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = (char *) alloca (input_line_pointer - base);
		  memcpy (save_buf, base, input_line_pointer - base);
		  /* Splice the reloc suffix out of the input buffer so
		     the whole operand re-parses as one expression, then
		     restore the buffer afterwards.  */
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the reloc in the low-order SIZE bytes of the
		     NBYTES field.  */
		  offset = nbytes - size;
		  p = frag_more ((int) nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3219
3220 /* Emit an expression containing a 32-bit thumb instruction.
3221 Implementation based on put_thumb32_insn. */
3222
3223 static void
3224 emit_thumb32_expr (expressionS * exp)
3225 {
3226 expressionS exp_high = *exp;
3227
3228 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3229 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3230 exp->X_add_number &= 0xffff;
3231 emit_expr (exp, (unsigned int) THUMB_SIZE);
3232 }
3233
3234 /* Guess the instruction size based on the opcode. */
3235
/* Guess the byte size of a Thumb instruction from its opcode value:
   2 for a 16-bit encoding, 4 for a 32-bit encoding, 0 when the value
   falls in the ambiguous middle range and cannot be classified.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int op = (unsigned int) opcode;

  if (op < 0xe800u)
    return 2;
  if (op >= 0xe8000000u)
    return 4;
  return 0;
}
3246
/* Emit the constant in EXP as a raw instruction.  NBYTES is the
   explicit width (from a .inst.n/.inst.w suffix), or 0 to deduce the
   width from the opcode value.  Returns TRUE iff something was
   emitted.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width: infer 2 or 4 bytes from the opcode.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block state machine consistent, as a
		 hand-written instruction would.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* 32-bit Thumb instructions are emitted as two halfwords
		 on little-endian targets.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3291
3292 /* Like s_arm_elf_cons but do not use md_cons_align and
3293 set the mapping state to MAP_ARM/MAP_THUMB. */
3294
/* NBYTES is the width suffix: 0 for plain .inst (deduced in Thumb
   mode, forced to 4 in ARM mode); non-zero values are rejected in ARM
   mode.  */

static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      /* ARM instructions are always four bytes.  */
      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  /* Emit each comma-separated operand as one instruction.  */
  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3341
3342 /* Parse a .rel31 directive. */
3343
/* Parse a .rel31 directive of the form ".rel31 <bit>, <expression>";
   BIT (0 or 1) becomes the top bit of the emitted word, and the low
   31 bits are filled in by a PREL31 relocation against EXPRESSION.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  /* Emit the word with only the high bit set; the relocation supplies
     the rest.  */
  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3381
3382 /* Directives: AEABI stack-unwind tables. */
3383
3384 /* Parse an unwind_fnstart directive. Simply records the current location. */
3385
static void
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  /* Nested/repeated .fnstart is an error; .fnend clears proc_start.  */
  if (unwind.proc_start)
    {
      as_bad (_("duplicate .fnstart directive"));
      return;
    }

  /* Mark the start of the function.  */
  unwind.proc_start = expr_build_dot ();

  /* Reset the rest of the unwind info.  */
  unwind.opcode_count = 0;
  unwind.table_entry = NULL;
  unwind.personality_routine = NULL;
  unwind.personality_index = -1;
  unwind.frame_size = 0;
  unwind.fp_offset = 0;
  unwind.fp_reg = REG_SP;
  unwind.fp_used = 0;
  unwind.sp_restored = 0;
}
3410
3411
3412 /* Parse a handlerdata directive. Creates the exception handling table entry
3413 for the function. */
3414
static void
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  /* Must appear between .fnstart and .fnend.  */
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.table_entry)
    as_bad (_("duplicate .handlerdata directive"));

  /* Emit the table entry now; the argument 1 requests the
     handler-data variant of the entry.  */
  create_unwind_entry (1);
}
3427
3428 /* Parse an unwind_fnend directive. Generates the index table entry. */
3429
static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry, unless .handlerdata already created one.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size fix: it only creates the symbol reference.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  /* Allow a subsequent .fnstart.  */
  unwind.proc_start = NULL;
}
3496
3497
3498 /* Parse an unwind_cantunwind directive. */
3499
3500 static void
3501 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3502 {
3503 demand_empty_rest_of_line ();
3504 if (!unwind.proc_start)
3505 as_bad (MISSING_FNSTART);
3506
3507 if (unwind.personality_routine || unwind.personality_index != -1)
3508 as_bad (_("personality routine specified for cantunwind frame"));
3509
3510 unwind.personality_index = -2;
3511 }
3512
3513
3514 /* Parse a personalityindex directive. */
3515
3516 static void
3517 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3518 {
3519 expressionS exp;
3520
3521 if (!unwind.proc_start)
3522 as_bad (MISSING_FNSTART);
3523
3524 if (unwind.personality_routine || unwind.personality_index != -1)
3525 as_bad (_("duplicate .personalityindex directive"));
3526
3527 expression (&exp);
3528
3529 if (exp.X_op != O_constant
3530 || exp.X_add_number < 0 || exp.X_add_number > 15)
3531 {
3532 as_bad (_("bad personality routine number"));
3533 ignore_rest_of_line ();
3534 return;
3535 }
3536
3537 unwind.personality_index = exp.X_add_number;
3538
3539 demand_empty_rest_of_line ();
3540 }
3541
3542
3543 /* Parse a personality directive. */
3544
3545 static void
3546 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3547 {
3548 char *name, *p, c;
3549
3550 if (!unwind.proc_start)
3551 as_bad (MISSING_FNSTART);
3552
3553 if (unwind.personality_routine || unwind.personality_index != -1)
3554 as_bad (_("duplicate .personality directive"));
3555
3556 name = input_line_pointer;
3557 c = get_symbol_end ();
3558 p = input_line_pointer;
3559 unwind.personality_routine = symbol_find_or_make (name);
3560 *p = c;
3561 demand_empty_rest_of_line ();
3562 }
3563
3564
/* Parse a directive saving core registers.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;		/* Bit N set => rN is in the register list.  */
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the opcode emitted by .unwind_movsp and substitute sp
	 (bit 13) for ip (bit 12) in the list.  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set (anything outside the
	 contiguous r4.. block and r14).  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed: one word per listed register.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
3640
3641
3642 /* Parse a directive saving FPA registers. */
3643
3644 static void
3645 s_arm_unwind_save_fpa (int reg)
3646 {
3647 expressionS exp;
3648 int num_regs;
3649 valueT op;
3650
3651 /* Get Number of registers to transfer. */
3652 if (skip_past_comma (&input_line_pointer) != FAIL)
3653 expression (&exp);
3654 else
3655 exp.X_op = O_illegal;
3656
3657 if (exp.X_op != O_constant)
3658 {
3659 as_bad (_("expected , <constant>"));
3660 ignore_rest_of_line ();
3661 return;
3662 }
3663
3664 num_regs = exp.X_add_number;
3665
3666 if (num_regs < 1 || num_regs > 4)
3667 {
3668 as_bad (_("number of registers must be in the range [1:4]"));
3669 ignore_rest_of_line ();
3670 return;
3671 }
3672
3673 demand_empty_rest_of_line ();
3674
3675 if (reg == 4)
3676 {
3677 /* Short form. */
3678 op = 0xb4 | (num_regs - 1);
3679 add_unwind_opcode (op, 1);
3680 }
3681 else
3682 {
3683 /* Long form. */
3684 op = 0xc800 | (reg << 4) | (num_regs - 1);
3685 add_unwind_opcode (op, 2);
3686 }
3687 unwind.frame_size += num_regs * 12;
3688 }
3689
3690
/* Parse a directive saving VFP registers for ARMv6 and above.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* For start == 16 this also yields offset 0, so both arms of the
	 ternary agree at the boundary.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.
     NOTE(review): for start > 16 this computes a negative count and the
     assert below would fire -- confirm parse_vfp_reg_list cannot return
     such a list here.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* 8 bytes of stack per D register saved.  */
  unwind.frame_size += count * 8;
}
3739
3740
3741 /* Parse a directive saving VFP registers for pre-ARMv6. */
3742
3743 static void
3744 s_arm_unwind_save_vfp (void)
3745 {
3746 int count;
3747 unsigned int reg;
3748 valueT op;
3749
3750 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3751 if (count == FAIL)
3752 {
3753 as_bad (_("expected register list"));
3754 ignore_rest_of_line ();
3755 return;
3756 }
3757
3758 demand_empty_rest_of_line ();
3759
3760 if (reg == 8)
3761 {
3762 /* Short form. */
3763 op = 0xb8 | (count - 1);
3764 add_unwind_opcode (op, 1);
3765 }
3766 else
3767 {
3768 /* Long form. */
3769 op = 0xb300 | (reg << 4) | (count - 1);
3770 add_unwind_opcode (op, 2);
3771 }
3772 unwind.frame_size += count * 8 + 4;
3773 }
3774
3775
3776 /* Parse a directive saving iWMMXt data registers. */
3777
3778 static void
3779 s_arm_unwind_save_mmxwr (void)
3780 {
3781 int reg;
3782 int hi_reg;
3783 int i;
3784 unsigned mask = 0;
3785 valueT op;
3786
3787 if (*input_line_pointer == '{')
3788 input_line_pointer++;
3789
3790 do
3791 {
3792 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3793
3794 if (reg == FAIL)
3795 {
3796 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3797 goto error;
3798 }
3799
3800 if (mask >> reg)
3801 as_tsktsk (_("register list not in ascending order"));
3802 mask |= 1 << reg;
3803
3804 if (*input_line_pointer == '-')
3805 {
3806 input_line_pointer++;
3807 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3808 if (hi_reg == FAIL)
3809 {
3810 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3811 goto error;
3812 }
3813 else if (reg >= hi_reg)
3814 {
3815 as_bad (_("bad register range"));
3816 goto error;
3817 }
3818 for (; reg < hi_reg; reg++)
3819 mask |= 1 << reg;
3820 }
3821 }
3822 while (skip_past_comma (&input_line_pointer) != FAIL);
3823
3824 if (*input_line_pointer == '}')
3825 input_line_pointer++;
3826
3827 demand_empty_rest_of_line ();
3828
3829 /* Generate any deferred opcodes because we're going to be looking at
3830 the list. */
3831 flush_pending_unwind ();
3832
3833 for (i = 0; i < 16; i++)
3834 {
3835 if (mask & (1 << i))
3836 unwind.frame_size += 8;
3837 }
3838
3839 /* Attempt to combine with a previous opcode. We do this because gcc
3840 likes to output separate unwind directives for a single block of
3841 registers. */
3842 if (unwind.opcode_count > 0)
3843 {
3844 i = unwind.opcodes[unwind.opcode_count - 1];
3845 if ((i & 0xf8) == 0xc0)
3846 {
3847 i &= 7;
3848 /* Only merge if the blocks are contiguous. */
3849 if (i < 6)
3850 {
3851 if ((mask & 0xfe00) == (1 << 9))
3852 {
3853 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3854 unwind.opcode_count--;
3855 }
3856 }
3857 else if (i == 6 && unwind.opcode_count >= 2)
3858 {
3859 i = unwind.opcodes[unwind.opcode_count - 2];
3860 reg = i >> 4;
3861 i &= 0xf;
3862
3863 op = 0xffff << (reg - 1);
3864 if (reg > 0
3865 && ((mask & op) == (1u << (reg - 1))))
3866 {
3867 op = (1 << (reg + i + 1)) - 1;
3868 op &= ~((1 << reg) - 1);
3869 mask |= op;
3870 unwind.opcode_count -= 2;
3871 }
3872 }
3873 }
3874 }
3875
3876 hi_reg = 15;
3877 /* We want to generate opcodes in the order the registers have been
3878 saved, ie. descending order. */
3879 for (reg = 15; reg >= -1; reg--)
3880 {
3881 /* Save registers in blocks. */
3882 if (reg < 0
3883 || !(mask & (1 << reg)))
3884 {
3885 /* We found an unsaved reg. Generate opcodes to save the
3886 preceding block. */
3887 if (reg != hi_reg)
3888 {
3889 if (reg == 9)
3890 {
3891 /* Short form. */
3892 op = 0xc0 | (hi_reg - 10);
3893 add_unwind_opcode (op, 1);
3894 }
3895 else
3896 {
3897 /* Long form. */
3898 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3899 add_unwind_opcode (op, 2);
3900 }
3901 }
3902 hi_reg = reg - 1;
3903 }
3904 }
3905
3906 return;
3907 error:
3908 ignore_rest_of_line ();
3909 }
3910
3911 static void
3912 s_arm_unwind_save_mmxwcg (void)
3913 {
3914 int reg;
3915 int hi_reg;
3916 unsigned mask = 0;
3917 valueT op;
3918
3919 if (*input_line_pointer == '{')
3920 input_line_pointer++;
3921
3922 do
3923 {
3924 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3925
3926 if (reg == FAIL)
3927 {
3928 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
3929 goto error;
3930 }
3931
3932 reg -= 8;
3933 if (mask >> reg)
3934 as_tsktsk (_("register list not in ascending order"));
3935 mask |= 1 << reg;
3936
3937 if (*input_line_pointer == '-')
3938 {
3939 input_line_pointer++;
3940 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3941 if (hi_reg == FAIL)
3942 {
3943 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
3944 goto error;
3945 }
3946 else if (reg >= hi_reg)
3947 {
3948 as_bad (_("bad register range"));
3949 goto error;
3950 }
3951 for (; reg < hi_reg; reg++)
3952 mask |= 1 << reg;
3953 }
3954 }
3955 while (skip_past_comma (&input_line_pointer) != FAIL);
3956
3957 if (*input_line_pointer == '}')
3958 input_line_pointer++;
3959
3960 demand_empty_rest_of_line ();
3961
3962 /* Generate any deferred opcodes because we're going to be looking at
3963 the list. */
3964 flush_pending_unwind ();
3965
3966 for (reg = 0; reg < 16; reg++)
3967 {
3968 if (mask & (1 << reg))
3969 unwind.frame_size += 4;
3970 }
3971 op = 0xc700 | mask;
3972 add_unwind_opcode (op, 2);
3973 return;
3974 error:
3975 ignore_rest_of_line ();
3976 }
3977
3978
3979 /* Parse an unwind_save directive.
3980 If the argument is non-zero, this is a .vsave directive. */
3981
3982 static void
3983 s_arm_unwind_save (int arch_v6)
3984 {
3985 char *peek;
3986 struct reg_entry *reg;
3987 bfd_boolean had_brace = FALSE;
3988
3989 if (!unwind.proc_start)
3990 as_bad (MISSING_FNSTART);
3991
3992 /* Figure out what sort of save we have. */
3993 peek = input_line_pointer;
3994
3995 if (*peek == '{')
3996 {
3997 had_brace = TRUE;
3998 peek++;
3999 }
4000
4001 reg = arm_reg_parse_multi (&peek);
4002
4003 if (!reg)
4004 {
4005 as_bad (_("register expected"));
4006 ignore_rest_of_line ();
4007 return;
4008 }
4009
4010 switch (reg->type)
4011 {
4012 case REG_TYPE_FN:
4013 if (had_brace)
4014 {
4015 as_bad (_("FPA .unwind_save does not take a register list"));
4016 ignore_rest_of_line ();
4017 return;
4018 }
4019 input_line_pointer = peek;
4020 s_arm_unwind_save_fpa (reg->number);
4021 return;
4022
4023 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
4024 case REG_TYPE_VFD:
4025 if (arch_v6)
4026 s_arm_unwind_save_vfp_armv6 ();
4027 else
4028 s_arm_unwind_save_vfp ();
4029 return;
4030 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
4031 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
4032
4033 default:
4034 as_bad (_(".unwind_save does not support this kind of register"));
4035 ignore_rest_of_line ();
4036 }
4037 }
4038
4039
4040 /* Parse an unwind_movsp directive. */
4041
4042 static void
4043 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4044 {
4045 int reg;
4046 valueT op;
4047 int offset;
4048
4049 if (!unwind.proc_start)
4050 as_bad (MISSING_FNSTART);
4051
4052 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4053 if (reg == FAIL)
4054 {
4055 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4056 ignore_rest_of_line ();
4057 return;
4058 }
4059
4060 /* Optional constant. */
4061 if (skip_past_comma (&input_line_pointer) != FAIL)
4062 {
4063 if (immediate_for_directive (&offset) == FAIL)
4064 return;
4065 }
4066 else
4067 offset = 0;
4068
4069 demand_empty_rest_of_line ();
4070
4071 if (reg == REG_SP || reg == REG_PC)
4072 {
4073 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4074 return;
4075 }
4076
4077 if (unwind.fp_reg != REG_SP)
4078 as_bad (_("unexpected .unwind_movsp directive"));
4079
4080 /* Generate opcode to restore the value. */
4081 op = 0x90 | reg;
4082 add_unwind_opcode (op, 1);
4083
4084 /* Record the information for later. */
4085 unwind.fp_reg = reg;
4086 unwind.fp_offset = unwind.frame_size - offset;
4087 unwind.sp_restored = 1;
4088 }
4089
4090 /* Parse an unwind_pad directive. */
4091
4092 static void
4093 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4094 {
4095 int offset;
4096
4097 if (!unwind.proc_start)
4098 as_bad (MISSING_FNSTART);
4099
4100 if (immediate_for_directive (&offset) == FAIL)
4101 return;
4102
4103 if (offset & 3)
4104 {
4105 as_bad (_("stack increment must be multiple of 4"));
4106 ignore_rest_of_line ();
4107 return;
4108 }
4109
4110 /* Don't generate any opcodes, just record the details for later. */
4111 unwind.frame_size += offset;
4112 unwind.pending_offset += offset;
4113
4114 demand_empty_rest_of_line ();
4115 }
4116
4117 /* Parse an unwind_setfp directive. */
4118
4119 static void
4120 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4121 {
4122 int sp_reg;
4123 int fp_reg;
4124 int offset;
4125
4126 if (!unwind.proc_start)
4127 as_bad (MISSING_FNSTART);
4128
4129 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4130 if (skip_past_comma (&input_line_pointer) == FAIL)
4131 sp_reg = FAIL;
4132 else
4133 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4134
4135 if (fp_reg == FAIL || sp_reg == FAIL)
4136 {
4137 as_bad (_("expected <reg>, <reg>"));
4138 ignore_rest_of_line ();
4139 return;
4140 }
4141
4142 /* Optional constant. */
4143 if (skip_past_comma (&input_line_pointer) != FAIL)
4144 {
4145 if (immediate_for_directive (&offset) == FAIL)
4146 return;
4147 }
4148 else
4149 offset = 0;
4150
4151 demand_empty_rest_of_line ();
4152
4153 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4154 {
4155 as_bad (_("register must be either sp or set by a previous"
4156 "unwind_movsp directive"));
4157 return;
4158 }
4159
4160 /* Don't generate any opcodes, just record the information for later. */
4161 unwind.fp_reg = fp_reg;
4162 unwind.fp_used = 1;
4163 if (sp_reg == REG_SP)
4164 unwind.fp_offset = unwind.frame_size - offset;
4165 else
4166 unwind.fp_offset -= offset;
4167 }
4168
4169 /* Parse an unwind_raw directive. */
4170
4171 static void
4172 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4173 {
4174 expressionS exp;
4175 /* This is an arbitrary limit. */
4176 unsigned char op[16];
4177 int count;
4178
4179 if (!unwind.proc_start)
4180 as_bad (MISSING_FNSTART);
4181
4182 expression (&exp);
4183 if (exp.X_op == O_constant
4184 && skip_past_comma (&input_line_pointer) != FAIL)
4185 {
4186 unwind.frame_size += exp.X_add_number;
4187 expression (&exp);
4188 }
4189 else
4190 exp.X_op = O_illegal;
4191
4192 if (exp.X_op != O_constant)
4193 {
4194 as_bad (_("expected <offset>, <opcode>"));
4195 ignore_rest_of_line ();
4196 return;
4197 }
4198
4199 count = 0;
4200
4201 /* Parse the opcode. */
4202 for (;;)
4203 {
4204 if (count >= 16)
4205 {
4206 as_bad (_("unwind opcode too long"));
4207 ignore_rest_of_line ();
4208 }
4209 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4210 {
4211 as_bad (_("invalid unwind opcode"));
4212 ignore_rest_of_line ();
4213 return;
4214 }
4215 op[count++] = exp.X_add_number;
4216
4217 /* Parse the next byte. */
4218 if (skip_past_comma (&input_line_pointer) == FAIL)
4219 break;
4220
4221 expression (&exp);
4222 }
4223
4224 /* Add the opcode bytes in reverse order. */
4225 while (count--)
4226 add_unwind_opcode (op[count], 1);
4227
4228 demand_empty_rest_of_line ();
4229 }
4230
4231
4232 /* Parse a .eabi_attribute directive. */
4233
4234 static void
4235 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4236 {
4237 int tag = s_vendor_attribute (OBJ_ATTR_PROC);
4238
4239 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4240 attributes_set_explicitly[tag] = 1;
4241 }
4242 #endif /* OBJ_ELF */
4243
/* Forward declarations: these directive handlers are defined later in
   the file but referenced by the pseudo-op table below.  */
static void s_arm_arch (int);
static void s_arm_object_arch (int);
static void s_arm_cpu (int);
static void s_arm_fpu (int);
4248
4249 #ifdef TE_PE
4250
4251 static void
4252 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4253 {
4254 expressionS exp;
4255
4256 do
4257 {
4258 expression (&exp);
4259 if (exp.X_op == O_symbol)
4260 exp.X_op = O_secrel;
4261
4262 emit_expr (&exp, 4);
4263 }
4264 while (*input_line_pointer++ == ',');
4265
4266 input_line_pointer--;
4267 demand_empty_rest_of_line ();
4268 }
4269 #endif /* TE_PE */
4270
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,	  0 },
  { "qn",	   s_qn,	  0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align,	  0 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func", s_thumb_func,	  0 },
  { "thumb_set",  s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu",	   s_arm_fpu,	  0 },
#ifdef OBJ_ELF
  { "word",	   s_arm_elf_cons, 4 },
  { "long",	   s_arm_elf_cons, 4 },
  { "inst.n",	   s_arm_elf_inst, 2 },
  { "inst.w",	   s_arm_elf_inst, 4 },
  { "inst",	   s_arm_elf_inst, 0 },
  { "rel31",	   s_arm_rel31,	   0 },
  /* ARM EHABI frame-unwinding directives (handlers defined above).
     Note ".save" and ".vsave" share a handler, distinguished by the
     integer argument.  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  /* Extended ('x') and packed ('p') float constants.  */
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  { 0, 0, 0 }
};
4341 \f
4342 /* Parser functions used exclusively in instruction operands. */
4343
4344 /* Generic immediate-value read function for use in insn parsing.
4345 STR points to the beginning of the immediate (the leading #);
4346 VAL receives the value; if the value is outside [MIN, MAX]
4347 issue an error. PREFIX_OPT is true if the immediate prefix is
4348 optional. */
4349
4350 static int
4351 parse_immediate (char **str, int *val, int min, int max,
4352 bfd_boolean prefix_opt)
4353 {
4354 expressionS exp;
4355 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4356 if (exp.X_op != O_constant)
4357 {
4358 inst.error = _("constant expression required");
4359 return FAIL;
4360 }
4361
4362 if (exp.X_add_number < min || exp.X_add_number > max)
4363 {
4364 inst.error = _("immediate value out of range");
4365 return FAIL;
4366 }
4367
4368 *val = exp.X_add_number;
4369 return SUCCESS;
4370 }
4371
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits in .imm, and for 64-bit values the high 32 bits in .reg with
   .regisimm set.  */

static int
parse_big_immediate (char **str, int i)
{
  expressionS exp;
  char *ptr = *str;

  my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);

  if (exp.X_op == O_constant)
    {
      inst.operands[i].imm = exp.X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp.X_add_number & ~0xffffffffl) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4.  */
	  inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp.X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
    {
      /* For O_big, X_add_number is the littlenum count; accept only
	 values that need more than 32 but at most 64 bits.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      gas_assert (parts != 0);
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else
    return FAIL;

  /* Only consume the input on success.  */
  *str = ptr;

  return SUCCESS;
}
4423
/* Returns the pseudo-register number of an FPA immediate constant,
   or FAIL if there isn't a valid constant here.  A valid constant is
   returned as 8 + its index in the fp_values table.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Trailing junk after the match: undo the advance and keep
	     trying.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Accept it only if it equals one of the predefined FPA values.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;	/* expression () reads from
				   input_line_pointer, so swap *STR in.  */
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
      if (gen_to_words (words, 5, (long) 15) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  /* Restore input_line_pointer before returning.  */
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* No match: restore input_line_pointer and report failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4514
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  unsigned expected;

  /* The low 19 bits of the mantissa must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits 25-30 must be the complement pattern selected by bit 29.  */
  expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected;
}
4524
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.	 */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;	/* Hex literals are always integers.  */
  else
    {
      /* Scan up to the next space/newline for a '.' or exponent marker.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept quarter-precision encodable values, plus +/-0.0.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
4588
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Entry mapping a shift mnemonic to its kind; entries of this type are
   looked up via arm_shift_hsh in parse_shift below.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
};
4610
4611 /* Parse a <shift> specifier on an ARM data processing instruction.
4612 This has three forms:
4613
4614 (LSL|LSR|ASL|ASR|ROR) Rs
4615 (LSL|LSR|ASL|ASR|ROR) #imm
4616 RRX
4617
4618 Note that ASL is assimilated to LSL in the instruction encoding, and
4619 RRX to ROR #0 (which cannot be written as such). */
4620
4621 static int
4622 parse_shift (char **str, int i, enum parse_shift_mode mode)
4623 {
4624 const struct asm_shift_name *shift_name;
4625 enum shift_kind shift;
4626 char *s = *str;
4627 char *p = s;
4628 int reg;
4629
4630 for (p = *str; ISALPHA (*p); p++)
4631 ;
4632
4633 if (p == *str)
4634 {
4635 inst.error = _("shift expression expected");
4636 return FAIL;
4637 }
4638
4639 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
4640 p - *str);
4641
4642 if (shift_name == NULL)
4643 {
4644 inst.error = _("shift expression expected");
4645 return FAIL;
4646 }
4647
4648 shift = shift_name->kind;
4649
4650 switch (mode)
4651 {
4652 case NO_SHIFT_RESTRICT:
4653 case SHIFT_IMMEDIATE: break;
4654
4655 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4656 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4657 {
4658 inst.error = _("'LSL' or 'ASR' required");
4659 return FAIL;
4660 }
4661 break;
4662
4663 case SHIFT_LSL_IMMEDIATE:
4664 if (shift != SHIFT_LSL)
4665 {
4666 inst.error = _("'LSL' required");
4667 return FAIL;
4668 }
4669 break;
4670
4671 case SHIFT_ASR_IMMEDIATE:
4672 if (shift != SHIFT_ASR)
4673 {
4674 inst.error = _("'ASR' required");
4675 return FAIL;
4676 }
4677 break;
4678
4679 default: abort ();
4680 }
4681
4682 if (shift != SHIFT_RRX)
4683 {
4684 /* Whitespace can appear here if the next thing is a bare digit. */
4685 skip_whitespace (p);
4686
4687 if (mode == NO_SHIFT_RESTRICT
4688 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4689 {
4690 inst.operands[i].imm = reg;
4691 inst.operands[i].immisreg = 1;
4692 }
4693 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4694 return FAIL;
4695 }
4696 inst.operands[i].shift_kind = shift;
4697 inst.operands[i].shifted = 1;
4698 *str = p;
4699 return SUCCESS;
4700 }
4701
/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.	 */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  /* Register operand, possibly followed by a shift.  */
  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.	*/
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Otherwise it is an immediate expression.  */
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      value = exp.X_add_number;
      /* The rotation must be an even amount in [0, 30].  */
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      /* The constant being rotated must fit in 8 bits.  */
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Convert to decoded value.  md_apply_fix will put it back.
	 NOTE(review): for value == 0 the left shift count is 32, which
	 is only well-defined if X_add_number is wider than 32 bits --
	 confirm offsetT is 64-bit on all supported hosts.  */
      inst.reloc.exp.X_add_number
	= (((inst.reloc.exp.X_add_number << (32 - value))
	    | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
    }

  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
4772
4773 /* Group relocation information. Each entry in the table contains the
4774 textual name of the relocation as may appear in assembler source
4775 and must end with a colon.
4776 Along with this textual name are the relocation codes to be used if
4777 the corresponding instruction is an ALU instruction (ADD or SUB only),
4778 an LDR, an LDRS, or an LDC. */
4779
struct group_reloc_table_entry
{
  const char *name;	/* Relocation name as written in source, without
			   the trailing colon.  */
  int alu_code;		/* BFD reloc code for ADD/SUB instructions.  */
  int ldr_code;		/* BFD reloc code for LDR instructions.  */
  int ldrs_code;	/* BFD reloc code for LDRS instructions.  */
  int ldc_code;		/* BFD reloc code for LDC instructions.  */
};
4788
typedef enum
{
  /* Varieties of non-ALU group relocation.  Each value selects the
     corresponding *_code column of group_reloc_table below.  */

  GROUP_LDR,	/* Use the ldr_code relocation.  */
  GROUP_LDRS,	/* Use the ldrs_code relocation.  */
  GROUP_LDC	/* Use the ldc_code relocation.  */
} group_reloc_type;
4797
/* A zero in any non-ALU column below means that relocation variety is
   not permitted for the instruction class in question.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 } };	/* LDC */
4851
4852 /* Given the address of a pointer pointing to the textual name of a group
4853 relocation as may appear in assembler source, attempt to find its details
4854 in group_reloc_table. The pointer will be updated to the character after
4855 the trailing colon. On failure, FAIL will be returned; SUCCESS
4856 otherwise. On success, *entry will be updated to point at the relevant
4857 group_reloc_table entry. */
4858
4859 static int
4860 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4861 {
4862 unsigned int i;
4863 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4864 {
4865 int length = strlen (group_reloc_table[i].name);
4866
4867 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
4868 && (*str)[length] == ':')
4869 {
4870 *out = &group_reloc_table[i];
4871 *str += (length + 1);
4872 return SUCCESS;
4873 }
4874 }
4875
4876 return FAIL;
4877 }
4878
4879 /* Parse a <shifter_operand> for an ARM data processing instruction
4880 (as for parse_shifter_operand) where group relocations are allowed:
4881
4882 #<immediate>
4883 #<immediate>, <rotate>
4884 #:<group_reloc>:<expression>
4885 <Rm>
4886 <Rm>, <shift>
4887
4888 where <group_reloc> is one of the strings defined in group_reloc_table.
4889 The hashes are optional.
4890
4891 Everything else is as for parse_shifter_operand. */
4892
4893 static parse_operand_result
4894 parse_shifter_operand_group_reloc (char **str, int i)
4895 {
4896 /* Determine if we have the sequence of characters #: or just :
4897 coming next. If we do, then we check for a group relocation.
4898 If we don't, punt the whole lot to parse_shifter_operand. */
4899
4900 if (((*str)[0] == '#' && (*str)[1] == ':')
4901 || (*str)[0] == ':')
4902 {
4903 struct group_reloc_table_entry *entry;
4904
4905 if ((*str)[0] == '#')
4906 (*str) += 2;
4907 else
4908 (*str)++;
4909
4910 /* Try to parse a group relocation. Anything else is an error. */
4911 if (find_group_reloc_table_entry (str, &entry) == FAIL)
4912 {
4913 inst.error = _("unknown group relocation");
4914 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4915 }
4916
4917 /* We now have the group relocation table entry corresponding to
4918 the name in the assembler source. Next, we parse the expression. */
4919 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
4920 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4921
4922 /* Record the relocation type (always the ALU variant here). */
4923 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
4924 gas_assert (inst.reloc.type != 0);
4925
4926 return PARSE_OPERAND_SUCCESS;
4927 }
4928 else
4929 return parse_shifter_operand (str, i) == SUCCESS
4930 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
4931
4932 /* Never reached. */
4933 }
4934
4935 /* Parse all forms of an ARM address expression. Information is written
4936 to inst.operands[i] and/or inst.reloc.
4937
4938 Preindexed addressing (.preind=1):
4939
4940 [Rn, #offset] .reg=Rn .reloc.exp=offset
4941 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4942 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4943 .shift_kind=shift .reloc.exp=shift_imm
4944
4945 These three may have a trailing ! which causes .writeback to be set also.
4946
4947 Postindexed addressing (.postind=1, .writeback=1):
4948
4949 [Rn], #offset .reg=Rn .reloc.exp=offset
4950 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4951 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4952 .shift_kind=shift .reloc.exp=shift_imm
4953
4954 Unindexed addressing (.preind=0, .postind=0):
4955
4956 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4957
4958 Other:
4959
4960 [Rn]{!} shorthand for [Rn,#0]{!}
4961 =immediate .isreg=0 .reloc.exp=immediate
4962 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4963
4964 It is the caller's responsibility to check for addressing modes not
4965 supported by the instruction, and to set inst.reloc.type. */
4966
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
                    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[' means either "=immediate" (load-constant pseudo) or a bare
     label, which becomes a PC-relative reference.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;
	}
      /* Otherwise a load-constant pseudo op, no special treatment needed here.  */

      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* '[' seen: a base register is mandatory.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* A comma inside the brackets introduces a pre-indexed offset
     (register, immediate, or group relocation) or an alignment.  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm{, shift}] form.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here.  This may be subject to
	     change.  */
	  expressionS exp;
	  my_get_expression (&exp, &p, GE_NO_PREFIX);
	  if (exp.X_op != O_constant)
	    {
	      inst.error = _("alignment must be constant");
	      return PARSE_OPERAND_FAIL;
	    }
	  /* Alignment is kept in the high-order bits of .imm so a
	     register number can later be OR-ed into the low bits.  */
	  inst.operands[i].imm = exp.X_add_number << 8;
	  inst.operands[i].immisalign = 1;
	  /* Alignments are not pre-indexes.  */
	  inst.operands[i].preind = 0;
	}
      else
	{
	  /* Not a register: back up over any sign we consumed so the
	     expression parser sees it as part of the immediate.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type; GROUP_TYPE selects the
		 instruction-class column of the table.  */
	      switch (group_type)
		{
		case GROUP_LDR:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		  break;

		case GROUP_LDRS:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		  break;

		case GROUP_LDC:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		  break;

		default:
		  gas_assert (0);
		}

	      /* A zero table entry means the relocation variety is not
		 permitted for this instruction class.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	      return PARSE_OPERAND_FAIL;
	}
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  /* Trailing '!' requests writeback of the (pre-indexed) address.  */
  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  /* A comma after ']' starts a post-indexed or unindexed form.  */
  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already.  If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      /* Not a register: undo any consumed sign, then parse the
		 post-indexed immediate offset.  */
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5181
5182 static int
5183 parse_address (char **str, int i)
5184 {
5185 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5186 ? SUCCESS : FAIL;
5187 }
5188
/* As parse_address, but group relocations of the given TYPE are
   permitted.  Note the GROUP_LDR passed by parse_address is a dummy:
   it is only consulted when group_relocations is nonzero.  */
static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  return parse_address_main (str, i, 1, type);
}
5194
5195 /* Parse an operand for a MOVW or MOVT instruction. */
5196 static int
5197 parse_half (char **str)
5198 {
5199 char * p;
5200
5201 p = *str;
5202 skip_past_char (&p, '#');
5203 if (strncasecmp (p, ":lower16:", 9) == 0)
5204 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5205 else if (strncasecmp (p, ":upper16:", 9) == 0)
5206 inst.reloc.type = BFD_RELOC_ARM_MOVT;
5207
5208 if (inst.reloc.type != BFD_RELOC_UNUSED)
5209 {
5210 p += 9;
5211 skip_whitespace (p);
5212 }
5213
5214 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5215 return FAIL;
5216
5217 if (inst.reloc.type == BFD_RELOC_UNUSED)
5218 {
5219 if (inst.reloc.exp.X_op != O_constant)
5220 {
5221 inst.error = _("constant expression expected");
5222 return FAIL;
5223 }
5224 if (inst.reloc.exp.X_add_number < 0
5225 || inst.reloc.exp.X_add_number > 0xffff)
5226 {
5227 inst.error = _("immediate value out of range");
5228 return FAIL;
5229 }
5230 }
5231 *str = p;
5232 return SUCCESS;
5233 }
5234
5235 /* Miscellaneous. */
5236
5237 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5238 or a bitmask suitable to be or-ed into the ARM msr instruction. */
static int
parse_psr (char **str)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    psr_field = SPSR_BIT;
  else if (strncasecmp (p, "CPSR", 4) == 0)
    psr_field = 0;
  else
    {
      /* Neither CPSR nor SPSR: scan the identifier and try the v7-M
	 special-register hash table instead.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);
      if (!psr)
	return FAIL;

      *str = p;
      return psr->field;
    }

  /* Step over the four characters of "CPSR"/"SPSR".  */
  p += 4;
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* Look the suffix up in the PSR-flags hash table and merge its
	 field bits into the result.  */
      psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						  p - start);
      if (!psr)
	goto error;

      psr_field |= psr->field;
    }
  else
    {
      if (ISALNUM (*p))
	goto error;	/* Garbage after "[CS]PSR".  */

      /* No suffix: default to the control and flags fields.  */
      psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5302
5303 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5304 value suitable for splatting into the AIF field of the instruction. */
5305
5306 static int
5307 parse_cps_flags (char **str)
5308 {
5309 int val = 0;
5310 int saw_a_flag = 0;
5311 char *s = *str;
5312
5313 for (;;)
5314 switch (*s++)
5315 {
5316 case '\0': case ',':
5317 goto done;
5318
5319 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5320 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5321 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5322
5323 default:
5324 inst.error = _("unrecognized CPS flag");
5325 return FAIL;
5326 }
5327
5328 done:
5329 if (saw_a_flag == 0)
5330 {
5331 inst.error = _("missing CPS flags");
5332 return FAIL;
5333 }
5334
5335 *str = s - 1;
5336 return val;
5337 }
5338
5339 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5340 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5341
5342 static int
5343 parse_endian_specifier (char **str)
5344 {
5345 int little_endian;
5346 char *s = *str;
5347
5348 if (strncasecmp (s, "BE", 2))
5349 little_endian = 0;
5350 else if (strncasecmp (s, "LE", 2))
5351 little_endian = 1;
5352 else
5353 {
5354 inst.error = _("valid endian specifiers are be or le");
5355 return FAIL;
5356 }
5357
5358 if (ISALNUM (s[2]) || s[2] == '_')
5359 {
5360 inst.error = _("valid endian specifiers are be or le");
5361 return FAIL;
5362 }
5363
5364 *str = s + 2;
5365 return little_endian;
5366 }
5367
5368 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
5369 value suitable for poking into the rotate field of an sxt or sxta
5370 instruction, or FAIL on error. */
5371
5372 static int
5373 parse_ror (char **str)
5374 {
5375 int rot;
5376 char *s = *str;
5377
5378 if (strncasecmp (s, "ROR", 3) == 0)
5379 s += 3;
5380 else
5381 {
5382 inst.error = _("missing rotation field after comma");
5383 return FAIL;
5384 }
5385
5386 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5387 return FAIL;
5388
5389 switch (rot)
5390 {
5391 case 0: *str = s; return 0x0;
5392 case 8: *str = s; return 0x1;
5393 case 16: *str = s; return 0x2;
5394 case 24: *str = s; return 0x3;
5395
5396 default:
5397 inst.error = _("rotation can only be 0, 8, 16, or 24");
5398 return FAIL;
5399 }
5400 }
5401
5402 /* Parse a conditional code (from conds[] below). The value returned is in the
5403 range 0 .. 14, or FAIL. */
5404 static int
5405 parse_cond (char **str)
5406 {
5407 char *q;
5408 const struct asm_cond *c;
5409 int n;
5410 /* Condition codes are always 2 characters, so matching up to
5411 3 characters is sufficient. */
5412 char cond[3];
5413
5414 q = *str;
5415 n = 0;
5416 while (ISALPHA (*q) && n < 3)
5417 {
5418 cond[n] = TOLOWER (*q);
5419 q++;
5420 n++;
5421 }
5422
5423 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
5424 if (!c)
5425 {
5426 inst.error = _("condition required");
5427 return FAIL;
5428 }
5429
5430 *str = q;
5431 return c->value;
5432 }
5433
5434 /* Parse an option for a barrier instruction. Returns the encoding for the
5435 option, or FAIL. */
5436 static int
5437 parse_barrier (char **str)
5438 {
5439 char *p, *q;
5440 const struct asm_barrier_opt *o;
5441
5442 p = q = *str;
5443 while (ISALPHA (*q))
5444 q++;
5445
5446 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
5447 q - p);
5448 if (!o)
5449 return FAIL;
5450
5451 *str = q;
5452 return o->value;
5453 }
5454
5455 /* Parse the operands of a table branch instruction. Similar to a memory
5456 operand. */
5457 static int
5458 parse_tb (char **str)
5459 {
5460 char * p = *str;
5461 int reg;
5462
5463 if (skip_past_char (&p, '[') == FAIL)
5464 {
5465 inst.error = _("'[' expected");
5466 return FAIL;
5467 }
5468
5469 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5470 {
5471 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5472 return FAIL;
5473 }
5474 inst.operands[0].reg = reg;
5475
5476 if (skip_past_comma (&p) == FAIL)
5477 {
5478 inst.error = _("',' expected");
5479 return FAIL;
5480 }
5481
5482 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5483 {
5484 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5485 return FAIL;
5486 }
5487 inst.operands[0].imm = reg;
5488
5489 if (skip_past_comma (&p) == SUCCESS)
5490 {
5491 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5492 return FAIL;
5493 if (inst.reloc.exp.X_add_number != 1)
5494 {
5495 inst.error = _("invalid shift");
5496 return FAIL;
5497 }
5498 inst.operands[0].shifted = 1;
5499 }
5500
5501 if (skip_past_char (&p, ']') == FAIL)
5502 {
5503 inst.error = _("']' expected");
5504 return FAIL;
5505 }
5506 *str = p;
5507 return SUCCESS;
5508 }
5509
5510 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5511 information on the types the operands can take and how they are encoded.
5512 Up to four operands may be read; this function handles setting the
5513 ".present" field for each read operand itself.
5514 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5515 else returns FAIL. */
5516
static int
parse_neon_mov (char **str, int *which_operand)
{
  /* I indexes the operand slot being filled; it is post-incremented as
     each slot is completed and written back on success.  */
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  First operand is an S, D or Q
	 register; what follows the comma decides the case.  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D register destination: a second ARM core register is
		 required (case 5).  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is an ARM core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i++].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
5731
5732 /* Matcher codes for parse_operands. */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  /* Single-register operands.  */
  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  /* Register-list operands.  */
  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  /* Register-or-immediate alternatives.  */
  OP_NILO,	/* Neon immediate/logic operands 2 or 2+3.  (VBIC, VORR...)  */
  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_IMVNb, /* Neon D or Q reg, or immediate good for VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  /* Immediates; the suffix-less codes require a '#' prefix as accepted
     by parse_immediate.  */
  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  /* The 'b' codes take a bare immediate with an optional '#' prefix.  */
  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  /* Shifter operands, addresses and expressions.  */
  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS,	/* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  /* Miscellaneous operand syntaxes.  */
  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_PSR,	/* CPSR/SPSR mask for msr */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_RVC_PSR,	/* CPSR/SPSR mask for msr, or VFP control register.  */
  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC,	/* iWMMXt R or C reg */
  OP_RIWC_RIWG,	/* iWMMXt wC or wCG reg */

  /* Optional operands.  All codes from OP_FIRST_OPTIONAL onwards may
     legitimately be absent from the input.  */
  OP_oI7b,	/* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	/*				0 .. 31 */
  OP_oI32b,	/*				1 .. 32 */
  OP_oIffffb,	/*				0 .. 65535 */
  OP_oI255c,	/* curly-brace enclosed, 0 .. 255 */

  OP_oRR,	/* ARM register */
  OP_oRRnpc,	/* ARM register, not the PC */
  OP_oRRw,	/* ARM register, not r15, optional trailing ! */
  OP_oRND,	/* Optional Neon double precision register */
  OP_oRNQ,	/* Optional Neon quad precision register */
  OP_oRNDQ,	/* Optional Neon double or quad precision register */
  OP_oRNSDQ,	/* Optional single, double or quad precision vector register */
  OP_oSHll,	/* LSL immediate */
  OP_oSHar,	/* ASR immediate */
  OP_oSHllar,	/* LSL or ASR immediate */
  OP_oROR,	/* ROR 0/8/16/24 */
  OP_oBARRIER,	/* Option argument for a barrier instruction.  */

  OP_FIRST_OPTIONAL = OP_oI7b
};
5852
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.

   STR is the operand text; PATTERN is an OP_stop-terminated array of
   operand codes (values of the operand parse enum).  Codes at or
   above OP_FIRST_OPTIONAL denote optional operands: when a later
   operand fails to parse, the loop backtracks to the most recent
   optional operand, marks it not-present, and retries from there.  */
static int
parse_operands (char *str, const unsigned char *pattern)
{
  unsigned const char *upat = pattern;
  char *backtrack_pos = 0;	/* Non-null while backtracking is possible.  */
  const char *backtrack_error = 0;
  int i, val, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;

  /* Helper macros.  Each parses one syntactic element at STR, storing
     results in VAL and/or inst.operands[i]; on mismatch they jump to
     'failure' (which may backtrack) or 'bad_args' (argument-count
     error), or to a caller-supplied label for "try something else".  */

#define po_char_or_fail(chr)			\
  do						\
    {						\
      if (skip_past_char (&str, chr) == FAIL)	\
	goto bad_args;				\
    }						\
  while (0)

#define po_reg_or_fail(regtype)					\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	{							\
	  first_error (_(reg_expected_msgs[regtype]));		\
	  goto failure;						\
	}							\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\
    }								\
  while (0)

#define po_reg_or_goto(regtype, label)				\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	goto label;						\
								\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\
    }								\
  while (0)

#define po_imm_or_fail(min, max, popt)				\
  do								\
    {								\
      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
	goto failure;						\
      inst.operands[i].imm = val;				\
    }								\
  while (0)

#define po_scalar_or_goto(elsz, label)					\
  do									\
    {									\
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
      if (val == FAIL)							\
	goto label;							\
      inst.operands[i].reg = val;					\
      inst.operands[i].isscalar = 1;					\
    }									\
  while (0)

#define po_misc_or_fail(expr)			\
  do						\
    {						\
      if (expr)					\
	goto failure;				\
    }						\
  while (0)

#define po_misc_or_fail_no_backtrack(expr)		\
  do							\
    {							\
      result = expr;					\
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
	backtrack_pos = 0;				\
      if (result != PARSE_OPERAND_SUCCESS)		\
	goto failure;					\
    }							\
  while (0)

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      if (upat[i] >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  gas_assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      /* Operands after the first are comma-separated.  */
      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (upat[i])
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_oRR:
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_oRND:
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  break;
	  /* Also accept generic coprocessor regs for unknown registers.  */
	coproc_reg:
	  po_reg_or_fail (REG_TYPE_CN);
	  break;
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_oRNQ:
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
	case OP_oRNDQ:
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
	case OP_oRNSDQ:
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;

	/* Neon scalar. Using an element size of 8 means that some invalid
	   scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);    break;

	/* WARNING: We can expand to two operands here. This has the potential
	   to totally confuse the backtracking mechanism! It will be OK at
	   least as long as we don't try to use optional args as well,
	   though.  */
	case OP_NILO:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm);
	    inst.operands[i].present = 1;
	    i++;
	    skip_past_comma (&str);
	    po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
	    break;
	  one_reg_only:
	    /* Optional register operand was omitted. Unfortunately, it's in
	       operands[i-1] and we need it to be in inst.operands[i]. Fix that
	       here (this is a bit grotty).  */
	    inst.operands[i] = inst.operands[i-1];
	    inst.operands[i-1].present = 0;
	    break;
	  try_imm:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i) == FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I0:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	    break;
	  try_imm0:
	    po_imm_or_fail (0, 0, TRUE);
	  }
	  break;

	case OP_RVSD_I0:
	  /* Shares the try_imm0 label with OP_RNDQ_I0 above.  */
	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
	  break;

	case OP_RR_RNSC:
	  {
	    po_scalar_or_goto (8, try_rr);
	    break;
	  try_rr:
	    po_reg_or_fail (REG_TYPE_RN);
	  }
	  break;

	case OP_RNSDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_nsdq);
	    break;
	  try_nsdq:
	    po_reg_or_fail (REG_TYPE_NSDQ);
	  }
	  break;

	case OP_RNDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_ndq);
	    break;
	  try_ndq:
	    po_reg_or_fail (REG_TYPE_NDQ);
	  }
	  break;

	case OP_RND_RNSC:
	  {
	    po_scalar_or_goto (8, try_vfd);
	    break;
	  try_vfd:
	    po_reg_or_fail (REG_TYPE_VFD);
	  }
	  break;

	case OP_VMOV:
	  /* WARNING: parse_neon_mov can move the operand counter, i. If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
	  break;

	case OP_RNDQ_IMVNb:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
	    break;
	  try_mvnimm:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i) == FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I63b:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	    break;
	  try_shimm:
	    po_imm_or_fail (0, 63, TRUE);
	  }
	  break;

	case OP_RRnpcb:
	  /* Register in square brackets, e.g. "[r2]".  */
	  po_char_or_fail ('[');
	  po_reg_or_fail (REG_TYPE_RN);
	  po_char_or_fail (']');
	  break;

	case OP_RRw:
	case OP_oRRw:
	  po_reg_or_fail (REG_TYPE_RN);
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;
	  break;

	  /* Immediates */
	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,	  16, FALSE);	break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,	  63, FALSE);	break;
	case OP_I64:	 po_imm_or_fail (  1,	  64, FALSE);	break;
	case OP_I64z:	 po_imm_or_fail (  0,	  64, FALSE);	break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_oI7b:
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_oI31b:
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:	 po_imm_or_fail (  1,	  32, TRUE);	break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	  /* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');
	  break;

	case OP_I31w:
	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  {
	    char *s = str;
	    while (*s && *s != ',')
	      s++;
	    if (s[-1] == '!')
	      {
		s[-1] = '\0';
		inst.operands[i].writeback = 1;
	      }
	    po_imm_or_fail (0, 31, TRUE);
	    /* If the expression ran up to the zapped '!', step over the
	       NUL we wrote so the comma check below still works.  */
	    if (str == s - 1)
	      str = s;
	  }
	  break;

	  /* Expressions */
	case OP_EXPi:	EXPi:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_OPT_PREFIX));
	  break;

	case OP_EXP:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  break;

	case OP_EXPr:	EXPr:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  if (inst.reloc.exp.X_op == O_symbol)
	    {
	      val = parse_reloc (&str);
	      if (val == -1)
		{
		  inst.error = _("unrecognized relocation suffix");
		  goto failure;
		}
	      else if (val != BFD_RELOC_UNUSED)
		{
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;
		}
	    }
	  break;

	  /* Operand for MOVW or MOVT.  */
	case OP_HALF:
	  po_misc_or_fail (parse_half (&str));
	  break;

	  /* Register or expression.  */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate.  */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
	IF:
	  if (!is_immediate_prefix (*str))
	    goto bad_args;
	  str++;
	  val = parse_fpa_immediate (&str);
	  if (val == FAIL)
	    goto failure;
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  break;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;

	  /* Two kinds of register.  */
	case OP_RIWR_RIWC:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt data or control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
	  }
	  break;

	case OP_RIWC_RIWG:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;
	  }
	  break;

	  /* Misc */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_PSR:	 val = parse_psr (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER:val = parse_barrier (&str);		break;

	case OP_RVC_PSR:
	  po_reg_or_goto (REG_TYPE_VFC, try_psr);
	  inst.operands[i].isvec = 1;  /* Mark VFP control reg as vector.  */
	  break;
	try_psr:
	  val = parse_psr (&str);
	  break;

	case OP_APSR_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  break;
	try_apsr:
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
	     instruction).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
	    {
	      /* Each of n, z, c, v must appear exactly once, in any
		 order; FOUND accumulates flag bits, 16 marks error.  */
	      unsigned found = 0;
	      str += 5;
	      while (found < 15)
		switch (*str++)
		  {
		  case 'c': found = (found & 1) ? 16 : found | 1; break;
		  case 'n': found = (found & 2) ? 16 : found | 2; break;
		  case 'z': found = (found & 4) ? 16 : found | 4; break;
		  case 'v': found = (found & 8) ? 16 : found | 8; break;
		  default: found = 16;
		  }
	      if (found != 15)
		goto failure;
	      inst.operands[i].isvec = 1;
	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
	      inst.operands[i].reg = REG_PC;
	    }
	  else
	    goto failure;
	  break;

	case OP_TB:
	  po_misc_or_fail (parse_tb (&str));
	  break;

	  /* Register lists.  */
	case OP_REGLST:
	  val = parse_reg_list (&str);
	  if (*str == '^')
	    {
	      /* NOTE(review): index 1 is hard-coded here where other
		 cases use [i]; presumably the register list is always
		 operand 1 for instructions taking '^' — verify.  */
	      inst.operands[1].writeback = 1;
	      str++;
	    }
	  break;

	case OP_VRSLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
	  break;

	case OP_VRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
	  break;

	case OP_VRSDLST:
	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  if (val == FAIL)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_NRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  break;

	case OP_NSTRLST:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   &inst.operands[i].vectype);
	  break;

	  /* Addressing modes */
	case OP_ADDR:
	  po_misc_or_fail (parse_address (&str, i));
	  break;

	case OP_ADDRGLDR:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));
	  break;

	case OP_ADDRGLDRS:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));
	  break;

	case OP_ADDRGLDC:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));
	  break;

	case OP_SH:
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  break;

	case OP_SHG:
	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));
	  break;

	case OP_oSHll:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  break;

	case OP_oSHar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  break;

	case OP_oSHllar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
	  break;

	default:
	  as_fatal (_("unhandled operand code %d"), upat[i]);
	}

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.  */
      switch (upat[i])
	{
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_RRnpcb:
	case OP_RRw:
	case OP_oRRw:
	case OP_RRnpc_I0:
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	case OP_CPSF:
	case OP_ENDI:
	case OP_oROR:
	case OP_PSR:
	case OP_RVC_PSR:
	case OP_COND:
	case OP_oBARRIER:
	case OP_REGLST:
	case OP_VRSLST:
	case OP_VRDLST:
	case OP_VRSDLST:
	case OP_NRDLST:
	case OP_NSTRLST:
	  /* These cases store their result in VAL; fold it into the
	     operand here, failing if the sub-parser reported FAIL.  */
	  if (val == FAIL)
	    goto failure;
	  inst.operands[i].imm = val;
	  break;

	default:
	  break;
	}

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;
      /* Fall through to the failure/backtrack handling.  */

    failure:
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
      backtrack_pos = 0;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}
6488
/* The po_* helper macros are private to parse_operands; make sure
   none of them leak into the rest of the file.  */
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_fail
#undef po_scalar_or_goto
#undef po_misc_or_fail
#undef po_misc_or_fail_no_backtrack
6494
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and bail out.  Note that
   the expansion contains a bare "return;", so this macro may only be
   used inside functions returning void (the do_* encoders).  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
6506
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   Expands to a bare "return;", so only usable in void functions.
   REG is parenthesized to avoid operator-precedence surprises when
   the argument is an expression; note it may be evaluated up to
   three times, so it must be side-effect free.  */
#define reject_bad_reg(reg)					\
  do								\
    {								\
      if ((reg) == REG_SP || (reg) == REG_PC)			\
	{							\
	  inst.error = ((reg) == REG_SP) ? BAD_SP : BAD_PC;	\
	  return;						\
	}							\
    }								\
  while (0)
6518
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  REG is parenthesized so that expression arguments
   bind correctly against '=='; it may be evaluated twice.  */
#define warn_deprecated_sp(reg)				\
  do							\
    {							\
      if (warn_on_deprecated && (reg) == REG_SP)	\
	as_warn (_("use of r13 is deprecated"));	\
    }							\
  while (0)
6526
6527 /* Functions for operand encoding. ARM, then Thumb. */
6528
/* Rotate a 32-bit value left by N bits, for N in 0..31.  The "& 31"
   masks make N == 0 well defined: the unmasked form would evaluate
   "v >> 32", which is undefined behavior in C (shift count equal to
   the type width).  Parameters are parenthesized against precedence
   surprises; each is evaluated twice, so avoid side effects.  */
#define rotate_left(v, n) ((v) << ((n) & 31) | (v) >> ((32 - (n)) & 31))
6530
6531 /* If VAL can be encoded in the immediate field of an ARM instruction,
6532 return the encoded form. Otherwise, return FAIL. */
6533
6534 static unsigned int
6535 encode_arm_immediate (unsigned int val)
6536 {
6537 unsigned int a, i;
6538
6539 for (i = 0; i < 32; i += 2)
6540 if ((a = rotate_left (val, i)) <= 0xff)
6541 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6542
6543 return FAIL;
6544 }
6545
6546 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6547 return the encoded form. Otherwise, return FAIL. */
6548 static unsigned int
6549 encode_thumb32_immediate (unsigned int val)
6550 {
6551 unsigned int a, i;
6552
6553 if (val <= 0xff)
6554 return val;
6555
6556 for (i = 1; i <= 24; i++)
6557 {
6558 a = val >> i;
6559 if ((val & ~(0xff << i)) == 0)
6560 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6561 }
6562
6563 a = val & 0xff;
6564 if (val == ((a << 16) | a))
6565 return 0x100 | a;
6566 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6567 return 0x300 | a;
6568
6569 a = val & 0xff00;
6570 if (val == ((a << 16) | a))
6571 return 0x200 | (a >> 8);
6572
6573 return FAIL;
6574 }
/* Encode a VFP SP or DP register number into inst.instruction.
   POS selects the operand slot (Sd/Sn/Sm or Dd/Dn/Dm); each slot
   splits the register number across a 4-bit field and a separate
   one-bit field of the instruction word.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D registers above 15 require the VFP D32 extension: record its
     use in the appropriate arch-used feature set, or report an error
     if the selected FPU lacks it.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  switch (pos)
    {
      /* Single precision: bits 1-4 of the register number form the
	 main field; bit 0 goes in the separate one-bit field.  */
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

      /* Double precision: bits 0-3 form the main field; bit 4 goes in
	 the separate one-bit field.  */
    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
6629
6630 /* Encode a <shift> in an ARM-format instruction. The immediate,
6631 if any, is handled by md_apply_fix. */
6632 static void
6633 encode_arm_shift (int i)
6634 {
6635 if (inst.operands[i].shift_kind == SHIFT_RRX)
6636 inst.instruction |= SHIFT_ROR << 5;
6637 else
6638 {
6639 inst.instruction |= inst.operands[i].shift_kind << 5;
6640 if (inst.operands[i].immisreg)
6641 {
6642 inst.instruction |= SHIFT_BY_REG;
6643 inst.instruction |= inst.operands[i].imm << 8;
6644 }
6645 else
6646 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6647 }
6648 }
6649
6650 static void
6651 encode_arm_shifter_operand (int i)
6652 {
6653 if (inst.operands[i].isreg)
6654 {
6655 inst.instruction |= inst.operands[i].reg;
6656 encode_arm_shift (i);
6657 }
6658 else
6659 inst.instruction |= INST_IMMEDIATE;
6660 }
6661
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register and the P (pre-index) / W (write-back)
   bits common to both addressing modes.  IS_T is true for the "T"
   (user-mode access) variants, which only allow post-indexing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* Base register goes in the Rn field, bits 16-19.  */
  gas_assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* For T variants the W bit marks the user-mode access rather
	 than write-back (post-indexing always writes back).  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn if write-back is in effect (explicit W, or post-indexed)
     and the transfer register (bits 12-15) equals the base register
     (bits 16-19): the result is unpredictable.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
6700
6701 /* inst.operands[i] was set up by parse_address. Encode it into an
6702 ARM-format mode 2 load or store instruction. If is_t is true,
6703 reject forms that cannot be used with a T instruction (i.e. not
6704 post-indexed). */
6705 static void
6706 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
6707 {
6708 encode_arm_addr_mode_common (i, is_t);
6709
6710 if (inst.operands[i].immisreg)
6711 {
6712 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
6713 inst.instruction |= inst.operands[i].imm;
6714 if (!inst.operands[i].negative)
6715 inst.instruction |= INDEX_UP;
6716 if (inst.operands[i].shifted)
6717 {
6718 if (inst.operands[i].shift_kind == SHIFT_RRX)
6719 inst.instruction |= SHIFT_ROR << 5;
6720 else
6721 {
6722 inst.instruction |= inst.operands[i].shift_kind << 5;
6723 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6724 }
6725 }
6726 }
6727 else /* immediate offset in inst.reloc */
6728 {
6729 if (inst.reloc.type == BFD_RELOC_UNUSED)
6730 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
6731 }
6732 }
6733
6734 /* inst.operands[i] was set up by parse_address. Encode it into an
6735 ARM-format mode 3 load or store instruction. Reject forms that
6736 cannot be used with such instructions. If is_t is true, reject
6737 forms that cannot be used with a T instruction (i.e. not
6738 post-indexed). */
6739 static void
6740 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
6741 {
6742 if (inst.operands[i].immisreg && inst.operands[i].shifted)
6743 {
6744 inst.error = _("instruction does not accept scaled register index");
6745 return;
6746 }
6747
6748 encode_arm_addr_mode_common (i, is_t);
6749
6750 if (inst.operands[i].immisreg)
6751 {
6752 inst.instruction |= inst.operands[i].imm;
6753 if (!inst.operands[i].negative)
6754 inst.instruction |= INDEX_UP;
6755 }
6756 else /* immediate offset in inst.reloc */
6757 {
6758 inst.instruction |= HWOFFSET_IMM;
6759 if (inst.reloc.type == BFD_RELOC_UNUSED)
6760 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
6761 }
6762 }
6763
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  Returns SUCCESS or FAIL.  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  /* Base register in the Rn field, bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* In the unindexed form the immediate is a coprocessor option
	 field, not an offset, and the U bit is set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Choose the offset relocation, but preserve any group relocation
     (range BFD_RELOC_ARM_ALU_PC_G0_NC .. BFD_RELOC_ARM_LDC_SB_G2, or
     BFD_RELOC_ARM_LDR_PC_G0) that the parser already selected.  */
  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  return SUCCESS;
}
6824
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
{
  unsigned long tbit;

  /* Pick the load bit appropriate to the encoding: 16-bit Thumb,
     32-bit Thumb-2, or ARM.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* "=expr" only makes sense on loads, not stores.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }
  if (inst.reloc.exp.X_op == O_constant)
    {
      if (thumb_p)
	{
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction	= T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return TRUE;
	    }
	}
      else
	{
	  /* Try the constant as an ARM rotated immediate for MOV,
	     then its complement for MVN.  */
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }

	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }
	}
    }

  /* Fall back to a pc-relative load from the literal pool.
     NOTE(review): the address is hard-coded into operands[1];
     presumably every caller's address operand is operand 1.  */
  if (add_to_lit_pool () == FAIL)
    {
      inst.error = _("literal pool insertion failed");
      return TRUE;
    }
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
6906
6907 /* Functions for instruction encoding, sorted by sub-architecture.
6908 First some generics; their names are taken from the conventional
6909 bit positions for register arguments in ARM format instructions. */
6910
/* Encoder for mnemonics taking no operands: the opcode template in
   inst.instruction is already complete.  */
static void
do_noargs (void)
{
}
6915
/* Encode a single Rd operand in bits 12-15.  */
static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}
6921
6922 static void
6923 do_rd_rm (void)
6924 {
6925 inst.instruction |= inst.operands[0].reg << 12;
6926 inst.instruction |= inst.operands[1].reg;
6927 }
6928
6929 static void
6930 do_rd_rn (void)
6931 {
6932 inst.instruction |= inst.operands[0].reg << 12;
6933 inst.instruction |= inst.operands[1].reg << 16;
6934 }
6935
6936 static void
6937 do_rn_rd (void)
6938 {
6939 inst.instruction |= inst.operands[0].reg << 16;
6940 inst.instruction |= inst.operands[1].reg << 12;
6941 }
6942
6943 static void
6944 do_rd_rm_rn (void)
6945 {
6946 unsigned Rn = inst.operands[2].reg;
6947 /* Enforce restrictions on SWP instruction. */
6948 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
6949 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
6950 _("Rn must not overlap other operands"));
6951 inst.instruction |= inst.operands[0].reg << 12;
6952 inst.instruction |= inst.operands[1].reg;
6953 inst.instruction |= Rn << 16;
6954 }
6955
6956 static void
6957 do_rd_rn_rm (void)
6958 {
6959 inst.instruction |= inst.operands[0].reg << 12;
6960 inst.instruction |= inst.operands[1].reg << 16;
6961 inst.instruction |= inst.operands[2].reg;
6962 }
6963
6964 static void
6965 do_rm_rd_rn (void)
6966 {
6967 inst.instruction |= inst.operands[0].reg;
6968 inst.instruction |= inst.operands[1].reg << 12;
6969 inst.instruction |= inst.operands[2].reg << 16;
6970 }
6971
static void
do_imm0 (void)
{
  /* A bare immediate, OR'd into the low bits of the opcode.  */
  inst.instruction |= inst.operands[0].imm;
}
6977
static void
do_rd_cpaddr (void)
{
  /* Rd in bits 12-15, plus a coprocessor-style address taken from
     operand 1.  */
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
6984
6985 /* ARM instructions, in alphabetical order by function name (except
6986 that wrapper functions appear immediately after the function they
6987 wrap). */
6988
6989 /* This is a pseudo-op of the form "adr rd, label" to be converted
6990 into a relative address of the form "add rd, pc, #label-.-8". */
6991
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* Compensate for the ARM pipeline: PC reads as the instruction
     address plus 8.  */
  inst.reloc.exp.X_add_number -= 8;
}
7003
7004 /* This is a pseudo-op of the form "adrl rd, label" to be converted
7005 into a relative address of the form:
7006 add rd, pc, #low(label-.-8)"
7007 add rd, rd, #high(label-.-8)" */
7008
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* The pseudo-op expands to two ARM instructions.  */
  inst.size = INSN_SIZE * 2;
  /* Compensate for the ARM pipeline: PC reads as address + 8.  */
  inst.reloc.exp.X_add_number -= 8;
}
7021
static void
do_arit (void)
{
  /* Two-operand form "op Rd, <shifter>" is shorthand for
     "op Rd, Rd, <shifter>".  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
7031
static void
do_barrier (void)
{
  if (inst.operands[0].present)
    {
      /* Unless the opcode has 0x4 in bits 7-4, only the SY (0xf)
	 option is accepted.  NOTE(review): presumably the 0x4 pattern
	 selects the barrier variant that takes arbitrary option values;
	 confirm against the insns[] table.  */
      constraint ((inst.instruction & 0xf0) != 0x40
		  && inst.operands[0].imm != 0xf,
		  _("bad barrier type"));
      inst.instruction |= inst.operands[0].imm;
    }
  else
    /* No option given: default to SY (full system).  */
    inst.instruction |= 0xf;
}
7045
static void
do_bfc (void)
{
  /* Operand 1 is the LSB, operand 2 the width; their sum is one past
     the most significant bit affected.  */
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
7057
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  /* LSB plus width gives one past the highest bit written.  */
  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
7077
static void
do_bfx (void)
{
  /* SBFX/UBFX: operand 2 is the LSB, operand 3 the width.  Unlike
     BFC/BFI, the encoding stores LSB and width-minus-one.  */
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
}
7088
7089 /* ARM V5 breakpoint instruction (argument parse)
7090 BKPT <16 bit unsigned immediate>
7091 Instruction is not conditional.
7092 The bit pattern given in insns[] has the COND_ALWAYS condition,
7093 and it is an error if the caller tried to override that. */
7094
7095 static void
7096 do_bkpt (void)
7097 {
7098 /* Top 12 of 16 bits to bits 19:8. */
7099 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
7100
7101 /* Bottom 4 of 16 bits to bits 3:0. */
7102 inst.instruction |= inst.operands[0].imm & 0xf;
7103 }
7104
/* Set up the relocation for a branch-class instruction.  DEFAULT_RELOC
   is used unless the operand carried an explicit (plt) suffix.  */

static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      /* (plt) is the only relocation suffix accepted on branches.  */
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
		  _("the only suffix valid here is '(plt)'"));
      inst.reloc.type = BFD_RELOC_ARM_PLT32;
    }
  else
    {
      inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
    }
  inst.reloc.pc_rel = 1;
}
7120
static void
do_branch (void)
{
#ifdef OBJ_ELF
  /* EABI v4 and later distinguish jump relocs from plain branches.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
7131
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      /* Unconditional BL may be relaxed to BLX by the linker, so it
	 gets the CALL reloc; a conditional BL cannot, and uses JUMP.  */
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
7147
7148 /* ARM V5 branch-link-exchange instruction (argument parse)
7149 BLX <target_addr> ie BLX(1)
7150 BLX{<condition>} <Rm> ie BLX(2)
7151 Unfortunately, there are two different opcodes for this mnemonic.
7152 So, the insns[].value is not used, and the code here zaps values
7153 into inst.instruction.
7154 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
7155
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* BLX(1) immediate form: fixed unconditional encoding.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
7179
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    /* Note: for non-ELF objects the #ifdef leaves this statement
       unconditional, so no V4BX reloc is ever emitted there.  */
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
7203
7204
7205 /* ARM v5TEJ. Jump to Jazelle code. */
7206
static void
do_bxj (void)
{
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  /* Rm in bits 0-3.  */
  inst.instruction |= inst.operands[0].reg;
}
7215
7216 /* Co-processor data operation:
7217 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
7218 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
7219 static void
7220 do_cdp (void)
7221 {
7222 inst.instruction |= inst.operands[0].reg << 8;
7223 inst.instruction |= inst.operands[1].imm << 20;
7224 inst.instruction |= inst.operands[2].reg << 12;
7225 inst.instruction |= inst.operands[3].reg << 16;
7226 inst.instruction |= inst.operands[4].reg;
7227 inst.instruction |= inst.operands[5].imm << 5;
7228 }
7229
7230 static void
7231 do_cmp (void)
7232 {
7233 inst.instruction |= inst.operands[0].reg << 16;
7234 encode_arm_shifter_operand (1);
7235 }
7236
7237 /* Transfer between coprocessor and ARM registers.
7238 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
7239 MRC2
7240 MCR{cond}
7241 MCR2
7242
7243 No special properties. */
7244
static void
do_co_reg (void)
{
  unsigned Rd;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      /* Opcode values distinguish the transfer direction.  */
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2 */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2: PC is allowed (writes the flags), SP is not.  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }


  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
7276
7277 /* Transfer between coprocessor register and pair of ARM registers.
7278 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
7279 MCRR2
7280 MRRC{cond}
7281 MRRC2
7282
7283 Two XScale instructions are special cases of these:
7284
7285 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
7286 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
7287
7288 Result unpredictable if Rd or Rn is R15. */
7289
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      /* Thumb forbids both SP and PC here.  */
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      /* ARM mode: result is unpredictable if either register is PC.  */
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
7315
static void
do_cpsi (void)
{
  /* CPS<effect> <iflags>{, #<mode>}: iflags in bits 6-8; an explicit
     mode number sets the M-mod bit as well.  */
  inst.instruction |= inst.operands[0].imm << 6;
  if (inst.operands[1].present)
    {
      inst.instruction |= CPSI_MMOD;
      inst.instruction |= inst.operands[1].imm;
    }
}
7326
static void
do_dbg (void)
{
  /* DBG #<option>: hint option in the low bits.  */
  inst.instruction |= inst.operands[0].imm;
}
7332
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emits no bytes in ARM mode.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record the mask and condition so following instructions can be
	 checked against the IT block state.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
7349
/* LDM/STM: base register in bits 16-19, register list in bits 0-15.
   Diagnoses the architecturally UNPREDICTABLE writeback cases.  */

static void
do_ldmstm (void)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;   /* Register-list bitmask.  */

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A '^' suffix on the register list selects the user-bank/exception
     return forms (LDM type 2/3).  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }
}
7388
7389 /* ARMv5TE load-consecutive (argument parse)
7390 Mode is like LDRH.
7391
7392 LDRccD R, mode
7393 STRccD R, mode. */
7394
static void
do_ldrd (void)
{
  /* LDRD/STRD move an even/odd register pair; the second register is
     implied if omitted.  */
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first destination register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* r14 as first reg would make the pair r14/r15, which is invalid.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  if (inst.instruction & LOAD_BIT)
    {
      /* encode_arm_addr_mode_3 will diagnose overlap between the base
	 register and the first register written; we have to diagnose
	 overlap between the base and the second register written here.  */

      if (inst.operands[2].reg == inst.operands[1].reg
	  && (inst.operands[2].writeback || inst.operands[2].postind))
	as_warn (_("base register written back, and overlaps "
		   "second destination register"));

      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      else if (inst.operands[2].immisreg
	       && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
		   || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps destination register"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
7431
/* LDREX{,B,H,D} Rd, [Rn]: only a plain register-indirect address with a
   zero offset is a legal addressing mode.  */

static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* Offset already validated as zero; no fixup needed.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
7461
static void
do_ldrexd (void)
{
  /* LDREXD loads an even/odd register pair; the second register is
     implied if omitted.  */
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
7477
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* "ldr rd, =expr" may become a mov or a literal-pool load; if it was
     fully handled there, we are done.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
}
7487
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
7506
7507 /* Halfword and signed-byte load/store operations. */
7508
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  /* "ldr rd, =expr" handling, as in do_ldst but for mode-3 forms.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
7519
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
7538
7539 /* Co-processor register load/store.
7540 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7541 static void
7542 do_lstc (void)
7543 {
7544 inst.instruction |= inst.operands[0].reg << 8;
7545 inst.instruction |= inst.operands[1].reg << 12;
7546 encode_arm_cp_address (2, TRUE, TRUE, 0);
7547 }
7548
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  /* Rd in bits 16-19, Rm in bits 0-3, Rs in bits 8-11, Rn in 12-15.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
7563
7564 static void
7565 do_mov (void)
7566 {
7567 inst.instruction |= inst.operands[0].reg << 12;
7568 encode_arm_shifter_operand (1);
7569 }
7570
7571 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7572 static void
7573 do_mov16 (void)
7574 {
7575 bfd_vma imm;
7576 bfd_boolean top;
7577
7578 top = (inst.instruction & 0x00400000) != 0;
7579 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
7580 _(":lower16: not allowed this instruction"));
7581 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
7582 _(":upper16: not allowed instruction"));
7583 inst.instruction |= inst.operands[0].reg << 12;
7584 if (inst.reloc.type == BFD_RELOC_UNUSED)
7585 {
7586 imm = inst.reloc.exp.X_add_number;
7587 /* The value is in two pieces: 0:11, 16:19. */
7588 inst.instruction |= (imm & 0x00000fff);
7589 inst.instruction |= (imm & 0x0000f000) << 4;
7590 }
7591 }
7592
7593 static void do_vfp_nsyn_opcode (const char *);
7594
/* Handle an MRS written in VFP (non-unified) syntax.  Returns SUCCESS
   if this function consumed the instruction, FAIL otherwise.  */

static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      /* "mrs APSR_nzcv, fpscr" is really fmstat.  */
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
7613
/* Handle an MSR targeting a VFP system register: re-dispatch to fmxr.
   Returns SUCCESS if consumed, FAIL to let the caller handle it.  */

static int
do_vfp_nsyn_msr (void)
{
  if (inst.operands[0].isvec)
    do_vfp_nsyn_opcode ("fmxr");
  else
    return FAIL;

  return SUCCESS;
}
7624
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  if (thumb_mode && inst.operands[0].reg == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && inst.operands[0].reg == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* Register number 1 denotes FPSCR here.  */
  if (inst.operands[1].reg != 1)
    first_error (_("operand 1 must be FPSCR"));

  inst.instruction |= (Rt << 12);
}
7648
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  /* Thumb rejects SP and PC; ARM mode rejects only PC.  */
  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* Register number 1 denotes FPSCR here.  */
  if (inst.operands[0].reg != 1)
    first_error (_("operand 0 must be FPSCR"));

  inst.instruction |= (Rt << 12);
}
7667
static void
do_mrs (void)
{
  /* VFP-syntax forms (fmstat/fmrx) take precedence.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
  constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
	      != (PSR_c|PSR_f),
	      _("'CPSR' or 'SPSR' expected"));
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
}
7682
7683 /* Two possible forms:
7684 "{C|S}PSR_<field>, Rm",
7685 "{C|S}PSR_f, #expression". */
7686
static void
do_msr (void)
{
  /* VFP-syntax form (fmxr) takes precedence.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* PSR field mask comes pre-shifted from the parser.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: defer the value to a fixup so out-of-range
	 rotated constants are diagnosed later.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
7703
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  /* Two-operand "mul Rd, Rm" means "mul Rd, Rm, Rd".  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm is unpredictable before ARMv6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
7719
7720 /* Long Multiply Parser
7721 UMULL RdLo, RdHi, Rm, Rs
7722 SMULL RdLo, RdHi, Rm, Rs
7723 UMLAL RdLo, RdHi, Rm, Rs
7724 SMLAL RdLo, RdHi, Rm, Rs. */
7725
7726 static void
7727 do_mull (void)
7728 {
7729 inst.instruction |= inst.operands[0].reg << 12;
7730 inst.instruction |= inst.operands[1].reg << 16;
7731 inst.instruction |= inst.operands[2].reg;
7732 inst.instruction |= inst.operands[3].reg << 8;
7733
7734 /* rdhi and rdlo must be different. */
7735 if (inst.operands[0].reg == inst.operands[1].reg)
7736 as_tsktsk (_("rdhi and rdlo must be different"));
7737
7738 /* rdhi, rdlo and rm must all be different before armv6. */
7739 if ((inst.operands[0].reg == inst.operands[2].reg
7740 || inst.operands[1].reg == inst.operands[2].reg)
7741 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7742 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7743 }
7744
static void
do_nop (void)
{
  /* With an operand, or on v6K and later, emit the architectural hint
     encoding; otherwise keep the legacy "mov r0, r0" style opcode from
     the opcode table.  */
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
7758
7759 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7760 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7761 Condition defaults to COND_ALWAYS.
7762 Error if Rd, Rn or Rm are R15. */
7763
static void
do_pkhbt (void)
{
  /* Rd in bits 12-15, Rn in bits 16-19, Rm in bits 0-3, optional
     shift encoded by the helper.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  if (inst.operands[3].present)
    encode_arm_shift (3);
}
7773
7774 /* ARM V6 PKHTB (Argument Parse). */
7775
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note Rm and Rn swap fields.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
7796
7797 /* ARMv5TE: Preload-Cache
7798
7799 PLD <addr_mode>
7800
7801 Syntactically, like LDR with B=1, W=0, L=1. */
7802
static void
do_pld (void)
{
  /* PLD takes only a plain pre-indexed address: no writeback,
     post-indexing or unindexed forms.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
7816
7817 /* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* Same addressing restrictions as PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI's encoding differs from PLD by a clear P bit.  */
  inst.instruction &= ~PRE_INDEX;
}
7832
static void
do_push_pop (void)
{
  /* Rewrite "push {regs}" / "pop {regs}" as the equivalent
     LDM/STM with sp! as the base register, then encode as LDM/STM.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  do_ldmstm ();
}
7843
7844 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7845 word at the specified address and the following word
7846 respectively.
7847 Unconditionally executed.
7848 Error if Rn is R15. */
7849
static void
do_rfe (void)
{
  /* Rn in bits 16-19, plus optional writeback.  NOTE(review): the
     header comment says "Error if Rn is R15" but no check is made
     here — presumably rejected during operand parsing; confirm.  */
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
7857
7858 /* ARM V6 ssat (argument parse). */
7859
static void
do_ssat (void)
{
  /* SSAT stores the saturate position minus one (range 1..32 encoded
     as 0..31), unlike USAT which stores it directly.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
7870
7871 /* ARM V6 usat (argument parse). */
7872
static void
do_usat (void)
{
  /* USAT stores the saturate position directly (range 0..31).  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
7883
7884 /* ARM V6 ssat16 (argument parse). */
7885
7886 static void
7887 do_ssat16 (void)
7888 {
7889 inst.instruction |= inst.operands[0].reg << 12;
7890 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
7891 inst.instruction |= inst.operands[2].reg;
7892 }
7893
7894 static void
7895 do_usat16 (void)
7896 {
7897 inst.instruction |= inst.operands[0].reg << 12;
7898 inst.instruction |= inst.operands[1].imm << 16;
7899 inst.instruction |= inst.operands[2].reg;
7900 }
7901
7902 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7903 preserving the other bits.
7904
7905 setend <endian_specifier>, where <endian_specifier> is either
7906 BE or LE. */
7907
static void
do_setend (void)
{
  /* Operand imm is nonzero for BE; bit 9 is the E bit.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
7914
static void
do_shift (void)
{
  /* Two-operand form "shift Rd, <amount>" shifts Rd in place.  */
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)	/* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
    }
  else
    /* Immediate shift amount: leave it to the fixup machinery so the
       range is validated.  */
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}
7932
static void
do_smc (void)
{
  /* SMC #<imm>: the immediate is range-checked in the fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}
7939
static void
do_swi (void)
{
  /* SWI/SVC #<imm>: the immediate is range-checked in the fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
7946
7947 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7948 SMLAxy{cond} Rd,Rm,Rs,Rn
7949 SMLAWy{cond} Rd,Rm,Rs,Rn
7950 Error if any register is R15. */
7951
7952 static void
7953 do_smla (void)
7954 {
7955 inst.instruction |= inst.operands[0].reg << 16;
7956 inst.instruction |= inst.operands[1].reg;
7957 inst.instruction |= inst.operands[2].reg << 8;
7958 inst.instruction |= inst.operands[3].reg << 12;
7959 }
7960
7961 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7962 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7963 Error if any register is R15.
7964 Warning if Rdlo == Rdhi. */
7965
7966 static void
7967 do_smlal (void)
7968 {
7969 inst.instruction |= inst.operands[0].reg << 12;
7970 inst.instruction |= inst.operands[1].reg << 16;
7971 inst.instruction |= inst.operands[2].reg;
7972 inst.instruction |= inst.operands[3].reg << 8;
7973
7974 if (inst.operands[0].reg == inst.operands[1].reg)
7975 as_tsktsk (_("rdhi and rdlo must be different"));
7976 }
7977
7978 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7979 SMULxy{cond} Rd,Rm,Rs
7980 Error if any register is R15. */
7981
7982 static void
7983 do_smul (void)
7984 {
7985 inst.instruction |= inst.operands[0].reg << 16;
7986 inst.instruction |= inst.operands[1].reg;
7987 inst.instruction |= inst.operands[2].reg << 8;
7988 }
7989
7990 /* ARM V6 srs (argument parse). The variable fields in the encoding are
7991 the same for both ARM and Thumb-2. */
7992
static void
do_srs (void)
{
  int reg;

  /* The base register defaults to sp and may only be sp.  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;   /* Target mode number.  */
  /* '!' may appear on either the base register or the mode operand.  */
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
8011
8012 /* ARM V6 strex (argument parse). */
8013
static void
do_strex (void)
{
  /* Only a plain [Rn] address with zero offset is legal.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register Rd must not overlap the value or base regs.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8037
static void
do_strexd (void)
{
  /* STREXD stores an even/odd register pair; the second register is
     implied if omitted.  */
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register must not overlap either source register or
     the base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
8059
8060 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
8061 extends it to 32-bits, and adds the result to a value in another
8062 register. You can specify a rotation by 0, 8, 16, or 24 bits
8063 before extracting the 16-bit value.
8064 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
8065 Condition defaults to COND_ALWAYS.
8066 Error if any register uses R15. */
8067
8068 static void
8069 do_sxtah (void)
8070 {
8071 inst.instruction |= inst.operands[0].reg << 12;
8072 inst.instruction |= inst.operands[1].reg << 16;
8073 inst.instruction |= inst.operands[2].reg;
8074 inst.instruction |= inst.operands[3].imm << 10;
8075 }
8076
8077 /* ARM V6 SXTH.
8078
8079 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
8080 Condition defaults to COND_ALWAYS.
8081 Error if any register uses R15. */
8082
8083 static void
8084 do_sxth (void)
8085 {
8086 inst.instruction |= inst.operands[0].reg << 12;
8087 inst.instruction |= inst.operands[1].reg;
8088 inst.instruction |= inst.operands[2].imm << 10;
8089 }
8090 \f
8091 /* VFP instructions. In a logical order: SP variant first, monad
8092 before dyad, arithmetic then move then load/store. */
8093
/* Single-precision monadic operation: Sd, Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
8100
/* Single-precision dyadic operation: Sd, Sn, Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
8108
/* Single-precision compare against zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
8114
/* Conversion with a double-precision destination and single-precision
   source: Dd, Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
8121
/* Conversion with a single-precision destination and double-precision
   source: Sd, Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
8128
/* Transfer from a VFP single register to a core register:
   Rd in bits 12-15, Sn in the VFP Sn field.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
8135
/* Transfer a pair of VFP single registers to two core registers.
   Operand 2's imm holds the register-list length from the parser.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
8145
/* Transfer from a core register to a VFP single register.  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
8152
/* Transfer two core registers to a pair of VFP single registers.
   Operand 0's imm holds the register-list length from the parser.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8162
/* Single-precision load/store: Sd plus a coprocessor address (operand 1),
   with writeback and unindexed forms rejected by encode_arm_cp_address.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
8169
/* Double-precision load/store: Dd plus a coprocessor address.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
8176
8177
/* Common encoder for single-precision load/store multiple.  Operand 0 is
   the base register (with optional writeback), operand 1 carries the first
   register in .reg and the register count in .imm.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Only the IA form may omit writeback; DB must update the base.  */
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}
8190
/* Common encoder for double-precision load/store multiple.  The offset
   field counts words, so the register count is doubled; the FLDMX/FSTMX
   variants add one extra word as the architecture requires.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Only the IA/IAX forms may omit writeback.  */
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* Two words per D register; X variants store one more.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
8211
/* FLDMIAS/FSTMIAS.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
8217
/* FLDMDBS/FSTMDBS.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
8223
/* FLDMIAD/FSTMIAD.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
8229
/* FLDMDBD/FSTMDBD.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
8235
/* FLDMIAX/FSTMIAX (unknown-precision form).  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
8241
/* FLDMDBX/FSTMDBX (unknown-precision form).  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
8247
/* Double-precision two-operand form: Dd, Dm.  */
static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
8254
/* Double-precision two-operand form with operand 0 in the Dn field.  */
static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
8261
/* Double-precision two-operand form: Dd, Dn.  */
static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
8268
/* Double-precision dyadic form: Dd, Dn, Dm.  */
static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
8276
/* Double-precision form encoding only Dd.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
8282
/* Double-precision form with operands in Dm, Dd, Dn order.  */
static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
8290
8291 /* VFPv3 instructions. */
/* VFPv3 VMOV-immediate, single precision: the 8-bit encoded constant is
   split with its high nibble in bits 16-19 and low nibble in bits 0-3.  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
8299
/* VFPv3 VMOV-immediate, double precision: same split-nibble immediate
   layout as the single-precision form.  */
static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
8307
8308 static void
8309 vfp_conv (int srcsize)
8310 {
8311 unsigned immbits = srcsize - inst.operands[1].imm;
8312 inst.instruction |= (immbits & 1) << 5;
8313 inst.instruction |= (immbits >> 1);
8314 }
8315
/* Single-precision fixed-point conversion, 16-bit integer size.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
8322
/* Double-precision fixed-point conversion, 16-bit integer size.  */
static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
8329
/* Single-precision fixed-point conversion, 32-bit integer size.  */
static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
8336
/* Double-precision fixed-point conversion, 32-bit integer size.  */
static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
8343 \f
8344 /* FPA instructions. Also in a logical order. */
8345
8346 static void
8347 do_fpa_cmp (void)
8348 {
8349 inst.instruction |= inst.operands[0].reg << 16;
8350 inst.instruction |= inst.operands[1].reg;
8351 }
8352
/* FPA LFM/SFM.  Operand 1's imm is the register count (1-4), encoded in
   the CP_T_X/CP_T_Y bits; operand 2 is the memory address.  The "ea"/"fd"
   stack forms are emulated with explicit offsets since the hardware has
   no real stacking modes.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;	/* Count of 4 is encoded as both bits clear.  */
    default: abort ();	/* Parser guarantees 1-4.  */
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register transfer occupies 12 bytes of memory.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Descending-stack writeback becomes a post-indexed access.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
8391 \f
8392 /* iWMMXt instructions: strictly in alphabetical order. */
8393
/* TANDC/TORC/TEXTRC: the sole operand must be r15 (the condition flags
   destination); the fixed fields are already in the base opcode.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
8399
/* TEXTRC: Rd in bits 12-15, lane immediate in bits 0-2.  */
static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}
8406
/* TEXTRM: Rd in bits 12-15, wRn in bits 16-19, lane in bits 0-2.  */
static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}
8414
/* TINSR: wRd in bits 16-19, Rn in bits 12-15, lane in bits 0-2.  */
static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}
8422
/* TMIA: accumulator in bits 5-8, Rm in bits 0-3, Rs in bits 12-15.  */
static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
8430
8431 static void
8432 do_iwmmxt_waligni (void)
8433 {
8434 inst.instruction |= inst.operands[0].reg << 12;
8435 inst.instruction |= inst.operands[1].reg << 16;
8436 inst.instruction |= inst.operands[2].reg;
8437 inst.instruction |= inst.operands[3].imm << 20;
8438 }
8439
8440 static void
8441 do_iwmmxt_wmerge (void)
8442 {
8443 inst.instruction |= inst.operands[0].reg << 12;
8444 inst.instruction |= inst.operands[1].reg << 16;
8445 inst.instruction |= inst.operands[2].reg;
8446 inst.instruction |= inst.operands[3].imm << 21;
8447 }
8448
static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  /* Source register supplies both WOR operands.  */
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
8457
/* WLDRB/WLDRH/WSTRB/WSTRH: byte/halfword forms use a scaled offset
   relocation, selected per Thumb or ARM mode.  */
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}
8469
/* WLDRW/WSTRW: word transfers, either to a wR register or to a control
   register; the control form is unconditional (0xf condition).  */
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8483
/* WLDRD/WSTRD: doubleword transfers.  On iWMMXt2 a register-offset
   address selects the WLDRD/WSTRD [Rn, +/-Rm] encoding, which is built
   by hand here; otherwise the normal coprocessor address form is used.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Strip the coprocessor-form fields, force the unconditional
	 encoding, then rebuild index/up/writeback and registers.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xf << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
8506
/* WSHUFH: the 8-bit shuffle immediate is split with its high nibble in
   bits 20-23 and its low nibble in bits 0-3.  */
static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}
8515
8516 static void
8517 do_iwmmxt_wzero (void)
8518 {
8519 /* WZERO reg is an alias for WANDN reg, reg, reg. */
8520 inst.instruction |= inst.operands[0].reg;
8521 inst.instruction |= inst.operands[0].reg << 12;
8522 inst.instruction |= inst.operands[0].reg << 16;
8523 }
8524
/* iWMMXt shift instructions that take either three wR registers or, on
   iWMMXt2, a 5-bit shift immediate.  A shift of #0 has no direct
   encoding, so it is rewritten as an equivalent full-width rotate (or as
   WOR for the doubleword forms).  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 identify the operation size/kind.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Immediate form: unconditional encoding, bit 4 of the immediate in
       bit 8 and the low four bits in bits 0-3.  */
    inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
8574 \f
8575 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8576 operations first, then control, shift, and load/store. */
8577
8578 /* Insns like "foo X,Y,Z". */
8579
static void
do_mav_triple (void)
{
  /* X in bits 16-19, Y in bits 0-3, Z in bits 12-15.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
8587
8588 /* Insns like "foo W,X,Y,Z".
8589 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8590
static void
do_mav_quad (void)
{
  /* W (accumulator) in bits 5-6, X in 12-15, Y in 16-19, Z in 0-3.  */
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}
8599
8600 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
static void
do_mav_dspsc (void)
{
  /* Only the MVDX source register is variable; DSPSC is implicit.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
8606
8607 /* Maverick shift immediate instructions.
8608 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8609 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8610
static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.  */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
8626 \f
8627 /* XScale instructions. Also sorted arithmetic before move. */
8628
8629 /* Xscale multiply-accumulate (argument parse)
8630 MIAcc acc0,Rm,Rs
8631 MIAPHcc acc0,Rm,Rs
8632 MIAxycc acc0,Rm,Rs. */
8633
static void
do_xsc_mia (void)
{
  /* The accumulator (operand 0) is implicit; Rm in bits 0-3,
     Rs in bits 12-15.  */
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
8640
8641 /* Xscale move-accumulator-register (argument parse)
8642
8643 MARcc acc0,RdLo,RdHi. */
8644
static void
do_xsc_mar (void)
{
  /* Accumulator implicit; RdLo in bits 12-15, RdHi in bits 16-19.  */
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8651
8652 /* Xscale move-register-accumulator (argument parse)
8653
8654 MRAcc RdLo,RdHi,acc0. */
8655
8656 static void
8657 do_xsc_mra (void)
8658 {
8659 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
8660 inst.instruction |= inst.operands[0].reg << 12;
8661 inst.instruction |= inst.operands[1].reg << 16;
8662 }
8663 \f
8664 /* Encoding functions relevant only to Thumb. */
8665
8666 /* inst.operands[i] is a shifted-register operand; encode
8667 it into inst.instruction in the format used by Thumb32. */
8668
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  /* Thumb-2 data-processing instructions take only immediate shifts.  */
  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      /* LSL allows 0-31; LSR/ASR allow 1-32; ROR allows 1-31.  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* Amount 0 means no shift; a shift of 32 is encoded as 0.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* Shift type in bits 4-5; amount split: high 3 bits in 12-14,
	 low 2 bits in 6-7.  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
8700
8701
8702 /* inst.operands[i] was set up by parse_address. Encode it into a
8703 Thumb32 format load or store instruction. Reject forms that cannot
8704 be used with such instructions. If is_t is true, reject forms that
8705 cannot be used with a T instruction; if is_d is true, reject forms
8706 that cannot be used with a D instruction. */
8707
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  /* Literal-pool (=N) addresses must have been converted earlier.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #n}] register-offset form.  */
      constraint (is_pc, _("cannot use register index with PC-relative addressing"));
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  /* LSL amount 0-3 goes in bits 4-5.  */
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #imm]{!} pre-indexed form.  */
      constraint (is_pc && inst.operands[i].writeback,
		  _("cannot use writeback with PC-relative addressing"));
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));

      if (is_d)
	{
	  /* Doubleword: P bit 24, W bit 21.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  /* Word/byte/halfword: P/U in bits 10-11, W in bit 8.  */
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #imm post-indexed form; writeback is implied.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
8778
8779 /* Table of Thumb instructions which exist in both 16- and 32-bit
8780 encodings (the latter only in post-V6T2 cores). The index is the
8781 value used in the insns table below. When there is more than one
8782 possible 16-bit encoding for the instruction, this table always
8783 holds variant (1).
8784 Also contains several pseudo-instructions used during relaxation. */
8785 #define T16_32_TAB \
8786 X(_adc, 4140, eb400000), \
8787 X(_adcs, 4140, eb500000), \
8788 X(_add, 1c00, eb000000), \
8789 X(_adds, 1c00, eb100000), \
8790 X(_addi, 0000, f1000000), \
8791 X(_addis, 0000, f1100000), \
8792 X(_add_pc,000f, f20f0000), \
8793 X(_add_sp,000d, f10d0000), \
8794 X(_adr, 000f, f20f0000), \
8795 X(_and, 4000, ea000000), \
8796 X(_ands, 4000, ea100000), \
8797 X(_asr, 1000, fa40f000), \
8798 X(_asrs, 1000, fa50f000), \
8799 X(_b, e000, f000b000), \
8800 X(_bcond, d000, f0008000), \
8801 X(_bic, 4380, ea200000), \
8802 X(_bics, 4380, ea300000), \
8803 X(_cmn, 42c0, eb100f00), \
8804 X(_cmp, 2800, ebb00f00), \
8805 X(_cpsie, b660, f3af8400), \
8806 X(_cpsid, b670, f3af8600), \
8807 X(_cpy, 4600, ea4f0000), \
8808 X(_dec_sp,80dd, f1ad0d00), \
8809 X(_eor, 4040, ea800000), \
8810 X(_eors, 4040, ea900000), \
8811 X(_inc_sp,00dd, f10d0d00), \
8812 X(_ldmia, c800, e8900000), \
8813 X(_ldr, 6800, f8500000), \
8814 X(_ldrb, 7800, f8100000), \
8815 X(_ldrh, 8800, f8300000), \
8816 X(_ldrsb, 5600, f9100000), \
8817 X(_ldrsh, 5e00, f9300000), \
8818 X(_ldr_pc,4800, f85f0000), \
8819 X(_ldr_pc2,4800, f85f0000), \
8820 X(_ldr_sp,9800, f85d0000), \
8821 X(_lsl, 0000, fa00f000), \
8822 X(_lsls, 0000, fa10f000), \
8823 X(_lsr, 0800, fa20f000), \
8824 X(_lsrs, 0800, fa30f000), \
8825 X(_mov, 2000, ea4f0000), \
8826 X(_movs, 2000, ea5f0000), \
8827 X(_mul, 4340, fb00f000), \
8828 X(_muls, 4340, ffffffff), /* no 32b muls */ \
8829 X(_mvn, 43c0, ea6f0000), \
8830 X(_mvns, 43c0, ea7f0000), \
8831 X(_neg, 4240, f1c00000), /* rsb #0 */ \
8832 X(_negs, 4240, f1d00000), /* rsbs #0 */ \
8833 X(_orr, 4300, ea400000), \
8834 X(_orrs, 4300, ea500000), \
8835 X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
8836 X(_push, b400, e92d0000), /* stmdb sp!,... */ \
8837 X(_rev, ba00, fa90f080), \
8838 X(_rev16, ba40, fa90f090), \
8839 X(_revsh, bac0, fa90f0b0), \
8840 X(_ror, 41c0, fa60f000), \
8841 X(_rors, 41c0, fa70f000), \
8842 X(_sbc, 4180, eb600000), \
8843 X(_sbcs, 4180, eb700000), \
8844 X(_stmia, c000, e8800000), \
8845 X(_str, 6000, f8400000), \
8846 X(_strb, 7000, f8000000), \
8847 X(_strh, 8000, f8200000), \
8848 X(_str_sp,9000, f84d0000), \
8849 X(_sub, 1e00, eba00000), \
8850 X(_subs, 1e00, ebb00000), \
8851 X(_subi, 8000, f1a00000), \
8852 X(_subis, 8000, f1b00000), \
8853 X(_sxtb, b240, fa4ff080), \
8854 X(_sxth, b200, fa0ff080), \
8855 X(_tst, 4200, ea100f00), \
8856 X(_uxtb, b2c0, fa5ff080), \
8857 X(_uxth, b280, fa1ff080), \
8858 X(_nop, bf00, f3af8000), \
8859 X(_yield, bf10, f3af8001), \
8860 X(_wfe, bf20, f3af8002), \
8861 X(_wfi, bf30, f3af8003), \
8862 X(_sev, bf40, f3af8004),
8863
8864 /* To catch errors in encoding functions, the codes are all offset by
8865 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
8866 as 16-bit instructions. */
8867 #define X(a,b,c) T_MNEM##a
8868 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
8869 #undef X
8870
8871 #define X(a,b,c) 0x##b
8872 static const unsigned short thumb_op16[] = { T16_32_TAB };
8873 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
8874 #undef X
8875
8876 #define X(a,b,c) 0x##c
8877 static const unsigned int thumb_op32[] = { T16_32_TAB };
8878 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
8879 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
8880 #undef X
8881 #undef T16_32_TAB
8882
8883 /* Thumb instruction encoders, in alphabetical order. */
8884
8885 /* ADDW or SUBW. */
8886
static void
do_t_add_sub_w (void)
{
  int Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
     is the SP-{plus,minus}-immediate form of the instruction.  */
  if (Rn == REG_SP)
    constraint (Rd == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rd);

  /* Rn in bits 16-19, Rd in bits 8-11; the 12-bit immediate is filled in
     by the relocation machinery.  */
  inst.instruction |= (Rn << 16) | (Rd << 8);
  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
}
8905
8906 /* Parse an add or subtract instruction. We get here with inst.instruction
8907 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
8908
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  /* Writing PC changes control flow; it must end an IT block.  */
  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* In unified syntax, a 16-bit encoding is only possible when the
	 flag-setting behavior matches the IT context.  */
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  /* Allow relaxing to 32 bits unless a 16-bit size was
		     explicitly requested.  */
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* 32-bit encoding required.  */
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return form SUBS PC, LR, #const
		     may target PC.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			     _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register operand.  */
	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* ADD_HI needs Rd to be the duplicated operand;
			 swap so the other source goes in the Rm field.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (old Thumb) syntax: only 16-bit encodings, and no
	 explicit flag-setting mnemonics.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
9109
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;

      inst.instruction |= Rd << 4;
    }
}
9144
9145 /* Arithmetic instructions for which there is just one 16-bit
9146 instruction encoding, and it allows only two low registers.
9147 For maximal compatibility with ARM syntax, we allow three register
9148 operands even when Thumb-32 instructions are not available, as long
9149 as the first two are identical. For instance, both "sbc r0,r1" and
9150 "sbc r0,r0,r1" are allowed. */
9151 static void
9152 do_t_arit3 (void)
9153 {
9154 int Rd, Rs, Rn;
9155
9156 Rd = inst.operands[0].reg;
9157 Rs = (inst.operands[1].present
9158 ? inst.operands[1].reg /* Rd, Rs, foo */
9159 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9160 Rn = inst.operands[2].reg;
9161
9162 reject_bad_reg (Rd);
9163 reject_bad_reg (Rs);
9164 if (inst.operands[2].isreg)
9165 reject_bad_reg (Rn);
9166
9167 if (unified_syntax)
9168 {
9169 if (!inst.operands[2].isreg)
9170 {
9171 /* For an immediate, we always generate a 32-bit opcode;
9172 section relaxation will shrink it later if possible. */
9173 inst.instruction = THUMB_OP32 (inst.instruction);
9174 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9175 inst.instruction |= Rd << 8;
9176 inst.instruction |= Rs << 16;
9177 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9178 }
9179 else
9180 {
9181 bfd_boolean narrow;
9182
9183 /* See if we can do this with a 16-bit instruction. */
9184 if (THUMB_SETS_FLAGS (inst.instruction))
9185 narrow = !in_it_block ();
9186 else
9187 narrow = in_it_block ();
9188
9189 if (Rd > 7 || Rn > 7 || Rs > 7)
9190 narrow = FALSE;
9191 if (inst.operands[2].shifted)
9192 narrow = FALSE;
9193 if (inst.size_req == 4)
9194 narrow = FALSE;
9195
9196 if (narrow
9197 && Rd == Rs)
9198 {
9199 inst.instruction = THUMB_OP16 (inst.instruction);
9200 inst.instruction |= Rd;
9201 inst.instruction |= Rn << 3;
9202 return;
9203 }
9204
9205 /* If we get here, it can't be done in 16 bits. */
9206 constraint (inst.operands[2].shifted
9207 && inst.operands[2].immisreg,
9208 _("shift must be constant"));
9209 inst.instruction = THUMB_OP32 (inst.instruction);
9210 inst.instruction |= Rd << 8;
9211 inst.instruction |= Rs << 16;
9212 encode_thumb32_shifted_operand (2);
9213 }
9214 }
9215 else
9216 {
9217 /* On its face this is a lie - the instruction does set the
9218 flags. However, the only supported mnemonic in this mode
9219 says it doesn't. */
9220 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9221
9222 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
9223 _("unshifted register required"));
9224 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
9225 constraint (Rd != Rs,
9226 _("dest and source1 must be the same register"));
9227
9228 inst.instruction = THUMB_OP16 (inst.instruction);
9229 inst.instruction |= Rd;
9230 inst.instruction |= Rn << 3;
9231 }
9232 }
9233
9234 /* Similarly, but for instructions where the arithmetic operation is
9235 commutative, so we can allow either of them to be different from
9236 the destination operand in a 16-bit instruction. For instance, all
9237 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
9238 accepted. */
9239 static void
9240 do_t_arit3c (void)
9241 {
9242 int Rd, Rs, Rn;
9243
9244 Rd = inst.operands[0].reg;
9245 Rs = (inst.operands[1].present
9246 ? inst.operands[1].reg /* Rd, Rs, foo */
9247 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9248 Rn = inst.operands[2].reg;
9249
9250 reject_bad_reg (Rd);
9251 reject_bad_reg (Rs);
9252 if (inst.operands[2].isreg)
9253 reject_bad_reg (Rn);
9254
9255 if (unified_syntax)
9256 {
9257 if (!inst.operands[2].isreg)
9258 {
9259 /* For an immediate, we always generate a 32-bit opcode;
9260 section relaxation will shrink it later if possible. */
9261 inst.instruction = THUMB_OP32 (inst.instruction);
9262 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9263 inst.instruction |= Rd << 8;
9264 inst.instruction |= Rs << 16;
9265 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9266 }
9267 else
9268 {
9269 bfd_boolean narrow;
9270
9271 /* See if we can do this with a 16-bit instruction. */
9272 if (THUMB_SETS_FLAGS (inst.instruction))
9273 narrow = !in_it_block ();
9274 else
9275 narrow = in_it_block ();
9276
9277 if (Rd > 7 || Rn > 7 || Rs > 7)
9278 narrow = FALSE;
9279 if (inst.operands[2].shifted)
9280 narrow = FALSE;
9281 if (inst.size_req == 4)
9282 narrow = FALSE;
9283
9284 if (narrow)
9285 {
9286 if (Rd == Rs)
9287 {
9288 inst.instruction = THUMB_OP16 (inst.instruction);
9289 inst.instruction |= Rd;
9290 inst.instruction |= Rn << 3;
9291 return;
9292 }
9293 if (Rd == Rn)
9294 {
9295 inst.instruction = THUMB_OP16 (inst.instruction);
9296 inst.instruction |= Rd;
9297 inst.instruction |= Rs << 3;
9298 return;
9299 }
9300 }
9301
9302 /* If we get here, it can't be done in 16 bits. */
9303 constraint (inst.operands[2].shifted
9304 && inst.operands[2].immisreg,
9305 _("shift must be constant"));
9306 inst.instruction = THUMB_OP32 (inst.instruction);
9307 inst.instruction |= Rd << 8;
9308 inst.instruction |= Rs << 16;
9309 encode_thumb32_shifted_operand (2);
9310 }
9311 }
9312 else
9313 {
9314 /* On its face this is a lie - the instruction does set the
9315 flags. However, the only supported mnemonic in this mode
9316 says it doesn't. */
9317 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9318
9319 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
9320 _("unshifted register required"));
9321 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
9322
9323 inst.instruction = THUMB_OP16 (inst.instruction);
9324 inst.instruction |= Rd;
9325
9326 if (Rd == Rs)
9327 inst.instruction |= Rn << 3;
9328 else if (Rd == Rn)
9329 inst.instruction |= Rs << 3;
9330 else
9331 constraint (1, _("dest must overlap one source register"));
9332 }
9333 }
9334
9335 static void
9336 do_t_barrier (void)
9337 {
9338 if (inst.operands[0].present)
9339 {
9340 constraint ((inst.instruction & 0xf0) != 0x40
9341 && inst.operands[0].imm != 0xf,
9342 _("bad barrier type"));
9343 inst.instruction |= inst.operands[0].imm;
9344 }
9345 else
9346 inst.instruction |= 0xf;
9347 }
9348
9349 static void
9350 do_t_bfc (void)
9351 {
9352 unsigned Rd;
9353 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
9354 constraint (msb > 32, _("bit-field extends past end of register"));
9355 /* The instruction encoding stores the LSB and MSB,
9356 not the LSB and width. */
9357 Rd = inst.operands[0].reg;
9358 reject_bad_reg (Rd);
9359 inst.instruction |= Rd << 8;
9360 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
9361 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
9362 inst.instruction |= msb - 1;
9363 }
9364
9365 static void
9366 do_t_bfi (void)
9367 {
9368 int Rd, Rn;
9369 unsigned int msb;
9370
9371 Rd = inst.operands[0].reg;
9372 reject_bad_reg (Rd);
9373
9374 /* #0 in second position is alternative syntax for bfc, which is
9375 the same instruction but with REG_PC in the Rm field. */
9376 if (!inst.operands[1].isreg)
9377 Rn = REG_PC;
9378 else
9379 {
9380 Rn = inst.operands[1].reg;
9381 reject_bad_reg (Rn);
9382 }
9383
9384 msb = inst.operands[2].imm + inst.operands[3].imm;
9385 constraint (msb > 32, _("bit-field extends past end of register"));
9386 /* The instruction encoding stores the LSB and MSB,
9387 not the LSB and width. */
9388 inst.instruction |= Rd << 8;
9389 inst.instruction |= Rn << 16;
9390 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
9391 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
9392 inst.instruction |= msb - 1;
9393 }
9394
9395 static void
9396 do_t_bfx (void)
9397 {
9398 unsigned Rd, Rn;
9399
9400 Rd = inst.operands[0].reg;
9401 Rn = inst.operands[1].reg;
9402
9403 reject_bad_reg (Rd);
9404 reject_bad_reg (Rn);
9405
9406 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
9407 _("bit-field extends past end of register"));
9408 inst.instruction |= Rd << 8;
9409 inst.instruction |= Rn << 16;
9410 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
9411 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
9412 inst.instruction |= inst.operands[3].imm - 1;
9413 }
9414
9415 /* ARM V5 Thumb BLX (argument parse)
9416 BLX <target_addr> which is BLX(1)
9417 BLX <Rm> which is BLX(2)
9418 Unfortunately, there are two different opcodes for this mnemonic.
9419 So, the insns[].value is not used, and the code here zaps values
9420 into inst.instruction.
9421
9422 ??? How to take advantage of the additional two bits of displacement
9423 available in Thumb32 mode? Need new relocation? */
9424
9425 static void
9426 do_t_blx (void)
9427 {
9428 set_it_insn_type_last ();
9429
9430 if (inst.operands[0].isreg)
9431 {
9432 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9433 /* We have a register, so this is BLX(2). */
9434 inst.instruction |= inst.operands[0].reg << 3;
9435 }
9436 else
9437 {
9438 /* No register. This must be BLX(1). */
9439 inst.instruction = 0xf000e800;
9440 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
9441 inst.reloc.pc_rel = 1;
9442 }
9443 }
9444
9445 static void
9446 do_t_branch (void)
9447 {
9448 int opcode;
9449 int cond;
9450
9451 cond = inst.cond;
9452 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
9453
9454 if (in_it_block ())
9455 {
9456 /* Conditional branches inside IT blocks are encoded as unconditional
9457 branches. */
9458 cond = COND_ALWAYS;
9459 }
9460 else
9461 cond = inst.cond;
9462
9463 if (cond != COND_ALWAYS)
9464 opcode = T_MNEM_bcond;
9465 else
9466 opcode = inst.instruction;
9467
9468 if (unified_syntax && inst.size_req == 4)
9469 {
9470 inst.instruction = THUMB_OP32(opcode);
9471 if (cond == COND_ALWAYS)
9472 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
9473 else
9474 {
9475 gas_assert (cond != 0xF);
9476 inst.instruction |= cond << 22;
9477 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
9478 }
9479 }
9480 else
9481 {
9482 inst.instruction = THUMB_OP16(opcode);
9483 if (cond == COND_ALWAYS)
9484 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
9485 else
9486 {
9487 inst.instruction |= cond << 8;
9488 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
9489 }
9490 /* Allow section relaxation. */
9491 if (unified_syntax && inst.size_req != 2)
9492 inst.relax = opcode;
9493 }
9494
9495 inst.reloc.pc_rel = 1;
9496 }
9497
9498 static void
9499 do_t_bkpt (void)
9500 {
9501 constraint (inst.cond != COND_ALWAYS,
9502 _("instruction is always unconditional"));
9503 if (inst.operands[0].present)
9504 {
9505 constraint (inst.operands[0].imm > 255,
9506 _("immediate value out of range"));
9507 inst.instruction |= inst.operands[0].imm;
9508 set_it_insn_type (NEUTRAL_IT_INSN);
9509 }
9510 }
9511
9512 static void
9513 do_t_branch23 (void)
9514 {
9515 set_it_insn_type_last ();
9516 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
9517 inst.reloc.pc_rel = 1;
9518
9519 #if defined(OBJ_COFF)
9520 /* If the destination of the branch is a defined symbol which does not have
9521 the THUMB_FUNC attribute, then we must be calling a function which has
9522 the (interfacearm) attribute. We look for the Thumb entry point to that
9523 function and change the branch to refer to that function instead. */
9524 if ( inst.reloc.exp.X_op == O_symbol
9525 && inst.reloc.exp.X_add_symbol != NULL
9526 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
9527 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
9528 inst.reloc.exp.X_add_symbol =
9529 find_real_start (inst.reloc.exp.X_add_symbol);
9530 #endif
9531 }
9532
9533 static void
9534 do_t_bx (void)
9535 {
9536 set_it_insn_type_last ();
9537 inst.instruction |= inst.operands[0].reg << 3;
9538 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
9539 should cause the alignment to be checked once it is known. This is
9540 because BX PC only works if the instruction is word aligned. */
9541 }
9542
9543 static void
9544 do_t_bxj (void)
9545 {
9546 int Rm;
9547
9548 set_it_insn_type_last ();
9549 Rm = inst.operands[0].reg;
9550 reject_bad_reg (Rm);
9551 inst.instruction |= Rm << 16;
9552 }
9553
9554 static void
9555 do_t_clz (void)
9556 {
9557 unsigned Rd;
9558 unsigned Rm;
9559
9560 Rd = inst.operands[0].reg;
9561 Rm = inst.operands[1].reg;
9562
9563 reject_bad_reg (Rd);
9564 reject_bad_reg (Rm);
9565
9566 inst.instruction |= Rd << 8;
9567 inst.instruction |= Rm << 16;
9568 inst.instruction |= Rm;
9569 }
9570
9571 static void
9572 do_t_cps (void)
9573 {
9574 set_it_insn_type (OUTSIDE_IT_INSN);
9575 inst.instruction |= inst.operands[0].imm;
9576 }
9577
9578 static void
9579 do_t_cpsi (void)
9580 {
9581 set_it_insn_type (OUTSIDE_IT_INSN);
9582 if (unified_syntax
9583 && (inst.operands[1].present || inst.size_req == 4)
9584 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
9585 {
9586 unsigned int imod = (inst.instruction & 0x0030) >> 4;
9587 inst.instruction = 0xf3af8000;
9588 inst.instruction |= imod << 9;
9589 inst.instruction |= inst.operands[0].imm << 5;
9590 if (inst.operands[1].present)
9591 inst.instruction |= 0x100 | inst.operands[1].imm;
9592 }
9593 else
9594 {
9595 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
9596 && (inst.operands[0].imm & 4),
9597 _("selected processor does not support 'A' form "
9598 "of this instruction"));
9599 constraint (inst.operands[1].present || inst.size_req == 4,
9600 _("Thumb does not support the 2-argument "
9601 "form of this instruction"));
9602 inst.instruction |= inst.operands[0].imm;
9603 }
9604 }
9605
9606 /* THUMB CPY instruction (argument parse). */
9607
9608 static void
9609 do_t_cpy (void)
9610 {
9611 if (inst.size_req == 4)
9612 {
9613 inst.instruction = THUMB_OP32 (T_MNEM_mov);
9614 inst.instruction |= inst.operands[0].reg << 8;
9615 inst.instruction |= inst.operands[1].reg;
9616 }
9617 else
9618 {
9619 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9620 inst.instruction |= (inst.operands[0].reg & 0x7);
9621 inst.instruction |= inst.operands[1].reg << 3;
9622 }
9623 }
9624
9625 static void
9626 do_t_cbz (void)
9627 {
9628 set_it_insn_type (OUTSIDE_IT_INSN);
9629 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9630 inst.instruction |= inst.operands[0].reg;
9631 inst.reloc.pc_rel = 1;
9632 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
9633 }
9634
9635 static void
9636 do_t_dbg (void)
9637 {
9638 inst.instruction |= inst.operands[0].imm;
9639 }
9640
9641 static void
9642 do_t_div (void)
9643 {
9644 unsigned Rd, Rn, Rm;
9645
9646 Rd = inst.operands[0].reg;
9647 Rn = (inst.operands[1].present
9648 ? inst.operands[1].reg : Rd);
9649 Rm = inst.operands[2].reg;
9650
9651 reject_bad_reg (Rd);
9652 reject_bad_reg (Rn);
9653 reject_bad_reg (Rm);
9654
9655 inst.instruction |= Rd << 8;
9656 inst.instruction |= Rn << 16;
9657 inst.instruction |= Rm;
9658 }
9659
9660 static void
9661 do_t_hint (void)
9662 {
9663 if (unified_syntax && inst.size_req == 4)
9664 inst.instruction = THUMB_OP32 (inst.instruction);
9665 else
9666 inst.instruction = THUMB_OP16 (inst.instruction);
9667 }
9668
9669 static void
9670 do_t_it (void)
9671 {
9672 unsigned int cond = inst.operands[0].imm;
9673
9674 set_it_insn_type (IT_INSN);
9675 now_it.mask = (inst.instruction & 0xf) | 0x10;
9676 now_it.cc = cond;
9677
9678 /* If the condition is a negative condition, invert the mask. */
9679 if ((cond & 0x1) == 0x0)
9680 {
9681 unsigned int mask = inst.instruction & 0x000f;
9682
9683 if ((mask & 0x7) == 0)
9684 /* no conversion needed */;
9685 else if ((mask & 0x3) == 0)
9686 mask ^= 0x8;
9687 else if ((mask & 0x1) == 0)
9688 mask ^= 0xC;
9689 else
9690 mask ^= 0xE;
9691
9692 inst.instruction &= 0xfff0;
9693 inst.instruction |= mask;
9694 }
9695
9696 inst.instruction |= cond << 4;
9697 }
9698
9699 /* Helper function used for both push/pop and ldm/stm. */
9700 static void
9701 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
9702 {
9703 bfd_boolean load;
9704
9705 load = (inst.instruction & (1 << 20)) != 0;
9706
9707 if (mask & (1 << 13))
9708 inst.error = _("SP not allowed in register list");
9709 if (load)
9710 {
9711 if (mask & (1 << 15))
9712 {
9713 if (mask & (1 << 14))
9714 inst.error = _("LR and PC should not both be in register list");
9715 else
9716 set_it_insn_type_last ();
9717 }
9718
9719 if ((mask & (1 << base)) != 0
9720 && writeback)
9721 as_warn (_("base register should not be in register list "
9722 "when written back"));
9723 }
9724 else
9725 {
9726 if (mask & (1 << 15))
9727 inst.error = _("PC not allowed in register list");
9728
9729 if (mask & (1 << base))
9730 as_warn (_("value stored for r%d is UNPREDICTABLE"), base);
9731 }
9732
9733 if ((mask & (mask - 1)) == 0)
9734 {
9735 /* Single register transfers implemented as str/ldr. */
9736 if (writeback)
9737 {
9738 if (inst.instruction & (1 << 23))
9739 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
9740 else
9741 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
9742 }
9743 else
9744 {
9745 if (inst.instruction & (1 << 23))
9746 inst.instruction = 0x00800000; /* ia -> [base] */
9747 else
9748 inst.instruction = 0x00000c04; /* db -> [base, #-4] */
9749 }
9750
9751 inst.instruction |= 0xf8400000;
9752 if (load)
9753 inst.instruction |= 0x00100000;
9754
9755 mask = ffs (mask) - 1;
9756 mask <<= 12;
9757 }
9758 else if (writeback)
9759 inst.instruction |= WRITE_BACK;
9760
9761 inst.instruction |= mask;
9762 inst.instruction |= base << 16;
9763 }
9764
9765 static void
9766 do_t_ldmstm (void)
9767 {
9768 /* This really doesn't seem worth it. */
9769 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9770 _("expression too complex"));
9771 constraint (inst.operands[1].writeback,
9772 _("Thumb load/store multiple does not support {reglist}^"));
9773
9774 if (unified_syntax)
9775 {
9776 bfd_boolean narrow;
9777 unsigned mask;
9778
9779 narrow = FALSE;
9780 /* See if we can use a 16-bit instruction. */
9781 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
9782 && inst.size_req != 4
9783 && !(inst.operands[1].imm & ~0xff))
9784 {
9785 mask = 1 << inst.operands[0].reg;
9786
9787 if (inst.operands[0].reg <= 7
9788 && (inst.instruction == T_MNEM_stmia
9789 ? inst.operands[0].writeback
9790 : (inst.operands[0].writeback
9791 == !(inst.operands[1].imm & mask))))
9792 {
9793 if (inst.instruction == T_MNEM_stmia
9794 && (inst.operands[1].imm & mask)
9795 && (inst.operands[1].imm & (mask - 1)))
9796 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9797 inst.operands[0].reg);
9798
9799 inst.instruction = THUMB_OP16 (inst.instruction);
9800 inst.instruction |= inst.operands[0].reg << 8;
9801 inst.instruction |= inst.operands[1].imm;
9802 narrow = TRUE;
9803 }
9804 else if (inst.operands[0] .reg == REG_SP
9805 && inst.operands[0].writeback)
9806 {
9807 inst.instruction = THUMB_OP16 (inst.instruction == T_MNEM_stmia
9808 ? T_MNEM_push : T_MNEM_pop);
9809 inst.instruction |= inst.operands[1].imm;
9810 narrow = TRUE;
9811 }
9812 }
9813
9814 if (!narrow)
9815 {
9816 if (inst.instruction < 0xffff)
9817 inst.instruction = THUMB_OP32 (inst.instruction);
9818
9819 encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
9820 inst.operands[0].writeback);
9821 }
9822 }
9823 else
9824 {
9825 constraint (inst.operands[0].reg > 7
9826 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
9827 constraint (inst.instruction != T_MNEM_ldmia
9828 && inst.instruction != T_MNEM_stmia,
9829 _("Thumb-2 instruction only valid in unified syntax"));
9830 if (inst.instruction == T_MNEM_stmia)
9831 {
9832 if (!inst.operands[0].writeback)
9833 as_warn (_("this instruction will write back the base register"));
9834 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
9835 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
9836 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9837 inst.operands[0].reg);
9838 }
9839 else
9840 {
9841 if (!inst.operands[0].writeback
9842 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
9843 as_warn (_("this instruction will write back the base register"));
9844 else if (inst.operands[0].writeback
9845 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
9846 as_warn (_("this instruction will not write back the base register"));
9847 }
9848
9849 inst.instruction = THUMB_OP16 (inst.instruction);
9850 inst.instruction |= inst.operands[0].reg << 8;
9851 inst.instruction |= inst.operands[1].imm;
9852 }
9853 }
9854
9855 static void
9856 do_t_ldrex (void)
9857 {
9858 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
9859 || inst.operands[1].postind || inst.operands[1].writeback
9860 || inst.operands[1].immisreg || inst.operands[1].shifted
9861 || inst.operands[1].negative,
9862 BAD_ADDR_MODE);
9863
9864 inst.instruction |= inst.operands[0].reg << 12;
9865 inst.instruction |= inst.operands[1].reg << 16;
9866 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9867 }
9868
9869 static void
9870 do_t_ldrexd (void)
9871 {
9872 if (!inst.operands[1].present)
9873 {
9874 constraint (inst.operands[0].reg == REG_LR,
9875 _("r14 not allowed as first register "
9876 "when second register is omitted"));
9877 inst.operands[1].reg = inst.operands[0].reg + 1;
9878 }
9879 constraint (inst.operands[0].reg == inst.operands[1].reg,
9880 BAD_OVERLAP);
9881
9882 inst.instruction |= inst.operands[0].reg << 12;
9883 inst.instruction |= inst.operands[1].reg << 8;
9884 inst.instruction |= inst.operands[2].reg << 16;
9885 }
9886
9887 static void
9888 do_t_ldst (void)
9889 {
9890 unsigned long opcode;
9891 int Rn;
9892
9893 if (inst.operands[0].isreg
9894 && !inst.operands[0].preind
9895 && inst.operands[0].reg == REG_PC)
9896 set_it_insn_type_last ();
9897
9898 opcode = inst.instruction;
9899 if (unified_syntax)
9900 {
9901 if (!inst.operands[1].isreg)
9902 {
9903 if (opcode <= 0xffff)
9904 inst.instruction = THUMB_OP32 (opcode);
9905 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9906 return;
9907 }
9908 if (inst.operands[1].isreg
9909 && !inst.operands[1].writeback
9910 && !inst.operands[1].shifted && !inst.operands[1].postind
9911 && !inst.operands[1].negative && inst.operands[0].reg <= 7
9912 && opcode <= 0xffff
9913 && inst.size_req != 4)
9914 {
9915 /* Insn may have a 16-bit form. */
9916 Rn = inst.operands[1].reg;
9917 if (inst.operands[1].immisreg)
9918 {
9919 inst.instruction = THUMB_OP16 (opcode);
9920 /* [Rn, Rik] */
9921 if (Rn <= 7 && inst.operands[1].imm <= 7)
9922 goto op16;
9923 }
9924 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
9925 && opcode != T_MNEM_ldrsb)
9926 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
9927 || (Rn == REG_SP && opcode == T_MNEM_str))
9928 {
9929 /* [Rn, #const] */
9930 if (Rn > 7)
9931 {
9932 if (Rn == REG_PC)
9933 {
9934 if (inst.reloc.pc_rel)
9935 opcode = T_MNEM_ldr_pc2;
9936 else
9937 opcode = T_MNEM_ldr_pc;
9938 }
9939 else
9940 {
9941 if (opcode == T_MNEM_ldr)
9942 opcode = T_MNEM_ldr_sp;
9943 else
9944 opcode = T_MNEM_str_sp;
9945 }
9946 inst.instruction = inst.operands[0].reg << 8;
9947 }
9948 else
9949 {
9950 inst.instruction = inst.operands[0].reg;
9951 inst.instruction |= inst.operands[1].reg << 3;
9952 }
9953 inst.instruction |= THUMB_OP16 (opcode);
9954 if (inst.size_req == 2)
9955 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9956 else
9957 inst.relax = opcode;
9958 return;
9959 }
9960 }
9961 /* Definitely a 32-bit variant. */
9962 inst.instruction = THUMB_OP32 (opcode);
9963 inst.instruction |= inst.operands[0].reg << 12;
9964 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
9965 return;
9966 }
9967
9968 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9969
9970 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
9971 {
9972 /* Only [Rn,Rm] is acceptable. */
9973 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
9974 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
9975 || inst.operands[1].postind || inst.operands[1].shifted
9976 || inst.operands[1].negative,
9977 _("Thumb does not support this addressing mode"));
9978 inst.instruction = THUMB_OP16 (inst.instruction);
9979 goto op16;
9980 }
9981
9982 inst.instruction = THUMB_OP16 (inst.instruction);
9983 if (!inst.operands[1].isreg)
9984 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9985 return;
9986
9987 constraint (!inst.operands[1].preind
9988 || inst.operands[1].shifted
9989 || inst.operands[1].writeback,
9990 _("Thumb does not support this addressing mode"));
9991 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
9992 {
9993 constraint (inst.instruction & 0x0600,
9994 _("byte or halfword not valid for base register"));
9995 constraint (inst.operands[1].reg == REG_PC
9996 && !(inst.instruction & THUMB_LOAD_BIT),
9997 _("r15 based store not allowed"));
9998 constraint (inst.operands[1].immisreg,
9999 _("invalid base register for register offset"));
10000
10001 if (inst.operands[1].reg == REG_PC)
10002 inst.instruction = T_OPCODE_LDR_PC;
10003 else if (inst.instruction & THUMB_LOAD_BIT)
10004 inst.instruction = T_OPCODE_LDR_SP;
10005 else
10006 inst.instruction = T_OPCODE_STR_SP;
10007
10008 inst.instruction |= inst.operands[0].reg << 8;
10009 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10010 return;
10011 }
10012
10013 constraint (inst.operands[1].reg > 7, BAD_HIREG);
10014 if (!inst.operands[1].immisreg)
10015 {
10016 /* Immediate offset. */
10017 inst.instruction |= inst.operands[0].reg;
10018 inst.instruction |= inst.operands[1].reg << 3;
10019 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10020 return;
10021 }
10022
10023 /* Register offset. */
10024 constraint (inst.operands[1].imm > 7, BAD_HIREG);
10025 constraint (inst.operands[1].negative,
10026 _("Thumb does not support this addressing mode"));
10027
10028 op16:
10029 switch (inst.instruction)
10030 {
10031 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
10032 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
10033 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
10034 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
10035 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
10036 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
10037 case 0x5600 /* ldrsb */:
10038 case 0x5e00 /* ldrsh */: break;
10039 default: abort ();
10040 }
10041
10042 inst.instruction |= inst.operands[0].reg;
10043 inst.instruction |= inst.operands[1].reg << 3;
10044 inst.instruction |= inst.operands[1].imm << 6;
10045 }
10046
10047 static void
10048 do_t_ldstd (void)
10049 {
10050 if (!inst.operands[1].present)
10051 {
10052 inst.operands[1].reg = inst.operands[0].reg + 1;
10053 constraint (inst.operands[0].reg == REG_LR,
10054 _("r14 not allowed here"));
10055 }
10056 inst.instruction |= inst.operands[0].reg << 12;
10057 inst.instruction |= inst.operands[1].reg << 8;
10058 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
10059 }
10060
10061 static void
10062 do_t_ldstt (void)
10063 {
10064 inst.instruction |= inst.operands[0].reg << 12;
10065 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
10066 }
10067
10068 static void
10069 do_t_mla (void)
10070 {
10071 unsigned Rd, Rn, Rm, Ra;
10072
10073 Rd = inst.operands[0].reg;
10074 Rn = inst.operands[1].reg;
10075 Rm = inst.operands[2].reg;
10076 Ra = inst.operands[3].reg;
10077
10078 reject_bad_reg (Rd);
10079 reject_bad_reg (Rn);
10080 reject_bad_reg (Rm);
10081 reject_bad_reg (Ra);
10082
10083 inst.instruction |= Rd << 8;
10084 inst.instruction |= Rn << 16;
10085 inst.instruction |= Rm;
10086 inst.instruction |= Ra << 12;
10087 }
10088
10089 static void
10090 do_t_mlal (void)
10091 {
10092 unsigned RdLo, RdHi, Rn, Rm;
10093
10094 RdLo = inst.operands[0].reg;
10095 RdHi = inst.operands[1].reg;
10096 Rn = inst.operands[2].reg;
10097 Rm = inst.operands[3].reg;
10098
10099 reject_bad_reg (RdLo);
10100 reject_bad_reg (RdHi);
10101 reject_bad_reg (Rn);
10102 reject_bad_reg (Rm);
10103
10104 inst.instruction |= RdLo << 12;
10105 inst.instruction |= RdHi << 8;
10106 inst.instruction |= Rn << 16;
10107 inst.instruction |= Rm;
10108 }
10109
10110 static void
10111 do_t_mov_cmp (void)
10112 {
10113 unsigned Rn, Rm;
10114
10115 Rn = inst.operands[0].reg;
10116 Rm = inst.operands[1].reg;
10117
10118 if (Rn == REG_PC)
10119 set_it_insn_type_last ();
10120
10121 if (unified_syntax)
10122 {
10123 int r0off = (inst.instruction == T_MNEM_mov
10124 || inst.instruction == T_MNEM_movs) ? 8 : 16;
10125 unsigned long opcode;
10126 bfd_boolean narrow;
10127 bfd_boolean low_regs;
10128
10129 low_regs = (Rn <= 7 && Rm <= 7);
10130 opcode = inst.instruction;
10131 if (in_it_block ())
10132 narrow = opcode != T_MNEM_movs;
10133 else
10134 narrow = opcode != T_MNEM_movs || low_regs;
10135 if (inst.size_req == 4
10136 || inst.operands[1].shifted)
10137 narrow = FALSE;
10138
10139 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
10140 if (opcode == T_MNEM_movs && inst.operands[1].isreg
10141 && !inst.operands[1].shifted
10142 && Rn == REG_PC
10143 && Rm == REG_LR)
10144 {
10145 inst.instruction = T2_SUBS_PC_LR;
10146 return;
10147 }
10148
10149 if (opcode == T_MNEM_cmp)
10150 {
10151 constraint (Rn == REG_PC, BAD_PC);
10152 if (narrow)
10153 {
10154 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
10155 but valid. */
10156 warn_deprecated_sp (Rm);
10157 /* R15 was documented as a valid choice for Rm in ARMv6,
10158 but as UNPREDICTABLE in ARMv7. ARM's proprietary
10159 tools reject R15, so we do too. */
10160 constraint (Rm == REG_PC, BAD_PC);
10161 }
10162 else
10163 reject_bad_reg (Rm);
10164 }
10165 else if (opcode == T_MNEM_mov
10166 || opcode == T_MNEM_movs)
10167 {
10168 if (inst.operands[1].isreg)
10169 {
10170 if (opcode == T_MNEM_movs)
10171 {
10172 reject_bad_reg (Rn);
10173 reject_bad_reg (Rm);
10174 }
10175 else if ((Rn == REG_SP || Rn == REG_PC)
10176 && (Rm == REG_SP || Rm == REG_PC))
10177 reject_bad_reg (Rm);
10178 }
10179 else
10180 reject_bad_reg (Rn);
10181 }
10182
10183 if (!inst.operands[1].isreg)
10184 {
10185 /* Immediate operand. */
10186 if (!in_it_block () && opcode == T_MNEM_mov)
10187 narrow = 0;
10188 if (low_regs && narrow)
10189 {
10190 inst.instruction = THUMB_OP16 (opcode);
10191 inst.instruction |= Rn << 8;
10192 if (inst.size_req == 2)
10193 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
10194 else
10195 inst.relax = opcode;
10196 }
10197 else
10198 {
10199 inst.instruction = THUMB_OP32 (inst.instruction);
10200 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10201 inst.instruction |= Rn << r0off;
10202 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10203 }
10204 }
10205 else if (inst.operands[1].shifted && inst.operands[1].immisreg
10206 && (inst.instruction == T_MNEM_mov
10207 || inst.instruction == T_MNEM_movs))
10208 {
10209 /* Register shifts are encoded as separate shift instructions. */
10210 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
10211
10212 if (in_it_block ())
10213 narrow = !flags;
10214 else
10215 narrow = flags;
10216
10217 if (inst.size_req == 4)
10218 narrow = FALSE;
10219
10220 if (!low_regs || inst.operands[1].imm > 7)
10221 narrow = FALSE;
10222
10223 if (Rn != Rm)
10224 narrow = FALSE;
10225
10226 switch (inst.operands[1].shift_kind)
10227 {
10228 case SHIFT_LSL:
10229 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
10230 break;
10231 case SHIFT_ASR:
10232 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
10233 break;
10234 case SHIFT_LSR:
10235 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
10236 break;
10237 case SHIFT_ROR:
10238 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
10239 break;
10240 default:
10241 abort ();
10242 }
10243
10244 inst.instruction = opcode;
10245 if (narrow)
10246 {
10247 inst.instruction |= Rn;
10248 inst.instruction |= inst.operands[1].imm << 3;
10249 }
10250 else
10251 {
10252 if (flags)
10253 inst.instruction |= CONDS_BIT;
10254
10255 inst.instruction |= Rn << 8;
10256 inst.instruction |= Rm << 16;
10257 inst.instruction |= inst.operands[1].imm;
10258 }
10259 }
10260 else if (!narrow)
10261 {
10262 /* Some mov with immediate shift have narrow variants.
10263 Register shifts are handled above. */
10264 if (low_regs && inst.operands[1].shifted
10265 && (inst.instruction == T_MNEM_mov
10266 || inst.instruction == T_MNEM_movs))
10267 {
10268 if (in_it_block ())
10269 narrow = (inst.instruction == T_MNEM_mov);
10270 else
10271 narrow = (inst.instruction == T_MNEM_movs);
10272 }
10273
10274 if (narrow)
10275 {
10276 switch (inst.operands[1].shift_kind)
10277 {
10278 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
10279 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
10280 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
10281 default: narrow = FALSE; break;
10282 }
10283 }
10284
10285 if (narrow)
10286 {
10287 inst.instruction |= Rn;
10288 inst.instruction |= Rm << 3;
10289 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10290 }
10291 else
10292 {
10293 inst.instruction = THUMB_OP32 (inst.instruction);
10294 inst.instruction |= Rn << r0off;
10295 encode_thumb32_shifted_operand (1);
10296 }
10297 }
10298 else
10299 switch (inst.instruction)
10300 {
10301 case T_MNEM_mov:
10302 inst.instruction = T_OPCODE_MOV_HR;
10303 inst.instruction |= (Rn & 0x8) << 4;
10304 inst.instruction |= (Rn & 0x7);
10305 inst.instruction |= Rm << 3;
10306 break;
10307
10308 case T_MNEM_movs:
10309 /* We know we have low registers at this point.
10310 Generate ADD Rd, Rs, #0. */
10311 inst.instruction = T_OPCODE_ADD_I3;
10312 inst.instruction |= Rn;
10313 inst.instruction |= Rm << 3;
10314 break;
10315
10316 case T_MNEM_cmp:
10317 if (low_regs)
10318 {
10319 inst.instruction = T_OPCODE_CMP_LR;
10320 inst.instruction |= Rn;
10321 inst.instruction |= Rm << 3;
10322 }
10323 else
10324 {
10325 inst.instruction = T_OPCODE_CMP_HR;
10326 inst.instruction |= (Rn & 0x8) << 4;
10327 inst.instruction |= (Rn & 0x7);
10328 inst.instruction |= Rm << 3;
10329 }
10330 break;
10331 }
10332 return;
10333 }
10334
10335 inst.instruction = THUMB_OP16 (inst.instruction);
10336
10337 /* PR 10443: Do not silently ignore shifted operands. */
10338 constraint (inst.operands[1].shifted,
10339 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
10340
10341 if (inst.operands[1].isreg)
10342 {
10343 if (Rn < 8 && Rm < 8)
10344 {
10345 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
10346 since a MOV instruction produces unpredictable results. */
10347 if (inst.instruction == T_OPCODE_MOV_I8)
10348 inst.instruction = T_OPCODE_ADD_I3;
10349 else
10350 inst.instruction = T_OPCODE_CMP_LR;
10351
10352 inst.instruction |= Rn;
10353 inst.instruction |= Rm << 3;
10354 }
10355 else
10356 {
10357 if (inst.instruction == T_OPCODE_MOV_I8)
10358 inst.instruction = T_OPCODE_MOV_HR;
10359 else
10360 inst.instruction = T_OPCODE_CMP_HR;
10361 do_t_cpy ();
10362 }
10363 }
10364 else
10365 {
10366 constraint (Rn > 7,
10367 _("only lo regs allowed with immediate"));
10368 inst.instruction |= Rn << 8;
10369 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
10370 }
10371 }
10372
/* Encode a Thumb-2 MOVW/MOVT (16-bit immediate move).  Bit 23 of the
   opcode template distinguishes MOVT (top half) from MOVW.  */
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  top = (inst.instruction & 0x00800000) != 0;
  /* Convert ARM :lower16:/:upper16: relocs to their Thumb equivalents,
     checking that the modifier matches the instruction half.  */
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* Plain constant (no reloc): scatter the 16-bit immediate into
	 the imm4:i:imm3:imm8 fields of the T32 encoding.  */
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
10405
/* Encode Thumb MVN/MVNS/TST/CMP/CMN with a register or immediate
   second operand, selecting a 16-bit or 32-bit encoding.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN merely forbid PC; the others also forbid SP.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN/MVNS place the destination at bits 8-11; TST/CMP/CMN put
	 the first register at bits 16-19.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      /* 32-bit form; a register-shifted register operand is
		 not encodable here.  */
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: only the 16-bit low-register, unshifted
	 register form exists.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
10484
/* Encode a Thumb-2 MRS.  Operand 1's immediate carries the PSR field
   mask plus SPSR_BIT; a zero field mask selects an M-profile special
   purpose register instead of CPSR/SPSR.  */
static void
do_t_mrs (void)
{
  unsigned Rd;
  int flags;

  /* VFP system registers (FPSCR etc.) are handled by the VFP encoder.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
  if (flags == 0)
    {
      /* M-profile special register: requires the M extension.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
		  _("selected processor does not support "
		    "requested special purpose register"));
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
		  _("selected processor does not support "
		    "requested special purpose register"));
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		  _("'CPSR' or 'SPSR' expected"));
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  /* SPSR_BIT selects the banked SPSR variant of the opcode.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= inst.operands[1].imm & 0xff;
}
10518
/* Encode a Thumb-2 MSR.  Only the register form is encodable in
   Thumb; operand 0's immediate holds the PSR field mask.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* VFP system registers are handled by the VFP encoder.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));
  flags = inst.operands[0].imm;
  if (flags & ~0xff)
    {
      /* PSR field bits present: a classic CPSR/SPSR write.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
		  _("selected processor does not support "
		    "requested special purpose register"));
    }
  else
    {
      /* M-profile special register; flag writes are implied.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
		  _("selected processor does not support "
		    "requested special purpose register"));
      flags |= PSR_f;
    }

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & ~SPSR_BIT) >> 8;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
10553
/* Encode Thumb MUL/MULS, choosing the 16-bit two-register form when
   the operands and IT context allow, else the 32-bit three-register
   form.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* Two-operand form: "mul Rd, Rm" means "mul Rd, Rd, Rm".  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* The 16-bit form requires low registers and Rd overlapping
	 one source; MULS is only narrow outside an IT block, plain
	 MUL only inside one.  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      /* The non-destination source goes into bits 3-5.  */
      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      /* SP/PC are invalid in the 32-bit form (checked after encoding;
	 constraint failure aborts the insn anyway).  */
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
10616
10617 static void
10618 do_t_mull (void)
10619 {
10620 unsigned RdLo, RdHi, Rn, Rm;
10621
10622 RdLo = inst.operands[0].reg;
10623 RdHi = inst.operands[1].reg;
10624 Rn = inst.operands[2].reg;
10625 Rm = inst.operands[3].reg;
10626
10627 reject_bad_reg (RdLo);
10628 reject_bad_reg (RdHi);
10629 reject_bad_reg (Rn);
10630 reject_bad_reg (Rm);
10631
10632 inst.instruction |= RdLo << 12;
10633 inst.instruction |= RdHi << 8;
10634 inst.instruction |= Rn << 16;
10635 inst.instruction |= Rm;
10636
10637 if (RdLo == RdHi)
10638 as_tsktsk (_("rdhi and rdlo must be different"));
10639 }
10640
/* Encode Thumb NOP and its hint variants (YIELD/WFE/WFI/SEV).  */
static void
do_t_nop (void)
{
  /* A NOP is legal anywhere relative to an IT block.  */
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  /* Wide hint form; the hint number goes in the low bits.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    /* Pre-Thumb-2: use "mov r8, r8" as the canonical NOP.  */
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      /* "mov r8, r8": the traditional Thumb-1 NOP.  */
      inst.instruction = 0x46c0;
    }
}
10673
/* Encode Thumb NEG/NEGS (RSB from zero), choosing a 16-bit or 32-bit
   encoding depending on registers, IT context and size request.  */
static void
do_t_neg (void)
{
  if (unified_syntax)
    {
      bfd_boolean narrow;

      /* Flag-setting form is narrow outside an IT block; the
	 non-flag form is narrow only inside one.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      /* Pre-unified syntax: low registers, 16-bit only.  */
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
10714
/* Encode Thumb-2 ORN/ORNS (OR NOT); 32-bit only.  */
static void
do_t_orn (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  /* Two-operand form: "orn Rd, op" means "orn Rd, Rd, op".  */
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      /* Immediate form: switch the opcode into its immediate
	 encoding and leave the value to the T32 immediate reloc.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      /* Only constant shifts are encodable on the last operand.  */
      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
10748
/* Encode Thumb-2 PKHBT (pack halfword bottom-top).  Also used as the
   tail of do_t_pkhtb after its operand swap.  */
static void
do_t_pkhbt (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      /* Optional shift amount.  (val is read before the O_constant
	 check; harmless, since a failed constraint discards the
	 insn.)  Split into the imm3:imm2 fields.  */
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}
10774
/* Encode Thumb-2 PKHTB (pack halfword top-bottom).  Without a shift
   it is the same operation as PKHBT with the sources exchanged.  */
static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      unsigned Rtmp;

      /* Clear the tb bit so the result encodes as PKHBT.  */
      inst.instruction &= ~0x00000020;

      /* PR 10168.  Swap the Rm and Rn registers.  */
      Rtmp = inst.operands[1].reg;
      inst.operands[1].reg = inst.operands[2].reg;
      inst.operands[2].reg = Rtmp;
    }
  /* Shared encoding path with PKHBT.  */
  do_t_pkhbt ();
}
10791
/* Encode Thumb-2 PLD/PLI (preload hint) addressing.  */
static void
do_t_pld (void)
{
  /* A register offset may not be SP or PC.  */
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
10800
/* Encode Thumb PUSH/POP.  Uses the 16-bit form when the register list
   fits (low regs, optionally plus LR for push / PC for pop), else the
   32-bit LDM/STM form in unified syntax.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if ((mask & ~0xff) == 0)
    /* Low registers only: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if ((inst.instruction == T_MNEM_push
	    && (mask & ~0xff) == 1 << REG_LR)
	   || (inst.instruction == T_MNEM_pop
	       && (mask & ~0xff) == 1 << REG_PC))
    {
      /* Low registers plus exactly LR (push) or PC (pop): 16-bit
	 form with the R bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else becomes a 32-bit LDMIA/STMDB on SP (r13).  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
10834
10835 static void
10836 do_t_rbit (void)
10837 {
10838 unsigned Rd, Rm;
10839
10840 Rd = inst.operands[0].reg;
10841 Rm = inst.operands[1].reg;
10842
10843 reject_bad_reg (Rd);
10844 reject_bad_reg (Rm);
10845
10846 inst.instruction |= Rd << 8;
10847 inst.instruction |= Rm << 16;
10848 inst.instruction |= Rm;
10849 }
10850
/* Encode Thumb REV/REV16/REVSH (byte-reverse), preferring the 16-bit
   encoding when both registers are low.  */
static void
do_t_rev (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      /* 32-bit form duplicates the source register field.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
10879
10880 static void
10881 do_t_rrx (void)
10882 {
10883 unsigned Rd, Rm;
10884
10885 Rd = inst.operands[0].reg;
10886 Rm = inst.operands[1].reg;
10887
10888 reject_bad_reg (Rd);
10889 reject_bad_reg (Rm);
10890
10891 inst.instruction |= Rd << 8;
10892 inst.instruction |= Rm;
10893 }
10894
/* Encode Thumb RSB/RSBS (reverse subtract).  "rsbs Rd, Rs, #0" may
   shrink to the 16-bit NEG encoding.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit of the T32 template.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Only a literal zero immediate has a 16-bit equivalent.  */
      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* 32-bit immediate form.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
10949
10950 static void
10951 do_t_setend (void)
10952 {
10953 set_it_insn_type (OUTSIDE_IT_INSN);
10954 if (inst.operands[0].imm)
10955 inst.instruction |= 0x8;
10956 }
10957
/* Encode Thumb shift instructions (ASR/LSL/LSR/ROR, flag-setting or
   not, immediate or register shift count), selecting the narrowest
   legal encoding.  */
static void
do_t_shift (void)
{
  /* Two-operand form: "lsl Rd, op" means "lsl Rd, Rd, op".  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Flag-setting forms are narrow outside an IT block, the
	 others only inside one.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit ROR-by-immediate.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* The 16-bit register-shift form requires Rd == Rn and a low
	 shift register.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 32-bit register-shift form.  */
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;
	    }
	  else
	    {
	      /* Immediate shift: encode as MOV/MOVS with a shifted
		 register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 16-bit register-shift form (Rd == Rn checked above).  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;
	    }
	  else
	    {
	      /* 16-bit immediate-shift form; the shift amount is
		 resolved via the THUMB_SHIFT reloc.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: 16-bit low-register forms only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
11093
11094 static void
11095 do_t_simd (void)
11096 {
11097 unsigned Rd, Rn, Rm;
11098
11099 Rd = inst.operands[0].reg;
11100 Rn = inst.operands[1].reg;
11101 Rm = inst.operands[2].reg;
11102
11103 reject_bad_reg (Rd);
11104 reject_bad_reg (Rn);
11105 reject_bad_reg (Rm);
11106
11107 inst.instruction |= Rd << 8;
11108 inst.instruction |= Rn << 16;
11109 inst.instruction |= Rm;
11110 }
11111
/* As do_t_simd, but with the second and third assembly operands
   exchanged: operand 1 becomes Rm and operand 2 becomes Rn.  Used for
   instructions whose encoding order is reversed relative to their
   assembly order.  */
static void
do_t_simd2 (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  /* NOTE: deliberate swap relative to do_t_simd.  */
  Rm = inst.operands[1].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
11129
/* Encode Thumb-2 SMC (secure monitor call) with a 16-bit immediate,
   scattered across three opcode fields.  */
static void
do_t_smc (void)
{
  /* value is read before the O_constant check; harmless, since a
     failed constraint discards the insn.  */
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
}
11141
/* Common encoder for Thumb-2 SSAT/USAT.  BIAS is subtracted from the
   saturation width operand (1 for SSAT, which encodes width-1; 0 for
   USAT).  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      /* Optional shift applied to Rn before saturating.  */
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Split the amount into the imm3:imm2 fields.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
11179
/* Encode Thumb-2 SSAT: saturation width is encoded as width - 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
11185
11186 static void
11187 do_t_ssat16 (void)
11188 {
11189 unsigned Rd, Rn;
11190
11191 Rd = inst.operands[0].reg;
11192 Rn = inst.operands[2].reg;
11193
11194 reject_bad_reg (Rd);
11195 reject_bad_reg (Rn);
11196
11197 inst.instruction |= Rd << 8;
11198 inst.instruction |= inst.operands[1].imm - 1;
11199 inst.instruction |= Rn << 16;
11200 }
11201
/* Encode Thumb-2 STREX.  The address operand must be a plain
   base-plus-immediate-offset form.  */
static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  inst.instruction |= inst.operands[0].reg << 8;   /* Rd (status).  */
  inst.instruction |= inst.operands[1].reg << 12;  /* Rt (value).  */
  inst.instruction |= inst.operands[2].reg << 16;  /* Rn (base).  */
  /* Offset is resolved later via the unsigned 8-bit offset reloc.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
11216
/* Encode Thumb-2 STREXD (store-exclusive doubleword).  */
static void
do_t_strexd (void)
{
  /* Default the second data register to Rt + 1.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  /* The status register must not overlap the data registers, nor
     may the two data registers be the same.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg
	      || inst.operands[1].reg == inst.operands[2].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;        /* Rd (status).  */
  inst.instruction |= inst.operands[1].reg << 12;  /* Rt.  */
  inst.instruction |= inst.operands[2].reg << 8;   /* Rt2.  */
  inst.instruction |= inst.operands[3].reg << 16;  /* Rn (base).  */
}
11234
11235 static void
11236 do_t_sxtah (void)
11237 {
11238 unsigned Rd, Rn, Rm;
11239
11240 Rd = inst.operands[0].reg;
11241 Rn = inst.operands[1].reg;
11242 Rm = inst.operands[2].reg;
11243
11244 reject_bad_reg (Rd);
11245 reject_bad_reg (Rn);
11246 reject_bad_reg (Rm);
11247
11248 inst.instruction |= Rd << 8;
11249 inst.instruction |= Rn << 16;
11250 inst.instruction |= Rm;
11251 inst.instruction |= inst.operands[3].imm << 4;
11252 }
11253
/* Encode Thumb SXTH/SXTB/UXTH/UXTB, using the 16-bit form when both
   registers are low and no rotation is requested.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      /* 16-bit form (no rotation field).  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Rotation (multiples of 8 bits) at bits 4-5.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
11289
/* Encode Thumb SWI/SVC; the comment number is filled in by the
   BFD_RELOC_ARM_SWI fixup.  */
static void
do_t_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
11295
/* Encode Thumb-2 TBB/TBH (table branch).  Bit 4 of the opcode
   template distinguishes the halfword variant.  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* A table branch must be the last instruction of an IT block.  */
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;   /* Base register.  */
  Rm = inst.operands[0].imm;   /* Index register.  */

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* TBH takes an "lsl #1" on the index; TBB allows no shift.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
11317
/* Encode Thumb-2 USAT: saturation width is encoded directly.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
11323
11324 static void
11325 do_t_usat16 (void)
11326 {
11327 unsigned Rd, Rn;
11328
11329 Rd = inst.operands[0].reg;
11330 Rn = inst.operands[2].reg;
11331
11332 reject_bad_reg (Rd);
11333 reject_bad_reg (Rn);
11334
11335 inst.instruction |= Rd << 8;
11336 inst.instruction |= inst.operands[1].imm;
11337 inst.instruction |= Rn << 16;
11338 }
11339
11340 /* Neon instruction encoder helpers. */
11341
11342 /* Encodings for the different types for various Neon opcodes. */
11343
11344 /* An "invalid" code for the following tables. */
11345 #define N_INV -1u
11346
11347 struct neon_tab_entry
11348 {
11349 unsigned integer;
11350 unsigned float_or_poly;
11351 unsigned scalar_or_imm;
11352 };
11353
11354 /* Map overloaded Neon opcodes to their respective encodings. */
11355 #define NEON_ENC_TAB \
11356 X(vabd, 0x0000700, 0x1200d00, N_INV), \
11357 X(vmax, 0x0000600, 0x0000f00, N_INV), \
11358 X(vmin, 0x0000610, 0x0200f00, N_INV), \
11359 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
11360 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
11361 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
11362 X(vadd, 0x0000800, 0x0000d00, N_INV), \
11363 X(vsub, 0x1000800, 0x0200d00, N_INV), \
11364 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
11365 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
11366 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
11367 /* Register variants of the following two instructions are encoded as
11368 vcge / vcgt with the operands reversed. */ \
11369 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
11370 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
11371 X(vfma, N_INV, 0x0000c10, N_INV), \
11372 X(vfms, N_INV, 0x0200c10, N_INV), \
11373 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
11374 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
11375 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
11376 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
11377 X(vmlal, 0x0800800, N_INV, 0x0800240), \
11378 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
11379 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
11380 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
11381 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
11382 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
11383 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
11384 X(vshl, 0x0000400, N_INV, 0x0800510), \
11385 X(vqshl, 0x0000410, N_INV, 0x0800710), \
11386 X(vand, 0x0000110, N_INV, 0x0800030), \
11387 X(vbic, 0x0100110, N_INV, 0x0800030), \
11388 X(veor, 0x1000110, N_INV, N_INV), \
11389 X(vorn, 0x0300110, N_INV, 0x0800010), \
11390 X(vorr, 0x0200110, N_INV, 0x0800010), \
11391 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
11392 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
11393 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
11394 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
11395 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
11396 X(vst1, 0x0000000, 0x0800000, N_INV), \
11397 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
11398 X(vst2, 0x0000100, 0x0800100, N_INV), \
11399 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
11400 X(vst3, 0x0000200, 0x0800200, N_INV), \
11401 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
11402 X(vst4, 0x0000300, 0x0800300, N_INV), \
11403 X(vmovn, 0x1b20200, N_INV, N_INV), \
11404 X(vtrn, 0x1b20080, N_INV, N_INV), \
11405 X(vqmovn, 0x1b20200, N_INV, N_INV), \
11406 X(vqmovun, 0x1b20240, N_INV, N_INV), \
11407 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
11408 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
11409 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
11410 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
11411 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
11412 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
11413 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
11414 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
11415 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
11416
/* Generate one N_MNEM_<opc> enumerator per NEON_ENC_TAB entry.  These
   values act as pseudo-opcodes: they are stored in inst.instruction while
   parsing and later replaced by a real base encoding via NEON_ENCODE.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};
11423
/* Table indexed by N_MNEM_<opc>, giving the three candidate base encodings
   for each mnemonic (integer, float-or-poly, scalar-or-immediate as listed
   in NEON_ENC_TAB).  N_INV marks a variant that does not exist.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
11430
/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
/* The low 28 bits select the neon_enc_tab row (the N_MNEM_ pseudo-opcode);
   several names alias the same struct field because the meaning of each
   column differs per mnemonic.  The SINGLE/DOUBLE variants additionally
   preserve the top nibble (condition field) of the original value.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
11445
/* Replace the N_MNEM_ pseudo-opcode currently held in inst.instruction with
   the base encoding of the given variant (INTEGER, FLOAT, SCALAR, SINGLE,
   DOUBLE, ...), and mark the instruction as Neon so that type-suffix
   checking (check_neon_suffixes) accepts it.  */
#define NEON_ENCODE(type, inst) \
  do \
    { \
      inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
      inst.is_neon = 1; \
    } \
  while (0)
11453
/* Reject a Neon type suffix (e.g. ".s32") on an instruction that never went
   through a Neon encoding path (is_neon was never set).  This expands inside
   void encode functions, hence the bare `return' on error.  */
#define check_neon_suffixes \
  do \
    { \
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
	{ \
	  as_bad (_("invalid neon suffix for non neon instruction")); \
	  return; \
	} \
    } \
  while (0)
11464
11465 /* Define shapes for instruction operands. The following mnemonic characters
11466 are used in this table:
11467
11468 F - VFP S<n> register
11469 D - Neon D<n> register
11470 Q - Neon Q<n> register
11471 I - Immediate
11472 S - Scalar
11473 R - ARM register
11474 L - D<n> register list
11475
11476 This table is used to generate various data:
11477 - enumerations of the form NS_DDR to be used as arguments to
11478 neon_select_shape.
11479 - a table classifying shapes into single, double, quad, mixed.
11480 - a table used to drive neon_select_shape. */
11481
/* Each entry is X (operand count, (operand kinds), shape class); expanded
   three times below to build enum neon_shape, neon_shape_class[] and
   neon_shape_tab[], which must therefore stay in step automatically.  */
#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE)
11523
/* Paste helpers turning an operand-kind list into an NS_xxx identifier,
   e.g. S3 (D, D, D) -> NS_DDD.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* One enumerator per shape in NEON_SHAPE_DEF; NS_NULL terminates the
   variable argument list of neon_select_shape and doubles as its
   "no shape matched" return value.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
11540
/* Broad classification of a shape: all-single-precision, all-doubleword,
   all-quadword, or a mixture.  neon_quad () keys off SC_QUAD.  */
enum neon_shape_class
{
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Class of each shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
11557
/* Individual operand kinds within a shape (see the mnemonic key in the
   comment above NEON_SHAPE_DEF).  */
enum neon_shape_el
{
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above, in bits, indexed by enum neon_shape_el.
   Immediates (SE_I) and register lists (SE_L) have no register width,
   hence 0.  */
static unsigned neon_shape_el_size[] =
{
  32,	/* SE_F */
  64,	/* SE_D */
  128,	/* SE_Q */
  0,	/* SE_I */
  32,	/* SE_S */
  32,	/* SE_R */
  0	/* SE_L */
};
11580
/* Operand count and per-operand kind for one shape.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Shape descriptions, indexed by enum neon_shape; drives the matching loop
   in neon_select_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
11602
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.

   Note that the modifier bits N_DBL..N_SIZ deliberately reuse the low bit
   positions of N_S8..N_INT; they are only meaningful when N_EQK is set, so
   the two uses never clash.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_F64
};
11645
/* All N_EQK modifier bits.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Common groupings of allowed element types.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
11658
11659 /* Select a "shape" for the current instruction (describing register types or
11660 sizes) from a list of alternatives. Return NS_NULL if the current instruction
11661 doesn't fit. For non-polymorphic shapes, checking is usually done as a
11662 function of operand parsing, so this function doesn't need to be called.
11663 Shapes should be listed in order of decreasing length. */
11664
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn until one matches the parsed operand
     flags; the list is terminated by NS_NULL.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  /* Check the parsed operand flags against the kind this shape
	     requires at position J.  */
	  switch (neon_shape_tab[shape].el[j])
	    {
	    case SE_F:
	      /* VFP single-precision register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad))
		matches = 0;
	      break;

	    case SE_D:
	      /* Neon doubleword register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      /* Plain ARM core register.  */
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      /* Neon quadword register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      /* Immediate (neither register nor scalar).  */
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      /* Scalar (indexed element).  */
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists are not further checked here.  */
	      break;
	    }
	}
      if (matches)
	break;
    }

  va_end (ap);

  /* Only diagnose if the caller actually supplied candidates.  */
  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
11752
11753 /* True if SHAPE is predominantly a quadword operation (most of the time, this
11754 means the Q bit should be set). */
11755
11756 static int
11757 neon_quad (enum neon_shape shape)
11758 {
11759 return neon_shape_class[shape] == SC_QUAD;
11760 }
11761
11762 static void
11763 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
11764 unsigned *g_size)
11765 {
11766 /* Allow modification to be made to types which are constrained to be
11767 based on the key element, based on bits set alongside N_EQK. */
11768 if ((typebits & N_EQK) != 0)
11769 {
11770 if ((typebits & N_HLF) != 0)
11771 *g_size /= 2;
11772 else if ((typebits & N_DBL) != 0)
11773 *g_size *= 2;
11774 if ((typebits & N_SGN) != 0)
11775 *g_type = NT_signed;
11776 else if ((typebits & N_UNS) != 0)
11777 *g_type = NT_unsigned;
11778 else if ((typebits & N_INT) != 0)
11779 *g_type = NT_integer;
11780 else if ((typebits & N_FLT) != 0)
11781 *g_type = NT_float;
11782 else if ((typebits & N_SIZ) != 0)
11783 *g_type = NT_untyped;
11784 }
11785 }
11786
11787 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
11788 operand type, i.e. the single type specified in a Neon instruction when it
11789 is the only one given. */
11790
11791 static struct neon_type_el
11792 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
11793 {
11794 struct neon_type_el dest = *key;
11795
11796 gas_assert ((thisarg & N_EQK) != 0);
11797
11798 neon_modify_type_size (thisarg, &dest.type, &dest.size);
11799
11800 return dest;
11801 }
11802
11803 /* Convert Neon type and size into compact bitmask representation. */
11804
11805 static enum neon_type_mask
11806 type_chk_of_el_type (enum neon_el_type type, unsigned size)
11807 {
11808 switch (type)
11809 {
11810 case NT_untyped:
11811 switch (size)
11812 {
11813 case 8: return N_8;
11814 case 16: return N_16;
11815 case 32: return N_32;
11816 case 64: return N_64;
11817 default: ;
11818 }
11819 break;
11820
11821 case NT_integer:
11822 switch (size)
11823 {
11824 case 8: return N_I8;
11825 case 16: return N_I16;
11826 case 32: return N_I32;
11827 case 64: return N_I64;
11828 default: ;
11829 }
11830 break;
11831
11832 case NT_float:
11833 switch (size)
11834 {
11835 case 16: return N_F16;
11836 case 32: return N_F32;
11837 case 64: return N_F64;
11838 default: ;
11839 }
11840 break;
11841
11842 case NT_poly:
11843 switch (size)
11844 {
11845 case 8: return N_P8;
11846 case 16: return N_P16;
11847 default: ;
11848 }
11849 break;
11850
11851 case NT_signed:
11852 switch (size)
11853 {
11854 case 8: return N_S8;
11855 case 16: return N_S16;
11856 case 32: return N_S32;
11857 case 64: return N_S64;
11858 default: ;
11859 }
11860 break;
11861
11862 case NT_unsigned:
11863 switch (size)
11864 {
11865 case 8: return N_U8;
11866 case 16: return N_U16;
11867 case 32: return N_U32;
11868 case 64: return N_U64;
11869 default: ;
11870 }
11871 break;
11872
11873 default: ;
11874 }
11875
11876 return N_UTYP;
11877 }
11878
11879 /* Convert compact Neon bitmask type representation to a type and size. Only
11880 handles the case where a single bit is set in the mask. */
11881
11882 static int
11883 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
11884 enum neon_type_mask mask)
11885 {
11886 if ((mask & N_EQK) != 0)
11887 return FAIL;
11888
11889 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
11890 *size = 8;
11891 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
11892 *size = 16;
11893 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
11894 *size = 32;
11895 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
11896 *size = 64;
11897 else
11898 return FAIL;
11899
11900 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
11901 *type = NT_signed;
11902 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
11903 *type = NT_unsigned;
11904 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
11905 *type = NT_integer;
11906 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
11907 *type = NT_untyped;
11908 else if ((mask & (N_P8 | N_P16)) != 0)
11909 *type = NT_poly;
11910 else if ((mask & (N_F32 | N_F64)) != 0)
11911 *type = NT_float;
11912 else
11913 return FAIL;
11914
11915 return SUCCESS;
11916 }
11917
11918 /* Modify a bitmask of allowed types. This is only needed for type
11919 relaxation. */
11920
11921 static unsigned
11922 modify_types_allowed (unsigned allowed, unsigned mods)
11923 {
11924 unsigned size;
11925 enum neon_el_type type;
11926 unsigned destmask;
11927 int i;
11928
11929 destmask = 0;
11930
11931 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
11932 {
11933 if (el_type_of_type_chk (&type, &size,
11934 (enum neon_type_mask) (allowed & i)) == SUCCESS)
11935 {
11936 neon_modify_type_size (mods, &type, &size);
11937 destmask |= type_chk_of_el_type (type, size);
11938 }
11939 }
11940
11941 return destmask;
11942 }
11943
11944 /* Check type and return type classification.
11945 The manual states (paraphrase): If one datatype is given, it indicates the
11946 type given in:
11947 - the second operand, if there is one
11948 - the operand, if there is no second operand
11949 - the result, if there are no operands.
11950 This isn't quite good enough though, so we use a concept of a "key" datatype
11951 which is set on a per-instruction basis, which is the one which matters when
11952 only one data type is written.
11953 Note: this function has side-effects (e.g. filling in missing operands). All
11954 Neon instructions should call it before performing bit encoding. */
11955
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Returned on any error; callers test .type against NT_invtype.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs: one type-constraint word per operand, and
     remember which operand carries the N_KEY (key type) bit.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type may be written after the mnemonic or after individual operands,
     but not both.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      /* One type given for several operands: treat it as the key type and
	 derive the rest via the per-operand modifier bits.  */
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes: pass 0 records the key type/size and its allowed set;
     pass 1 checks every operand against its constraint (N_EQK constraints
     are resolved relative to the key recorded in pass 0).  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if ((g_size == 8 && (types_allowed & N_8) != 0)
	      || (g_size == 16 && (types_allowed & N_16) != 0)
	      || (g_size == 32 && (types_allowed & N_32) != 0)
	      || (g_size == 64 && (types_allowed & N_64) != 0))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
		  unsigned regwidth = neon_shape_el_size[regshape], match;

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  /* Independent operand: its (decayed) type must be in the
		     allowed set.  */
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* Key-relative operand: it must equal the key type/size
		     after the modifier bits are applied.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
12122
12123 /* Neon-style VFP instruction forwarding. */
12124
12125 /* Thumb VFP instructions have 0xE in the condition field. */
12126
12127 static void
12128 do_vfp_cond_or_thumb (void)
12129 {
12130 inst.is_neon = 1;
12131
12132 if (thumb_mode)
12133 inst.instruction |= 0xe0000000;
12134 else
12135 inst.instruction |= inst.cond << 28;
12136 }
12137
12138 /* Look up and encode a simple mnemonic, for use as a helper function for the
12139 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
12140 etc. It is assumed that operand parsing has already been done, and that the
12141 operands are in the form expected by the given opcode (this isn't necessarily
12142 the same as the form in which they were parsed, hence some massaging must
12143 take place before this function is called).
12144 Checks current arch version against that in the looked-up opcode. */
12145
12146 static void
12147 do_vfp_nsyn_opcode (const char *opname)
12148 {
12149 const struct asm_opcode *opcode;
12150
12151 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
12152
12153 if (!opcode)
12154 abort ();
12155
12156 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
12157 thumb_mode ? *opcode->tvariant : *opcode->avariant),
12158 _(BAD_FPU));
12159
12160 inst.is_neon = 1;
12161
12162 if (thumb_mode)
12163 {
12164 inst.instruction = opcode->tvalue;
12165 opcode->tencode ();
12166 }
12167 else
12168 {
12169 inst.instruction = (inst.cond << 28) | opcode->avalue;
12170 opcode->aencode ();
12171 }
12172 }
12173
12174 static void
12175 do_vfp_nsyn_add_sub (enum neon_shape rs)
12176 {
12177 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
12178
12179 if (rs == NS_FFF)
12180 {
12181 if (is_add)
12182 do_vfp_nsyn_opcode ("fadds");
12183 else
12184 do_vfp_nsyn_opcode ("fsubs");
12185 }
12186 else
12187 {
12188 if (is_add)
12189 do_vfp_nsyn_opcode ("faddd");
12190 else
12191 do_vfp_nsyn_opcode ("fsubd");
12192 }
12193 }
12194
12195 /* Check operand types to see if this is a VFP instruction, and if so call
12196 PFN (). */
12197
12198 static int
12199 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
12200 {
12201 enum neon_shape rs;
12202 struct neon_type_el et;
12203
12204 switch (args)
12205 {
12206 case 2:
12207 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
12208 et = neon_check_type (2, rs,
12209 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12210 break;
12211
12212 case 3:
12213 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12214 et = neon_check_type (3, rs,
12215 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12216 break;
12217
12218 default:
12219 abort ();
12220 }
12221
12222 if (et.type != NT_invtype)
12223 {
12224 pfn (rs);
12225 return SUCCESS;
12226 }
12227 else
12228 inst.error = NULL;
12229
12230 return FAIL;
12231 }
12232
12233 static void
12234 do_vfp_nsyn_mla_mls (enum neon_shape rs)
12235 {
12236 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
12237
12238 if (rs == NS_FFF)
12239 {
12240 if (is_mla)
12241 do_vfp_nsyn_opcode ("fmacs");
12242 else
12243 do_vfp_nsyn_opcode ("fnmacs");
12244 }
12245 else
12246 {
12247 if (is_mla)
12248 do_vfp_nsyn_opcode ("fmacd");
12249 else
12250 do_vfp_nsyn_opcode ("fnmacd");
12251 }
12252 }
12253
12254 static void
12255 do_vfp_nsyn_fma_fms (enum neon_shape rs)
12256 {
12257 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
12258
12259 if (rs == NS_FFF)
12260 {
12261 if (is_fma)
12262 do_vfp_nsyn_opcode ("ffmas");
12263 else
12264 do_vfp_nsyn_opcode ("ffnmas");
12265 }
12266 else
12267 {
12268 if (is_fma)
12269 do_vfp_nsyn_opcode ("ffmad");
12270 else
12271 do_vfp_nsyn_opcode ("ffnmad");
12272 }
12273 }
12274
12275 static void
12276 do_vfp_nsyn_mul (enum neon_shape rs)
12277 {
12278 if (rs == NS_FFF)
12279 do_vfp_nsyn_opcode ("fmuls");
12280 else
12281 do_vfp_nsyn_opcode ("fmuld");
12282 }
12283
12284 static void
12285 do_vfp_nsyn_abs_neg (enum neon_shape rs)
12286 {
12287 int is_neg = (inst.instruction & 0x80) != 0;
12288 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
12289
12290 if (rs == NS_FF)
12291 {
12292 if (is_neg)
12293 do_vfp_nsyn_opcode ("fnegs");
12294 else
12295 do_vfp_nsyn_opcode ("fabss");
12296 }
12297 else
12298 {
12299 if (is_neg)
12300 do_vfp_nsyn_opcode ("fnegd");
12301 else
12302 do_vfp_nsyn_opcode ("fabsd");
12303 }
12304 }
12305
12306 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
12307 insns belong to Neon, and are handled elsewhere. */
12308
12309 static void
12310 do_vfp_nsyn_ldm_stm (int is_dbmode)
12311 {
12312 int is_ldm = (inst.instruction & (1 << 20)) != 0;
12313 if (is_ldm)
12314 {
12315 if (is_dbmode)
12316 do_vfp_nsyn_opcode ("fldmdbs");
12317 else
12318 do_vfp_nsyn_opcode ("fldmias");
12319 }
12320 else
12321 {
12322 if (is_dbmode)
12323 do_vfp_nsyn_opcode ("fstmdbs");
12324 else
12325 do_vfp_nsyn_opcode ("fstmias");
12326 }
12327 }
12328
12329 static void
12330 do_vfp_nsyn_sqrt (void)
12331 {
12332 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
12333 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12334
12335 if (rs == NS_FF)
12336 do_vfp_nsyn_opcode ("fsqrts");
12337 else
12338 do_vfp_nsyn_opcode ("fsqrtd");
12339 }
12340
12341 static void
12342 do_vfp_nsyn_div (void)
12343 {
12344 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12345 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
12346 N_F32 | N_F64 | N_KEY | N_VFP);
12347
12348 if (rs == NS_FFF)
12349 do_vfp_nsyn_opcode ("fdivs");
12350 else
12351 do_vfp_nsyn_opcode ("fdivd");
12352 }
12353
/* Encode vnmul/vnmla/vnmls: pick the single- or double-precision base
   encoding from neon_enc_tab, fill in the registers, then add the
   condition field.  */
static void
do_vfp_nsyn_nmul (void)
{
  enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
		   N_F32 | N_F64 | N_KEY | N_VFP);

  if (rs == NS_FFF)
    {
      NEON_ENCODE (SINGLE, inst);
      do_vfp_sp_dyadic ();
    }
  else
    {
      NEON_ENCODE (DOUBLE, inst);
      do_vfp_dp_rd_rn_rm ();
    }
  do_vfp_cond_or_thumb ();
}
12373
/* Encode vcmp/vcmpe.  The second operand is either a register (ordinary
   compare) or an immediate, in which case the pseudo-opcode is rewritten
   to the corresponding compare-with-zero variant before encoding.  */
static void
do_vfp_nsyn_cmp (void)
{
  if (inst.operands[1].isreg)
    {
      /* Register-register compare.  */
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

      if (rs == NS_FF)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Immediate second operand: switch the pseudo-opcode to the "z"
	 (compare-with-zero) variant.  The additive rewrite relies on the
	 enumerators' relative ordering in enum neon_opc.  */
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();
}
12423
12424 static void
12425 nsyn_insert_sp (void)
12426 {
12427 inst.operands[1] = inst.operands[0];
12428 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
12429 inst.operands[0].reg = REG_SP;
12430 inst.operands[0].isreg = 1;
12431 inst.operands[0].writeback = 1;
12432 inst.operands[0].present = 1;
12433 }
12434
12435 static void
12436 do_vfp_nsyn_push (void)
12437 {
12438 nsyn_insert_sp ();
12439 if (inst.operands[1].issingle)
12440 do_vfp_nsyn_opcode ("fstmdbs");
12441 else
12442 do_vfp_nsyn_opcode ("fstmdbd");
12443 }
12444
12445 static void
12446 do_vfp_nsyn_pop (void)
12447 {
12448 nsyn_insert_sp ();
12449 if (inst.operands[1].issingle)
12450 do_vfp_nsyn_opcode ("fldmias");
12451 else
12452 do_vfp_nsyn_opcode ("fldmiad");
12453 }
12454
12455 /* Fix up Neon data-processing instructions, ORing in the correct bits for
12456 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
12457
12458 static void
12459 neon_dp_fixup (struct arm_it* insn)
12460 {
12461 unsigned int i = insn->instruction;
12462 insn->is_neon = 1;
12463
12464 if (thumb_mode)
12465 {
12466 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
12467 if (i & (1 << 24))
12468 i |= 1 << 28;
12469
12470 i &= ~(1 << 24);
12471
12472 i |= 0xef000000;
12473 }
12474 else
12475 i |= 0xf2000000;
12476
12477 insn->instruction = i;
12478 }
12479
12480 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
12481 (0, 1, 2, 3). */
12482
/* Turn an element size in bits (8, 16, 32, 64) into the two-bit size code
   used by Neon encodings (0, 1, 2, 3): ffs gives the 1-based index of the
   set bit, so subtracting 4 maps 8 -> 0, 16 -> 1, etc.  */
static unsigned
neon_logbits (unsigned x)
{
  return (unsigned) (ffs (x) - 4);
}
12488
/* Split a 5-bit Neon register number into the low four bits and the high
   (fifth) bit, which live in separate instruction fields.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
12491
12492 /* Encode insns with bit pattern:
12493
12494 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12495 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
12496
12497 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
12498 different meaning for some instruction. */
12499
12500 static void
12501 neon_three_same (int isquad, int ubit, int size)
12502 {
12503 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12504 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12505 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12506 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12507 inst.instruction |= LOW4 (inst.operands[2].reg);
12508 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12509 inst.instruction |= (isquad != 0) << 6;
12510 inst.instruction |= (ubit != 0) << 24;
12511 if (size != -1)
12512 inst.instruction |= neon_logbits (size) << 20;
12513
12514 neon_dp_fixup (&inst);
12515 }
12516
12517 /* Encode instructions of the form:
12518
12519 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
12520 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
12521
12522 Don't write size if SIZE == -1. */
12523
12524 static void
12525 neon_two_same (int qbit, int ubit, int size)
12526 {
12527 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12528 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12529 inst.instruction |= LOW4 (inst.operands[1].reg);
12530 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12531 inst.instruction |= (qbit != 0) << 6;
12532 inst.instruction |= (ubit != 0) << 24;
12533
12534 if (size != -1)
12535 inst.instruction |= neon_logbits (size) << 18;
12536
12537 neon_dp_fixup (&inst);
12538 }
12539
12540 /* Neon instruction encoders, in approximate order of appearance. */
12541
/* Dyadic operation on signed or unsigned integer elements of 8, 16 or
   32 bits (N_SU_32); the U bit carries the signedness.  */

static void
do_neon_dyadic_i_su (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_32 | N_KEY);
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}
12550
/* As do_neon_dyadic_i_su, but allowing any element size up to 64 bits
   (N_SU_ALL).  */

static void
do_neon_dyadic_i64_su (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}
12559
12560 static void
12561 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
12562 unsigned immbits)
12563 {
12564 unsigned size = et.size >> 3;
12565 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12566 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12567 inst.instruction |= LOW4 (inst.operands[1].reg);
12568 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12569 inst.instruction |= (isquad != 0) << 6;
12570 inst.instruction |= immbits << 16;
12571 inst.instruction |= (size >> 3) << 7;
12572 inst.instruction |= (size & 0x7) << 19;
12573 if (write_ubit)
12574 inst.instruction |= (uval != 0) << 24;
12575
12576 neon_dp_fixup (&inst);
12577 }
12578
/* VSHL/VQSHL: either an immediate shift (Dd, Dm, #imm) or the
   three-register form, which needs its source operands swapped.  */

static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed).  Swap operands[1].reg and
	 operands[2].reg here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
12610
/* Immediate or three-register shift with the same operand-order swap
   as do_neon_shl_imm; the immediate form writes the U bit from the
   element signedness.  */

static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);

      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		      inst.operands[2].imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
12638
/* Three-register shift; the assembly operand order is reversed
   relative to what neon_three_same expects (see the note in
   do_neon_shl_imm), so swap the two source registers first.  */

static void
do_neon_rshl (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
  unsigned int tmp;

  tmp = inst.operands[2].reg;
  inst.operands[2].reg = inst.operands[1].reg;
  inst.operands[1].reg = tmp;
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}
12652
/* Choose the "cmode" encoding for a bitwise-immediate instruction
   (VBIC/VORR and their pseudos) with element size SIZE bits, storing
   the 8-bit payload in *IMMBITS.  Returns the cmode value, or FAIL
   (via first_error) if IMMEDIATE is not representable.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics?  There doesn't seem much point
	 in accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* A 32-bit immediate is encodable when exactly one byte is
	 nonzero; cmode selects which byte position it occupies.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Fall back to a 16-bit pattern if both half-words repeat.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit element: one nonzero byte in either half-word position.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
12708
12709 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
12710 A, B, C, D. */
12711
static int
neon_bits_same_in_bytes (unsigned imm)
{
  int shift;

  /* Every byte of IMM must be either all-zeros or all-ones.  */
  for (shift = 0; shift < 32; shift += 8)
    {
      unsigned byte = (imm >> shift) & 0xff;

      if (byte != 0x00 && byte != 0xff)
	return 0;
    }

  return 1;
}
12720
12721 /* For immediate of above form, return 0bABCD. */
12722
static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byteno;

  /* Take bit 0 of each byte; byte N supplies result bit N.  */
  for (byteno = 0; byteno < 4; byteno++)
    result |= ((imm >> (byteno * 8)) & 1) << byteno;

  return result;
}
12729
12730 /* Compress quarter-float representation to 0b...000 abcdefgh. */
12731
static unsigned
neon_qfloat_bits (unsigned imm)
{
  /* Bits 25-19 of the single-precision pattern become the low seven
     result bits; the sign (bit 31) lands in result bit 7.  */
  unsigned low7 = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return sign | low7;
}
12737
12738 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
12739 the instruction. *OP is passed as the initial value of the op field, and
12740 may be set to a different value depending on the constant (i.e.
12741 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
12742 MVN). If the immediate looks like a repeated pattern then also
12743 try smaller element sizes. */
12744
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      /* Quarter-float immediates need 32-bit elements and cannot be
	 combined with a pre-set OP (MVN).  */
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  /* The per-byte 0x00/0xff I64 form always uses OP = 1, even
	     for MOV (see the comment above this function), so a
	     caller-set OP is rejected.  */
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit immediate is only encodable when the two
	 32-bit halves are identical, so that the 32-bit cases below
	 apply.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* Single nonzero byte, in any of the four positions.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* "Byte then ones" forms: 0x0000XXff and 0x00XXffff.  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Retry as a 16-bit pattern when both half-words repeat.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* Single nonzero byte in either half-word position.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Retry as an 8-bit pattern when both bytes repeat.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
12847
12848 /* Write immediate bits [7:0] to the following locations:
12849
12850 |28/24|23 19|18 16|15 4|3 0|
12851 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
12852
12853 This function is used by VMOV/VMVN/VORR/VBIC. */
12854
12855 static void
12856 neon_write_immbits (unsigned immbits)
12857 {
12858 inst.instruction |= immbits & 0xf;
12859 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
12860 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
12861 }
12862
12863 /* Invert low-order SIZE bits of XHI:XLO. */
12864
static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  /* A NULL pointer means "no value for this half".  */
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 64:
      hi = ~hi & 0xffffffff;
      /* Fall through: a 64-bit invert covers the low word too.  */

    case 32:
      lo = ~lo & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
12899
/* Bitwise logic instructions: either a plain three-register form with
   types ignored, or a register + immediate form in which the VAND and
   VORN pseudos are assembled as VBIC/VORR with the immediate
   bitwise-inverted.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      /* The condition/mask bits are stripped to recover the mnemonic.  */
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[1].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[1].regisimm ?
			  inst.operands[1].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      /* neon_cmode_for_logic_imm has already issued the diagnostic.  */
      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
12976
/* Three-register op whose element types are only checked for
   consistency (N_IGNORE_TYPE); U bit and size come from the opcode
   bitmask.  */

static void
do_neon_bitfield (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_IGNORE_TYPE);
  neon_three_same (neon_quad (rs), 0, -1);
}
12984
/* Dyadic op on elements drawn from TYPES; the destination type may
   additionally carry DESTBITS.  Float element types take the float
   encoding; otherwise the U bit is set when the element type equals
   UBIT_MEANING.  */

static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
		  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
					    types | N_KEY);
  if (et.type == NT_float)
    {
      NEON_ENCODE (FLOAT, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}
13003
/* Dyadic op on signed/unsigned integer or float elements up to 32 bits
   (N_SUF_32); unsigned elements set the U bit.  */

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
13009
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allows D registers, but that constraint is enforced
     during operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
13017
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
13025
/* Flag bits for vfp_or_neon_is_neon, selecting which checks/fixups to
   perform.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Validate/patch the condition field.  */
  NEON_CHECK_ARCH = 2	/* Require the Neon extension in cpu_variant.  */
};
13031
13032 /* Call this function if an instruction which may have belonged to the VFP or
13033 Neon instruction sets, but turned out to be a Neon instruction (due to the
13034 operand types involved, etc.). We have to check and/or fix-up a couple of
13035 things:
13036
13037 - Make sure the user hasn't attempted to make a Neon instruction
13038 conditional.
13039 - Alter the value in the condition code field if necessary.
13040 - Make sure that the arch supports Neon instructions.
13041
13042 Which of these operations take place depends on bits from enum
13043 vfp_or_neon_is_neon_bits.
13044
13045 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
13046 current instruction's condition is COND_ALWAYS, the condition field is
13047 changed to inst.uncond_value. This is necessary because instructions shared
13048 between VFP and Neon may be conditional for the VFP variants only, and the
13049 unconditional Neon version must have, e.g., 0xF in the condition field. */
13050
static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      /* Force the unconditional encoding's condition field (see the
	 comment above this function).  */
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }

  if ((check & NEON_CHECK_ARCH)
      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
13075
/* Add/subtract: use the VFP encoding when the operands suit it,
   otherwise the Neon integer (8-64 bit) or float form.  */

static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
13089
13090 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
13091 result to be:
13092 V<op> A,B (A is operand 0, B is operand 2)
13093 to mean:
13094 V<op> A,B,A
13095 not:
13096 V<op> A,B,B
13097 so handle that case specially. */
13098
13099 static void
13100 neon_exchange_operands (void)
13101 {
13102 void *scratch = alloca (sizeof (inst.operands[0]));
13103 if (inst.operands[1].present)
13104 {
13105 /* Swap operands[1] and operands[2]. */
13106 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
13107 inst.operands[1] = inst.operands[2];
13108 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
13109 }
13110 else
13111 {
13112 inst.operands[1] = inst.operands[2];
13113 inst.operands[2] = inst.operands[0];
13114 }
13115 }
13116
/* Encode a Neon comparison.  The register-register form goes through
   neon_dyadic_misc, exchanging the operands first when INVERT is set;
   the form with an immediate third operand uses its own encoding and
   accepts element types in IMMTYPES.  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Bit 10 selects the float variant.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
13144
/* Comparison: register form takes S/U/F types up to 32 bits, immediate
   form signed integer or F32 only; operands kept in source order.  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}
13150
/* As do_neon_cmp, but the register form exchanges its source operands
   first (INVERT == TRUE).  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}
13156
/* Equality comparison: integer or float types (N_IF_32) for both the
   register and immediate forms, no operand exchange.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
13162
13163 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
13164 scalars, which are encoded in 5 bits, M : Rm.
13165 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
13166 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
13167 index in M. */
13168
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Anything else (including other element sizes) is unencodable.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
13194
13195 /* Encode multiply / multiply-accumulate scalar instructions. */
13196
13197 static void
13198 neon_mul_mac (struct neon_type_el et, int ubit)
13199 {
13200 unsigned scalar;
13201
13202 /* Give a more helpful error message if we have an invalid type. */
13203 if (et.type == NT_invtype)
13204 return;
13205
13206 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
13207 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13208 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13209 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13210 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13211 inst.instruction |= LOW4 (scalar);
13212 inst.instruction |= HI1 (scalar) << 5;
13213 inst.instruction |= (et.type == NT_float) << 8;
13214 inst.instruction |= neon_logbits (et.size) << 20;
13215 inst.instruction |= (ubit != 0) << 24;
13216
13217 neon_dp_fixup (&inst);
13218 }
13219
/* Multiply-accumulate, possibly by scalar: try the VFP encoding first,
   then either the scalar variant (I16/I32/F32 elements) or the plain
   three-register form.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
13244
/* Fused multiply-accumulate: prefer the VFP encoding; otherwise emit
   the Neon form on integer/float elements up to 32 bits.  */

static void
do_neon_fmac (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
13256
/* Three-register op on untyped 8/16/32-bit elements; U bit clear, size
   field from the element type.  */

static void
do_neon_tst (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size);
}
13265
13266 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
13267 same types as the MAC equivalents. The polynomial type for this instruction
13268 is encoded the same as the integer type. */
13269
static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    /* The scalar variant shares its encoding path with the MAC
       scalars (see the comment above this function).  */
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
}
13284
/* Multiply-type op restricted to S16/S32 elements: either the scalar
   variant or the three-register form.  */

static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
13306
/* Three-register F32-only op with the U bit set; the size field comes
   from the opcode bit mask.  */

static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, -1);
}
13315
/* As do_neon_fcmp_absolute, but with source operands exchanged.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
13322
/* Three-register F32-only op; U bit clear, size field from the opcode
   bit mask.  */

static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, -1);
}
13330
13331 static void
13332 do_neon_abs_neg (void)
13333 {
13334 enum neon_shape rs;
13335 struct neon_type_el et;
13336
13337 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
13338 return;
13339
13340 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13341 return;
13342
13343 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13344 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
13345
13346 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13347 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13348 inst.instruction |= LOW4 (inst.operands[1].reg);
13349 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13350 inst.instruction |= neon_quad (rs) << 6;
13351 inst.instruction |= (et.type == NT_float) << 10;
13352 inst.instruction |= neon_logbits (et.size) << 18;
13353
13354 neon_dp_fixup (&inst);
13355 }
13356
/* Shift-and-insert with a left shift amount: valid range is
   0 <= imm < element size.  */

static void
do_neon_sli (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
13368
/* Shift-and-insert with a right shift amount: valid range is
   1 <= imm <= element size, and the field is encoded as size - imm.  */

static void
do_neon_sri (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
13380
/* Immediate shift of signed elements whose destination type is
   unsigned (N_EQK | N_UNS); shift range 0 <= imm < element size.  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
13397
static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  NEON_ENCODE (INTEGER, inst);
  /* Bits 7-6 select the signedness variant.  */
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  /* et.size is the source width; the destination is half as wide.  */
  neon_two_same (0, 1, et.size / 2);
}
13412
static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.  Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  /* et.size is the source width; the destination is half as wide.  */
  neon_two_same (0, 1, et.size / 2);
}
13422
static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* Right shifts are encoded as size - imm.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
13449
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
13479
/* Narrowing move: the destination elements are half the width of the
   source elements (N_HLF), integer types 16/32/64 only.  */

static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
13488
/* Narrowing right shift; a zero shift amount degenerates to the
   narrowing move pseudo-instruction.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  /* Right shifts are encoded as size - imm.  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
13513
/* Lengthening left shift: the maximum-shift case (imm == element
   size) has a dedicated encoding; other amounts go through the
   regular immediate-shift path with a stricter type check.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
13543
13544 /* Check the various types for the VCVT instruction, and return which version
13545 the current instruction is. */
13546
static int
neon_cvt_flavour (enum neon_shape rs)
{
  /* Try each conversion signature in turn; the first whose types match
     the current instruction clears any pending type error and returns
     its index C.  */
#define CVT_VAR(C,X,Y) \
  et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
  if (et.type != NT_invtype) \
    { \
      inst.error = NULL; \
      return (C); \
    }
  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_VAR (0, N_S32, N_F32);
  CVT_VAR (1, N_U32, N_F32);
  CVT_VAR (2, N_F32, N_S32);
  CVT_VAR (3, N_F32, N_U32);
  /* Half-precision conversions.  */
  CVT_VAR (4, N_F32, N_F16);
  CVT_VAR (5, N_F16, N_F32);

  whole_reg = N_VFP;

  /* VFP instructions.  */
  CVT_VAR (6, N_F32, N_F64);
  CVT_VAR (7, N_F64, N_F32);
  CVT_VAR (8, N_S32, N_F64 | key);
  CVT_VAR (9, N_U32, N_F64 | key);
  CVT_VAR (10, N_F64 | key, N_S32);
  CVT_VAR (11, N_F64 | key, N_U32);
  /* VFP instructions with bitshift.  */
  CVT_VAR (12, N_F32 | key, N_S16);
  CVT_VAR (13, N_F32 | key, N_U16);
  CVT_VAR (14, N_F64 | key, N_S16);
  CVT_VAR (15, N_F64 | key, N_U16);
  CVT_VAR (16, N_S16, N_F32 | key);
  CVT_VAR (17, N_U16, N_F32 | key);
  CVT_VAR (18, N_S16, N_F64 | key);
  CVT_VAR (19, N_U16, N_F64 | key);

  /* No signature matched.  */
  return -1;
#undef CVT_VAR
}
13596
13597 /* Neon-syntax VFP conversions. */
13598
13599 static void
13600 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
13601 {
13602 const char *opname = 0;
13603
13604 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
13605 {
13606 /* Conversions with immediate bitshift. */
13607 const char *enc[] =
13608 {
13609 "ftosls",
13610 "ftouls",
13611 "fsltos",
13612 "fultos",
13613 NULL,
13614 NULL,
13615 NULL,
13616 NULL,
13617 "ftosld",
13618 "ftould",
13619 "fsltod",
13620 "fultod",
13621 "fshtos",
13622 "fuhtos",
13623 "fshtod",
13624 "fuhtod",
13625 "ftoshs",
13626 "ftouhs",
13627 "ftoshd",
13628 "ftouhd"
13629 };
13630
13631 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
13632 {
13633 opname = enc[flavour];
13634 constraint (inst.operands[0].reg != inst.operands[1].reg,
13635 _("operands 0 and 1 must be the same register"));
13636 inst.operands[1] = inst.operands[2];
13637 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
13638 }
13639 }
13640 else
13641 {
13642 /* Conversions without bitshift. */
13643 const char *enc[] =
13644 {
13645 "ftosis",
13646 "ftouis",
13647 "fsitos",
13648 "fuitos",
13649 "NULL",
13650 "NULL",
13651 "fcvtsd",
13652 "fcvtds",
13653 "ftosid",
13654 "ftouid",
13655 "fsitod",
13656 "fuitod"
13657 };
13658
13659 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
13660 opname = enc[flavour];
13661 }
13662
13663 if (opname)
13664 do_vfp_nsyn_opcode (opname);
13665 }
13666
static void
do_vfp_nsyn_cvtz (void)
{
  /* Encode VCVT with round-towards-zero ("z") semantics, VFP syntax.
     Only the float-to-integer flavours (0, 1, 8, 9) have "z" forms;
     other flavours leave the table entry NULL and encode nothing.  */
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
  int flavour = neon_cvt_flavour (rs);
  const char *enc[] =
    {
      "ftosizs",
      "ftouizs",
      NULL,
      NULL,
      NULL,
      NULL,
      NULL,
      NULL,
      "ftosizd",
      "ftouizd"
    };

  if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
13689
static void
do_neon_cvt (void)
{
  /* VCVT: dispatch between Neon fixed-point conversions (DDI/QQI shapes),
     Neon integer conversions (DD/QQ), Advanced SIMD half-precision
     conversions (QD/DQ), and plain VFP conversions (everything else,
     delegated to do_vfp_nsyn_cvt).  */
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
    NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
  int flavour = neon_cvt_flavour (rs);

  /* VFP rather than Neon conversions.  */
  if (flavour >= 6)
    {
      do_vfp_nsyn_cvt (rs, flavour);
      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	unsigned immbits;
	/* Per-flavour opcode bits for the fixed-point forms (indices 0-3:
	   s32<-f32, u32<-f32, f32<-s32, f32<-u32).  */
	unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	/* The immediate field holds 32 minus the number of fraction bits.  */
	immbits = 32 - inst.operands[2].imm;
	NEON_ENCODE (IMMED, inst);
	if (flavour != -1)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	inst.instruction |= immbits << 16;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
    int_encode:
      {
	/* Per-flavour opcode bits for the integer forms.  */
	unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };

	NEON_ENCODE (INTEGER, inst);

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	if (flavour != -1)
	  inst.instruction |= enctab[flavour];

	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 2 << 18;

	neon_dp_fixup (&inst);
      }
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      /* f16 elements must be written as 16-bit, f32 as 32-bit.  */
      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      do_vfp_nsyn_cvt (rs, flavour);
    }
}
13795
static void
do_neon_cvtb (void)
{
  /* VCVTB: half-precision <-> single-precision conversion using the
     bottom half of the half-precision register.  Bit 16 is set when the
     half-precision operand is the source (f16 size given).  */
  inst.instruction = 0xeb20a40;

  /* The sizes are attached to the mnemonic.  */
  if (inst.vectype.el[0].type != NT_invtype
      && inst.vectype.el[0].size == 16)
    inst.instruction |= 0x00010000;

  /* Programmer's syntax: the sizes are attached to the operands.  */
  else if (inst.operands[0].vectype.type != NT_invtype
	   && inst.operands[0].vectype.size == 16)
    inst.instruction |= 0x00010000;

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
  do_vfp_cond_or_thumb ();
}
13815
13816
static void
do_neon_cvtt (void)
{
  /* VCVTT: identical to VCVTB except bit 7 selects the top half of the
     half-precision register.  */
  do_neon_cvtb ();
  inst.instruction |= 0x80;
}
13823
static void
neon_move_immediate (void)
{
  /* Encode the immediate forms of VMOV/VMVN: find a cmode/immediate
     encoding for the value, possibly by inverting it and flipping the
     instruction between VMOV and VMVN.  */
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  /* A 64-bit immediate arrives split: high 32 bits in the reg field.  */
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the OP bit with whatever neon_cmode_for_move_imm settled on.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
13875
13876 static void
13877 do_neon_mvn (void)
13878 {
13879 if (inst.operands[1].isreg)
13880 {
13881 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13882
13883 NEON_ENCODE (INTEGER, inst);
13884 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13885 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13886 inst.instruction |= LOW4 (inst.operands[1].reg);
13887 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13888 inst.instruction |= neon_quad (rs) << 6;
13889 }
13890 else
13891 {
13892 NEON_ENCODE (IMMED, inst);
13893 neon_move_immediate ();
13894 }
13895
13896 neon_dp_fixup (&inst);
13897 }
13898
13899 /* Encode instructions of form:
13900
13901 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
13902 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
13903
static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  /* Shared encoder for mixed-length (long/wide/narrow) operations using
     the bit layout shown above: three registers, the U bit set for
     unsigned types, and the element size encoded via neon_logbits.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
13918
13919 static void
13920 do_neon_dyadic_long (void)
13921 {
13922 /* FIXME: Type checking for lengthening op. */
13923 struct neon_type_el et = neon_check_type (3, NS_QDD,
13924 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
13925 neon_mixed_length (et, et.size);
13926 }
13927
13928 static void
13929 do_neon_abal (void)
13930 {
13931 struct neon_type_el et = neon_check_type (3, NS_QDD,
13932 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
13933 neon_mixed_length (et, et.size);
13934 }
13935
13936 static void
13937 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
13938 {
13939 if (inst.operands[2].isscalar)
13940 {
13941 struct neon_type_el et = neon_check_type (3, NS_QDS,
13942 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
13943 NEON_ENCODE (SCALAR, inst);
13944 neon_mul_mac (et, et.type == NT_unsigned);
13945 }
13946 else
13947 {
13948 struct neon_type_el et = neon_check_type (3, NS_QDD,
13949 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
13950 NEON_ENCODE (INTEGER, inst);
13951 neon_mixed_length (et, et.size);
13952 }
13953 }
13954
static void
do_neon_mac_maybe_scalar_long (void)
{
  /* VMLAL/VMLSL-style operations: scalar form restricted to 16/32-bit
     element types, register form to any 32-bit-or-narrower int type.  */
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
13960
13961 static void
13962 do_neon_dyadic_wide (void)
13963 {
13964 struct neon_type_el et = neon_check_type (3, NS_QQD,
13965 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
13966 neon_mixed_length (et, et.size);
13967 }
13968
13969 static void
13970 do_neon_dyadic_narrow (void)
13971 {
13972 struct neon_type_el et = neon_check_type (3, NS_QDD,
13973 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
13974 /* Operand sign is unimportant, and the U bit is part of the opcode,
13975 so force the operand type to integer. */
13976 et.type = NT_integer;
13977 neon_mixed_length (et, et.size / 2);
13978 }
13979
static void
do_neon_mul_sat_scalar_long (void)
{
  /* VQDMULL-style saturating long multiplies: signed 16/32-bit element
     types only, for both scalar and register forms.  */
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
13985
static void
do_neon_vmull (void)
{
  /* VMULL: scalar form goes through the shared MAC encoder; the register
     form additionally accepts the polynomial (p8) type.  */
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);
      /* For polynomial encoding, size field must be 0b00 and the U bit must be
	 zero. Should be OK as-is.  */
      neon_mixed_length (et, et.size);
    }
}
14004
static void
do_neon_ext (void)
{
  /* VEXT: extract a vector from a pair of vectors.  The immediate is an
     element index; it is scaled to a byte offset for the encoding and
     must stay inside the (8- or 16-byte) register.  */
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
14026
14027 static void
14028 do_neon_rev (void)
14029 {
14030 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14031 struct neon_type_el et = neon_check_type (2, rs,
14032 N_EQK, N_8 | N_16 | N_32 | N_KEY);
14033 unsigned op = (inst.instruction >> 7) & 3;
14034 /* N (width of reversed regions) is encoded as part of the bitmask. We
14035 extract it here to check the elements to be reversed are smaller.
14036 Otherwise we'd get a reserved instruction. */
14037 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
14038 gas_assert (elsize != 0);
14039 constraint (et.size >= elsize,
14040 _("elements must be smaller than reversal region"));
14041 neon_two_same (neon_quad (rs), 1, et.size);
14042 }
14043
static void
do_neon_dup (void)
{
  /* VDUP: duplicate either a vector scalar or an ARM core register to
     every lane of a Neon vector.  */
  if (inst.operands[1].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Scalar index, shifted so it sits above the size bits in imm4.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* The element size is spread across the B (bit 22) and E (bit 5)
	 fields of the encoding.  */
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
14094
14095 /* VMOV has particularly many variations. It can be one of:
14096 0. VMOV<c><q> <Qd>, <Qm>
14097 1. VMOV<c><q> <Dd>, <Dm>
14098 (Register operations, which are VORR with Rm = Rn.)
14099 2. VMOV<c><q>.<dt> <Qd>, #<imm>
14100 3. VMOV<c><q>.<dt> <Dd>, #<imm>
14101 (Immediate loads.)
14102 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
14103 (ARM register to scalar.)
14104 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
14105 (Two ARM registers to vector.)
14106 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
14107 (Scalar to ARM register.)
14108 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
14109 (Vector to two ARM registers.)
14110 8. VMOV.F32 <Sd>, <Sm>
14111 9. VMOV.F64 <Dd>, <Dm>
14112 (VFP register moves.)
14113 10. VMOV.F32 <Sd>, #imm
14114 11. VMOV.F64 <Dd>, #imm
14115 (VFP float immediate load.)
14116 12. VMOV <Rd>, <Sm>
14117 (VFP single to ARM reg.)
14118 13. VMOV <Sd>, <Rm>
14119 (ARM reg to VFP single.)
14120 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
14121 (Two ARM regs to two VFP singles.)
14122 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
14123 (Two VFP singles to two ARM regs.)
14124
14125 These cases can be disambiguated using neon_select_shape, except cases 1/9
14126 and 3/11 which depend on the operand type too.
14127
14128 All the encoded bits are hardcoded by this function.
14129
14130 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
14131 Cases 5, 7 may be used with VFPv2 and above.
14132
14133 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
14134 can specify a type where it doesn't make sense to, and is ignored). */
14135
static void
do_neon_mov (void)
{
  /* Disambiguate the many VMOV variants by operand shape (see the big
     comment above for the case numbering) and dispatch to the right
     encoder.  Cases 1/9 and 3/11 share a shape and are split on type.  */
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      /* An explicit f64 type selects the VFP double-copy form (case 9).  */
      if (et.type == NT_float && et.size == 64)
	{
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	/* VORR with Rm == Rn: the source register is encoded twice.  */
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Element size selector, combined below with the scalar index
	   to form the opc1/opc2 fields.  */
	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Element size and signedness selector; the U bit (0x10) marks
	   the unsigned narrow transfers.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      /* Only immediates representable in the 8-bit VFP format accepted
	 by is_quarter_float can be encoded.  */
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four. Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    default:
      abort ();
    }
}
14345
static void
do_neon_rshift_round_imm (void)
{
  /* V{R}SHR: right shift by immediate.  The encoded shift amount is
     et.size - imm (the field counts down from the element size).  */
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
14366
14367 static void
14368 do_neon_movl (void)
14369 {
14370 struct neon_type_el et = neon_check_type (2, NS_QD,
14371 N_EQK | N_DBL, N_SU_32 | N_KEY);
14372 unsigned sizebits = et.size >> 3;
14373 inst.instruction |= sizebits << 19;
14374 neon_two_same (0, et.type == NT_unsigned, -1);
14375 }
14376
static void
do_neon_trn (void)
{
  /* VTRN: transpose elements of two vectors.  */
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (neon_quad (rs), 1, et.size);
}
14386
static void
do_neon_zip_uzp (void)
{
  /* VZIP/VUZP: interleave / de-interleave elements of two vectors.  */
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
14402
14403 static void
14404 do_neon_sat_abs_neg (void)
14405 {
14406 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14407 struct neon_type_el et = neon_check_type (2, rs,
14408 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
14409 neon_two_same (neon_quad (rs), 1, et.size);
14410 }
14411
static void
do_neon_pair_long (void)
{
  /* VPADDL/VPADAL: pairwise add (and accumulate), long form.  */
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}
14421
14422 static void
14423 do_neon_recip_est (void)
14424 {
14425 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14426 struct neon_type_el et = neon_check_type (2, rs,
14427 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
14428 inst.instruction |= (et.type == NT_float) << 8;
14429 neon_two_same (neon_quad (rs), 1, et.size);
14430 }
14431
14432 static void
14433 do_neon_cls (void)
14434 {
14435 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14436 struct neon_type_el et = neon_check_type (2, rs,
14437 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
14438 neon_two_same (neon_quad (rs), 1, et.size);
14439 }
14440
14441 static void
14442 do_neon_clz (void)
14443 {
14444 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14445 struct neon_type_el et = neon_check_type (2, rs,
14446 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
14447 neon_two_same (neon_quad (rs), 1, et.size);
14448 }
14449
14450 static void
14451 do_neon_cnt (void)
14452 {
14453 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14454 struct neon_type_el et = neon_check_type (2, rs,
14455 N_EQK | N_INT, N_8 | N_KEY);
14456 neon_two_same (neon_quad (rs), 1, et.size);
14457 }
14458
14459 static void
14460 do_neon_swp (void)
14461 {
14462 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14463 neon_two_same (neon_quad (rs), 1, -1);
14464 }
14465
static void
do_neon_tbl_tbx (void)
{
  /* VTBL/VTBX: table lookup using a list of 1-4 D registers.  The list
     length minus one is encoded in bits 9:8.  */
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
14489
static void
do_neon_ldm_stm (void)
{
  /* VLDM/VSTM with a double-precision register list; single-precision
     lists are delegated to the VFP encoder.  */
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words in the offset field.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
14519
14520 static void
14521 do_neon_ldr_str (void)
14522 {
14523 int is_ldr = (inst.instruction & (1 << 20)) != 0;
14524
14525 if (inst.operands[0].issingle)
14526 {
14527 if (is_ldr)
14528 do_vfp_nsyn_opcode ("flds");
14529 else
14530 do_vfp_nsyn_opcode ("fsts");
14531 }
14532 else
14533 {
14534 if (is_ldr)
14535 do_vfp_nsyn_opcode ("fldd");
14536 else
14537 do_vfp_nsyn_opcode ("fstd");
14538 }
14539 }
14540
14541 /* "interleave" version also handles non-interleaving register VLD1/VST1
14542 instructions. */
14543
static void
do_neon_ld_st_interleave (void)
{
  /* Encode the interleaving VLD<n>/VST<n> forms (and the plain register
     VLD1/VST1 forms): validate the alignment specifier and rewrite the
     "type" field from the register-list style.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* The alignment, parsed into bits 8 and up of the imm, maps onto the
     two-bit align field; 128- and 256-bit alignment is unavailable for
     three-register lists.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
14606
14607 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
14608 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
14609 otherwise. The variable arguments are a list of pairs of legal (size, align)
14610 values, terminated with -1. */
14611
14612 static int
14613 neon_alignment_bit (int size, int align, int *do_align, ...)
14614 {
14615 va_list ap;
14616 int result = FAIL, thissize, thisalign;
14617
14618 if (!inst.operands[1].immisalign)
14619 {
14620 *do_align = 0;
14621 return SUCCESS;
14622 }
14623
14624 va_start (ap, do_align);
14625
14626 do
14627 {
14628 thissize = va_arg (ap, int);
14629 if (thissize == -1)
14630 break;
14631 thisalign = va_arg (ap, int);
14632
14633 if (size == thissize && align == thisalign)
14634 result = SUCCESS;
14635 }
14636 while (result != SUCCESS);
14637
14638 va_end (ap);
14639
14640 if (result == SUCCESS)
14641 *do_align = 1;
14642 else
14643 first_error (_("unsupported alignment for instruction"));
14644
14645 return result;
14646 }
14647
/* Encode single-lane VLD<n>/VST<n> instructions: one element of each
   register in the list is transferred.  Validates list length, lane
   index and register stride against the element size, then encodes the
   alignment, stride, lane and size fields.  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;	/* Alignment in bits, or 0.  */
  int n = (inst.instruction >> 8) & 3;		/* <n> minus one, from bitmask.  */
  int max_el = 64 / et.size;			/* Lanes per D register.  */

  if (et.type == NT_invtype)
    return;

  /* The list needs one register per structure element; the lane must fit
     in a D register; and stride 2 does not exist for 8-bit elements.  */
  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* The encoding of the alignment bits in [5:4] differs for each <n>;
     the (size, align) pairs given to neon_alignment_bit are the only
     combinations the architecture permits.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      /* Three-element structures never take an alignment specifier.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
14732
/* Encode single n-element structure to all lanes VLD<n> instructions.
   Bits [9:8] of the initial bitmask hold <n> minus one; for each case
   the legal (size, align) pairs are checked and the list length, stride
   (T bit, bit 5), size field (bits [7:6]) and alignment bit (bit 4)
   are encoded.  */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      /* Stride 2 is meaningless for a single-element structure.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* Bit 5 selects a two-register list.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      /* VLD3 to all lanes never takes an alignment specifier.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use the special size
	   field encoding 0x3.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_align << 4;
}
14806
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits
   (those apart from bits [11:4]).  */

static void
do_neon_ldx_stx (void)
{
  if (inst.operands[1].isreg)
    constraint (inst.operands[1].reg == REG_PC, BAD_PC);

  /* The lane field of the register-list operand selects which of the
     three encodings (interleave, all-lanes dup, single lane) applies.  */
  switch (NEON_LANE (inst.operands[0].imm))
    {
    case NEON_INTERLEAVE_LANES:
      NEON_ENCODE (INTERLV, inst);
      do_neon_ld_st_interleave ();
      break;

    case NEON_ALL_LANES:
      NEON_ENCODE (DUP, inst);
      do_neon_ld_dup ();
      break;

    default:
      NEON_ENCODE (LANE, inst);
      do_neon_ld_st_lane ();
    }

  /* L bit comes from bit mask.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Rm field: a real register for register post-indexing, 0xd for
     writeback (post-increment by transfer size), 0xf for no writeback.  */
  if (inst.operands[1].postind)
    {
      int postreg = inst.operands[1].imm & 0xf;
      constraint (!inst.operands[1].immisreg,
		  _("post-index must be a register"));
      constraint (postreg == 0xd || postreg == 0xf,
		  _("bad register for post-index"));
      inst.instruction |= postreg;
    }
  else if (inst.operands[1].writeback)
    {
      inst.instruction |= 0xd;
    }
  else
    inst.instruction |= 0xf;

  /* The top byte differs between the Thumb and ARM encodings.  */
  if (thumb_mode)
    inst.instruction |= 0xf9000000;
  else
    inst.instruction |= 0xf4000000;
}
14859 \f
14860 /* Overall per-instruction processing. */
14861
14862 /* We need to be able to fix up arbitrary expressions in some statements.
14863 This is so that we can handle symbols that are an arbitrary distance from
14864 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
14865 which returns part of an address in a form which will be valid for
14866 a data instruction. We do this by pushing the expression into a symbol
14867 in the expr_section, and creating a fix for that. */
14868
14869 static void
14870 fix_new_arm (fragS * frag,
14871 int where,
14872 short int size,
14873 expressionS * exp,
14874 int pc_rel,
14875 int reloc)
14876 {
14877 fixS * new_fix;
14878
14879 switch (exp->X_op)
14880 {
14881 case O_constant:
14882 case O_symbol:
14883 case O_add:
14884 case O_subtract:
14885 new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
14886 (enum bfd_reloc_code_real) reloc);
14887 break;
14888
14889 default:
14890 new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
14891 pc_rel, (enum bfd_reloc_code_real) reloc);
14892 break;
14893 }
14894
14895 /* Mark whether the fix is to a THUMB instruction, or an ARM
14896 instruction. */
14897 new_fix->tc_fix_data = thumb_mode;
14898 }
14899
14900 /* Create a frg for an instruction requiring relaxation. */
14901 static void
14902 output_relax_insn (void)
14903 {
14904 char * to;
14905 symbolS *sym;
14906 int offset;
14907
14908 /* The size of the instruction is unknown, so tie the debug info to the
14909 start of the instruction. */
14910 dwarf2_emit_insn (0);
14911
14912 switch (inst.reloc.exp.X_op)
14913 {
14914 case O_symbol:
14915 sym = inst.reloc.exp.X_add_symbol;
14916 offset = inst.reloc.exp.X_add_number;
14917 break;
14918 case O_constant:
14919 sym = NULL;
14920 offset = inst.reloc.exp.X_add_number;
14921 break;
14922 default:
14923 sym = make_expr_symbol (&inst.reloc.exp);
14924 offset = 0;
14925 break;
14926 }
14927 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
14928 inst.relax, sym, offset, NULL/*offset, opcode*/);
14929 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
14930 }
14931
14932 /* Write a 32-bit thumb instruction to buf. */
14933 static void
14934 put_thumb32_insn (char * buf, unsigned long insn)
14935 {
14936 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
14937 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
14938 }
14939
/* Emit the instruction held in INST to the output frag, or report the
   recorded error.  STR is the original source line, used only in
   diagnostics.  Also creates any pending fixup and emits the matching
   DWARF line information.  */

static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      /* Relaxable instructions are emitted as variant frags instead.  */
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb: two halfwords, high halfword first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-length ARM instruction: the same word is emitted twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
14986
14987 static char *
14988 output_it_inst (int cond, int mask, char * to)
14989 {
14990 unsigned long instruction = 0xbf00;
14991
14992 mask &= 0xf;
14993 instruction |= mask;
14994 instruction |= cond << 4;
14995
14996 if (to == NULL)
14997 {
14998 to = frag_more (2);
14999 #ifdef OBJ_ELF
15000 dwarf2_emit_insn (2);
15001 #endif
15002 }
15003
15004 md_number_to_chars (to, instruction, 2);
15005
15006 return to;
15007 }
15008
/* Tag values used in struct asm_opcode's tag field.  They describe
   where, if anywhere, a conditional affix may appear in a mnemonic.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
15042
15043 /* Subroutine of md_assemble, responsible for looking up the primary
15044 opcode from the mnemonic the user wrote. STR points to the
15045 beginning of the mnemonic.
15046
15047 This is not simply a hash table lookup, because of conditional
15048 variants. Most instructions have conditional variants, which are
15049 expressed with a _conditional affix_ to the mnemonic. If we were
15050 to encode each conditional variant as a literal string in the opcode
15051 table, it would have approximately 20,000 entries.
15052
15053 Most mnemonics take this affix as a suffix, and in unified syntax,
15054 'most' is upgraded to 'all'. However, in the divided syntax, some
15055 instructions take the affix as an infix, notably the s-variants of
15056 the arithmetic instructions. Of those instructions, all but six
15057 have the infix appear after the third character of the mnemonic.
15058
15059 Accordingly, the algorithm for looking up primary opcodes given
15060 an identifier is:
15061
15062 1. Look up the identifier in the opcode table.
15063 If we find a match, go to step U.
15064
15065 2. Look up the last two characters of the identifier in the
15066 conditions table. If we find a match, look up the first N-2
15067 characters of the identifier in the opcode table. If we
15068 find a match, go to step CE.
15069
15070 3. Look up the fourth and fifth characters of the identifier in
15071 the conditions table. If we find a match, extract those
15072 characters from the identifier, and look up the remaining
15073 characters in the opcode table. If we find a match, go
15074 to step CM.
15075
15076 4. Fail.
15077
15078 U. Examine the tag field of the opcode structure, in case this is
15079 one of the six instructions with its conditional infix in an
15080 unusual place. If it is, the tag tells us where to find the
15081 infix; look it up in the conditions table and set inst.cond
15082 accordingly. Otherwise, this is an unconditional instruction.
15083 Again set inst.cond accordingly. Return the opcode structure.
15084
15085 CE. Examine the tag field to make sure this is an instruction that
15086 should receive a conditional suffix. If it is not, fail.
15087 Otherwise, set inst.cond from the suffix we already looked up,
15088 and return the opcode structure.
15089
15090 CM. Examine the tag field to make sure this is an instruction that
15091 should receive a conditional infix after the third character.
15092 If it is not, fail. Otherwise, undo the edits to the current
15093 line of input and proceed as for case CE. */
15094
/* Look up the primary opcode for the mnemonic at *STR, handling
   conditional suffixes and infixes.  See the large comment above for
   the full algorithm.  On success *STR is advanced past the mnemonic
   (and any width/Neon type suffix) and inst.cond/inst.size_req are
   set; returns NULL on failure.  */

static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_warn (_("conditional infixes are deprecated in unified syntax"));
      /* Tags >= OT_odd_infix_0 encode the position of the condition
	 infix within the mnemonic.  */
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than two
     characters: the minimum is a one-character base mnemonic plus the
     two-character condition, i.e. three characters in total.  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return 0;
	  /* else fall through */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily splice the two condition characters out of the input
     line, retry the opcode lookup on the shortened mnemonic, then
     restore the line to its original form.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_warn (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
15251
15252 /* This function generates an initial IT instruction, leaving its block
15253 virtually open for the new instructions. Eventually,
15254 the mask will be updated by now_it_add_mask () each time
15255 a new instruction needs to be included in the IT block.
15256 Finally, the block is closed with close_automatic_it_block ().
15257 The block closure can be requested either from md_assemble (),
15258 a tencode (), or due to a label hook. */
15259
15260 static void
15261 new_automatic_it_block (int cond)
15262 {
15263 now_it.state = AUTOMATIC_IT_BLOCK;
15264 now_it.mask = 0x18;
15265 now_it.cc = cond;
15266 now_it.block_length = 1;
15267 mapping_state (MAP_THUMB);
15268 now_it.insn = output_it_inst (cond, now_it.mask, NULL);
15269 }
15270
15271 /* Close an automatic IT block.
15272 See comments in new_automatic_it_block (). */
15273
15274 static void
15275 close_automatic_it_block (void)
15276 {
15277 now_it.mask = 0x10;
15278 now_it.block_length = 0;
15279 }
15280
15281 /* Update the mask of the current automatically-generated IT
15282 instruction. See comments in new_automatic_it_block (). */
15283
15284 static void
15285 now_it_add_mask (int cond)
15286 {
15287 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
15288 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
15289 | ((bitvalue) << (nbit)))
15290 const int resulting_bit = (cond & 1);
15291
15292 now_it.mask &= 0xf;
15293 now_it.mask = SET_BIT_VALUE (now_it.mask,
15294 resulting_bit,
15295 (5 - now_it.block_length));
15296 now_it.mask = SET_BIT_VALUE (now_it.mask,
15297 1,
15298 ((5 - now_it.block_length) - 1) );
15299 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
15300
15301 #undef CLEAR_BIT
15302 #undef SET_BIT_VALUE
15303 }
15304
/* The IT blocks handling machinery is accessed through these functions:
15306 it_fsm_pre_encode () from md_assemble ()
15307 set_it_insn_type () optional, from the tencode functions
15308 set_it_insn_type_last () ditto
15309 in_it_block () ditto
15310 it_fsm_post_encode () from md_assemble ()
     force_automatic_it_block_close () from label handling functions
15312
15313 Rationale:
15314 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
15315 initializing the IT insn type with a generic initial value depending
15316 on the inst.condition.
15317 2) During the tencode function, two things may happen:
15318 a) The tencode function overrides the IT insn type by
15319 calling either set_it_insn_type (type) or set_it_insn_type_last ().
15320 b) The tencode function queries the IT block state by
15321 calling in_it_block () (i.e. to determine narrow/not narrow mode).
15322
15323 Both set_it_insn_type and in_it_block run the internal FSM state
15324 handling function (handle_it_state), because: a) setting the IT insn
   type may lead to an invalid state (exiting the function),
15326 and b) querying the state requires the FSM to be updated.
15327 Specifically we want to avoid creating an IT block for conditional
15328 branches, so it_fsm_pre_encode is actually a guess and we can't
15329 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
15331 Because of this, if set_it_insn_type and in_it_block have to be used,
15332 set_it_insn_type has to be called first.
15333
15334 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
15335 determines the insn IT type depending on the inst.cond code.
15336 When a tencode () routine encodes an instruction that can be
15337 either outside an IT block, or, in the case of being inside, has to be
15338 the last one, set_it_insn_type_last () will determine the proper
15339 IT instruction type based on the inst.cond code. Otherwise,
15340 set_it_insn_type can be called for overriding that logic or
15341 for covering other cases.
15342
15343 Calling handle_it_state () may not transition the IT block state to
   OUTSIDE_IT_BLOCK immediately, since the (current) state could be
15345 still queried. Instead, if the FSM determines that the state should
15346 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
15347 after the tencode () function: that's what it_fsm_post_encode () does.
15348
15349 Since in_it_block () calls the state handling function to get an
15350 updated state, an error may occur (due to invalid insns combination).
15351 In that case, inst.error is set.
15352 Therefore, inst.error has to be checked after the execution of
15353 the tencode () routine.
15354
15355 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
15356 any pending state change (if any) that didn't take place in
15357 handle_it_state () as explained above. */
15358
15359 static void
15360 it_fsm_pre_encode (void)
15361 {
15362 if (inst.cond != COND_ALWAYS)
15363 inst.it_insn_type = INSIDE_IT_INSN;
15364 else
15365 inst.it_insn_type = OUTSIDE_IT_INSN;
15366
15367 now_it.state_handled = 0;
15368 }
15369
/* IT state FSM handling function.  Validates the current instruction
   against the IT block state and opens, extends or closes automatic IT
   blocks as needed.  Runs at most once per instruction (guarded by
   now_it.state_handled).  Returns SUCCESS, or FAIL with inst.error
   set.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      /* In ARM mode a conditional insn needs no IT block; warn
		 only if implicit-IT generation for ARM was not asked for.  */
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  /* Conditional insn outside an IT block and implicit
		     IT generation is off: hard error.  */
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it_add_mask (inst.cond);
	    }

	  /* Close the block if this was marked as its last insn.  */
	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  The condition expected for the
	   current slot is derived from the mask's top slot bit.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
15529
15530 static void
15531 it_fsm_post_encode (void)
15532 {
15533 int is_last;
15534
15535 if (!now_it.state_handled)
15536 handle_it_state ();
15537
15538 is_last = (now_it.mask == 0x10);
15539 if (is_last)
15540 {
15541 now_it.state = OUTSIDE_IT_BLOCK;
15542 now_it.mask = 0;
15543 }
15544 }
15545
15546 static void
15547 force_automatic_it_block_close (void)
15548 {
15549 if (now_it.state == AUTOMATIC_IT_BLOCK)
15550 {
15551 close_automatic_it_block ();
15552 now_it.state = OUTSIDE_IT_BLOCK;
15553 now_it.mask = 0;
15554 }
15555 }
15556
15557 static int
15558 in_it_block (void)
15559 {
15560 if (!now_it.state_handled)
15561 handle_it_state ();
15562
15563 return now_it.state != OUTSIDE_IT_BLOCK;
15564 }
15565
15566 void
15567 md_assemble (char *str)
15568 {
15569 char *p = str;
15570 const struct asm_opcode * opcode;
15571
15572 /* Align the previous label if needed. */
15573 if (last_label_seen != NULL)
15574 {
15575 symbol_set_frag (last_label_seen, frag_now);
15576 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
15577 S_SET_SEGMENT (last_label_seen, now_seg);
15578 }
15579
15580 memset (&inst, '\0', sizeof (inst));
15581 inst.reloc.type = BFD_RELOC_UNUSED;
15582
15583 opcode = opcode_lookup (&p);
15584 if (!opcode)
15585 {
15586 /* It wasn't an instruction, but it might be a register alias of
15587 the form alias .req reg, or a Neon .dn/.qn directive. */
15588 if (! create_register_alias (str, p)
15589 && ! create_neon_reg_alias (str, p))
15590 as_bad (_("bad instruction `%s'"), str);
15591
15592 return;
15593 }
15594
15595 if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
15596 as_warn (_("s suffix on comparison instruction is deprecated"));
15597
15598 /* The value which unconditional instructions should have in place of the
15599 condition field. */
15600 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
15601
15602 if (thumb_mode)
15603 {
15604 arm_feature_set variant;
15605
15606 variant = cpu_variant;
15607 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
15608 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
15609 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
15610 /* Check that this instruction is supported for this CPU. */
15611 if (!opcode->tvariant
15612 || (thumb_mode == 1
15613 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
15614 {
15615 as_bad (_("selected processor does not support `%s'"), str);
15616 return;
15617 }
15618 if (inst.cond != COND_ALWAYS && !unified_syntax
15619 && opcode->tencode != do_t_branch)
15620 {
15621 as_bad (_("Thumb does not support conditional execution"));
15622 return;
15623 }
15624
15625 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
15626 {
15627 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
15628 && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
15629 || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
15630 {
15631 /* Two things are addressed here.
15632 1) Implicit require narrow instructions on Thumb-1.
15633 This avoids relaxation accidentally introducing Thumb-2
15634 instructions.
15635 2) Reject wide instructions in non Thumb-2 cores. */
15636 if (inst.size_req == 0)
15637 inst.size_req = 2;
15638 else if (inst.size_req == 4)
15639 {
15640 as_bad (_("selected processor does not support `%s'"), str);
15641 return;
15642 }
15643 }
15644 }
15645
15646 inst.instruction = opcode->tvalue;
15647
15648 if (!parse_operands (p, opcode->operands))
15649 {
15650 /* Prepare the it_insn_type for those encodings that don't set
15651 it. */
15652 it_fsm_pre_encode ();
15653
15654 opcode->tencode ();
15655
15656 it_fsm_post_encode ();
15657 }
15658
15659 if (!(inst.error || inst.relax))
15660 {
15661 gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
15662 inst.size = (inst.instruction > 0xffff ? 4 : 2);
15663 if (inst.size_req && inst.size_req != inst.size)
15664 {
15665 as_bad (_("cannot honor width suffix -- `%s'"), str);
15666 return;
15667 }
15668 }
15669
15670 /* Something has gone badly wrong if we try to relax a fixed size
15671 instruction. */
15672 gas_assert (inst.size_req == 0 || !inst.relax);
15673
15674 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
15675 *opcode->tvariant);
15676 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
15677 set those bits when Thumb-2 32-bit instructions are seen. ie.
15678 anything other than bl/blx and v6-M instructions.
15679 This is overly pessimistic for relaxable instructions. */
15680 if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
15681 || inst.relax)
15682 && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
15683 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
15684 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
15685 arm_ext_v6t2);
15686
15687 check_neon_suffixes;
15688
15689 if (!inst.error)
15690 {
15691 mapping_state (MAP_THUMB);
15692 }
15693 }
15694 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
15695 {
15696 bfd_boolean is_bx;
15697
15698 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
15699 is_bx = (opcode->aencode == do_bx);
15700
15701 /* Check that this instruction is supported for this CPU. */
15702 if (!(is_bx && fix_v4bx)
15703 && !(opcode->avariant &&
15704 ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
15705 {
15706 as_bad (_("selected processor does not support `%s'"), str);
15707 return;
15708 }
15709 if (inst.size_req)
15710 {
15711 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
15712 return;
15713 }
15714
15715 inst.instruction = opcode->avalue;
15716 if (opcode->tag == OT_unconditionalF)
15717 inst.instruction |= 0xF << 28;
15718 else
15719 inst.instruction |= inst.cond << 28;
15720 inst.size = INSN_SIZE;
15721 if (!parse_operands (p, opcode->operands))
15722 {
15723 it_fsm_pre_encode ();
15724 opcode->aencode ();
15725 it_fsm_post_encode ();
15726 }
15727 /* Arm mode bx is marked as both v4T and v5 because it's still required
15728 on a hypothetical non-thumb v5 core. */
15729 if (is_bx)
15730 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
15731 else
15732 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
15733 *opcode->avariant);
15734
15735 check_neon_suffixes;
15736
15737 if (!inst.error)
15738 {
15739 mapping_state (MAP_ARM);
15740 }
15741 }
15742 else
15743 {
15744 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
15745 "-- `%s'"), str);
15746 return;
15747 }
15748 output_inst (str);
15749 }
15750
15751 static void
15752 check_it_blocks_finished (void)
15753 {
15754 #ifdef OBJ_ELF
15755 asection *sect;
15756
15757 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
15758 if (seg_info (sect)->tc_segment_info_data.current_it.state
15759 == MANUAL_IT_BLOCK)
15760 {
15761 as_warn (_("section '%s' finished with an open IT block."),
15762 sect->name);
15763 }
15764 #else
15765 if (now_it.state == MANUAL_IT_BLOCK)
15766 as_warn (_("file finished with an open IT block."));
15767 #endif
15768 }
15769
15770 /* Various frobbings of labels and their addresses. */
15771
void
arm_start_line_hook (void)
{
  /* Forget the label from the previous line: last_label_seen is only
     meaningful for the line currently being assembled.  */
  last_label_seen = NULL;
}
15777
15778 void
15779 arm_frob_label (symbolS * sym)
15780 {
15781 last_label_seen = sym;
15782
15783 ARM_SET_THUMB (sym, thumb_mode);
15784
15785 #if defined OBJ_COFF || defined OBJ_ELF
15786 ARM_SET_INTERWORK (sym, support_interwork);
15787 #endif
15788
15789 force_automatic_it_block_close ();
15790
15791 /* Note - do not allow local symbols (.Lxxx) to be labelled
15792 as Thumb functions. This is because these labels, whilst
15793 they exist inside Thumb code, are not the entry points for
15794 possible ARM->Thumb calls. Also, these labels can be used
15795 as part of a computed goto or switch statement. eg gcc
15796 can generate code that looks like this:
15797
15798 ldr r2, [pc, .Laaa]
15799 lsl r3, r3, #2
15800 ldr r2, [r3, r2]
15801 mov pc, r2
15802
15803 .Lbbb: .word .Lxxx
15804 .Lccc: .word .Lyyy
15805 ..etc...
15806 .Laaa: .word Lbbb
15807
15808 The first instruction loads the address of the jump table.
15809 The second instruction converts a table index into a byte offset.
15810 The third instruction gets the jump address out of the table.
15811 The fourth instruction performs the jump.
15812
15813 If the address stored at .Laaa is that of a symbol which has the
15814 Thumb_Func bit set, then the linker will arrange for this address
15815 to have the bottom bit set, which in turn would mean that the
15816 address computation performed by the third instruction would end
15817 up with the bottom bit set. Since the ARM is capable of unaligned
15818 word loads, the instruction would then load the incorrect address
15819 out of the jump table, and chaos would ensue. */
15820 if (label_is_thumb_function_name
15821 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
15822 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
15823 {
15824 /* When the address of a Thumb function is taken the bottom
15825 bit of that address should be set. This will allow
15826 interworking between Arm and Thumb functions to work
15827 correctly. */
15828
15829 THUMB_SET_FUNC (sym, 1);
15830
15831 label_is_thumb_function_name = FALSE;
15832 }
15833
15834 dwarf2_emit_label (sym);
15835 }
15836
15837 bfd_boolean
15838 arm_data_in_code (void)
15839 {
15840 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
15841 {
15842 *input_line_pointer = '/';
15843 input_line_pointer += 5;
15844 *input_line_pointer = 0;
15845 return TRUE;
15846 }
15847
15848 return FALSE;
15849 }
15850
15851 char *
15852 arm_canonicalize_symbol_name (char * name)
15853 {
15854 int len;
15855
15856 if (thumb_mode && (len = strlen (name)) > 5
15857 && streq (name + len - 5, "/data"))
15858 *(name + len - 5) = 0;
15859
15860 return name;
15861 }
15862 \f
15863 /* Table of all register names defined by default. The user can
15864 define additional names with .req. Note that all register names
15865 should appear in both upper and lowercase variants. Some registers
15866 also have mixed-case names. */
15867
15868 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
15869 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
15870 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
15871 #define REGSET(p,t) \
15872 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
15873 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
15874 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
15875 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
15876 #define REGSETH(p,t) \
15877 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
15878 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
15879 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
15880 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
15881 #define REGSET2(p,t) \
15882 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
15883 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
15884 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
15885 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
15886
15887 static const struct reg_entry reg_names[] =
15888 {
15889 /* ARM integer registers. */
15890 REGSET(r, RN), REGSET(R, RN),
15891
15892 /* ATPCS synonyms. */
15893 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
15894 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
15895 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
15896
15897 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
15898 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
15899 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
15900
15901 /* Well-known aliases. */
15902 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
15903 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
15904
15905 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
15906 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
15907
15908 /* Coprocessor numbers. */
15909 REGSET(p, CP), REGSET(P, CP),
15910
15911 /* Coprocessor register numbers. The "cr" variants are for backward
15912 compatibility. */
15913 REGSET(c, CN), REGSET(C, CN),
15914 REGSET(cr, CN), REGSET(CR, CN),
15915
15916 /* FPA registers. */
15917 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
15918 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
15919
15920 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
15921 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
15922
15923 /* VFP SP registers. */
15924 REGSET(s,VFS), REGSET(S,VFS),
15925 REGSETH(s,VFS), REGSETH(S,VFS),
15926
15927 /* VFP DP Registers. */
15928 REGSET(d,VFD), REGSET(D,VFD),
15929 /* Extra Neon DP registers. */
15930 REGSETH(d,VFD), REGSETH(D,VFD),
15931
15932 /* Neon QP registers. */
15933 REGSET2(q,NQ), REGSET2(Q,NQ),
15934
15935 /* VFP control registers. */
15936 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
15937 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
15938 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
15939 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
15940 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
15941 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
15942
15943 /* Maverick DSP coprocessor registers. */
15944 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
15945 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
15946
15947 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
15948 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
15949 REGDEF(dspsc,0,DSPSC),
15950
15951 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
15952 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
15953 REGDEF(DSPSC,0,DSPSC),
15954
15955 /* iWMMXt data registers - p0, c0-15. */
15956 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
15957
15958 /* iWMMXt control registers - p1, c0-3. */
15959 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
15960 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
15961 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
15962 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
15963
15964 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
15965 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
15966 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
15967 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
15968 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
15969
15970 /* XScale accumulator registers. */
15971 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
15972 };
15973 #undef REGDEF
15974 #undef REGNUM
15975 #undef REGSET
15976
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.  */
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},
  /* Combinations of flags.  Every ordering of each combination of the
     f/s/x/c field letters is spelled out explicitly, so that the
     lookup can accept any ordering the programmer writes.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
16054
/* Table of V7M psr names.  The numeric values are presumably the
   MRS/MSR special-register encodings defined by the v7-M
   architecture -- verify against the ARMv7-M ARM before relying on
   them.  Each name is listed in lower- and uppercase (xPSR also has a
   mixed-case spelling).  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	  0 }, {"APSR",		0 },
  {"iapsr",	  1 }, {"IAPSR",	1 },
  {"eapsr",	  2 }, {"EAPSR",	2 },
  {"psr",	  3 }, {"PSR",		3 },
  {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
  {"ipsr",	  5 }, {"IPSR",		5 },
  {"epsr",	  6 }, {"EPSR",		6 },
  {"iepsr",	  7 }, {"IEPSR",	7 },
  {"msp",	  8 }, {"MSP",		8 },
  {"psp",	  9 }, {"PSP",		9 },
  {"primask",	  16}, {"PRIMASK",	16},
  {"basepri",	  17}, {"BASEPRI",	17},
  {"basepri_max", 18}, {"BASEPRI_MAX",	18},
  {"faultmask",	  19}, {"FAULTMASK",	19},
  {"control",	  20}, {"CONTROL",	20}
};
16074
/* Table of all shift-in-operand names.  "asl" is accepted as a
   synonym for "lsl".  Each name appears in both lower- and
   uppercase.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
16085
/* Table of all explicit relocation names, mapping each assembler
   relocation-specifier spelling (e.g. "(got)") to the corresponding
   BFD relocation code.  ELF-only; each name appears in lower- and
   uppercase.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32}
};
#endif
16103
/* Table of all conditional affixes.  0xF is not defined as a
   condition code.  The value is the 4-bit ARM condition-field
   encoding; "hs" is a synonym for "cs", and "ul"/"lo" are synonyms
   for "cc".  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
16123
/* Table of barrier option names.  The values look like the 4-bit
   option-field encodings used by the barrier instructions
   (sy = full system, st = stores only, un = inner shareable) --
   NOTE(review): confirm against the ARM ARM barrier option table.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  { "sy",   0xf },
  { "un",   0x7 },
  { "st",   0xe },
  { "unst", 0x6 }
};
16131
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.

   Naming convention used below: an uppercase leading 'T' takes a
   numeric (hex) Thumb opcode, a lowercase leading 't' takes a
   T_MNEM_xyz enumerator; names with no T/t prefix are ARM-only.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* The "w" variants mark an "s"-suffixed comparison spelling as
   deprecated (see the warn_on_deprecated check in md_assemble).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
   appear in the condition table.  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
  { m1 #m2 m3, OPS##nops ops, sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
  TxCM_ (m1,   , m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, le, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)

#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
  TxCM (m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
  TxCM (m1,m2, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,			\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Placeholder encoder for entries with no encoding function.  */
#define do_0 0

/* Thumb-only, unconditional.  */
#define UT(mnem,  op, nops, ops, te) TUE (mnem,  0, op, nops, ops, 0, te)
16324
16325 static const struct asm_opcode insns[] =
16326 {
16327 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
16328 #define THUMB_VARIANT &arm_ext_v4t
16329 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
16330 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
16331 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
16332 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
16333 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
16334 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
16335 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
16336 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
16337 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
16338 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
16339 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
16340 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
16341 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
16342 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
16343 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
16344 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
16345
16346 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
16347 for setting PSR flag bits. They are obsolete in V6 and do not
16348 have Thumb equivalents. */
16349 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
16350 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
16351 CL("tstp", 110f000, 2, (RR, SH), cmp),
16352 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
16353 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
16354 CL("cmpp", 150f000, 2, (RR, SH), cmp),
16355 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
16356 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
16357 CL("cmnp", 170f000, 2, (RR, SH), cmp),
16358
16359 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
16360 tC3("movs", 1b00000, _movs, 2, (RR, SH), mov, t_mov_cmp),
16361 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
16362 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
16363
16364 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
16365 tC3("ldrb", 4500000, _ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst),
16366 tCE("str", 4000000, _str, 2, (RR, ADDRGLDR),ldst, t_ldst),
16367 tC3("strb", 4400000, _strb, 2, (RR, ADDRGLDR),ldst, t_ldst),
16368
16369 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16370 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16371 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16372 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16373 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16374 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16375
16376 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
16377 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
16378 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
16379 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
16380
16381 /* Pseudo ops. */
16382 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
16383 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
16384 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
16385
16386 /* Thumb-compatibility pseudo ops. */
16387 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
16388 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
16389 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
16390 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
16391 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
16392 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
16393 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
16394 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
16395 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
16396 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
16397 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
16398 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
16399
16400 /* These may simplify to neg. */
16401 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
16402 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
16403
16404 #undef THUMB_VARIANT
16405 #define THUMB_VARIANT & arm_ext_v6
16406
16407 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
16408
16409 /* V1 instructions with no Thumb analogue prior to V6T2. */
16410 #undef THUMB_VARIANT
16411 #define THUMB_VARIANT & arm_ext_v6t2
16412
16413 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
16414 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
16415 CL("teqp", 130f000, 2, (RR, SH), cmp),
16416
16417 TC3("ldrt", 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
16418 TC3("ldrbt", 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
16419 TC3("strt", 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
16420 TC3("strbt", 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
16421
16422 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16423 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16424
16425 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16426 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16427
16428 /* V1 instructions with no Thumb analogue at all. */
16429 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
16430 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
16431
16432 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
16433 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
16434 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
16435 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
16436 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
16437 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
16438 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
16439 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
16440
16441 #undef ARM_VARIANT
16442 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
16443 #undef THUMB_VARIANT
16444 #define THUMB_VARIANT & arm_ext_v4t
16445
16446 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
16447 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
16448
16449 #undef THUMB_VARIANT
16450 #define THUMB_VARIANT & arm_ext_v6t2
16451
16452 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
16453 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
16454
16455 /* Generic coprocessor instructions. */
16456 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
16457 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16458 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16459 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16460 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16461 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16462 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16463
16464 #undef ARM_VARIANT
16465 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
16466
16467 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
16468 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
16469
16470 #undef ARM_VARIANT
16471 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
16472 #undef THUMB_VARIANT
16473 #define THUMB_VARIANT & arm_ext_msr
16474
16475 TCE("mrs", 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
16476 TCE("msr", 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
16477
16478 #undef ARM_VARIANT
16479 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
16480 #undef THUMB_VARIANT
16481 #define THUMB_VARIANT & arm_ext_v6t2
16482
16483 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16484 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16485 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16486 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16487 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16488 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16489 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16490 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16491
16492 #undef ARM_VARIANT
16493 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
16494 #undef THUMB_VARIANT
16495 #define THUMB_VARIANT & arm_ext_v4t
16496
16497 tC3("ldrh", 01000b0, _ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16498 tC3("strh", 00000b0, _strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16499 tC3("ldrsh", 01000f0, _ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16500 tC3("ldrsb", 01000d0, _ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16501 tCM("ld","sh", 01000f0, _ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16502 tCM("ld","sb", 01000d0, _ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16503
16504 #undef ARM_VARIANT
16505 #define ARM_VARIANT & arm_ext_v4t_5
16506
16507 /* ARM Architecture 4T. */
16508 /* Note: bx (and blx) are required on V5, even if the processor does
16509 not support Thumb. */
16510 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
16511
16512 #undef ARM_VARIANT
16513 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
16514 #undef THUMB_VARIANT
16515 #define THUMB_VARIANT & arm_ext_v5t
16516
16517 /* Note: blx has 2 variants; the .value coded here is for
16518 BLX(2). Only this variant has conditional execution. */
16519 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
16520 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
16521
16522 #undef THUMB_VARIANT
16523 #define THUMB_VARIANT & arm_ext_v6t2
16524
16525 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
16526 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16527 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16528 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16529 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16530 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
16531 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16532 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16533
16534 #undef ARM_VARIANT
16535 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
16536 #undef THUMB_VARIANT
16537 #define THUMB_VARIANT &arm_ext_v5exp
16538
16539 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16540 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16541 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16542 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16543
16544 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16545 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16546
16547 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16548 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16549 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16550 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16551
16552 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16553 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16554 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16555 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16556
16557 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16558 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16559
16560 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
16561 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
16562 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
16563 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
16564
16565 #undef ARM_VARIANT
16566 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
16567 #undef THUMB_VARIANT
16568 #define THUMB_VARIANT &arm_ext_v6t2
16569
16570 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
16571 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
16572 TC3("strd", 00000f0, e8400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
16573
16574 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16575 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16576
16577 #undef ARM_VARIANT
16578 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
16579
16580 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
16581
16582 #undef ARM_VARIANT
16583 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
16584 #undef THUMB_VARIANT
16585 #define THUMB_VARIANT & arm_ext_v6
16586
16587 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
16588 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
16589 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
16590 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
16591 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
16592 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16593 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16594 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16595 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16596 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
16597
16598 #undef THUMB_VARIANT
16599 #define THUMB_VARIANT & arm_ext_v6t2
16600
16601 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
16602 TCE("strex", 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
16603 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16604 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16605
16606 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
16607 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
16608
16609 /* ARM V6 not included in V7M. */
16610 #undef THUMB_VARIANT
16611 #define THUMB_VARIANT & arm_ext_v6_notm
16612 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
16613 UF(rfeib, 9900a00, 1, (RRw), rfe),
16614 UF(rfeda, 8100a00, 1, (RRw), rfe),
16615 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
16616 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
16617 UF(rfefa, 9900a00, 1, (RRw), rfe),
16618 UF(rfeea, 8100a00, 1, (RRw), rfe),
16619 TUF("rfeed", 9100a00, e810c000, 1, (RRw), rfe, rfe),
16620 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
16621 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
16622 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
16623 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
16624
16625 /* ARM V6 not included in V7M (eg. integer SIMD). */
16626 #undef THUMB_VARIANT
16627 #define THUMB_VARIANT & arm_ext_v6_dsp
16628 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
16629 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
16630 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
16631 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16632 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16633 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16634 /* Old name for QASX. */
16635 TCE("qaddsubx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16636 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16637 /* Old name for QSAX. */
16638 TCE("qsubaddx", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16639 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16640 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16641 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16642 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16643 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16644 /* Old name for SASX. */
16645 TCE("saddsubx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16646 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16647 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16648 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16649 /* Old name for SHASX. */
16650 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16651 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16652 /* Old name for SHSAX. */
16653 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16654 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16655 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16656 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16657 /* Old name for SSAX. */
16658 TCE("ssubaddx", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16659 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16660 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16661 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16662 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16663 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16664 /* Old name for UASX. */
16665 TCE("uaddsubx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16666 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16667 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16668 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16669 /* Old name for UHASX. */
16670 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16671 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16672 /* Old name for UHSAX. */
16673 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16674 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16675 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16676 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16677 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16678 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16679 /* Old name for UQASX. */
16680 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16681 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16682 /* Old name for UQSAX. */
16683 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16684 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16685 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16686 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16687 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16688 /* Old name for USAX. */
16689 TCE("usubaddx", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16690 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16691 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16692 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16693 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16694 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16695 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16696 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16697 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16698 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16699 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16700 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16701 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16702 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
16703 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
16704 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16705 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16706 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
16707 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
16708 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16709 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16710 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16711 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16712 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16713 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16714 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16715 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16716 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16717 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16718 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
16719 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
16720 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16721 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16722 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
16723
16724 #undef ARM_VARIANT
16725 #define ARM_VARIANT & arm_ext_v6k
16726 #undef THUMB_VARIANT
16727 #define THUMB_VARIANT & arm_ext_v6k
16728
16729 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
16730 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
16731 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
16732 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
16733
16734 #undef THUMB_VARIANT
16735 #define THUMB_VARIANT & arm_ext_v6_notm
16736
16737 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
16738 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
16739
16740 #undef THUMB_VARIANT
16741 #define THUMB_VARIANT & arm_ext_v6t2
16742
16743 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
16744 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
16745 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
16746 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
16747 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
16748
16749 #undef ARM_VARIANT
16750 #define ARM_VARIANT & arm_ext_v6z
16751
16752 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
16753
16754 #undef ARM_VARIANT
16755 #define ARM_VARIANT & arm_ext_v6t2
16756
16757 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
16758 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
16759 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
16760 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
16761
16762 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
16763 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
16764 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
16765 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
16766
16767 TC3("ldrht", 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
16768 TC3("ldrsht", 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
16769 TC3("ldrsbt", 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
16770 TC3("strht", 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
16771
16772 UT("cbnz", b900, 2, (RR, EXP), t_cbz),
16773 UT("cbz", b100, 2, (RR, EXP), t_cbz),
16774
16775 /* ARM does not really have an IT instruction, so always allow it.
16776 The opcode is copied from Thumb in order to allow warnings in
16777 -mimplicit-it=[never | arm] modes. */
16778 #undef ARM_VARIANT
16779 #define ARM_VARIANT & arm_ext_v1
16780
16781 TUE("it", bf08, bf08, 1, (COND), it, t_it),
16782 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
16783 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
16784 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
16785 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
16786 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
16787 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
16788 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
16789 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
16790 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
16791 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
16792 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
16793 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
16794 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
16795 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
16796 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
16797 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
16798 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
16799
16800 /* Thumb2 only instructions. */
16801 #undef ARM_VARIANT
16802 #define ARM_VARIANT NULL
16803
16804 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
16805 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
16806 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
16807 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
16808 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
16809 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
16810
16811 /* Thumb-2 hardware division instructions (R and M profiles only). */
16812 #undef THUMB_VARIANT
16813 #define THUMB_VARIANT & arm_ext_div
16814
16815 TCE("sdiv", 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
16816 TCE("udiv", 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
16817
16818 /* ARM V6M/V7 instructions. */
16819 #undef ARM_VARIANT
16820 #define ARM_VARIANT & arm_ext_barrier
16821 #undef THUMB_VARIANT
16822 #define THUMB_VARIANT & arm_ext_barrier
16823
16824 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
16825 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
16826 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
16827
16828 /* ARM V7 instructions. */
16829 #undef ARM_VARIANT
16830 #define ARM_VARIANT & arm_ext_v7
16831 #undef THUMB_VARIANT
16832 #define THUMB_VARIANT & arm_ext_v7
16833
16834 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
16835 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
16836
16837 #undef ARM_VARIANT
16838 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
16839
16840 cCE("wfs", e200110, 1, (RR), rd),
16841 cCE("rfs", e300110, 1, (RR), rd),
16842 cCE("wfc", e400110, 1, (RR), rd),
16843 cCE("rfc", e500110, 1, (RR), rd),
16844
16845 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
16846 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
16847 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
16848 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
16849
16850 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
16851 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
16852 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
16853 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
16854
16855 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
16856 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
16857 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
16858 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
16859 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
16860 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
16861 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
16862 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
16863 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
16864 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
16865 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
16866 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
16867
16868 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
16869 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
16870 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
16871 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
16872 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
16873 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
16874 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
16875 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
16876 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
16877 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
16878 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
16879 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
16880
16881 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
16882 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
16883 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
16884 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
16885 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
16886 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
16887 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
16888 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
16889 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
16890 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
16891 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
16892 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
16893
16894 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
16895 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
16896 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
16897 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
16898 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
16899 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
16900 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
16901 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
16902 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
16903 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
16904 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
16905 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
16906
16907 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
16908 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
16909 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
16910 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
16911 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
16912 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
16913 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
16914 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
16915 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
16916 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
16917 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
16918 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
16919
16920 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
16921 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
16922 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
16923 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
16924 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
16925 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
16926 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
16927 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
16928 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
16929 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
16930 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
16931 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
16932
16933 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
16934 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
16935 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
16936 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
16937 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
16938 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
16939 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
16940 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
16941 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
16942 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
16943 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
16944 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
16945
16946 	 cCL("exps",	e708100, 2, (RF, RF_IF),	      rd_rm),
16947 	 cCL("expsp",	e708120, 2, (RF, RF_IF),	      rd_rm),
16948 	 cCL("expsm",	e708140, 2, (RF, RF_IF),	      rd_rm),
16949 	 cCL("expsz",	e708160, 2, (RF, RF_IF),	      rd_rm),
16950 	 cCL("expd",	e708180, 2, (RF, RF_IF),	      rd_rm),
16951 	 cCL("expdp",	e7081a0, 2, (RF, RF_IF),	      rd_rm),
16952 	 cCL("expdm",	e7081c0, 2, (RF, RF_IF),	      rd_rm),
16953 	 cCL("expdz",	e7081e0, 2, (RF, RF_IF),	      rd_rm),
16954 	 cCL("expe",	e788100, 2, (RF, RF_IF),	      rd_rm),
16955 	 cCL("expep",	e788120, 2, (RF, RF_IF),	      rd_rm),
16956 	 cCL("expem",	e788140, 2, (RF, RF_IF),	      rd_rm),
16957 	 cCL("expez",	e788160, 2, (RF, RF_IF),	      rd_rm),
16958
16959 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
16960 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
16961 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
16962 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
16963 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
16964 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
16965 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
16966 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
16967 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
16968 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
16969 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
16970 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
16971
16972 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
16973 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
16974 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
16975 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
16976 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
16977 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
16978 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
16979 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
16980 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
16981 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
16982 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
16983 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
16984
16985 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
16986 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
16987 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
16988 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
16989 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
16990 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
16991 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
16992 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
16993 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
16994 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
16995 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
16996 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
16997
16998 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
16999 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
17000 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
17001 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
17002 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
17003 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
17004 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
17005 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
17006 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
17007 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
17008 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
17009 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
17010
17011 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
17012 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
17013 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
17014 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
17015 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
17016 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
17017 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
17018 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
17019 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
17020 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
17021 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
17022 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
17023
17024 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
17025 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
17026 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
17027 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
17028 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
17029 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
17030 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
17031 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
17032 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
17033 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
17034 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
17035 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
17036
17037 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
17038 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
17039 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
17040 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
17041 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
17042 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
17043 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
17044 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
17045 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
17046 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
17047 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
17048 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
17049
17050 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
17051 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
17052 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
17053 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
17054 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
17055 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
17056 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
17057 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
17058 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
17059 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
17060 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
17061 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
17062
17063 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
17064 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
17065 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
17066 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
17067 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
17068 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17069 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17070 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17071 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
17072 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
17073 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
17074 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
17075
17076 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
17077 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
17078 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
17079 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
17080 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
17081 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17082 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17083 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17084 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
17085 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
17086 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
17087 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
17088
17089 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
17090 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
17091 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
17092 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
17093 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
17094 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17095 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17096 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17097 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
17098 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
17099 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
17100 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
17101
17102 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
17103 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
17104 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
17105 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
17106 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
17107 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17108 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17109 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17110 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
17111 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
17112 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
17113 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
17114
17115 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
17116 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
17117 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
17118 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
17119 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
17120 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17121 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17122 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17123 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
17124 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
17125 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
17126 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
17127
17128 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
17129 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
17130 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
17131 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
17132 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
17133 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17134 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17135 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17136 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
17137 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
17138 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
17139 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
17140
17141 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
17142 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
17143 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
17144 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
17145 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
17146 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17147 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17148 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17149 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
17150 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
17151 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
17152 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
17153
17154 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
17155 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
17156 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
17157 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
17158 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
17159 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17160 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17161 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17162 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
17163 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
17164 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
17165 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
17166
17167 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
17168 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
17169 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
17170 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
17171 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
17172 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17173 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17174 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17175 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
17176 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
17177 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
17178 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
17179
17180 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
17181 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
17182 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
17183 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
17184 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
17185 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17186 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17187 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17188 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
17189 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
17190 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
17191 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
17192
17193 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
17194 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
17195 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
17196 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
17197 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
17198 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17199 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17200 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17201 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
17202 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
17203 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
17204 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
17205
17206 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
17207 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
17208 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
17209 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
17210 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
17211 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17212 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17213 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17214 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
17215 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
17216 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
17217 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
17218
17219 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
17220 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
17221 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
17222 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
17223 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
17224 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17225 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17226 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17227 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
17228 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
17229 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
17230 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
17231
17232 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
17233 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
17234 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
17235 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
17236
17237 cCL("flts", e000110, 2, (RF, RR), rn_rd),
17238 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
17239 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
17240 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
17241 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
17242 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
17243 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
17244 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
17245 cCL("flte", e080110, 2, (RF, RR), rn_rd),
17246 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
17247 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
17248 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
17249
17250 /* The implementation of the FIX instruction is broken on some
17251 assemblers, in that it accepts a precision specifier as well as a
17252 rounding specifier, despite the fact that this is meaningless.
17253 To be more compatible, we accept it as well, though of course it
17254 does not set any bits. */
17255 cCE("fix", e100110, 2, (RR, RF), rd_rm),
17256 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
17257 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
17258 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
17259 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
17260 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
17261 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
17262 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
17263 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
17264 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
17265 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
17266 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
17267 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
17268
17269 /* Instructions that were new with the real FPA, call them V2. */
17270 #undef ARM_VARIANT
17271 #define ARM_VARIANT & fpu_fpa_ext_v2
17272
17273 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17274 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17275 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17276 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17277 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17278 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17279
17280 #undef ARM_VARIANT
17281 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
17282
17283 /* Moves and type conversions. */
17284 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
17285 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
17286 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
17287 cCE("fmstat", ef1fa10, 0, (), noargs),
17288 cCE("vmrs", ef10a10, 2, (APSR_RR, RVC), vmrs),
17289 cCE("vmsr", ee10a10, 2, (RVC, RR), vmsr),
17290 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
17291 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
17292 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
17293 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
17294 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
17295 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
17296 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
17297 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
17298
17299 /* Memory operations. */
17300 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
17301 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
17302 cCE("fldmias", c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
17303 cCE("fldmfds", c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
17304 cCE("fldmdbs", d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
17305 cCE("fldmeas", d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
17306 cCE("fldmiax", c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
17307 cCE("fldmfdx", c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
17308 cCE("fldmdbx", d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
17309 cCE("fldmeax", d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
17310 cCE("fstmias", c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
17311 cCE("fstmeas", c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
17312 cCE("fstmdbs", d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
17313 cCE("fstmfds", d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
17314 cCE("fstmiax", c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
17315 cCE("fstmeax", c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
17316 cCE("fstmdbx", d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
17317 cCE("fstmfdx", d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
17318
17319 /* Monadic operations. */
17320 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
17321 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
17322 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
17323
17324 /* Dyadic operations. */
17325 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17326 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17327 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17328 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17329 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17330 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17331 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17332 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17333 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17334
17335 /* Comparisons. */
17336 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
17337 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
17338 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
17339 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
17340
17341 /* Double precision load/store are still present on single precision
17342 implementations. */
17343 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
17344 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
17345 cCE("fldmiad", c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
17346 cCE("fldmfdd", c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
17347 cCE("fldmdbd", d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
17348 cCE("fldmead", d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
17349 cCE("fstmiad", c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
17350 cCE("fstmead", c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
17351 cCE("fstmdbd", d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
17352 cCE("fstmfdd", d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
17353
17354 #undef ARM_VARIANT
17355 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
17356
17357 /* Moves and type conversions. */
17358 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
17359 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
17360 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
17361 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
17362 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
17363 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
17364 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
17365 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
17366 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
17367 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
17368 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
17369 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
17370 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
17371
17372 /* Monadic operations. */
17373 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
17374 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
17375 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
17376
17377 /* Dyadic operations. */
17378 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17379 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17380 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17381 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17382 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17383 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17384 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17385 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17386 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17387
17388 /* Comparisons. */
17389 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
17390 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
17391 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
17392 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
17393
17394 #undef ARM_VARIANT
17395 #define ARM_VARIANT & fpu_vfp_ext_v2
17396
17397 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
17398 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
17399 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
17400 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
17401
17402 /* Instructions which may belong to either the Neon or VFP instruction sets.
17403 Individual encoder functions perform additional architecture checks. */
17404 #undef ARM_VARIANT
17405 #define ARM_VARIANT & fpu_vfp_ext_v1xd
17406 #undef THUMB_VARIANT
17407 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
17408
17409 /* These mnemonics are unique to VFP. */
17410 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
17411 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
17412 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17413 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17414 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17415 nCE(vcmp, _vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
17416 nCE(vcmpe, _vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
17417 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
17418 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
17419 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
17420
17421 /* Mnemonics shared by Neon and VFP. */
17422 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
17423 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
17424 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
17425
17426 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
17427 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
17428
17429 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
17430 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
17431
17432 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17433 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17434 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17435 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17436 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17437 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17438 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
17439 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
17440
17441 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
17442 nCEF(vcvtb, _vcvt, 2, (RVS, RVS), neon_cvtb),
17443 nCEF(vcvtt, _vcvt, 2, (RVS, RVS), neon_cvtt),
17444
17445
17446 /* NOTE: All VMOV encoding is special-cased! */
17447 NCE(vmov, 0, 1, (VMOV), neon_mov),
17448 NCE(vmovq, 0, 1, (VMOV), neon_mov),
17449
17450 #undef THUMB_VARIANT
17451 #define THUMB_VARIANT & fpu_neon_ext_v1
17452 #undef ARM_VARIANT
17453 #define ARM_VARIANT & fpu_neon_ext_v1
17454
17455 /* Data processing with three registers of the same length. */
17456 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
17457 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
17458 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
17459 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
17460 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
17461 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
17462 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
17463 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
17464 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
17465 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
17466 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
17467 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
17468 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
17469 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
17470 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
17471 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
17472 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
17473 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
17474 /* If not immediate, fall back to neon_dyadic_i64_su.
17475 shl_imm should accept I8 I16 I32 I64,
17476 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
17477 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
17478 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
17479 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
17480 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
17481 /* Logic ops, types optional & ignored. */
17482 nUF(vand, _vand, 2, (RNDQ, NILO), neon_logic),
17483 nUF(vandq, _vand, 2, (RNQ, NILO), neon_logic),
17484 nUF(vbic, _vbic, 2, (RNDQ, NILO), neon_logic),
17485 nUF(vbicq, _vbic, 2, (RNQ, NILO), neon_logic),
17486 nUF(vorr, _vorr, 2, (RNDQ, NILO), neon_logic),
17487 nUF(vorrq, _vorr, 2, (RNQ, NILO), neon_logic),
17488 nUF(vorn, _vorn, 2, (RNDQ, NILO), neon_logic),
17489 nUF(vornq, _vorn, 2, (RNQ, NILO), neon_logic),
17490 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
17491 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
17492 /* Bitfield ops, untyped. */
17493 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
17494 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
17495 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
17496 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
17497 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
17498 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
17499 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
17500 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
17501 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
17502 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
17503 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
17504 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
17505 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
17506 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
17507 back to neon_dyadic_if_su. */
17508 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
17509 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
17510 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
17511 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
17512 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
17513 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
17514 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
17515 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
17516 /* Comparison. Type I8 I16 I32 F32. */
17517 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
17518 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
17519 /* As above, D registers only. */
17520 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
17521 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
17522 /* Int and float variants, signedness unimportant. */
17523 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
17524 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
17525 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
17526 /* Add/sub take types I8 I16 I32 I64 F32. */
17527 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
17528 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
17529 /* vtst takes sizes 8, 16, 32. */
17530 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
17531 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
17532 /* VMUL takes I8 I16 I32 F32 P8. */
17533 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
17534 /* VQD{R}MULH takes S16 S32. */
17535 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
17536 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
17537 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
17538 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
17539 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
17540 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
17541 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
17542 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
17543 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
17544 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
17545 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
17546 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
17547 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
17548 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
17549 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
17550 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
17551
17552 /* Two address, int/float. Types S8 S16 S32 F32. */
17553 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
17554 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
17555
17556 /* Data processing with two registers and a shift amount. */
17557 /* Right shifts, and variants with rounding.
17558 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
17559 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
17560 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
17561 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
17562 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
17563 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
17564 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
17565 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
17566 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
17567 /* Shift and insert. Sizes accepted 8 16 32 64. */
17568 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
17569 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
17570 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
17571 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
17572 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
17573 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
17574 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
17575 /* Right shift immediate, saturating & narrowing, with rounding variants.
17576 Types accepted S16 S32 S64 U16 U32 U64. */
17577 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
17578 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
17579 /* As above, unsigned. Types accepted S16 S32 S64. */
17580 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
17581 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
17582 /* Right shift narrowing. Types accepted I16 I32 I64. */
17583 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
17584 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
17585 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
17586 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
17587 /* CVT with optional immediate for fixed-point variant. */
17588 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
17589
17590 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
17591 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
17592
17593 /* Data processing, three registers of different lengths. */
17594 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
17595 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
17596 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
17597 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
17598 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
17599 /* If not scalar, fall back to neon_dyadic_long.
17600 Vector types as above, scalar types S16 S32 U16 U32. */
17601 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
17602 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
17603 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
17604 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
17605 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
17606 /* Dyadic, narrowing insns. Types I16 I32 I64. */
17607 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17608 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17609 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17610 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17611 /* Saturating doubling multiplies. Types S16 S32. */
17612 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
17613 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
17614 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
17615 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
17616 S16 S32 U16 U32. */
17617 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
17618
17619 /* Extract. Size 8. */
17620 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
17621 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
17622
17623 /* Two registers, miscellaneous. */
17624 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
17625 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
17626 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
17627 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
17628 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
17629 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
17630 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
17631 /* Vector replicate. Sizes 8 16 32. */
17632 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
17633 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
17634 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
17635 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
17636 /* VMOVN. Types I16 I32 I64. */
17637 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
17638 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
17639 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
17640 /* VQMOVUN. Types S16 S32 S64. */
17641 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
17642 /* VZIP / VUZP. Sizes 8 16 32. */
17643 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
17644 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
17645 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
17646 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
17647 /* VQABS / VQNEG. Types S8 S16 S32. */
17648 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
17649 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
17650 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
17651 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
17652 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
17653 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
17654 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
17655 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
17656 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
17657 /* Reciprocal estimates. Types U32 F32. */
17658 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
17659 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
17660 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
17661 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
17662 /* VCLS. Types S8 S16 S32. */
17663 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
17664 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
17665 /* VCLZ. Types I8 I16 I32. */
17666 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
17667 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
17668 /* VCNT. Size 8. */
17669 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
17670 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
17671 /* Two address, untyped. */
17672 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
17673 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
17674 /* VTRN. Sizes 8 16 32. */
17675 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
17676 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
17677
17678 /* Table lookup. Size 8. */
17679 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
17680 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
17681
17682 #undef THUMB_VARIANT
17683 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
17684 #undef ARM_VARIANT
17685 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
17686
17687 /* Neon element/structure load/store. */
17688 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
17689 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
17690 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
17691 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
17692 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
17693 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
17694 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
17695 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
17696
17697 #undef THUMB_VARIANT
17698 #define THUMB_VARIANT &fpu_vfp_ext_v3xd
17699 #undef ARM_VARIANT
17700 #define ARM_VARIANT &fpu_vfp_ext_v3xd
17701 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
17702 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
17703 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
17704 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
17705 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
17706 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
17707 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
17708 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
17709 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
17710
17711 #undef THUMB_VARIANT
17712 #define THUMB_VARIANT & fpu_vfp_ext_v3
17713 #undef ARM_VARIANT
17714 #define ARM_VARIANT & fpu_vfp_ext_v3
17715
17716 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
17717 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
17718 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
17719 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
17720 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
17721 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
17722 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
17723 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
17724 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
17725
17726 #undef ARM_VARIANT
17727 #define ARM_VARIANT &fpu_vfp_ext_fma
17728 #undef THUMB_VARIANT
17729 #define THUMB_VARIANT &fpu_vfp_ext_fma
17730 /* Mnemonics shared by Neon and VFP. These are included in the
17731 VFP FMA variant; NEON and VFP FMA always includes the NEON
17732 FMA instructions. */
17733 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
17734 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
17735 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
17736 the v form should always be used. */
17737 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17738 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17739 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17740 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17741 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17742 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17743
17744 #undef THUMB_VARIANT
17745 #undef ARM_VARIANT
17746 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
17747
17748 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17749 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17750 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17751 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17752 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17753 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17754 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
17755 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
17756
17757 #undef ARM_VARIANT
17758 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
17759
17760 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
17761 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
17762 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
17763 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
17764 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
17765 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
17766 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
17767 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
17768 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
17769 cCE("textrmub", e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
17770 cCE("textrmuh", e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
17771 cCE("textrmuw", e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
17772 cCE("textrmsb", e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
17773 cCE("textrmsh", e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
17774 cCE("textrmsw", e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
17775 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
17776 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
17777 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
17778 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
17779 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
17780 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17781 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17782 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17783 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17784 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17785 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17786 cCE("tmovmskb", e100030, 2, (RR, RIWR), rd_rn),
17787 cCE("tmovmskh", e500030, 2, (RR, RIWR), rd_rn),
17788 cCE("tmovmskw", e900030, 2, (RR, RIWR), rd_rn),
17789 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
17790 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
17791 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
17792 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
17793 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
17794 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
17795 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
17796 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
17797 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17798 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17799 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17800 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17801 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17802 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17803 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17804 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17805 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17806 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
17807 cCE("walignr0", e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17808 cCE("walignr1", e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17809 cCE("walignr2", ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17810 cCE("walignr3", eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17811 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17812 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17813 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17814 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17815 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17816 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17817 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17818 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17819 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17820 cCE("wcmpgtub", e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17821 cCE("wcmpgtuh", e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17822 cCE("wcmpgtuw", e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17823 cCE("wcmpgtsb", e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17824 cCE("wcmpgtsh", e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17825 cCE("wcmpgtsw", eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17826 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
17827 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
17828 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
17829 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
17830 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17831 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17832 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17833 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17834 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17835 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17836 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17837 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17838 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17839 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17840 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17841 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17842 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17843 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17844 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17845 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17846 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17847 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17848 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
17849 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17850 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17851 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17852 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17853 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17854 cCE("wpackhss", e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17855 cCE("wpackhus", e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17856 cCE("wpackwss", eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17857 cCE("wpackwus", e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17858 cCE("wpackdss", ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17859 cCE("wpackdus", ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17860 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17861 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17862 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17863 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17864 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17865 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17866 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17867 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17868 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17869 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17870 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
17871 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17872 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17873 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17874 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17875 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17876 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17877 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17878 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17879 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17880 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17881 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17882 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17883 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17884 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17885 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17886 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17887 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17888 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17889 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
17890 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
17891 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
17892 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
17893 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17894 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17895 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17896 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17897 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17898 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17899 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17900 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17901 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17902 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
17903 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
17904 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
17905 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
17906 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
17907 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
17908 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17909 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17910 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17911 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
17912 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
17913 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
17914 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
17915 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
17916 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
17917 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17918 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17919 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17920 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17921 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
17922
17923 #undef ARM_VARIANT
17924 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
17925
17926 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
17927 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
17928 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
17929 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
17930 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
17931 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
17932 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17933 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17934 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17935 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17936 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17937 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17938 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17939 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17940 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17941 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17942 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17943 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17944 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17945 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17946 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
17947 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17948 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17949 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17950 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17951 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17952 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17953 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17954 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17955 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17956 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17957 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17958 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17959 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17960 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17961 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17962 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17963 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17964 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17965 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17966 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17967 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17968 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17969 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17970 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17971 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17972 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17973 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17974 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17975 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17976 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17977 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17978 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17979 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17980 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17981 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17982 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17983
17984 #undef ARM_VARIANT
17985 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
17986
17987 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
17988 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
17989 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
17990 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
17991 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
17992 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
17993 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
17994 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
17995 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
17996 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
17997 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
17998 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
17999 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
18000 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
18001 cCE("cfmv64lr", e000510, 2, (RMDX, RR), rn_rd),
18002 cCE("cfmvr64l", e100510, 2, (RR, RMDX), rd_rn),
18003 cCE("cfmv64hr", e000530, 2, (RMDX, RR), rn_rd),
18004 cCE("cfmvr64h", e100530, 2, (RR, RMDX), rd_rn),
18005 cCE("cfmval32", e200440, 2, (RMAX, RMFX), rd_rn),
18006 cCE("cfmv32al", e100440, 2, (RMFX, RMAX), rd_rn),
18007 cCE("cfmvam32", e200460, 2, (RMAX, RMFX), rd_rn),
18008 cCE("cfmv32am", e100460, 2, (RMFX, RMAX), rd_rn),
18009 cCE("cfmvah32", e200480, 2, (RMAX, RMFX), rd_rn),
18010 cCE("cfmv32ah", e100480, 2, (RMFX, RMAX), rd_rn),
18011 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
18012 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
18013 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
18014 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
18015 cCE("cfmvsc32", e2004e0, 2, (RMDS, RMDX), mav_dspsc),
18016 cCE("cfmv32sc", e1004e0, 2, (RMDX, RMDS), rd),
18017 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
18018 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
18019 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
18020 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
18021 cCE("cfcvt32s", e000480, 2, (RMF, RMFX), rd_rn),
18022 cCE("cfcvt32d", e0004a0, 2, (RMD, RMFX), rd_rn),
18023 cCE("cfcvt64s", e0004c0, 2, (RMF, RMDX), rd_rn),
18024 cCE("cfcvt64d", e0004e0, 2, (RMD, RMDX), rd_rn),
18025 cCE("cfcvts32", e100580, 2, (RMFX, RMF), rd_rn),
18026 cCE("cfcvtd32", e1005a0, 2, (RMFX, RMD), rd_rn),
18027 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
18028 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
18029 cCE("cfrshl32", e000550, 3, (RMFX, RMFX, RR), mav_triple),
18030 cCE("cfrshl64", e000570, 3, (RMDX, RMDX, RR), mav_triple),
18031 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
18032 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
18033 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
18034 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
18035 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
18036 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
18037 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
18038 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
18039 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
18040 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
18041 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
18042 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
18043 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
18044 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
18045 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
18046 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
18047 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
18048 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
18049 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
18050 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
18051 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18052 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
18053 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18054 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
18055 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18056 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
18057 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18058 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18059 cCE("cfmadd32", e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
18060 cCE("cfmsub32", e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
18061 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
18062 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
18063 };
18064 #undef ARM_VARIANT
18065 #undef THUMB_VARIANT
18066 #undef TCE
18067 #undef TCM
18068 #undef TUE
18069 #undef TUF
18070 #undef TCC
18071 #undef cCE
18072 #undef cCL
18073 #undef C3E
18074 #undef CE
18075 #undef CM
18076 #undef UE
18077 #undef UF
18078 #undef UT
18079 #undef NUF
18080 #undef nUF
18081 #undef NCE
18082 #undef nCE
18083 #undef OPS0
18084 #undef OPS1
18085 #undef OPS2
18086 #undef OPS3
18087 #undef OPS4
18088 #undef OPS5
18089 #undef OPS6
18090 #undef do_0
18091 \f
18092 /* MD interface: bits in the object file. */
18093
18094 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
18095 for use in the a.out file, and stores them in the array pointed to by buf.
18096 This knows about the endian-ness of the target machine and does
18097 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
18098 2 (short) and 4 (long) Floating numbers are put out as a series of
18099 LITTLENUMS (shorts, here at least). */
18100
18101 void
18102 md_number_to_chars (char * buf, valueT val, int n)
18103 {
18104 if (target_big_endian)
18105 number_to_chars_bigendian (buf, val, n);
18106 else
18107 number_to_chars_littleendian (buf, val, n);
18108 }
18109
18110 static valueT
18111 md_chars_to_number (char * buf, int n)
18112 {
18113 valueT result = 0;
18114 unsigned char * where = (unsigned char *) buf;
18115
18116 if (target_big_endian)
18117 {
18118 while (n--)
18119 {
18120 result <<= 8;
18121 result |= (*where++ & 255);
18122 }
18123 }
18124 else
18125 {
18126 while (n--)
18127 {
18128 result <<= 8;
18129 result |= (where[n] & 255);
18130 }
18131 }
18132
18133 return result;
18134 }
18135
18136 /* MD interface: Sections. */
18137
18138 /* Estimate the size of a frag before relaxing. Assume everything fits in
18139 2 bytes. */
18140
18141 int
18142 md_estimate_size_before_relax (fragS * fragp,
18143 segT segtype ATTRIBUTE_UNUSED)
18144 {
18145 fragp->fr_var = 2;
18146 return 2;
18147 }
18148
/* Convert a machine dependent frag.  Called once relaxation has decided
   the final size of the frag (fragp->fr_var is 2 for a 16-bit Thumb
   encoding, 4 for a 32-bit Thumb-2 encoding): rewrite the instruction
   bytes if the wide form was chosen, and attach the appropriate fixup
   for the still-unresolved operand.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The instruction being relaxed lives at the end of the fixed part
     of the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* Fetch the original 16-bit encoding so its register fields can be
     transplanted into the 32-bit encoding below.  */
  old_op = bfd_get_16(abfd, buf);
  /* Build the expression for the fixup: either symbol+offset or a
     plain constant.  */
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  /* fr_subtype holds the T_MNEM_* opcode selected at assembly time.  */
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Top four bits 4 (0100x) / 9 (1001x) are the SP/PC-relative
	     16-bit forms, which keep Rt in bits 8-10; other forms keep
	     Rt in bits 0-2 and Rn in bits 3-5.  The shifts below move
	     those fields into the 32-bit encoding's register slots --
	     presumably Rt at bits 12-15 and Rn at bits 16-19; confirm
	     against the Thumb-2 encoding tables.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  /* NOTE(review): bits 10-11 appear to select the immediate
	     addressing variant of the wide load/store -- verify against
	     the architecture manual.  */
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the explicit PC-relative load form is PC-relative here.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Move the destination register (bits 4-7 of the narrow
	     encoding) into the wide encoding's Rd field.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Compensate for the PC-relative base of the narrow ADR
	     encoding.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* MOV/MOVS place the register in the Rd field (no shift);
	     CMP/CMN place it in the Rn field (shift by 8).  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  /* NOTE(review): this mask/or rewrites the wide encoding into
	     its modified-immediate form -- confirm the bit pattern
	     against the Thumb-2 data-processing encodings.  */
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Transplant the condition code (bits 8-11 of the narrow
	     encoding) into the wide conditional-branch encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  /* Destination register from bits 4-7 of the narrow form.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Rd from bits 4-7, Rn from bits 0-3 of the narrow form.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 is the S (set-flags) bit in the wide encoding; the
	     flag-setting variants take a different immediate reloc.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  /* Emit the fixup for the (possibly rewritten) instruction and
     account for the variable bytes now that they are final.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;
}
18317
18318 /* Return the size of a relaxable immediate operand instruction.
18319 SHIFT and SIZE specify the form of the allowable immediate. */
18320 static int
18321 relax_immediate (fragS *fragp, int size, int shift)
18322 {
18323 offsetT offset;
18324 offsetT mask;
18325 offsetT low;
18326
18327 /* ??? Should be able to do better than this. */
18328 if (fragp->fr_symbol)
18329 return 4;
18330
18331 low = (1 << shift) - 1;
18332 mask = (1 << (shift + size)) - (1 << shift);
18333 offset = fragp->fr_offset;
18334 /* Force misaligned offsets to 32-bit variant. */
18335 if (offset & low)
18336 return 4;
18337 if (offset & ~mask)
18338 return 4;
18339 return 2;
18340 }
18341
/* Get the address of a symbol during relaxation.  FRAGP is the frag
   referencing the symbol and STRETCH is the cumulative number of bytes
   by which earlier frags have moved so far on this relaxation pass.
   Returns the symbol's value plus the frag's offset, adjusted for any
   stretch the symbol's own frag has not yet absorbed.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the stretch towards zero to the alignment
		 boundary this frag enforces; the alignment padding
		 absorbs that part of the movement.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Once an alignment has soaked up all the stretch there
		 is nothing left to propagate to the symbol.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* Apply the surviving stretch (if the loop exited early because
	 stretch collapsed to zero, adding it is a harmless no-op).  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
18391
18392 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
18393 load. */
18394 static int
18395 relax_adr (fragS *fragp, asection *sec, long stretch)
18396 {
18397 addressT addr;
18398 offsetT val;
18399
18400 /* Assume worst case for symbols not known to be in the same section. */
18401 if (fragp->fr_symbol == NULL
18402 || !S_IS_DEFINED (fragp->fr_symbol)
18403 || sec != S_GET_SEGMENT (fragp->fr_symbol))
18404 return 4;
18405
18406 val = relaxed_symbol_addr (fragp, stretch);
18407 addr = fragp->fr_address + fragp->fr_fix;
18408 addr = (addr + 4) & ~3;
18409 /* Force misaligned targets to 32-bit variant. */
18410 if (val & 3)
18411 return 4;
18412 val -= addr;
18413 if (val < 0 || val > 1020)
18414 return 4;
18415 return 2;
18416 }
18417
18418 /* Return the size of a relaxable add/sub immediate instruction. */
18419 static int
18420 relax_addsub (fragS *fragp, asection *sec)
18421 {
18422 char *buf;
18423 int op;
18424
18425 buf = fragp->fr_literal + fragp->fr_fix;
18426 op = bfd_get_16(sec->owner, buf);
18427 if ((op & 0xf) == ((op >> 4) & 0xf))
18428 return relax_immediate (fragp, 8, 0);
18429 else
18430 return relax_immediate (fragp, 3, 0);
18431 }
18432
18433
18434 /* Return the size of a relaxable branch instruction. BITS is the
18435 size of the offset field in the narrow instruction. */
18436
18437 static int
18438 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
18439 {
18440 addressT addr;
18441 offsetT val;
18442 offsetT limit;
18443
18444 /* Assume worst case for symbols not known to be in the same section. */
18445 if (!S_IS_DEFINED (fragp->fr_symbol)
18446 || sec != S_GET_SEGMENT (fragp->fr_symbol))
18447 return 4;
18448
18449 #ifdef OBJ_ELF
18450 if (S_IS_DEFINED (fragp->fr_symbol)
18451 && ARM_IS_FUNC (fragp->fr_symbol))
18452 return 4;
18453 #endif
18454
18455 val = relaxed_symbol_addr (fragp, stretch);
18456 addr = fragp->fr_address + fragp->fr_fix + 4;
18457 val -= addr;
18458
18459 /* Offset is a signed value *2 */
18460 limit = 1 << bits;
18461 if (val >= limit || val < -limit)
18462 return 4;
18463 return 2;
18464 }
18465
18466
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  Dispatches on the
   Thumb mnemonic recorded in fr_subtype; each helper returns the byte
   size (2 or 4) the instruction needs on this pass.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit immediate, scaled by 4.  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      /* 5-bit immediate, scaled by 4.  */
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* 5-bit immediate, scaled by 2.  */
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* 5-bit immediate, unscaled.  */
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
18545
18546 /* Round up a section size to the appropriate boundary. */
18547
18548 valueT
18549 md_section_align (segT segment ATTRIBUTE_UNUSED,
18550 valueT size)
18551 {
18552 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
18553 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
18554 {
18555 /* For a.out, force the section size to be aligned. If we don't do
18556 this, BFD will align it for us, but it will not write out the
18557 final bytes of the section. This may be a bug in BFD, but it is
18558 easier to fix it here since that is how the other a.out targets
18559 work. */
18560 int align;
18561
18562 align = bfd_get_section_alignment (stdoutput, segment);
18563 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
18564 }
18565 #endif
18566
18567 return size;
18568 }
18569
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: zero bytes up to a no-op boundary,
   then no-op instructions of the width and endianness appropriate for
   the frag's recorded ARM/Thumb mode.  */

void
arm_handle_align (fragS * fragP)
{
  /* No-op encodings, indexed by [architecture variant][endianness].  */
  static char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const char * noop;
  const char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this alignment must fill.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb frag.  On v6t2+, a single narrow no-op may precede wide
	 (4-byte) no-ops; otherwise only the 16-bit no-op is used.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      /* ARM frag: use the v6k no-op when the CPU has it.  */
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  if (bytes & (noop_size - 1))
    {
      /* Zero-pad up to an instruction boundary first; these bytes are
	 data, so (for ELF) mark them with a $d mapping symbol.  */
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
18686
18687 /* Called from md_do_align. Used to create an alignment
18688 frag in a code section. */
18689
18690 void
18691 arm_frag_align_code (int n, int max)
18692 {
18693 char * p;
18694
18695 /* We assume that there will never be a requirement
18696 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
18697 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
18698 {
18699 char err_msg[128];
18700
18701 sprintf (err_msg,
18702 _("alignments greater than %d bytes not supported in .text sections."),
18703 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
18704 as_fatal ("%s", err_msg);
18705 }
18706
18707 p = frag_var (rs_align_code,
18708 MAX_MEM_FOR_RS_ALIGN_CODE,
18709 1,
18710 (relax_substateT) max,
18711 (symbolS *) NULL,
18712 (offsetT) n,
18713 (char *) NULL);
18714 *p = 0;
18715 }
18716
18717 /* Perform target specific initialisation of a frag.
18718 Note - despite the name this initialisation is not done when the frag
18719 is created, but only when its type is assigned. A frag can be created
18720 and used a long time before its type is set, so beware of assuming that
18721 this initialisationis performed first. */
18722
18723 #ifndef OBJ_ELF
18724 void
18725 arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
18726 {
18727 /* Record whether this frag is in an ARM or a THUMB area. */
18728 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
18729 }
18730
18731 #else /* OBJ_ELF is defined. */
18732 void
18733 arm_init_frag (fragS * fragP, int max_chars)
18734 {
18735 /* If the current ARM vs THUMB mode has not already
18736 been recorded into this frag then do so now. */
18737 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
18738 {
18739 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
18740
18741 /* Record a mapping symbol for alignment frags. We will delete this
18742 later if the alignment ends up empty. */
18743 switch (fragP->fr_type)
18744 {
18745 case rs_align:
18746 case rs_align_test:
18747 case rs_fill:
18748 mapping_state_2 (MAP_DATA, max_chars);
18749 break;
18750 case rs_align_code:
18751 mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
18752 break;
18753 default:
18754 break;
18755 }
18756 }
18757 }
18758
18759 /* When we change sections we need to issue a new mapping symbol. */
18760
18761 void
18762 arm_elf_change_section (void)
18763 {
18764 /* Link an unlinked unwind index table section to the .text section. */
18765 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
18766 && elf_linked_to_section (now_seg) == NULL)
18767 elf_linked_to_section (now_seg) = text_section;
18768 }
18769
18770 int
18771 arm_elf_section_type (const char * str, size_t len)
18772 {
18773 if (len == 5 && strncmp (str, "exidx", 5) == 0)
18774 return SHT_ARM_EXIDX;
18775
18776 return -1;
18777 }
18778 \f
18779 /* Code to deal with unwinding tables. */
18780
18781 static void add_unwind_adjustsp (offsetT);
18782
18783 /* Generate any deferred unwind frame offset. */
18784
18785 static void
18786 flush_pending_unwind (void)
18787 {
18788 offsetT offset;
18789
18790 offset = unwind.pending_offset;
18791 unwind.pending_offset = 0;
18792 if (offset != 0)
18793 add_unwind_adjustsp (offset);
18794 }
18795
18796 /* Add an opcode to this list for this function. Two-byte opcodes should
18797 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
18798 order. */
18799
18800 static void
18801 add_unwind_opcode (valueT op, int length)
18802 {
18803 /* Add any deferred stack adjustment. */
18804 if (unwind.pending_offset)
18805 flush_pending_unwind ();
18806
18807 unwind.sp_restored = 0;
18808
18809 if (unwind.opcode_count + length > unwind.opcode_alloc)
18810 {
18811 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
18812 if (unwind.opcodes)
18813 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
18814 unwind.opcode_alloc);
18815 else
18816 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
18817 }
18818 while (length > 0)
18819 {
18820 length--;
18821 unwind.opcodes[unwind.opcode_count] = op & 0xff;
18822 op >>= 8;
18823 unwind.opcode_count++;
18824 }
18825 }
18826
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes.
   Chooses between the short, two-opcode and long (uleb128) encodings
   depending on the magnitude and sign of OFFSET.  Remember the opcode
   list is built in reverse order.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	/* A zero uleb128 still needs one byte emitted.  */
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit maximal 0x7f decrements until the
	 remainder fits a single short opcode (0x40 | value).  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
18888
18889 /* Finish the list of unwind opcodes for this function. */
18890 static void
18891 finish_unwind_opcodes (void)
18892 {
18893 valueT op;
18894
18895 if (unwind.fp_used)
18896 {
18897 /* Adjust sp as necessary. */
18898 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
18899 flush_pending_unwind ();
18900
18901 /* After restoring sp from the frame pointer. */
18902 op = 0x90 | unwind.fp_reg;
18903 add_unwind_opcode (op, 1);
18904 }
18905 else
18906 flush_pending_unwind ();
18907 }
18908
18909
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry.  Switches the current section to the unwind section matching
   TEXT_SEG, creating it if needed and propagating COMDAT membership.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  size_t prefix_len;
  size_t text_len;
  char * sec_name;
  size_t sec_name_len;
  int type;
  int flags;
  int linkonce;

  if (idx)
    {
      /* Index table entry: uses the dedicated EXIDX section type.  */
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      /* Unwind table data: ordinary progbits.  */
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      /* Link-once text gets the link-once unwind prefix, with the
	 ".gnu.linkonce.t." part of the name stripped.  */
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  /* Build "<prefix><text_name>".  NOTE(review): sec_name is never
     freed; presumably the section machinery retains the pointer for
     the life of the assembly — confirm before changing ownership.  */
  prefix_len = strlen (prefix);
  text_len = strlen (text_name);
  sec_name_len = prefix_len + text_len;
  sec_name = (char *) xmalloc (sec_name_len + 1);
  memcpy (sec_name, prefix, prefix_len);
  memcpy (sec_name + prefix_len, text_name, text_len);
  sec_name[prefix_len + text_len] = '\0';

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
18985
18986
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   and inline entry.  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* personality_index == -2 marks a .cantunwind frame.  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    /* An extra byte is required for the opcode count.  */
    size = unwind.opcode_count + 1;

  /* Round up to a whole number of 32-bit words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* The table entry must be word aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size - 1;
      n = 3;
      break;

    /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  /* Current word is full; flush it and start the next.  */
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
19144
19145
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_arm_frame_initial_instructions (void)
{
  /* On entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
19153 #endif /* OBJ_ELF */
19154
19155 /* Convert REGNAME to a DWARF-2 register number. */
19156
19157 int
19158 tc_arm_regname_to_dw2regnum (char *regname)
19159 {
19160 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
19161
19162 if (reg == FAIL)
19163 return -1;
19164
19165 return reg;
19166 }
19167
19168 #ifdef TE_PE
19169 void
19170 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
19171 {
19172 expressionS exp;
19173
19174 exp.X_op = O_secrel;
19175 exp.X_add_symbol = symbol;
19176 exp.X_add_number = 0;
19177 emit_expr (&exp, size);
19178 }
19179 #endif
19180
19181 /* MD interface: Symbol and relocation handling. */
19182
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* For a branch to an ARMv5T+ function symbol, restore the full
	 base (undoing any zeroing above) before adding the bias.  */
      if (fixP->fx_addsy
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
19302
19303 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
19304 Otherwise we have no need to default values of symbols. */
19305
19306 symbolS *
19307 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
19308 {
19309 #ifdef OBJ_ELF
19310 if (name[0] == '_' && name[1] == 'G'
19311 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
19312 {
19313 if (!GOT_symbol)
19314 {
19315 if (symbol_find (name))
19316 as_bad (_("GOT already in the symbol table"));
19317
19318 GOT_symbol = symbol_new (name, undefined_section,
19319 (valueT) 0, & zero_address_frag);
19320 }
19321
19322 return GOT_symbol;
19323 }
19324 #endif
19325
19326 return NULL;
19327 }
19328
/* Subroutine of md_apply_fix.  Check to see if an immediate can be
   computed as two separate immediate values, added together.  We
   already know that this value cannot be computed by just one ARM
   instruction.  On success, returns the low part's encoding (8-bit
   value with the rotation count in bits 7 and up) and stores the high
   part's encoding through HIGHPART; otherwise returns FAIL.  */

static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* Try each even rotation until the rotated value has a non-zero low
     byte; that byte becomes the first immediate.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	if (a & 0xff00)
	  {
	    /* The rest must fit entirely in the adjacent byte, or no
	       two-part split starting at this rotation works.  */
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    /* Only the top byte can remain; anything else would have
	       matched one of the earlier tests.  */
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	return (a & 0xff) | (i << 7);
      }

  return FAIL;
}
19367
19368 static int
19369 validate_offset_imm (unsigned int val, int hwse)
19370 {
19371 if ((hwse && val > 255) || val > 4095)
19372 return FAIL;
19373 return val;
19374 }
19375
/* Subroutine of md_apply_fix.  Do those data_ops which can take a
   negative immediate constant by altering the instruction.  A bit of
   a hack really.
	MOV <-> MVN
	AND <-> BIC
	ADC <-> SBC
   by inverting the second operand, and
	ADD <-> SUB
	CMP <-> CMN
   by negating the second operand.
   On success the opcode field of *INSTRUCTION is rewritten to the
   paired opcode and the new immediate encoding is returned; FAIL is
   returned if neither the negated nor inverted value is encodable.  */

static int
negate_data_op (unsigned long * instruction,
		unsigned long value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  /* Precompute both candidate encodings; each is FAIL if the
     corresponding value cannot be represented as an ARM immediate.  */
  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:		 /* ADD <-> SUB */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:		 /* CMP <-> CMN */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:		 /* MOV <-> MVN */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:		 /* AND <-> BIC */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:		  /* ADC <-> SBC */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned) FAIL)
    return FAIL;

  /* Splice the replacement opcode into the instruction.  */
  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
19464
/* Like negate_data_op, but for Thumb-2.  Rewrites the opcode field of
   *INSTRUCTION to the paired opcode and returns the new immediate
   encoding, or FAIL if no equivalent exists.  */

static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  /* Precompute both candidate encodings; each is FAIL if the value
     cannot be represented as a Thumb-2 modified immediate.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* rd == 15 means this is TST (AND with the result discarded),
	 which cannot be converted.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Splice the replacement opcode into the instruction.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
19540
19541 /* Read a 32-bit thumb instruction from buf. */
19542 static unsigned long
19543 get_thumb32_insn (char * buf)
19544 {
19545 unsigned long insn;
19546 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
19547 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
19548
19549 return insn;
19550 }
19551
19552
19553 /* We usually want to set the low bit on the address of thumb function
19554 symbols. In particular .word foo - . should have the low bit set.
19555 Generic code tries to fold the difference of two symbols to
19556 a constant. Prevent this and force a relocation when the first symbols
19557 is a thumb function. */
19558
19559 bfd_boolean
19560 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
19561 {
19562 if (op == O_subtract
19563 && l->X_op == O_symbol
19564 && r->X_op == O_symbol
19565 && THUMB_IS_FUNC (l->X_add_symbol))
19566 {
19567 l->X_op = O_subtract;
19568 l->X_op_symbol = r->X_add_symbol;
19569 l->X_add_number -= r->X_add_number;
19570 return TRUE;
19571 }
19572
19573 /* Process as normal. */
19574 return FALSE;
19575 }
19576
19577 /* Encode Thumb2 unconditional branches and calls. The encoding
19578 for the 2 are identical for the immediate values. */
19579
19580 static void
19581 encode_thumb2_b_bl_offset (char * buf, offsetT value)
19582 {
19583 #define T2I1I2MASK ((1 << 13) | (1 << 11))
19584 offsetT newval;
19585 offsetT newval2;
19586 addressT S, I1, I2, lo, hi;
19587
19588 S = (value >> 24) & 0x01;
19589 I1 = (value >> 23) & 0x01;
19590 I2 = (value >> 22) & 0x01;
19591 hi = (value >> 12) & 0x3ff;
19592 lo = (value >> 1) & 0x7ff;
19593 newval = md_chars_to_number (buf, THUMB_SIZE);
19594 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
19595 newval |= (S << 10) | hi;
19596 newval2 &= ~T2I1I2MASK;
19597 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
19598 md_number_to_chars (buf, newval, THUMB_SIZE);
19599 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
19600 }
19601
19602 void
19603 md_apply_fix (fixS * fixP,
19604 valueT * valP,
19605 segT seg)
19606 {
19607 offsetT value = * valP;
19608 offsetT newval;
19609 unsigned int newimm;
19610 unsigned long temp;
19611 int sign;
19612 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
19613
19614 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
19615
19616 /* Note whether this will delete the relocation. */
19617
19618 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
19619 fixP->fx_done = 1;
19620
19621 /* On a 64-bit host, silently truncate 'value' to 32 bits for
19622 consistency with the behaviour on 32-bit hosts. Remember value
19623 for emit_reloc. */
19624 value &= 0xffffffff;
19625 value ^= 0x80000000;
19626 value -= 0x80000000;
19627
19628 *valP = value;
19629 fixP->fx_addnumber = value;
19630
19631 /* Same treatment for fixP->fx_offset. */
19632 fixP->fx_offset &= 0xffffffff;
19633 fixP->fx_offset ^= 0x80000000;
19634 fixP->fx_offset -= 0x80000000;
19635
19636 switch (fixP->fx_r_type)
19637 {
19638 case BFD_RELOC_NONE:
19639 /* This will need to go in the object file. */
19640 fixP->fx_done = 0;
19641 break;
19642
19643 case BFD_RELOC_ARM_IMMEDIATE:
19644 /* We claim that this fixup has been processed here,
19645 even if in fact we generate an error because we do
19646 not have a reloc for it, so tc_gen_reloc will reject it. */
19647 fixP->fx_done = 1;
19648
19649 if (fixP->fx_addsy
19650 && ! S_IS_DEFINED (fixP->fx_addsy))
19651 {
19652 as_bad_where (fixP->fx_file, fixP->fx_line,
19653 _("undefined symbol %s used as an immediate value"),
19654 S_GET_NAME (fixP->fx_addsy));
19655 break;
19656 }
19657
19658 if (fixP->fx_addsy
19659 && S_GET_SEGMENT (fixP->fx_addsy) != seg)
19660 {
19661 as_bad_where (fixP->fx_file, fixP->fx_line,
19662 _("symbol %s is in a different section"),
19663 S_GET_NAME (fixP->fx_addsy));
19664 break;
19665 }
19666
19667 newimm = encode_arm_immediate (value);
19668 temp = md_chars_to_number (buf, INSN_SIZE);
19669
19670 /* If the instruction will fail, see if we can fix things up by
19671 changing the opcode. */
19672 if (newimm == (unsigned int) FAIL
19673 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
19674 {
19675 as_bad_where (fixP->fx_file, fixP->fx_line,
19676 _("invalid constant (%lx) after fixup"),
19677 (unsigned long) value);
19678 break;
19679 }
19680
19681 newimm |= (temp & 0xfffff000);
19682 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
19683 break;
19684
19685 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
19686 {
19687 unsigned int highpart = 0;
19688 unsigned int newinsn = 0xe1a00000; /* nop. */
19689
19690 if (fixP->fx_addsy
19691 && ! S_IS_DEFINED (fixP->fx_addsy))
19692 {
19693 as_bad_where (fixP->fx_file, fixP->fx_line,
19694 _("undefined symbol %s used as an immediate value"),
19695 S_GET_NAME (fixP->fx_addsy));
19696 break;
19697 }
19698
19699 if (fixP->fx_addsy
19700 && S_GET_SEGMENT (fixP->fx_addsy) != seg)
19701 {
19702 as_bad_where (fixP->fx_file, fixP->fx_line,
19703 _("symbol %s is in a different section"),
19704 S_GET_NAME (fixP->fx_addsy));
19705 break;
19706 }
19707
19708 newimm = encode_arm_immediate (value);
19709 temp = md_chars_to_number (buf, INSN_SIZE);
19710
19711 /* If the instruction will fail, see if we can fix things up by
19712 changing the opcode. */
19713 if (newimm == (unsigned int) FAIL
19714 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
19715 {
19716 /* No ? OK - try using two ADD instructions to generate
19717 the value. */
19718 newimm = validate_immediate_twopart (value, & highpart);
19719
19720 /* Yes - then make sure that the second instruction is
19721 also an add. */
19722 if (newimm != (unsigned int) FAIL)
19723 newinsn = temp;
19724 /* Still No ? Try using a negated value. */
19725 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
19726 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
19727 /* Otherwise - give up. */
19728 else
19729 {
19730 as_bad_where (fixP->fx_file, fixP->fx_line,
19731 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
19732 (long) value);
19733 break;
19734 }
19735
19736 /* Replace the first operand in the 2nd instruction (which
19737 is the PC) with the destination register. We have
19738 already added in the PC in the first instruction and we
19739 do not want to do it again. */
19740 newinsn &= ~ 0xf0000;
19741 newinsn |= ((newinsn & 0x0f000) << 4);
19742 }
19743
19744 newimm |= (temp & 0xfffff000);
19745 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
19746
19747 highpart |= (newinsn & 0xfffff000);
19748 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
19749 }
19750 break;
19751
19752 case BFD_RELOC_ARM_OFFSET_IMM:
19753 if (!fixP->fx_done && seg->use_rela_p)
19754 value = 0;
19755
19756 case BFD_RELOC_ARM_LITERAL:
19757 sign = value >= 0;
19758
19759 if (value < 0)
19760 value = - value;
19761
19762 if (validate_offset_imm (value, 0) == FAIL)
19763 {
19764 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
19765 as_bad_where (fixP->fx_file, fixP->fx_line,
19766 _("invalid literal constant: pool needs to be closer"));
19767 else
19768 as_bad_where (fixP->fx_file, fixP->fx_line,
19769 _("bad immediate value for offset (%ld)"),
19770 (long) value);
19771 break;
19772 }
19773
19774 newval = md_chars_to_number (buf, INSN_SIZE);
19775 newval &= 0xff7ff000;
19776 newval |= value | (sign ? INDEX_UP : 0);
19777 md_number_to_chars (buf, newval, INSN_SIZE);
19778 break;
19779
19780 case BFD_RELOC_ARM_OFFSET_IMM8:
19781 case BFD_RELOC_ARM_HWLITERAL:
19782 sign = value >= 0;
19783
19784 if (value < 0)
19785 value = - value;
19786
19787 if (validate_offset_imm (value, 1) == FAIL)
19788 {
19789 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
19790 as_bad_where (fixP->fx_file, fixP->fx_line,
19791 _("invalid literal constant: pool needs to be closer"));
19792 else
19793 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
19794 (long) value);
19795 break;
19796 }
19797
19798 newval = md_chars_to_number (buf, INSN_SIZE);
19799 newval &= 0xff7ff0f0;
19800 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
19801 md_number_to_chars (buf, newval, INSN_SIZE);
19802 break;
19803
19804 case BFD_RELOC_ARM_T32_OFFSET_U8:
19805 if (value < 0 || value > 1020 || value % 4 != 0)
19806 as_bad_where (fixP->fx_file, fixP->fx_line,
19807 _("bad immediate value for offset (%ld)"), (long) value);
19808 value /= 4;
19809
19810 newval = md_chars_to_number (buf+2, THUMB_SIZE);
19811 newval |= value;
19812 md_number_to_chars (buf+2, newval, THUMB_SIZE);
19813 break;
19814
19815 case BFD_RELOC_ARM_T32_OFFSET_IMM:
19816 /* This is a complicated relocation used for all varieties of Thumb32
19817 load/store instruction with immediate offset:
19818
19819 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
19820 *4, optional writeback(W)
19821 (doubleword load/store)
19822
19823 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
19824 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
19825 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
19826 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
19827 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
19828
19829 Uppercase letters indicate bits that are already encoded at
19830 this point. Lowercase letters are our problem. For the
19831 second block of instructions, the secondary opcode nybble
19832 (bits 8..11) is present, and bit 23 is zero, even if this is
19833 a PC-relative operation. */
19834 newval = md_chars_to_number (buf, THUMB_SIZE);
19835 newval <<= 16;
19836 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
19837
19838 if ((newval & 0xf0000000) == 0xe0000000)
19839 {
19840 /* Doubleword load/store: 8-bit offset, scaled by 4. */
19841 if (value >= 0)
19842 newval |= (1 << 23);
19843 else
19844 value = -value;
19845 if (value % 4 != 0)
19846 {
19847 as_bad_where (fixP->fx_file, fixP->fx_line,
19848 _("offset not a multiple of 4"));
19849 break;
19850 }
19851 value /= 4;
19852 if (value > 0xff)
19853 {
19854 as_bad_where (fixP->fx_file, fixP->fx_line,
19855 _("offset out of range"));
19856 break;
19857 }
19858 newval &= ~0xff;
19859 }
19860 else if ((newval & 0x000f0000) == 0x000f0000)
19861 {
19862 /* PC-relative, 12-bit offset. */
19863 if (value >= 0)
19864 newval |= (1 << 23);
19865 else
19866 value = -value;
19867 if (value > 0xfff)
19868 {
19869 as_bad_where (fixP->fx_file, fixP->fx_line,
19870 _("offset out of range"));
19871 break;
19872 }
19873 newval &= ~0xfff;
19874 }
19875 else if ((newval & 0x00000100) == 0x00000100)
19876 {
19877 /* Writeback: 8-bit, +/- offset. */
19878 if (value >= 0)
19879 newval |= (1 << 9);
19880 else
19881 value = -value;
19882 if (value > 0xff)
19883 {
19884 as_bad_where (fixP->fx_file, fixP->fx_line,
19885 _("offset out of range"));
19886 break;
19887 }
19888 newval &= ~0xff;
19889 }
19890 else if ((newval & 0x00000f00) == 0x00000e00)
19891 {
19892 /* T-instruction: positive 8-bit offset. */
19893 if (value < 0 || value > 0xff)
19894 {
19895 as_bad_where (fixP->fx_file, fixP->fx_line,
19896 _("offset out of range"));
19897 break;
19898 }
19899 newval &= ~0xff;
19900 newval |= value;
19901 }
19902 else
19903 {
19904 /* Positive 12-bit or negative 8-bit offset. */
19905 int limit;
19906 if (value >= 0)
19907 {
19908 newval |= (1 << 23);
19909 limit = 0xfff;
19910 }
19911 else
19912 {
19913 value = -value;
19914 limit = 0xff;
19915 }
19916 if (value > limit)
19917 {
19918 as_bad_where (fixP->fx_file, fixP->fx_line,
19919 _("offset out of range"));
19920 break;
19921 }
19922 newval &= ~limit;
19923 }
19924
19925 newval |= value;
19926 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
19927 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
19928 break;
19929
19930 case BFD_RELOC_ARM_SHIFT_IMM:
19931 newval = md_chars_to_number (buf, INSN_SIZE);
19932 if (((unsigned long) value) > 32
19933 || (value == 32
19934 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
19935 {
19936 as_bad_where (fixP->fx_file, fixP->fx_line,
19937 _("shift expression is too large"));
19938 break;
19939 }
19940
19941 if (value == 0)
19942 /* Shifts of zero must be done as lsl. */
19943 newval &= ~0x60;
19944 else if (value == 32)
19945 value = 0;
19946 newval &= 0xfffff07f;
19947 newval |= (value & 0x1f) << 7;
19948 md_number_to_chars (buf, newval, INSN_SIZE);
19949 break;
19950
19951 case BFD_RELOC_ARM_T32_IMMEDIATE:
19952 case BFD_RELOC_ARM_T32_ADD_IMM:
19953 case BFD_RELOC_ARM_T32_IMM12:
19954 case BFD_RELOC_ARM_T32_ADD_PC12:
19955 /* We claim that this fixup has been processed here,
19956 even if in fact we generate an error because we do
19957 not have a reloc for it, so tc_gen_reloc will reject it. */
19958 fixP->fx_done = 1;
19959
19960 if (fixP->fx_addsy
19961 && ! S_IS_DEFINED (fixP->fx_addsy))
19962 {
19963 as_bad_where (fixP->fx_file, fixP->fx_line,
19964 _("undefined symbol %s used as an immediate value"),
19965 S_GET_NAME (fixP->fx_addsy));
19966 break;
19967 }
19968
19969 newval = md_chars_to_number (buf, THUMB_SIZE);
19970 newval <<= 16;
19971 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
19972
19973 newimm = FAIL;
19974 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
19975 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
19976 {
19977 newimm = encode_thumb32_immediate (value);
19978 if (newimm == (unsigned int) FAIL)
19979 newimm = thumb32_negate_data_op (&newval, value);
19980 }
19981 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
19982 && newimm == (unsigned int) FAIL)
19983 {
19984 /* Turn add/sum into addw/subw. */
19985 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
19986 newval = (newval & 0xfeffffff) | 0x02000000;
19987
19988 /* 12 bit immediate for addw/subw. */
19989 if (value < 0)
19990 {
19991 value = -value;
19992 newval ^= 0x00a00000;
19993 }
19994 if (value > 0xfff)
19995 newimm = (unsigned int) FAIL;
19996 else
19997 newimm = value;
19998 }
19999
20000 if (newimm == (unsigned int)FAIL)
20001 {
20002 as_bad_where (fixP->fx_file, fixP->fx_line,
20003 _("invalid constant (%lx) after fixup"),
20004 (unsigned long) value);
20005 break;
20006 }
20007
20008 newval |= (newimm & 0x800) << 15;
20009 newval |= (newimm & 0x700) << 4;
20010 newval |= (newimm & 0x0ff);
20011
20012 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
20013 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
20014 break;
20015
20016 case BFD_RELOC_ARM_SMC:
20017 if (((unsigned long) value) > 0xffff)
20018 as_bad_where (fixP->fx_file, fixP->fx_line,
20019 _("invalid smc expression"));
20020 newval = md_chars_to_number (buf, INSN_SIZE);
20021 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
20022 md_number_to_chars (buf, newval, INSN_SIZE);
20023 break;
20024
20025 case BFD_RELOC_ARM_SWI:
20026 if (fixP->tc_fix_data != 0)
20027 {
20028 if (((unsigned long) value) > 0xff)
20029 as_bad_where (fixP->fx_file, fixP->fx_line,
20030 _("invalid swi expression"));
20031 newval = md_chars_to_number (buf, THUMB_SIZE);
20032 newval |= value;
20033 md_number_to_chars (buf, newval, THUMB_SIZE);
20034 }
20035 else
20036 {
20037 if (((unsigned long) value) > 0x00ffffff)
20038 as_bad_where (fixP->fx_file, fixP->fx_line,
20039 _("invalid swi expression"));
20040 newval = md_chars_to_number (buf, INSN_SIZE);
20041 newval |= value;
20042 md_number_to_chars (buf, newval, INSN_SIZE);
20043 }
20044 break;
20045
20046 case BFD_RELOC_ARM_MULTI:
20047 if (((unsigned long) value) > 0xffff)
20048 as_bad_where (fixP->fx_file, fixP->fx_line,
20049 _("invalid expression in load/store multiple"));
20050 newval = value | md_chars_to_number (buf, INSN_SIZE);
20051 md_number_to_chars (buf, newval, INSN_SIZE);
20052 break;
20053
20054 #ifdef OBJ_ELF
20055 case BFD_RELOC_ARM_PCREL_CALL:
20056
20057 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20058 && fixP->fx_addsy
20059 && !S_IS_EXTERNAL (fixP->fx_addsy)
20060 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20061 && THUMB_IS_FUNC (fixP->fx_addsy))
20062 /* Flip the bl to blx. This is a simple flip
20063 bit here because we generate PCREL_CALL for
20064 unconditional bls. */
20065 {
20066 newval = md_chars_to_number (buf, INSN_SIZE);
20067 newval = newval | 0x10000000;
20068 md_number_to_chars (buf, newval, INSN_SIZE);
20069 temp = 1;
20070 fixP->fx_done = 1;
20071 }
20072 else
20073 temp = 3;
20074 goto arm_branch_common;
20075
20076 case BFD_RELOC_ARM_PCREL_JUMP:
20077 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20078 && fixP->fx_addsy
20079 && !S_IS_EXTERNAL (fixP->fx_addsy)
20080 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20081 && THUMB_IS_FUNC (fixP->fx_addsy))
20082 {
20083 /* This would map to a bl<cond>, b<cond>,
20084 b<always> to a Thumb function. We
20085 need to force a relocation for this particular
20086 case. */
20087 newval = md_chars_to_number (buf, INSN_SIZE);
20088 fixP->fx_done = 0;
20089 }
20090
20091 case BFD_RELOC_ARM_PLT32:
20092 #endif
20093 case BFD_RELOC_ARM_PCREL_BRANCH:
20094 temp = 3;
20095 goto arm_branch_common;
20096
20097 case BFD_RELOC_ARM_PCREL_BLX:
20098
20099 temp = 1;
20100 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20101 && fixP->fx_addsy
20102 && !S_IS_EXTERNAL (fixP->fx_addsy)
20103 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20104 && ARM_IS_FUNC (fixP->fx_addsy))
20105 {
20106 /* Flip the blx to a bl and warn. */
20107 const char *name = S_GET_NAME (fixP->fx_addsy);
20108 newval = 0xeb000000;
20109 as_warn_where (fixP->fx_file, fixP->fx_line,
20110 _("blx to '%s' an ARM ISA state function changed to bl"),
20111 name);
20112 md_number_to_chars (buf, newval, INSN_SIZE);
20113 temp = 3;
20114 fixP->fx_done = 1;
20115 }
20116
20117 #ifdef OBJ_ELF
20118 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
20119 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
20120 #endif
20121
20122 arm_branch_common:
20123 /* We are going to store value (shifted right by two) in the
20124 instruction, in a 24 bit, signed field. Bits 26 through 32 either
20125 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
20126 also be be clear. */
20127 if (value & temp)
20128 as_bad_where (fixP->fx_file, fixP->fx_line,
20129 _("misaligned branch destination"));
20130 if ((value & (offsetT)0xfe000000) != (offsetT)0
20131 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
20132 as_bad_where (fixP->fx_file, fixP->fx_line,
20133 _("branch out of range"));
20134
20135 if (fixP->fx_done || !seg->use_rela_p)
20136 {
20137 newval = md_chars_to_number (buf, INSN_SIZE);
20138 newval |= (value >> 2) & 0x00ffffff;
20139 /* Set the H bit on BLX instructions. */
20140 if (temp == 1)
20141 {
20142 if (value & 2)
20143 newval |= 0x01000000;
20144 else
20145 newval &= ~0x01000000;
20146 }
20147 md_number_to_chars (buf, newval, INSN_SIZE);
20148 }
20149 break;
20150
20151 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
20152 /* CBZ can only branch forward. */
20153
20154 /* Attempts to use CBZ to branch to the next instruction
20155 (which, strictly speaking, are prohibited) will be turned into
20156 no-ops.
20157
20158 FIXME: It may be better to remove the instruction completely and
20159 perform relaxation. */
20160 if (value == -2)
20161 {
20162 newval = md_chars_to_number (buf, THUMB_SIZE);
20163 newval = 0xbf00; /* NOP encoding T1 */
20164 md_number_to_chars (buf, newval, THUMB_SIZE);
20165 }
20166 else
20167 {
20168 if (value & ~0x7e)
20169 as_bad_where (fixP->fx_file, fixP->fx_line,
20170 _("branch out of range"));
20171
20172 if (fixP->fx_done || !seg->use_rela_p)
20173 {
20174 newval = md_chars_to_number (buf, THUMB_SIZE);
20175 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
20176 md_number_to_chars (buf, newval, THUMB_SIZE);
20177 }
20178 }
20179 break;
20180
20181 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
20182 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
20183 as_bad_where (fixP->fx_file, fixP->fx_line,
20184 _("branch out of range"));
20185
20186 if (fixP->fx_done || !seg->use_rela_p)
20187 {
20188 newval = md_chars_to_number (buf, THUMB_SIZE);
20189 newval |= (value & 0x1ff) >> 1;
20190 md_number_to_chars (buf, newval, THUMB_SIZE);
20191 }
20192 break;
20193
20194 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
20195 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
20196 as_bad_where (fixP->fx_file, fixP->fx_line,
20197 _("branch out of range"));
20198
20199 if (fixP->fx_done || !seg->use_rela_p)
20200 {
20201 newval = md_chars_to_number (buf, THUMB_SIZE);
20202 newval |= (value & 0xfff) >> 1;
20203 md_number_to_chars (buf, newval, THUMB_SIZE);
20204 }
20205 break;
20206
20207 case BFD_RELOC_THUMB_PCREL_BRANCH20:
20208 if (fixP->fx_addsy
20209 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20210 && !S_IS_EXTERNAL (fixP->fx_addsy)
20211 && S_IS_DEFINED (fixP->fx_addsy)
20212 && ARM_IS_FUNC (fixP->fx_addsy)
20213 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20214 {
20215 /* Force a relocation for a branch 20 bits wide. */
20216 fixP->fx_done = 0;
20217 }
20218 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
20219 as_bad_where (fixP->fx_file, fixP->fx_line,
20220 _("conditional branch out of range"));
20221
20222 if (fixP->fx_done || !seg->use_rela_p)
20223 {
20224 offsetT newval2;
20225 addressT S, J1, J2, lo, hi;
20226
20227 S = (value & 0x00100000) >> 20;
20228 J2 = (value & 0x00080000) >> 19;
20229 J1 = (value & 0x00040000) >> 18;
20230 hi = (value & 0x0003f000) >> 12;
20231 lo = (value & 0x00000ffe) >> 1;
20232
20233 newval = md_chars_to_number (buf, THUMB_SIZE);
20234 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20235 newval |= (S << 10) | hi;
20236 newval2 |= (J1 << 13) | (J2 << 11) | lo;
20237 md_number_to_chars (buf, newval, THUMB_SIZE);
20238 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
20239 }
20240 break;
20241
20242 case BFD_RELOC_THUMB_PCREL_BLX:
20243
20244 /* If there is a blx from a thumb state function to
20245 another thumb function flip this to a bl and warn
20246 about it. */
20247
20248 if (fixP->fx_addsy
20249 && S_IS_DEFINED (fixP->fx_addsy)
20250 && !S_IS_EXTERNAL (fixP->fx_addsy)
20251 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20252 && THUMB_IS_FUNC (fixP->fx_addsy))
20253 {
20254 const char *name = S_GET_NAME (fixP->fx_addsy);
20255 as_warn_where (fixP->fx_file, fixP->fx_line,
20256 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
20257 name);
20258 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20259 newval = newval | 0x1000;
20260 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
20261 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
20262 fixP->fx_done = 1;
20263 }
20264
20265
20266 goto thumb_bl_common;
20267
20268 case BFD_RELOC_THUMB_PCREL_BRANCH23:
20269
20270 /* A bl from Thumb state ISA to an internal ARM state function
20271 is converted to a blx. */
20272 if (fixP->fx_addsy
20273 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20274 && !S_IS_EXTERNAL (fixP->fx_addsy)
20275 && S_IS_DEFINED (fixP->fx_addsy)
20276 && ARM_IS_FUNC (fixP->fx_addsy)
20277 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20278 {
20279 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20280 newval = newval & ~0x1000;
20281 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
20282 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
20283 fixP->fx_done = 1;
20284 }
20285
20286 thumb_bl_common:
20287
20288 #ifdef OBJ_ELF
20289 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4 &&
20290 fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
20291 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
20292 #endif
20293
20294 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
20295 /* For a BLX instruction, make sure that the relocation is rounded up
20296 to a word boundary. This follows the semantics of the instruction
20297 which specifies that bit 1 of the target address will come from bit
20298 1 of the base address. */
20299 value = (value + 1) & ~ 1;
20300
20301
20302 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
20303 {
20304 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
20305 {
20306 as_bad_where (fixP->fx_file, fixP->fx_line,
20307 _("branch out of range"));
20308 }
20309 else if ((value & ~0x1ffffff)
20310 && ((value & ~0x1ffffff) != ~0x1ffffff))
20311 {
20312 as_bad_where (fixP->fx_file, fixP->fx_line,
20313 _("Thumb2 branch out of range"));
20314 }
20315 }
20316
20317 if (fixP->fx_done || !seg->use_rela_p)
20318 encode_thumb2_b_bl_offset (buf, value);
20319
20320 break;
20321
20322 case BFD_RELOC_THUMB_PCREL_BRANCH25:
20323 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
20324 as_bad_where (fixP->fx_file, fixP->fx_line,
20325 _("branch out of range"));
20326
20327 if (fixP->fx_done || !seg->use_rela_p)
20328 encode_thumb2_b_bl_offset (buf, value);
20329
20330 break;
20331
20332 case BFD_RELOC_8:
20333 if (fixP->fx_done || !seg->use_rela_p)
20334 md_number_to_chars (buf, value, 1);
20335 break;
20336
20337 case BFD_RELOC_16:
20338 if (fixP->fx_done || !seg->use_rela_p)
20339 md_number_to_chars (buf, value, 2);
20340 break;
20341
20342 #ifdef OBJ_ELF
20343 case BFD_RELOC_ARM_TLS_GD32:
20344 case BFD_RELOC_ARM_TLS_LE32:
20345 case BFD_RELOC_ARM_TLS_IE32:
20346 case BFD_RELOC_ARM_TLS_LDM32:
20347 case BFD_RELOC_ARM_TLS_LDO32:
20348 S_SET_THREAD_LOCAL (fixP->fx_addsy);
20349 /* fall through */
20350
20351 case BFD_RELOC_ARM_GOT32:
20352 case BFD_RELOC_ARM_GOTOFF:
20353 if (fixP->fx_done || !seg->use_rela_p)
20354 md_number_to_chars (buf, 0, 4);
20355 break;
20356
20357 case BFD_RELOC_ARM_TARGET2:
20358 /* TARGET2 is not partial-inplace, so we need to write the
20359 addend here for REL targets, because it won't be written out
20360 during reloc processing later. */
20361 if (fixP->fx_done || !seg->use_rela_p)
20362 md_number_to_chars (buf, fixP->fx_offset, 4);
20363 break;
20364 #endif
20365
20366 case BFD_RELOC_RVA:
20367 case BFD_RELOC_32:
20368 case BFD_RELOC_ARM_TARGET1:
20369 case BFD_RELOC_ARM_ROSEGREL32:
20370 case BFD_RELOC_ARM_SBREL32:
20371 case BFD_RELOC_32_PCREL:
20372 #ifdef TE_PE
20373 case BFD_RELOC_32_SECREL:
20374 #endif
20375 if (fixP->fx_done || !seg->use_rela_p)
20376 #ifdef TE_WINCE
20377 /* For WinCE we only do this for pcrel fixups. */
20378 if (fixP->fx_done || fixP->fx_pcrel)
20379 #endif
20380 md_number_to_chars (buf, value, 4);
20381 break;
20382
20383 #ifdef OBJ_ELF
20384 case BFD_RELOC_ARM_PREL31:
20385 if (fixP->fx_done || !seg->use_rela_p)
20386 {
20387 newval = md_chars_to_number (buf, 4) & 0x80000000;
20388 if ((value ^ (value >> 1)) & 0x40000000)
20389 {
20390 as_bad_where (fixP->fx_file, fixP->fx_line,
20391 _("rel31 relocation overflow"));
20392 }
20393 newval |= value & 0x7fffffff;
20394 md_number_to_chars (buf, newval, 4);
20395 }
20396 break;
20397 #endif
20398
20399 case BFD_RELOC_ARM_CP_OFF_IMM:
20400 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
20401 if (value < -1023 || value > 1023 || (value & 3))
20402 as_bad_where (fixP->fx_file, fixP->fx_line,
20403 _("co-processor offset out of range"));
20404 cp_off_common:
20405 sign = value >= 0;
20406 if (value < 0)
20407 value = -value;
20408 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
20409 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
20410 newval = md_chars_to_number (buf, INSN_SIZE);
20411 else
20412 newval = get_thumb32_insn (buf);
20413 newval &= 0xff7fff00;
20414 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
20415 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
20416 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
20417 md_number_to_chars (buf, newval, INSN_SIZE);
20418 else
20419 put_thumb32_insn (buf, newval);
20420 break;
20421
20422 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
20423 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
20424 if (value < -255 || value > 255)
20425 as_bad_where (fixP->fx_file, fixP->fx_line,
20426 _("co-processor offset out of range"));
20427 value *= 4;
20428 goto cp_off_common;
20429
20430 case BFD_RELOC_ARM_THUMB_OFFSET:
20431 newval = md_chars_to_number (buf, THUMB_SIZE);
20432 /* Exactly what ranges, and where the offset is inserted depends
20433 on the type of instruction, we can establish this from the
20434 top 4 bits. */
20435 switch (newval >> 12)
20436 {
20437 case 4: /* PC load. */
20438 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
20439 forced to zero for these loads; md_pcrel_from has already
20440 compensated for this. */
20441 if (value & 3)
20442 as_bad_where (fixP->fx_file, fixP->fx_line,
20443 _("invalid offset, target not word aligned (0x%08lX)"),
20444 (((unsigned long) fixP->fx_frag->fr_address
20445 + (unsigned long) fixP->fx_where) & ~3)
20446 + (unsigned long) value);
20447
20448 if (value & ~0x3fc)
20449 as_bad_where (fixP->fx_file, fixP->fx_line,
20450 _("invalid offset, value too big (0x%08lX)"),
20451 (long) value);
20452
20453 newval |= value >> 2;
20454 break;
20455
20456 case 9: /* SP load/store. */
20457 if (value & ~0x3fc)
20458 as_bad_where (fixP->fx_file, fixP->fx_line,
20459 _("invalid offset, value too big (0x%08lX)"),
20460 (long) value);
20461 newval |= value >> 2;
20462 break;
20463
20464 case 6: /* Word load/store. */
20465 if (value & ~0x7c)
20466 as_bad_where (fixP->fx_file, fixP->fx_line,
20467 _("invalid offset, value too big (0x%08lX)"),
20468 (long) value);
20469 newval |= value << 4; /* 6 - 2. */
20470 break;
20471
20472 case 7: /* Byte load/store. */
20473 if (value & ~0x1f)
20474 as_bad_where (fixP->fx_file, fixP->fx_line,
20475 _("invalid offset, value too big (0x%08lX)"),
20476 (long) value);
20477 newval |= value << 6;
20478 break;
20479
20480 case 8: /* Halfword load/store. */
20481 if (value & ~0x3e)
20482 as_bad_where (fixP->fx_file, fixP->fx_line,
20483 _("invalid offset, value too big (0x%08lX)"),
20484 (long) value);
20485 newval |= value << 5; /* 6 - 1. */
20486 break;
20487
20488 default:
20489 as_bad_where (fixP->fx_file, fixP->fx_line,
20490 "Unable to process relocation for thumb opcode: %lx",
20491 (unsigned long) newval);
20492 break;
20493 }
20494 md_number_to_chars (buf, newval, THUMB_SIZE);
20495 break;
20496
20497 case BFD_RELOC_ARM_THUMB_ADD:
20498 /* This is a complicated relocation, since we use it for all of
20499 the following immediate relocations:
20500
20501 3bit ADD/SUB
20502 8bit ADD/SUB
20503 9bit ADD/SUB SP word-aligned
20504 10bit ADD PC/SP word-aligned
20505
20506 The type of instruction being processed is encoded in the
20507 instruction field:
20508
20509 0x8000 SUB
20510 0x00F0 Rd
20511 0x000F Rs
20512 */
20513 newval = md_chars_to_number (buf, THUMB_SIZE);
20514 {
20515 int rd = (newval >> 4) & 0xf;
20516 int rs = newval & 0xf;
20517 int subtract = !!(newval & 0x8000);
20518
20519 /* Check for HI regs, only very restricted cases allowed:
20520 Adjusting SP, and using PC or SP to get an address. */
20521 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
20522 || (rs > 7 && rs != REG_SP && rs != REG_PC))
20523 as_bad_where (fixP->fx_file, fixP->fx_line,
20524 _("invalid Hi register with immediate"));
20525
20526 /* If value is negative, choose the opposite instruction. */
20527 if (value < 0)
20528 {
20529 value = -value;
20530 subtract = !subtract;
20531 if (value < 0)
20532 as_bad_where (fixP->fx_file, fixP->fx_line,
20533 _("immediate value out of range"));
20534 }
20535
20536 if (rd == REG_SP)
20537 {
20538 if (value & ~0x1fc)
20539 as_bad_where (fixP->fx_file, fixP->fx_line,
20540 _("invalid immediate for stack address calculation"));
20541 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
20542 newval |= value >> 2;
20543 }
20544 else if (rs == REG_PC || rs == REG_SP)
20545 {
20546 if (subtract || value & ~0x3fc)
20547 as_bad_where (fixP->fx_file, fixP->fx_line,
20548 _("invalid immediate for address calculation (value = 0x%08lX)"),
20549 (unsigned long) value);
20550 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
20551 newval |= rd << 8;
20552 newval |= value >> 2;
20553 }
20554 else if (rs == rd)
20555 {
20556 if (value & ~0xff)
20557 as_bad_where (fixP->fx_file, fixP->fx_line,
20558 _("immediate value out of range"));
20559 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
20560 newval |= (rd << 8) | value;
20561 }
20562 else
20563 {
20564 if (value & ~0x7)
20565 as_bad_where (fixP->fx_file, fixP->fx_line,
20566 _("immediate value out of range"));
20567 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
20568 newval |= rd | (rs << 3) | (value << 6);
20569 }
20570 }
20571 md_number_to_chars (buf, newval, THUMB_SIZE);
20572 break;
20573
20574 case BFD_RELOC_ARM_THUMB_IMM:
20575 newval = md_chars_to_number (buf, THUMB_SIZE);
20576 if (value < 0 || value > 255)
20577 as_bad_where (fixP->fx_file, fixP->fx_line,
20578 _("invalid immediate: %ld is out of range"),
20579 (long) value);
20580 newval |= value;
20581 md_number_to_chars (buf, newval, THUMB_SIZE);
20582 break;
20583
20584 case BFD_RELOC_ARM_THUMB_SHIFT:
20585 /* 5bit shift value (0..32). LSL cannot take 32. */
20586 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
20587 temp = newval & 0xf800;
20588 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
20589 as_bad_where (fixP->fx_file, fixP->fx_line,
20590 _("invalid shift value: %ld"), (long) value);
20591 /* Shifts of zero must be encoded as LSL. */
20592 if (value == 0)
20593 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
20594 /* Shifts of 32 are encoded as zero. */
20595 else if (value == 32)
20596 value = 0;
20597 newval |= value << 6;
20598 md_number_to_chars (buf, newval, THUMB_SIZE);
20599 break;
20600
20601 case BFD_RELOC_VTABLE_INHERIT:
20602 case BFD_RELOC_VTABLE_ENTRY:
20603 fixP->fx_done = 0;
20604 return;
20605
20606 case BFD_RELOC_ARM_MOVW:
20607 case BFD_RELOC_ARM_MOVT:
20608 case BFD_RELOC_ARM_THUMB_MOVW:
20609 case BFD_RELOC_ARM_THUMB_MOVT:
20610 if (fixP->fx_done || !seg->use_rela_p)
20611 {
20612 /* REL format relocations are limited to a 16-bit addend. */
20613 if (!fixP->fx_done)
20614 {
20615 if (value < -0x8000 || value > 0x7fff)
20616 as_bad_where (fixP->fx_file, fixP->fx_line,
20617 _("offset out of range"));
20618 }
20619 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
20620 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
20621 {
20622 value >>= 16;
20623 }
20624
20625 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
20626 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
20627 {
20628 newval = get_thumb32_insn (buf);
20629 newval &= 0xfbf08f00;
20630 newval |= (value & 0xf000) << 4;
20631 newval |= (value & 0x0800) << 15;
20632 newval |= (value & 0x0700) << 4;
20633 newval |= (value & 0x00ff);
20634 put_thumb32_insn (buf, newval);
20635 }
20636 else
20637 {
20638 newval = md_chars_to_number (buf, 4);
20639 newval &= 0xfff0f000;
20640 newval |= value & 0x0fff;
20641 newval |= (value & 0xf000) << 4;
20642 md_number_to_chars (buf, newval, 4);
20643 }
20644 }
20645 return;
20646
20647 case BFD_RELOC_ARM_ALU_PC_G0_NC:
20648 case BFD_RELOC_ARM_ALU_PC_G0:
20649 case BFD_RELOC_ARM_ALU_PC_G1_NC:
20650 case BFD_RELOC_ARM_ALU_PC_G1:
20651 case BFD_RELOC_ARM_ALU_PC_G2:
20652 case BFD_RELOC_ARM_ALU_SB_G0_NC:
20653 case BFD_RELOC_ARM_ALU_SB_G0:
20654 case BFD_RELOC_ARM_ALU_SB_G1_NC:
20655 case BFD_RELOC_ARM_ALU_SB_G1:
20656 case BFD_RELOC_ARM_ALU_SB_G2:
20657 gas_assert (!fixP->fx_done);
20658 if (!seg->use_rela_p)
20659 {
20660 bfd_vma insn;
20661 bfd_vma encoded_addend;
20662 bfd_vma addend_abs = abs (value);
20663
20664 /* Check that the absolute value of the addend can be
20665 expressed as an 8-bit constant plus a rotation. */
20666 encoded_addend = encode_arm_immediate (addend_abs);
20667 if (encoded_addend == (unsigned int) FAIL)
20668 as_bad_where (fixP->fx_file, fixP->fx_line,
20669 _("the offset 0x%08lX is not representable"),
20670 (unsigned long) addend_abs);
20671
20672 /* Extract the instruction. */
20673 insn = md_chars_to_number (buf, INSN_SIZE);
20674
20675 /* If the addend is positive, use an ADD instruction.
20676 Otherwise use a SUB. Take care not to destroy the S bit. */
20677 insn &= 0xff1fffff;
20678 if (value < 0)
20679 insn |= 1 << 22;
20680 else
20681 insn |= 1 << 23;
20682
20683 /* Place the encoded addend into the first 12 bits of the
20684 instruction. */
20685 insn &= 0xfffff000;
20686 insn |= encoded_addend;
20687
20688 /* Update the instruction. */
20689 md_number_to_chars (buf, insn, INSN_SIZE);
20690 }
20691 break;
20692
20693 case BFD_RELOC_ARM_LDR_PC_G0:
20694 case BFD_RELOC_ARM_LDR_PC_G1:
20695 case BFD_RELOC_ARM_LDR_PC_G2:
20696 case BFD_RELOC_ARM_LDR_SB_G0:
20697 case BFD_RELOC_ARM_LDR_SB_G1:
20698 case BFD_RELOC_ARM_LDR_SB_G2:
20699 gas_assert (!fixP->fx_done);
20700 if (!seg->use_rela_p)
20701 {
20702 bfd_vma insn;
20703 bfd_vma addend_abs = abs (value);
20704
20705 /* Check that the absolute value of the addend can be
20706 encoded in 12 bits. */
20707 if (addend_abs >= 0x1000)
20708 as_bad_where (fixP->fx_file, fixP->fx_line,
20709 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
20710 (unsigned long) addend_abs);
20711
20712 /* Extract the instruction. */
20713 insn = md_chars_to_number (buf, INSN_SIZE);
20714
20715 /* If the addend is negative, clear bit 23 of the instruction.
20716 Otherwise set it. */
20717 if (value < 0)
20718 insn &= ~(1 << 23);
20719 else
20720 insn |= 1 << 23;
20721
20722 /* Place the absolute value of the addend into the first 12 bits
20723 of the instruction. */
20724 insn &= 0xfffff000;
20725 insn |= addend_abs;
20726
20727 /* Update the instruction. */
20728 md_number_to_chars (buf, insn, INSN_SIZE);
20729 }
20730 break;
20731
20732 case BFD_RELOC_ARM_LDRS_PC_G0:
20733 case BFD_RELOC_ARM_LDRS_PC_G1:
20734 case BFD_RELOC_ARM_LDRS_PC_G2:
20735 case BFD_RELOC_ARM_LDRS_SB_G0:
20736 case BFD_RELOC_ARM_LDRS_SB_G1:
20737 case BFD_RELOC_ARM_LDRS_SB_G2:
20738 gas_assert (!fixP->fx_done);
20739 if (!seg->use_rela_p)
20740 {
20741 bfd_vma insn;
20742 bfd_vma addend_abs = abs (value);
20743
20744 /* Check that the absolute value of the addend can be
20745 encoded in 8 bits. */
20746 if (addend_abs >= 0x100)
20747 as_bad_where (fixP->fx_file, fixP->fx_line,
20748 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
20749 (unsigned long) addend_abs);
20750
20751 /* Extract the instruction. */
20752 insn = md_chars_to_number (buf, INSN_SIZE);
20753
20754 /* If the addend is negative, clear bit 23 of the instruction.
20755 Otherwise set it. */
20756 if (value < 0)
20757 insn &= ~(1 << 23);
20758 else
20759 insn |= 1 << 23;
20760
20761 /* Place the first four bits of the absolute value of the addend
20762 into the first 4 bits of the instruction, and the remaining
20763 four into bits 8 .. 11. */
20764 insn &= 0xfffff0f0;
20765 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
20766
20767 /* Update the instruction. */
20768 md_number_to_chars (buf, insn, INSN_SIZE);
20769 }
20770 break;
20771
20772 case BFD_RELOC_ARM_LDC_PC_G0:
20773 case BFD_RELOC_ARM_LDC_PC_G1:
20774 case BFD_RELOC_ARM_LDC_PC_G2:
20775 case BFD_RELOC_ARM_LDC_SB_G0:
20776 case BFD_RELOC_ARM_LDC_SB_G1:
20777 case BFD_RELOC_ARM_LDC_SB_G2:
20778 gas_assert (!fixP->fx_done);
20779 if (!seg->use_rela_p)
20780 {
20781 bfd_vma insn;
20782 bfd_vma addend_abs = abs (value);
20783
20784 /* Check that the absolute value of the addend is a multiple of
20785 four and, when divided by four, fits in 8 bits. */
20786 if (addend_abs & 0x3)
20787 as_bad_where (fixP->fx_file, fixP->fx_line,
20788 _("bad offset 0x%08lX (must be word-aligned)"),
20789 (unsigned long) addend_abs);
20790
20791 if ((addend_abs >> 2) > 0xff)
20792 as_bad_where (fixP->fx_file, fixP->fx_line,
20793 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
20794 (unsigned long) addend_abs);
20795
20796 /* Extract the instruction. */
20797 insn = md_chars_to_number (buf, INSN_SIZE);
20798
20799 /* If the addend is negative, clear bit 23 of the instruction.
20800 Otherwise set it. */
20801 if (value < 0)
20802 insn &= ~(1 << 23);
20803 else
20804 insn |= 1 << 23;
20805
20806 /* Place the addend (divided by four) into the first eight
20807 bits of the instruction. */
20808 insn &= 0xfffffff0;
20809 insn |= addend_abs >> 2;
20810
20811 /* Update the instruction. */
20812 md_number_to_chars (buf, insn, INSN_SIZE);
20813 }
20814 break;
20815
20816 case BFD_RELOC_ARM_V4BX:
20817 /* This will need to go in the object file. */
20818 fixP->fx_done = 0;
20819 break;
20820
20821 case BFD_RELOC_UNUSED:
20822 default:
20823 as_bad_where (fixP->fx_file, fixP->fx_line,
20824 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
20825 }
20826 }
20827
20828 /* Translate internal representation of relocation info to BFD target
20829 format. */
20830
20831 arelent *
20832 tc_gen_reloc (asection *section, fixS *fixp)
20833 {
20834 arelent * reloc;
20835 bfd_reloc_code_real_type code;
20836
20837 reloc = (arelent *) xmalloc (sizeof (arelent));
20838
20839 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
20840 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
20841 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
20842
20843 if (fixp->fx_pcrel)
20844 {
20845 if (section->use_rela_p)
20846 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
20847 else
20848 fixp->fx_offset = reloc->address;
20849 }
20850 reloc->addend = fixp->fx_offset;
20851
20852 switch (fixp->fx_r_type)
20853 {
20854 case BFD_RELOC_8:
20855 if (fixp->fx_pcrel)
20856 {
20857 code = BFD_RELOC_8_PCREL;
20858 break;
20859 }
20860
20861 case BFD_RELOC_16:
20862 if (fixp->fx_pcrel)
20863 {
20864 code = BFD_RELOC_16_PCREL;
20865 break;
20866 }
20867
20868 case BFD_RELOC_32:
20869 if (fixp->fx_pcrel)
20870 {
20871 code = BFD_RELOC_32_PCREL;
20872 break;
20873 }
20874
20875 case BFD_RELOC_ARM_MOVW:
20876 if (fixp->fx_pcrel)
20877 {
20878 code = BFD_RELOC_ARM_MOVW_PCREL;
20879 break;
20880 }
20881
20882 case BFD_RELOC_ARM_MOVT:
20883 if (fixp->fx_pcrel)
20884 {
20885 code = BFD_RELOC_ARM_MOVT_PCREL;
20886 break;
20887 }
20888
20889 case BFD_RELOC_ARM_THUMB_MOVW:
20890 if (fixp->fx_pcrel)
20891 {
20892 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
20893 break;
20894 }
20895
20896 case BFD_RELOC_ARM_THUMB_MOVT:
20897 if (fixp->fx_pcrel)
20898 {
20899 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
20900 break;
20901 }
20902
20903 case BFD_RELOC_NONE:
20904 case BFD_RELOC_ARM_PCREL_BRANCH:
20905 case BFD_RELOC_ARM_PCREL_BLX:
20906 case BFD_RELOC_RVA:
20907 case BFD_RELOC_THUMB_PCREL_BRANCH7:
20908 case BFD_RELOC_THUMB_PCREL_BRANCH9:
20909 case BFD_RELOC_THUMB_PCREL_BRANCH12:
20910 case BFD_RELOC_THUMB_PCREL_BRANCH20:
20911 case BFD_RELOC_THUMB_PCREL_BRANCH23:
20912 case BFD_RELOC_THUMB_PCREL_BRANCH25:
20913 case BFD_RELOC_VTABLE_ENTRY:
20914 case BFD_RELOC_VTABLE_INHERIT:
20915 #ifdef TE_PE
20916 case BFD_RELOC_32_SECREL:
20917 #endif
20918 code = fixp->fx_r_type;
20919 break;
20920
20921 case BFD_RELOC_THUMB_PCREL_BLX:
20922 #ifdef OBJ_ELF
20923 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
20924 code = BFD_RELOC_THUMB_PCREL_BRANCH23;
20925 else
20926 #endif
20927 code = BFD_RELOC_THUMB_PCREL_BLX;
20928 break;
20929
20930 case BFD_RELOC_ARM_LITERAL:
20931 case BFD_RELOC_ARM_HWLITERAL:
20932 /* If this is called then the a literal has
20933 been referenced across a section boundary. */
20934 as_bad_where (fixp->fx_file, fixp->fx_line,
20935 _("literal referenced across section boundary"));
20936 return NULL;
20937
20938 #ifdef OBJ_ELF
20939 case BFD_RELOC_ARM_GOT32:
20940 case BFD_RELOC_ARM_GOTOFF:
20941 case BFD_RELOC_ARM_PLT32:
20942 case BFD_RELOC_ARM_TARGET1:
20943 case BFD_RELOC_ARM_ROSEGREL32:
20944 case BFD_RELOC_ARM_SBREL32:
20945 case BFD_RELOC_ARM_PREL31:
20946 case BFD_RELOC_ARM_TARGET2:
20947 case BFD_RELOC_ARM_TLS_LE32:
20948 case BFD_RELOC_ARM_TLS_LDO32:
20949 case BFD_RELOC_ARM_PCREL_CALL:
20950 case BFD_RELOC_ARM_PCREL_JUMP:
20951 case BFD_RELOC_ARM_ALU_PC_G0_NC:
20952 case BFD_RELOC_ARM_ALU_PC_G0:
20953 case BFD_RELOC_ARM_ALU_PC_G1_NC:
20954 case BFD_RELOC_ARM_ALU_PC_G1:
20955 case BFD_RELOC_ARM_ALU_PC_G2:
20956 case BFD_RELOC_ARM_LDR_PC_G0:
20957 case BFD_RELOC_ARM_LDR_PC_G1:
20958 case BFD_RELOC_ARM_LDR_PC_G2:
20959 case BFD_RELOC_ARM_LDRS_PC_G0:
20960 case BFD_RELOC_ARM_LDRS_PC_G1:
20961 case BFD_RELOC_ARM_LDRS_PC_G2:
20962 case BFD_RELOC_ARM_LDC_PC_G0:
20963 case BFD_RELOC_ARM_LDC_PC_G1:
20964 case BFD_RELOC_ARM_LDC_PC_G2:
20965 case BFD_RELOC_ARM_ALU_SB_G0_NC:
20966 case BFD_RELOC_ARM_ALU_SB_G0:
20967 case BFD_RELOC_ARM_ALU_SB_G1_NC:
20968 case BFD_RELOC_ARM_ALU_SB_G1:
20969 case BFD_RELOC_ARM_ALU_SB_G2:
20970 case BFD_RELOC_ARM_LDR_SB_G0:
20971 case BFD_RELOC_ARM_LDR_SB_G1:
20972 case BFD_RELOC_ARM_LDR_SB_G2:
20973 case BFD_RELOC_ARM_LDRS_SB_G0:
20974 case BFD_RELOC_ARM_LDRS_SB_G1:
20975 case BFD_RELOC_ARM_LDRS_SB_G2:
20976 case BFD_RELOC_ARM_LDC_SB_G0:
20977 case BFD_RELOC_ARM_LDC_SB_G1:
20978 case BFD_RELOC_ARM_LDC_SB_G2:
20979 case BFD_RELOC_ARM_V4BX:
20980 code = fixp->fx_r_type;
20981 break;
20982
20983 case BFD_RELOC_ARM_TLS_GD32:
20984 case BFD_RELOC_ARM_TLS_IE32:
20985 case BFD_RELOC_ARM_TLS_LDM32:
20986 /* BFD will include the symbol's address in the addend.
20987 But we don't want that, so subtract it out again here. */
20988 if (!S_IS_COMMON (fixp->fx_addsy))
20989 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
20990 code = fixp->fx_r_type;
20991 break;
20992 #endif
20993
20994 case BFD_RELOC_ARM_IMMEDIATE:
20995 as_bad_where (fixp->fx_file, fixp->fx_line,
20996 _("internal relocation (type: IMMEDIATE) not fixed up"));
20997 return NULL;
20998
20999 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
21000 as_bad_where (fixp->fx_file, fixp->fx_line,
21001 _("ADRL used for a symbol not defined in the same file"));
21002 return NULL;
21003
21004 case BFD_RELOC_ARM_OFFSET_IMM:
21005 if (section->use_rela_p)
21006 {
21007 code = fixp->fx_r_type;
21008 break;
21009 }
21010
21011 if (fixp->fx_addsy != NULL
21012 && !S_IS_DEFINED (fixp->fx_addsy)
21013 && S_IS_LOCAL (fixp->fx_addsy))
21014 {
21015 as_bad_where (fixp->fx_file, fixp->fx_line,
21016 _("undefined local label `%s'"),
21017 S_GET_NAME (fixp->fx_addsy));
21018 return NULL;
21019 }
21020
21021 as_bad_where (fixp->fx_file, fixp->fx_line,
21022 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
21023 return NULL;
21024
21025 default:
21026 {
21027 char * type;
21028
21029 switch (fixp->fx_r_type)
21030 {
21031 case BFD_RELOC_NONE: type = "NONE"; break;
21032 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
21033 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
21034 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
21035 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
21036 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
21037 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
21038 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
21039 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
21040 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
21041 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
21042 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
21043 default: type = _("<unknown>"); break;
21044 }
21045 as_bad_where (fixp->fx_file, fixp->fx_line,
21046 _("cannot represent %s relocation in this object file format"),
21047 type);
21048 return NULL;
21049 }
21050 }
21051
21052 #ifdef OBJ_ELF
21053 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
21054 && GOT_symbol
21055 && fixp->fx_addsy == GOT_symbol)
21056 {
21057 code = BFD_RELOC_ARM_GOTPC;
21058 reloc->addend = fixp->fx_offset = reloc->address;
21059 }
21060 #endif
21061
21062 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
21063
21064 if (reloc->howto == NULL)
21065 {
21066 as_bad_where (fixp->fx_file, fixp->fx_line,
21067 _("cannot represent %s relocation in this object file format"),
21068 bfd_get_reloc_code_name (code));
21069 return NULL;
21070 }
21071
21072 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
21073 vtable entry to be used in the relocation's section offset. */
21074 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
21075 reloc->address = fixp->fx_offset;
21076
21077 return reloc;
21078 }
21079
21080 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
21081
21082 void
21083 cons_fix_new_arm (fragS * frag,
21084 int where,
21085 int size,
21086 expressionS * exp)
21087 {
21088 bfd_reloc_code_real_type type;
21089 int pcrel = 0;
21090
21091 /* Pick a reloc.
21092 FIXME: @@ Should look at CPU word size. */
21093 switch (size)
21094 {
21095 case 1:
21096 type = BFD_RELOC_8;
21097 break;
21098 case 2:
21099 type = BFD_RELOC_16;
21100 break;
21101 case 4:
21102 default:
21103 type = BFD_RELOC_32;
21104 break;
21105 case 8:
21106 type = BFD_RELOC_64;
21107 break;
21108 }
21109
21110 #ifdef TE_PE
21111 if (exp->X_op == O_secrel)
21112 {
21113 exp->X_op = O_symbol;
21114 type = BFD_RELOC_32_SECREL;
21115 }
21116 #endif
21117
21118 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
21119 }
21120
21121 #if defined (OBJ_COFF)
21122 void
21123 arm_validate_fix (fixS * fixP)
21124 {
21125 /* If the destination of the branch is a defined symbol which does not have
21126 the THUMB_FUNC attribute, then we must be calling a function which has
21127 the (interfacearm) attribute. We look for the Thumb entry point to that
21128 function and change the branch to refer to that function instead. */
21129 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
21130 && fixP->fx_addsy != NULL
21131 && S_IS_DEFINED (fixP->fx_addsy)
21132 && ! THUMB_IS_FUNC (fixP->fx_addsy))
21133 {
21134 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
21135 }
21136 }
21137 #endif
21138
21139
21140 int
21141 arm_force_relocation (struct fix * fixp)
21142 {
21143 #if defined (OBJ_COFF) && defined (TE_PE)
21144 if (fixp->fx_r_type == BFD_RELOC_RVA)
21145 return 1;
21146 #endif
21147
21148 /* In case we have a call or a branch to a function in ARM ISA mode from
21149 a thumb function or vice-versa force the relocation. These relocations
21150 are cleared off for some cores that might have blx and simple transformations
21151 are possible. */
21152
21153 #ifdef OBJ_ELF
21154 switch (fixp->fx_r_type)
21155 {
21156 case BFD_RELOC_ARM_PCREL_JUMP:
21157 case BFD_RELOC_ARM_PCREL_CALL:
21158 case BFD_RELOC_THUMB_PCREL_BLX:
21159 if (THUMB_IS_FUNC (fixp->fx_addsy))
21160 return 1;
21161 break;
21162
21163 case BFD_RELOC_ARM_PCREL_BLX:
21164 case BFD_RELOC_THUMB_PCREL_BRANCH25:
21165 case BFD_RELOC_THUMB_PCREL_BRANCH20:
21166 case BFD_RELOC_THUMB_PCREL_BRANCH23:
21167 if (ARM_IS_FUNC (fixp->fx_addsy))
21168 return 1;
21169 break;
21170
21171 default:
21172 break;
21173 }
21174 #endif
21175
21176 /* Resolve these relocations even if the symbol is extern or weak. */
21177 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
21178 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
21179 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
21180 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
21181 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
21182 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
21183 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
21184 return 0;
21185
21186 /* Always leave these relocations for the linker. */
21187 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
21188 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
21189 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
21190 return 1;
21191
21192 /* Always generate relocations against function symbols. */
21193 if (fixp->fx_r_type == BFD_RELOC_32
21194 && fixp->fx_addsy
21195 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
21196 return 1;
21197
21198 return generic_force_reloc (fixp);
21199 }
21200
21201 #if defined (OBJ_ELF) || defined (OBJ_COFF)
21202 /* Relocations against function names must be left unadjusted,
21203 so that the linker can use this information to generate interworking
21204 stubs. The MIPS version of this function
21205 also prevents relocations that are mips-16 specific, but I do not
21206 know why it does this.
21207
21208 FIXME:
21209 There is one other problem that ought to be addressed here, but
21210 which currently is not: Taking the address of a label (rather
21211 than a function) and then later jumping to that address. Such
21212 addresses also ought to have their bottom bit set (assuming that
21213 they reside in Thumb code), but at the moment they will not. */
21214
21215 bfd_boolean
21216 arm_fix_adjustable (fixS * fixP)
21217 {
21218 if (fixP->fx_addsy == NULL)
21219 return 1;
21220
21221 /* Preserve relocations against symbols with function type. */
21222 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
21223 return FALSE;
21224
21225 if (THUMB_IS_FUNC (fixP->fx_addsy)
21226 && fixP->fx_subsy == NULL)
21227 return FALSE;
21228
21229 /* We need the symbol name for the VTABLE entries. */
21230 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
21231 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
21232 return FALSE;
21233
21234 /* Don't allow symbols to be discarded on GOT related relocs. */
21235 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
21236 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
21237 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
21238 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
21239 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
21240 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
21241 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
21242 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
21243 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
21244 return FALSE;
21245
21246 /* Similarly for group relocations. */
21247 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
21248 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
21249 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
21250 return FALSE;
21251
21252 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
21253 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
21254 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
21255 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
21256 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
21257 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
21258 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
21259 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
21260 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
21261 return FALSE;
21262
21263 return TRUE;
21264 }
21265 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
21266
21267 #ifdef OBJ_ELF
21268
21269 const char *
21270 elf32_arm_target_format (void)
21271 {
21272 #ifdef TE_SYMBIAN
21273 return (target_big_endian
21274 ? "elf32-bigarm-symbian"
21275 : "elf32-littlearm-symbian");
21276 #elif defined (TE_VXWORKS)
21277 return (target_big_endian
21278 ? "elf32-bigarm-vxworks"
21279 : "elf32-littlearm-vxworks");
21280 #else
21281 if (target_big_endian)
21282 return "elf32-bigarm";
21283 else
21284 return "elf32-littlearm";
21285 #endif
21286 }
21287
/* Per-symbol frobbing hook for the ELF back end; simply delegates to
   the generic ELF symbol frobber.  */
void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
21294 #endif
21295
21296 /* MD interface: Finalization. */
21297
21298 void
21299 arm_cleanup (void)
21300 {
21301 literal_pool * pool;
21302
21303 /* Ensure that all the IT blocks are properly closed. */
21304 check_it_blocks_finished ();
21305
21306 for (pool = list_of_pools; pool; pool = pool->next)
21307 {
21308 /* Put it at the end of the relevant section. */
21309 subseg_set (pool->section, pool->sub_section);
21310 #ifdef OBJ_ELF
21311 arm_elf_change_section ();
21312 #endif
21313 s_ltorg (0);
21314 }
21315 }
21316
21317 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Intended for use with bfd_map_over_sections; ABFD and
   DUMMY are unused.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* SYM is the last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Walk forward over empty frags to decide whether SYM is made
	 redundant by a later mapping symbol or the end of section.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
21382 #endif
21383
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  For COFF this rewrites storage classes; for ELF it sets
   the STT_ARM_TFUNC/STT_ARM_16BIT symbol types and prunes redundant
   mapping symbols.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking symbols get their COFF native flags set.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Skip ARM special symbols ($a, $t, $d mapping symbols etc.)
	     — they must keep their generic type.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_TFUNC);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
#endif
}
21463
21464 /* MD interface: Initialization. */
21465
21466 static void
21467 set_constant_flonums (void)
21468 {
21469 int i;
21470
21471 for (i = 0; i < NUM_FLOAT_VALS; i++)
21472 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
21473 abort ();
21474 }
21475
21476 /* Auto-select Thumb mode if it's the only available instruction set for the
21477 given architecture. */
21478
21479 static void
21480 autoselect_thumb_from_cpu_variant (void)
21481 {
21482 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
21483 opcode_select (16);
21484 }
21485
21486 void
21487 md_begin (void)
21488 {
21489 unsigned mach;
21490 unsigned int i;
21491
21492 if ( (arm_ops_hsh = hash_new ()) == NULL
21493 || (arm_cond_hsh = hash_new ()) == NULL
21494 || (arm_shift_hsh = hash_new ()) == NULL
21495 || (arm_psr_hsh = hash_new ()) == NULL
21496 || (arm_v7m_psr_hsh = hash_new ()) == NULL
21497 || (arm_reg_hsh = hash_new ()) == NULL
21498 || (arm_reloc_hsh = hash_new ()) == NULL
21499 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
21500 as_fatal (_("virtual memory exhausted"));
21501
21502 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
21503 hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
21504 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
21505 hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
21506 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
21507 hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
21508 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
21509 hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
21510 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
21511 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
21512 (void *) (v7m_psrs + i));
21513 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
21514 hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
21515 for (i = 0;
21516 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
21517 i++)
21518 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
21519 (void *) (barrier_opt_names + i));
21520 #ifdef OBJ_ELF
21521 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
21522 hash_insert (arm_reloc_hsh, reloc_names[i].name, (void *) (reloc_names + i));
21523 #endif
21524
21525 set_constant_flonums ();
21526
21527 /* Set the cpu variant based on the command-line options. We prefer
21528 -mcpu= over -march= if both are set (as for GCC); and we prefer
21529 -mfpu= over any other way of setting the floating point unit.
21530 Use of legacy options with new options are faulted. */
21531 if (legacy_cpu)
21532 {
21533 if (mcpu_cpu_opt || march_cpu_opt)
21534 as_bad (_("use of old and new-style options to set CPU type"));
21535
21536 mcpu_cpu_opt = legacy_cpu;
21537 }
21538 else if (!mcpu_cpu_opt)
21539 mcpu_cpu_opt = march_cpu_opt;
21540
21541 if (legacy_fpu)
21542 {
21543 if (mfpu_opt)
21544 as_bad (_("use of old and new-style options to set FPU type"));
21545
21546 mfpu_opt = legacy_fpu;
21547 }
21548 else if (!mfpu_opt)
21549 {
21550 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
21551 || defined (TE_NetBSD) || defined (TE_VXWORKS))
21552 /* Some environments specify a default FPU. If they don't, infer it
21553 from the processor. */
21554 if (mcpu_fpu_opt)
21555 mfpu_opt = mcpu_fpu_opt;
21556 else
21557 mfpu_opt = march_fpu_opt;
21558 #else
21559 mfpu_opt = &fpu_default;
21560 #endif
21561 }
21562
21563 if (!mfpu_opt)
21564 {
21565 if (mcpu_cpu_opt != NULL)
21566 mfpu_opt = &fpu_default;
21567 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
21568 mfpu_opt = &fpu_arch_vfp_v2;
21569 else
21570 mfpu_opt = &fpu_arch_fpa;
21571 }
21572
21573 #ifdef CPU_DEFAULT
21574 if (!mcpu_cpu_opt)
21575 {
21576 mcpu_cpu_opt = &cpu_default;
21577 selected_cpu = cpu_default;
21578 }
21579 #else
21580 if (mcpu_cpu_opt)
21581 selected_cpu = *mcpu_cpu_opt;
21582 else
21583 mcpu_cpu_opt = &arm_arch_any;
21584 #endif
21585
21586 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
21587
21588 autoselect_thumb_from_cpu_variant ();
21589
21590 arm_arch_used = thumb_arch_used = arm_arch_none;
21591
21592 #if defined OBJ_COFF || defined OBJ_ELF
21593 {
21594 unsigned int flags = 0;
21595
21596 #if defined OBJ_ELF
21597 flags = meabi_flags;
21598
21599 switch (meabi_flags)
21600 {
21601 case EF_ARM_EABI_UNKNOWN:
21602 #endif
21603 /* Set the flags in the private structure. */
21604 if (uses_apcs_26) flags |= F_APCS26;
21605 if (support_interwork) flags |= F_INTERWORK;
21606 if (uses_apcs_float) flags |= F_APCS_FLOAT;
21607 if (pic_code) flags |= F_PIC;
21608 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
21609 flags |= F_SOFT_FLOAT;
21610
21611 switch (mfloat_abi_opt)
21612 {
21613 case ARM_FLOAT_ABI_SOFT:
21614 case ARM_FLOAT_ABI_SOFTFP:
21615 flags |= F_SOFT_FLOAT;
21616 break;
21617
21618 case ARM_FLOAT_ABI_HARD:
21619 if (flags & F_SOFT_FLOAT)
21620 as_bad (_("hard-float conflicts with specified fpu"));
21621 break;
21622 }
21623
21624 /* Using pure-endian doubles (even if soft-float). */
21625 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
21626 flags |= F_VFP_FLOAT;
21627
21628 #if defined OBJ_ELF
21629 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
21630 flags |= EF_ARM_MAVERICK_FLOAT;
21631 break;
21632
21633 case EF_ARM_EABI_VER4:
21634 case EF_ARM_EABI_VER5:
21635 /* No additional flags to set. */
21636 break;
21637
21638 default:
21639 abort ();
21640 }
21641 #endif
21642 bfd_set_private_flags (stdoutput, flags);
21643
21644 /* We have run out flags in the COFF header to encode the
21645 status of ATPCS support, so instead we create a dummy,
21646 empty, debug section called .arm.atpcs. */
21647 if (atpcs)
21648 {
21649 asection * sec;
21650
21651 sec = bfd_make_section (stdoutput, ".arm.atpcs");
21652
21653 if (sec != NULL)
21654 {
21655 bfd_set_section_flags
21656 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
21657 bfd_set_section_size (stdoutput, sec, 0);
21658 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
21659 }
21660 }
21661 }
21662 #endif
21663
21664 /* Record the CPU type as well. */
21665 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
21666 mach = bfd_mach_arm_iWMMXt2;
21667 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
21668 mach = bfd_mach_arm_iWMMXt;
21669 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
21670 mach = bfd_mach_arm_XScale;
21671 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
21672 mach = bfd_mach_arm_ep9312;
21673 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
21674 mach = bfd_mach_arm_5TE;
21675 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
21676 {
21677 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
21678 mach = bfd_mach_arm_5T;
21679 else
21680 mach = bfd_mach_arm_5;
21681 }
21682 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
21683 {
21684 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
21685 mach = bfd_mach_arm_4T;
21686 else
21687 mach = bfd_mach_arm_4;
21688 }
21689 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
21690 mach = bfd_mach_arm_3M;
21691 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
21692 mach = bfd_mach_arm_3;
21693 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
21694 mach = bfd_mach_arm_2a;
21695 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
21696 mach = bfd_mach_arm_2;
21697 else
21698 mach = bfd_mach_arm_unknown;
21699
21700 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
21701 }
21702
21703 /* Command line processing. */
21704
21705 /* md_parse_option
21706 Invocation line includes a switch not recognized by the base assembler.
21707 See if it's a processor-specific option.
21708
21709 This routine is somewhat complicated by the need for backwards
21710 compatibility (since older releases of gcc can't be changed).
21711 The new options try to make the interface as compatible as
21712 possible with GCC.
21713
21714 New options (supported) are:
21715
21716 -mcpu=<cpu name> Assemble for selected processor
21717 -march=<architecture name> Assemble for selected architecture
21718 -mfpu=<fpu architecture> Assemble for selected FPU.
21719 -EB/-mbig-endian Big-endian
21720 -EL/-mlittle-endian Little-endian
21721 -k Generate PIC code
21722 -mthumb Start in Thumb mode
21723 -mthumb-interwork Code supports ARM/Thumb interworking
21724
21725 -m[no-]warn-deprecated Warn about deprecated features
21726
21727 For now we will also provide support for:
21728
21729 -mapcs-32 32-bit Program counter
21730 -mapcs-26 26-bit Program counter
-mapcs-float		Floats passed in FP registers
21732 -mapcs-reentrant Reentrant code
21733 -matpcs
(sometimes these will probably be replaced with -mapcs=<list of options>
and -matpcs=<list of options>)
21736
The remaining options are only supported for backwards compatibility.
21738 Cpu variants, the arm part is optional:
21739 -m[arm]1 Currently not supported.
21740 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
21741 -m[arm]3 Arm 3 processor
21742 -m[arm]6[xx], Arm 6 processors
21743 -m[arm]7[xx][t][[d]m] Arm 7 processors
21744 -m[arm]8[10] Arm 8 processors
21745 -m[arm]9[20][tdmi] Arm 9 processors
21746 -mstrongarm[110[0]] StrongARM processors
21747 -mxscale XScale processors
21748 -m[arm]v[2345[t[e]]] Arm architectures
21749 -mall All (except the ARM1)
21750 FP variants:
21751 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
21752 -mfpe-old (No float load/store multiples)
21753 -mvfpxd VFP Single precision
21754 -mvfp All VFP
21755 -mno-fpu Disable all floating point instructions
21756
21757 The following CPU names are recognized:
21758 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
21759 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
21760 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
21761 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
21762 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
21763 arm10t arm10e, arm1020t, arm1020e, arm10200e,
21764 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
21765
21766 */
21767
21768 const char * md_shortopts = "m:k";
21769
21770 #ifdef ARM_BI_ENDIAN
21771 #define OPTION_EB (OPTION_MD_BASE + 0)
21772 #define OPTION_EL (OPTION_MD_BASE + 1)
21773 #else
21774 #if TARGET_BYTES_BIG_ENDIAN
21775 #define OPTION_EB (OPTION_MD_BASE + 0)
21776 #else
21777 #define OPTION_EL (OPTION_MD_BASE + 1)
21778 #endif
21779 #endif
21780 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
21781
21782 struct option md_longopts[] =
21783 {
21784 #ifdef OPTION_EB
21785 {"EB", no_argument, NULL, OPTION_EB},
21786 #endif
21787 #ifdef OPTION_EL
21788 {"EL", no_argument, NULL, OPTION_EL},
21789 #endif
21790 {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
21791 {NULL, no_argument, NULL, 0}
21792 };
21793
21794 size_t md_longopts_size = sizeof (md_longopts);
21795
21796 struct arm_option_table
21797 {
21798 char *option; /* Option name to match. */
21799 char *help; /* Help information. */
21800 int *var; /* Variable to change. */
21801 int value; /* What to change it to. */
21802 char *deprecated; /* If non-null, print this message. */
21803 };
21804
21805 struct arm_option_table arm_opts[] =
21806 {
21807 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
21808 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
21809 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
21810 &support_interwork, 1, NULL},
21811 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
21812 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
21813 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
21814 1, NULL},
21815 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
21816 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
21817 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
21818 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
21819 NULL},
21820
21821 /* These are recognized by the assembler, but have no affect on code. */
21822 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
21823 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
21824
21825 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
21826 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
21827 &warn_on_deprecated, 0, NULL},
21828 {NULL, NULL, NULL, 0, NULL}
21829 };
21830
21831 struct arm_legacy_option_table
21832 {
21833 char *option; /* Option name to match. */
21834 const arm_feature_set **var; /* Variable to change. */
21835 const arm_feature_set value; /* What to change it to. */
21836 char *deprecated; /* If non-null, print this message. */
21837 };
21838
21839 const struct arm_legacy_option_table arm_legacy_opts[] =
21840 {
21841 /* DON'T add any new processors to this list -- we want the whole list
21842 to go away... Add them to the processors table instead. */
21843 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
21844 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
21845 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
21846 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
21847 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
21848 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
21849 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
21850 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
21851 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
21852 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
21853 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
21854 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
21855 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
21856 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
21857 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
21858 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
21859 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
21860 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
21861 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
21862 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
21863 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
21864 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
21865 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
21866 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
21867 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
21868 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
21869 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
21870 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
21871 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
21872 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
21873 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
21874 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
21875 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
21876 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
21877 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
21878 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
21879 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
21880 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
21881 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
21882 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
21883 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
21884 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
21885 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
21886 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
21887 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
21888 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
21889 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
21890 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
21891 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
21892 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
21893 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
21894 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
21895 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
21896 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
21897 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
21898 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
21899 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
21900 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
21901 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
21902 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
21903 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
21904 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
21905 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
21906 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
21907 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
21908 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
21909 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
21910 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
21911 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
21912 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
21913 N_("use -mcpu=strongarm110")},
21914 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
21915 N_("use -mcpu=strongarm1100")},
21916 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
21917 N_("use -mcpu=strongarm1110")},
21918 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
21919 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
21920 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
21921
21922 /* Architecture variants -- don't add any more to this list either. */
21923 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
21924 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
21925 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
21926 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
21927 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
21928 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
21929 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
21930 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
21931 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
21932 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
21933 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
21934 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
21935 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
21936 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
21937 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
21938 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
21939 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
21940 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
21941
21942 /* Floating point variants -- don't add any more to this list either. */
21943 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
21944 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
21945 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
21946 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
21947 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
21948
21949 {NULL, NULL, ARM_ARCH_NONE, NULL}
21950 };
21951
21952 struct arm_cpu_option_table
21953 {
21954 char *name;
21955 const arm_feature_set value;
21956 /* For some CPUs we assume an FPU unless the user explicitly sets
21957 -mfpu=... */
21958 const arm_feature_set default_fpu;
21959 /* The canonical name of the CPU, or NULL to use NAME converted to upper
21960 case. */
21961 const char *canonical_name;
21962 };
21963
21964 /* This list should, at a minimum, contain all the cpu names
21965 recognized by GCC. */
21966 static const struct arm_cpu_option_table arm_cpus[] =
21967 {
21968 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
21969 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
21970 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
21971 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
21972 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
21973 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21974 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21975 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21976 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21977 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21978 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21979 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
21980 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21981 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
21982 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21983 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
21984 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21985 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21986 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21987 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21988 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
21989 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21990 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
21991 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
21992 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21993 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21994 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21995 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21996 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
21997 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
21998 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
21999 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
22000 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
22001 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
22002 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
22003 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
22004 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
22005 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
22006 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
22007 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
22008 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
22009 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
22010 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
22011 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
22012 {"fa526", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
22013 {"fa626", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
22014 /* For V5 or later processors we default to using VFP; but the user
22015 should really set the FPU type explicitly. */
22016 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
22017 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
22018 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
22019 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
22020 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
22021 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
22022 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
22023 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
22024 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
22025 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
22026 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
22027 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
22028 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
22029 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
22030 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
22031 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
22032 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
22033 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
22034 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
22035 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
22036 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
22037 {"fa626te", ARM_ARCH_V5TE, FPU_NONE, NULL},
22038 {"fa726te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
22039 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
22040 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
22041 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
22042 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
22043 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
22044 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
22045 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
22046 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
22047 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
22048 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
22049 {"cortex-a5", ARM_ARCH_V7A, FPU_NONE, NULL},
22050 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE (0, FPU_VFP_V3
22051 | FPU_NEON_EXT_V1),
22052 NULL},
22053 {"cortex-a9", ARM_ARCH_V7A, ARM_FEATURE (0, FPU_VFP_V3
22054 | FPU_NEON_EXT_V1),
22055 NULL},
22056 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
22057 {"cortex-r4f", ARM_ARCH_V7R, FPU_ARCH_VFP_V3D16, NULL},
22058 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
22059 {"cortex-m1", ARM_ARCH_V6M, FPU_NONE, NULL},
22060 {"cortex-m0", ARM_ARCH_V6M, FPU_NONE, NULL},
22061 /* ??? XSCALE is really an architecture. */
22062 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
22063 /* ??? iwmmxt is not a processor. */
22064 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
22065 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
22066 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
22067 /* Maverick */
22068 {"ep9312", ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
22069 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
22070 };
22071
22072 struct arm_arch_option_table
22073 {
22074 char *name;
22075 const arm_feature_set value;
22076 const arm_feature_set default_fpu;
22077 };
22078
22079 /* This list should, at a minimum, contain all the architecture names
22080 recognized by GCC. */
22081 static const struct arm_arch_option_table arm_archs[] =
22082 {
22083 {"all", ARM_ANY, FPU_ARCH_FPA},
22084 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
22085 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
22086 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
22087 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
22088 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
22089 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
22090 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
22091 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
22092 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
22093 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
22094 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
22095 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
22096 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
22097 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
22098 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
22099 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
22100 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
22101 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
22102 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
22103 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
22104 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
22105 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
22106 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
22107 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
22108 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
22109 {"armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP},
22110 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
22111 /* The official spelling of the ARMv7 profile variants is the dashed form.
22112 Accept the non-dashed form for compatibility with old toolchains. */
22113 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
22114 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
22115 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
22116 {"armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP},
22117 {"armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP},
22118 {"armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP},
22119 {"armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP},
22120 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
22121 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
22122 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
22123 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
22124 };
22125
22126 /* ISA extensions in the co-processor space. */
22127 struct arm_option_cpu_value_table
22128 {
22129 char *name;
22130 const arm_feature_set value;
22131 };
22132
22133 static const struct arm_option_cpu_value_table arm_extensions[] =
22134 {
22135 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
22136 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
22137 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
22138 {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
22139 {NULL, ARM_ARCH_NONE}
22140 };
22141
22142 /* This list should, at a minimum, contain all the fpu names
22143 recognized by GCC. */
22144 static const struct arm_option_cpu_value_table arm_fpus[] =
22145 {
22146 {"softfpa", FPU_NONE},
22147 {"fpe", FPU_ARCH_FPE},
22148 {"fpe2", FPU_ARCH_FPE},
22149 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
22150 {"fpa", FPU_ARCH_FPA},
22151 {"fpa10", FPU_ARCH_FPA},
22152 {"fpa11", FPU_ARCH_FPA},
22153 {"arm7500fe", FPU_ARCH_FPA},
22154 {"softvfp", FPU_ARCH_VFP},
22155 {"softvfp+vfp", FPU_ARCH_VFP_V2},
22156 {"vfp", FPU_ARCH_VFP_V2},
22157 {"vfp9", FPU_ARCH_VFP_V2},
22158 {"vfp3", FPU_ARCH_VFP_V3}, /* For backwards compatbility. */
22159 {"vfp10", FPU_ARCH_VFP_V2},
22160 {"vfp10-r0", FPU_ARCH_VFP_V1},
22161 {"vfpxd", FPU_ARCH_VFP_V1xD},
22162 {"vfpv2", FPU_ARCH_VFP_V2},
22163 {"vfpv3", FPU_ARCH_VFP_V3},
22164 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16},
22165 {"vfpv3-d16", FPU_ARCH_VFP_V3D16},
22166 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16},
22167 {"vfpv3xd", FPU_ARCH_VFP_V3xD},
22168 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16},
22169 {"arm1020t", FPU_ARCH_VFP_V1},
22170 {"arm1020e", FPU_ARCH_VFP_V2},
22171 {"arm1136jfs", FPU_ARCH_VFP_V2},
22172 {"arm1136jf-s", FPU_ARCH_VFP_V2},
22173 {"maverick", FPU_ARCH_MAVERICK},
22174 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
22175 {"neon-fp16", FPU_ARCH_NEON_FP16},
22176 {"vfpv4", FPU_ARCH_VFP_V4},
22177 {"vfpv4-d16", FPU_ARCH_VFP_V4D16},
22178 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16},
22179 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4},
22180 {NULL, ARM_ARCH_NONE}
22181 };
22182
22183 struct arm_option_value_table
22184 {
22185 char *name;
22186 long value;
22187 };
22188
22189 static const struct arm_option_value_table arm_float_abis[] =
22190 {
22191 {"hard", ARM_FLOAT_ABI_HARD},
22192 {"softfp", ARM_FLOAT_ABI_SOFTFP},
22193 {"soft", ARM_FLOAT_ABI_SOFT},
22194 {NULL, 0}
22195 };
22196
22197 #ifdef OBJ_ELF
22198 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
22199 static const struct arm_option_value_table arm_eabis[] =
22200 {
22201 {"gnu", EF_ARM_EABI_UNKNOWN},
22202 {"4", EF_ARM_EABI_VER4},
22203 {"5", EF_ARM_EABI_VER5},
22204 {NULL, 0}
22205 };
22206 #endif
22207
22208 struct arm_long_option_table
22209 {
22210 char * option; /* Substring to match. */
22211 char * help; /* Help information. */
22212 int (* func) (char * subopt); /* Function to decode sub-option. */
22213 char * deprecated; /* If non-null, print this message. */
22214 };
22215
22216 static bfd_boolean
22217 arm_parse_extension (char * str, const arm_feature_set **opt_p)
22218 {
22219 arm_feature_set *ext_set = (arm_feature_set *)
22220 xmalloc (sizeof (arm_feature_set));
22221
22222 /* Copy the feature set, so that we can modify it. */
22223 *ext_set = **opt_p;
22224 *opt_p = ext_set;
22225
22226 while (str != NULL && *str != 0)
22227 {
22228 const struct arm_option_cpu_value_table * opt;
22229 char * ext;
22230 int optlen;
22231
22232 if (*str != '+')
22233 {
22234 as_bad (_("invalid architectural extension"));
22235 return FALSE;
22236 }
22237
22238 str++;
22239 ext = strchr (str, '+');
22240
22241 if (ext != NULL)
22242 optlen = ext - str;
22243 else
22244 optlen = strlen (str);
22245
22246 if (optlen == 0)
22247 {
22248 as_bad (_("missing architectural extension"));
22249 return FALSE;
22250 }
22251
22252 for (opt = arm_extensions; opt->name != NULL; opt++)
22253 if (strncmp (opt->name, str, optlen) == 0)
22254 {
22255 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
22256 break;
22257 }
22258
22259 if (opt->name == NULL)
22260 {
22261 as_bad (_("unknown architectural extension `%s'"), str);
22262 return FALSE;
22263 }
22264
22265 str = ext;
22266 };
22267
22268 return TRUE;
22269 }
22270
22271 static bfd_boolean
22272 arm_parse_cpu (char * str)
22273 {
22274 const struct arm_cpu_option_table * opt;
22275 char * ext = strchr (str, '+');
22276 int optlen;
22277
22278 if (ext != NULL)
22279 optlen = ext - str;
22280 else
22281 optlen = strlen (str);
22282
22283 if (optlen == 0)
22284 {
22285 as_bad (_("missing cpu name `%s'"), str);
22286 return FALSE;
22287 }
22288
22289 for (opt = arm_cpus; opt->name != NULL; opt++)
22290 if (strncmp (opt->name, str, optlen) == 0)
22291 {
22292 mcpu_cpu_opt = &opt->value;
22293 mcpu_fpu_opt = &opt->default_fpu;
22294 if (opt->canonical_name)
22295 strcpy (selected_cpu_name, opt->canonical_name);
22296 else
22297 {
22298 int i;
22299
22300 for (i = 0; i < optlen; i++)
22301 selected_cpu_name[i] = TOUPPER (opt->name[i]);
22302 selected_cpu_name[i] = 0;
22303 }
22304
22305 if (ext != NULL)
22306 return arm_parse_extension (ext, &mcpu_cpu_opt);
22307
22308 return TRUE;
22309 }
22310
22311 as_bad (_("unknown cpu `%s'"), str);
22312 return FALSE;
22313 }
22314
22315 static bfd_boolean
22316 arm_parse_arch (char * str)
22317 {
22318 const struct arm_arch_option_table *opt;
22319 char *ext = strchr (str, '+');
22320 int optlen;
22321
22322 if (ext != NULL)
22323 optlen = ext - str;
22324 else
22325 optlen = strlen (str);
22326
22327 if (optlen == 0)
22328 {
22329 as_bad (_("missing architecture name `%s'"), str);
22330 return FALSE;
22331 }
22332
22333 for (opt = arm_archs; opt->name != NULL; opt++)
22334 if (streq (opt->name, str))
22335 {
22336 march_cpu_opt = &opt->value;
22337 march_fpu_opt = &opt->default_fpu;
22338 strcpy (selected_cpu_name, opt->name);
22339
22340 if (ext != NULL)
22341 return arm_parse_extension (ext, &march_cpu_opt);
22342
22343 return TRUE;
22344 }
22345
22346 as_bad (_("unknown architecture `%s'\n"), str);
22347 return FALSE;
22348 }
22349
22350 static bfd_boolean
22351 arm_parse_fpu (char * str)
22352 {
22353 const struct arm_option_cpu_value_table * opt;
22354
22355 for (opt = arm_fpus; opt->name != NULL; opt++)
22356 if (streq (opt->name, str))
22357 {
22358 mfpu_opt = &opt->value;
22359 return TRUE;
22360 }
22361
22362 as_bad (_("unknown floating point format `%s'\n"), str);
22363 return FALSE;
22364 }
22365
22366 static bfd_boolean
22367 arm_parse_float_abi (char * str)
22368 {
22369 const struct arm_option_value_table * opt;
22370
22371 for (opt = arm_float_abis; opt->name != NULL; opt++)
22372 if (streq (opt->name, str))
22373 {
22374 mfloat_abi_opt = opt->value;
22375 return TRUE;
22376 }
22377
22378 as_bad (_("unknown floating point abi `%s'\n"), str);
22379 return FALSE;
22380 }
22381
#ifdef OBJ_ELF
/* Handle -meabi=<ver>: record the requested EABI flags.
   Returns FALSE if the version is not in the arm_eabis table.  */
static bfd_boolean
arm_parse_eabi (char * str)
{
  const struct arm_option_value_table *entry;

  for (entry = arm_eabis; entry->name != NULL; entry++)
    {
      if (streq (entry->name, str))
	{
	  meabi_flags = entry->value;
	  return TRUE;
	}
    }

  as_bad (_("unknown EABI `%s'\n"), str);
  return FALSE;
}
#endif
22398
22399 static bfd_boolean
22400 arm_parse_it_mode (char * str)
22401 {
22402 bfd_boolean ret = TRUE;
22403
22404 if (streq ("arm", str))
22405 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
22406 else if (streq ("thumb", str))
22407 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
22408 else if (streq ("always", str))
22409 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
22410 else if (streq ("never", str))
22411 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
22412 else
22413 {
22414 as_bad (_("unknown implicit IT mode `%s', should be "\
22415 "arm, thumb, always, or never."), str);
22416 ret = FALSE;
22417 }
22418
22419 return ret;
22420 }
22421
/* Long-form target options of the shape -m<name>=<argument>.  Each
   entry gives the option prefix (without the leading dash), its
   --help text, the sub-option parser invoked with the argument text,
   and a deprecation message (NULL when not deprecated).  Dispatched
   from md_parse_option.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  /* Sentinel.  */
  {NULL, NULL, 0, NULL}
};
22440
/* Handle a machine-dependent command line option.  C is the option
   character (or a long-option code such as OPTION_EB) and ARG is the
   remainder of the option text, if any.  Returns 1 when the option
   was consumed, 0 when it is unknown to the ARM backend.  */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Current single-letter options: the option text (minus its
	 first character, already in C) must match ARG exactly.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options kept only for backwards compatibility; they
	 store a pointer to the table's value rather than the value
	 itself.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      /* Long options of the form -m<name>=<value>: match the prefix
	 only, then hand the rest to the sub-option parser.  */
      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  ARG starts at the
		 character after C, hence the -1 when skipping the
		 option prefix.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
22531
/* Print the ARM-specific assembler options to FP for --help output:
   the short option table, the long option table, then the endianness
   and --fix-v4bx options.  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  /* Entries with a NULL help string are undocumented on purpose.  */
  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));
}
22561
22562
22563 #ifdef OBJ_ELF
/* Pairs an EABI Tag_CPU_arch attribute value (VAL) with the
   architecture feature set (FLAGS) it represents.  */
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;
22569
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first, so that a scan which clears matched features
   ends on the richest architecture the flags cover.  Note the entries
   are ordered by feature-subset relation, not by VAL (e.g. v6T2
   follows v6K/v6M).  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {3, ARM_ARCH_V5T},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {7, ARM_ARCH_V6Z},
    {9, ARM_ARCH_V6K},
    {11, ARM_ARCH_V6M},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7A},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    /* Sentinel.  */
    {0, ARM_ARCH_NONE}
};
22590
22591 /* Set an attribute if it has not already been set by the user. */
22592 static void
22593 aeabi_set_attribute_int (int tag, int value)
22594 {
22595 if (tag < 1
22596 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
22597 || !attributes_set_explicitly[tag])
22598 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
22599 }
22600
22601 static void
22602 aeabi_set_attribute_string (int tag, const char *value)
22603 {
22604 if (tag < 1
22605 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
22606 || !attributes_set_explicitly[tag])
22607 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
22608 }
22609
/* Set the public EABI object attributes.  Computes the architecture,
   profile, ISA and FP/SIMD build attributes (.ARM.attributes) implied
   by the selected CPU/FPU and the instructions actually assembled.
   Tags the user set explicitly are left alone (see
   aeabi_set_attribute_int/string).  */
static void
aeabi_set_public_attributes (void)
{
  int arch;
  arm_feature_set flags;
  arm_feature_set tmp;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* Scan the (least-features-first) table; each matching entry clears
     the features it covers, so ARCH ends as the last entry that
     contributed something new.  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == 10
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    arch = 13;

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* For generic "armvN" names, strip the "armv" prefix and
	 upper-case the remainder (e.g. "armv7-a" -> "7-A").  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'A');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'R');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'M');

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use: 2 for Thumb-2, 1 for Thumb-1.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    aeabi_set_attribute_int (Tag_THUMB_ISA_use,
	ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    aeabi_set_attribute_int (Tag_VFP_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    aeabi_set_attribute_int
      (Tag_Advanced_SIMD_arch, (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma)
				? 2 : 1));

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16))
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
}
22726
22727 /* Add the default contents for the .ARM.attributes section. */
22728 void
22729 arm_md_end (void)
22730 {
22731 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
22732 return;
22733
22734 aeabi_set_public_attributes ();
22735 }
22736 #endif /* OBJ_ELF */
22737
22738
22739 /* Parse a .cpu directive. */
22740
/* Parse a .cpu directive: switch the assembler to a new target CPU
   mid-file and recompute cpu_variant from the new CPU and the current
   FPU selection.  */
static void
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_cpu_option_table *opt;
  char *name;
  char saved_char;

  /* Temporarily NUL-terminate the CPU name in the input buffer;
     SAVED_CHAR restores the overwritten byte on every exit path.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	selected_cpu = opt->value;
	/* Prefer the canonical spelling; otherwise upper-case the
	   table name for the build attributes.  */
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;
	    for (i = 0; opt->name[i]; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
22778
22779
22780 /* Parse a .arch directive. */
22781
22782 static void
22783 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
22784 {
22785 const struct arm_arch_option_table *opt;
22786 char saved_char;
22787 char *name;
22788
22789 name = input_line_pointer;
22790 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
22791 input_line_pointer++;
22792 saved_char = *input_line_pointer;
22793 *input_line_pointer = 0;
22794
22795 /* Skip the first "all" entry. */
22796 for (opt = arm_archs + 1; opt->name != NULL; opt++)
22797 if (streq (opt->name, name))
22798 {
22799 mcpu_cpu_opt = &opt->value;
22800 selected_cpu = opt->value;
22801 strcpy (selected_cpu_name, opt->name);
22802 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
22803 *input_line_pointer = saved_char;
22804 demand_empty_rest_of_line ();
22805 return;
22806 }
22807
22808 as_bad (_("unknown architecture `%s'\n"), name);
22809 *input_line_pointer = saved_char;
22810 ignore_rest_of_line ();
22811 }
22812
22813
22814 /* Parse a .object_arch directive. */
22815
22816 static void
22817 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
22818 {
22819 const struct arm_arch_option_table *opt;
22820 char saved_char;
22821 char *name;
22822
22823 name = input_line_pointer;
22824 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
22825 input_line_pointer++;
22826 saved_char = *input_line_pointer;
22827 *input_line_pointer = 0;
22828
22829 /* Skip the first "all" entry. */
22830 for (opt = arm_archs + 1; opt->name != NULL; opt++)
22831 if (streq (opt->name, name))
22832 {
22833 object_arch = &opt->value;
22834 *input_line_pointer = saved_char;
22835 demand_empty_rest_of_line ();
22836 return;
22837 }
22838
22839 as_bad (_("unknown architecture `%s'\n"), name);
22840 *input_line_pointer = saved_char;
22841 ignore_rest_of_line ();
22842 }
22843
22844 /* Parse a .fpu directive. */
22845
22846 static void
22847 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
22848 {
22849 const struct arm_option_cpu_value_table *opt;
22850 char saved_char;
22851 char *name;
22852
22853 name = input_line_pointer;
22854 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
22855 input_line_pointer++;
22856 saved_char = *input_line_pointer;
22857 *input_line_pointer = 0;
22858
22859 for (opt = arm_fpus; opt->name != NULL; opt++)
22860 if (streq (opt->name, name))
22861 {
22862 mfpu_opt = &opt->value;
22863 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
22864 *input_line_pointer = saved_char;
22865 demand_empty_rest_of_line ();
22866 return;
22867 }
22868
22869 as_bad (_("unknown floating point format `%s'\n"), name);
22870 *input_line_pointer = saved_char;
22871 ignore_rest_of_line ();
22872 }
22873
/* Copy symbol information.  Propagates the ARM-specific symbol flag
   word (read/written via ARM_GET_FLAG) from SRC to DEST when GAS
   clones a symbol.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
22881
22882 #ifdef OBJ_ELF
22883 /* Given a symbolic attribute NAME, return the proper integer value.
22884 Returns -1 if the attribute is not known. */
22885
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known.  Used by the .eabi_attribute
   directive so users may write tag names instead of raw numbers.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  /* Lookup table mapping tag names (as spelled in source) to their
     numeric values; linear search is fine for this size.  */
  static const struct
  {
    const char * name;
    const int    tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
	 also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_name),
      T (Tag_CPU_arch),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_VFP_arch),
      T (Tag_WMMX_arch),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_T2EE_use),
      T (Tag_Virtualization_use),
      T (Tag_MPextension_use)
#undef T
    };
  unsigned int i;

  if (name == NULL)
    return -1;

  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
22950
22951
/* Decide whether the symbol value should be applied directly for this
   relocation.  Returns 1 only for local symbols on v5T+ targets where
   the relocation is a BL/BLX-style branch whose target's ARM/Thumb
   state matches the interworking case (so the fixup can be resolved
   at assembly time); returns 0 otherwise.  */
int
arm_apply_sym_value (struct fix * fixP)
{
  if (fixP->fx_addsy
      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
      && !S_IS_EXTERNAL (fixP->fx_addsy))
    {
      switch (fixP->fx_r_type)
	{
	/* ARM BLX / Thumb branch targeting an ARM-state function.  */
	case BFD_RELOC_ARM_PCREL_BLX:
	case BFD_RELOC_THUMB_PCREL_BRANCH23:
	  if (ARM_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	/* ARM call / Thumb BLX targeting a Thumb-state function.  */
	case BFD_RELOC_ARM_PCREL_CALL:
	case BFD_RELOC_THUMB_PCREL_BLX:
	  if (THUMB_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	default:
	  break;
	}

    }
  return 0;
}
22983 #endif /* OBJ_ELF */