1/* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28#include <string.h>
29#include <limits.h>
30#define NO_RELOC 0
31#include "as.h"
32#include "safe-ctype.h"
33
34/* Need TARGET_CPU. */
35#include "config.h"
36#include "subsegs.h"
37#include "obstack.h"
38#include "symbols.h"
39#include "listing.h"
40
41#include "opcode/arm.h"
42
43#ifdef OBJ_ELF
44#include "elf/arm.h"
45#include "dwarf2dbg.h"
46#include "dw2gencfi.h"
47#endif
48
49/* XXX Set this to 1 after the next binutils release. */
50#define WARN_DEPRECATED 0
51
52#ifdef OBJ_ELF
53/* Must be at least the size of the largest unwind opcode (currently two). */
54#define ARM_OPCODE_CHUNK_SIZE 8
55
56/* This structure holds the unwinding state. */
57
58static struct
59{
60 symbolS * proc_start;
61 symbolS * table_entry;
62 symbolS * personality_routine;
63 int personality_index;
64 /* The segment containing the function. */
65 segT saved_seg;
66 subsegT saved_subseg;
67 /* Opcodes generated from this function. */
68 unsigned char * opcodes;
69 int opcode_count;
70 int opcode_alloc;
71 /* The number of bytes pushed to the stack. */
72 offsetT frame_size;
73 /* We don't add stack adjustment opcodes immediately so that we can merge
74 multiple adjustments. We can also omit the final adjustment
75 when using a frame pointer. */
76 offsetT pending_offset;
77 /* These two fields are set by both unwind_movsp and unwind_setfp. They
78 hold the reg+offset to use when restoring sp from a frame pointer. */
79 offsetT fp_offset;
80 int fp_reg;
81 /* Nonzero if an unwind_setfp directive has been seen. */
82 unsigned fp_used:1;
83 /* Nonzero if the last opcode restores sp from fp_reg. */
84 unsigned sp_restored:1;
85} unwind;
86
 87/* If bit N is set, an R_ARM_NONE relocation has already been output for
 88 __aeabi_unwind_cpp_prN. This allows the dependency to be emitted only
 89 once per section, to save unnecessary bloat. */
90static unsigned int marked_pr_dependency = 0;
91
92#endif /* OBJ_ELF */
93
94enum arm_float_abi
95{
96 ARM_FLOAT_ABI_HARD,
97 ARM_FLOAT_ABI_SOFTFP,
98 ARM_FLOAT_ABI_SOFT
99};
100
101/* Types of processor to assemble for. */
102#ifndef CPU_DEFAULT
103#if defined __XSCALE__
104#define CPU_DEFAULT ARM_ARCH_XSCALE
105#else
106#if defined __thumb__
107#define CPU_DEFAULT ARM_ARCH_V5T
108#endif
109#endif
110#endif
111
112#ifndef FPU_DEFAULT
113# ifdef TE_LINUX
114# define FPU_DEFAULT FPU_ARCH_FPA
115# elif defined (TE_NetBSD)
116# ifdef OBJ_ELF
117# define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
118# else
119 /* Legacy a.out format. */
120# define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
121# endif
122# elif defined (TE_VXWORKS)
123# define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
124# else
125 /* For backwards compatibility, default to FPA. */
126# define FPU_DEFAULT FPU_ARCH_FPA
127# endif
128#endif /* ifndef FPU_DEFAULT */
129
130#define streq(a, b) (strcmp (a, b) == 0)
131
132static arm_feature_set cpu_variant;
133static arm_feature_set arm_arch_used;
134static arm_feature_set thumb_arch_used;
135
136/* Flags stored in private area of BFD structure. */
137static int uses_apcs_26 = FALSE;
138static int atpcs = FALSE;
139static int support_interwork = FALSE;
140static int uses_apcs_float = FALSE;
141static int pic_code = FALSE;
142
143/* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146static const arm_feature_set *legacy_cpu = NULL;
147static const arm_feature_set *legacy_fpu = NULL;
148
149static const arm_feature_set *mcpu_cpu_opt = NULL;
150static const arm_feature_set *mcpu_fpu_opt = NULL;
151static const arm_feature_set *march_cpu_opt = NULL;
152static const arm_feature_set *march_fpu_opt = NULL;
153static const arm_feature_set *mfpu_opt = NULL;
154
155/* Constants for known architecture features. */
156static const arm_feature_set fpu_default = FPU_DEFAULT;
157static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
158static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
159static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
160static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
161static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
162static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
163static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
164static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
165
166#ifdef CPU_DEFAULT
167static const arm_feature_set cpu_default = CPU_DEFAULT;
168#endif
169
170static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
171static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
172static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
173static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
174static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
175static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
176static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
177static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
178static const arm_feature_set arm_ext_v4t_5 =
179 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
180static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
181static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
182static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
183static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
184static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
185static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
186static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
187static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
188static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
189static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
190static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
191static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
192static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
193static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
194
195static const arm_feature_set arm_arch_any = ARM_ANY;
196static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
197static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
198static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
199
200static const arm_feature_set arm_cext_iwmmxt =
201 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
202static const arm_feature_set arm_cext_xscale =
203 ARM_FEATURE (0, ARM_CEXT_XSCALE);
204static const arm_feature_set arm_cext_maverick =
205 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
206static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
207static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
208static const arm_feature_set fpu_vfp_ext_v1xd =
209 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
210static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
211static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
212static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
213static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
214static const arm_feature_set fpu_vfp_v3_or_neon_ext =
215 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
216
217static int mfloat_abi_opt = -1;
218/* Record user cpu selection for object attributes. */
219static arm_feature_set selected_cpu = ARM_ARCH_NONE;
220/* Must be long enough to hold any of the names in arm_cpus. */
221static char selected_cpu_name[16];
222#ifdef OBJ_ELF
223# ifdef EABI_DEFAULT
224static int meabi_flags = EABI_DEFAULT;
225# else
226static int meabi_flags = EF_ARM_EABI_UNKNOWN;
227# endif
228#endif
229
230#ifdef OBJ_ELF
231/* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
232symbolS * GOT_symbol;
233#endif
234
235/* 0: assemble for ARM,
236 1: assemble for Thumb,
237 2: assemble for Thumb even though target CPU does not support thumb
238 instructions. */
239static int thumb_mode = 0;
240
241/* If unified_syntax is true, we are processing the new unified
242 ARM/Thumb syntax. Important differences from the old ARM mode:
243
244 - Immediate operands do not require a # prefix.
245 - Conditional affixes always appear at the end of the
246 instruction. (For backward compatibility, those instructions
247 that formerly had them in the middle, continue to accept them
248 there.)
249 - The IT instruction may appear, and if it does is validated
250 against subsequent conditional affixes. It does not generate
251 machine code.
252
253 Important differences from the old Thumb mode:
254
255 - Immediate operands do not require a # prefix.
256 - Most of the V6T2 instructions are only available in unified mode.
257 - The .N and .W suffixes are recognized and honored (it is an error
258 if they cannot be honored).
259 - All instructions set the flags if and only if they have an 's' affix.
260 - Conditional affixes may be used. They are validated against
261 preceding IT instructions. Unlike ARM mode, you cannot use a
262 conditional affix except in the scope of an IT instruction. */
263
264static bfd_boolean unified_syntax = FALSE;
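/* A small illustration of the difference (unified syntax is normally selected
   with the .syntax unified directive):

     divided (old ARM) syntax:  addeqs  r0, r0, #1
     unified syntax:            addseq  r0, r0, #1   @ 's' before the condition
                                mov     r0, 42       @ '#' prefix is optional  */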
265
266enum neon_el_type
267{
268 NT_invtype,
269 NT_untyped,
270 NT_integer,
271 NT_float,
272 NT_poly,
273 NT_signed,
274 NT_unsigned
275};
276
277struct neon_type_el
278{
279 enum neon_el_type type;
280 unsigned size;
281};
282
283#define NEON_MAX_TYPE_ELS 4
284
285struct neon_type
286{
287 struct neon_type_el el[NEON_MAX_TYPE_ELS];
288 unsigned elems;
289};
290
291struct arm_it
292{
293 const char * error;
294 unsigned long instruction;
295 int size;
296 int size_req;
297 int cond;
298 struct neon_type vectype;
299 /* Set to the opcode if the instruction needs relaxation.
300 Zero if the instruction is not relaxed. */
301 unsigned long relax;
302 struct
303 {
304 bfd_reloc_code_real_type type;
305 expressionS exp;
306 int pc_rel;
307 } reloc;
308
309 struct
310 {
311 unsigned reg;
312 signed int imm;
313 struct neon_type_el vectype;
314 unsigned present : 1; /* Operand present. */
315 unsigned isreg : 1; /* Operand was a register. */
316 unsigned immisreg : 1; /* .imm field is a second register. */
317 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
318 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
319 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
320 instructions. This allows us to disambiguate ARM <-> vector insns. */
321 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
322 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
323 unsigned hasreloc : 1; /* Operand has relocation suffix. */
324 unsigned writeback : 1; /* Operand has trailing ! */
325 unsigned preind : 1; /* Preindexed address. */
326 unsigned postind : 1; /* Postindexed address. */
327 unsigned negative : 1; /* Index register was negated. */
328 unsigned shifted : 1; /* Shift applied to operation. */
329 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
330 } operands[6];
331};
332
333static struct arm_it inst;
334
335#define NUM_FLOAT_VALS 8
336
337const char * fp_const[] =
338{
339 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
340};
341
342/* Number of littlenums required to hold an extended precision number. */
343#define MAX_LITTLENUMS 6
344
345LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
346
347#define FAIL (-1)
348#define SUCCESS (0)
349
350#define SUFF_S 1
351#define SUFF_D 2
352#define SUFF_E 3
353#define SUFF_P 4
354
355#define CP_T_X 0x00008000
356#define CP_T_Y 0x00400000
357
358#define CONDS_BIT 0x00100000
359#define LOAD_BIT 0x00100000
360
361#define DOUBLE_LOAD_FLAG 0x00000001
362
363struct asm_cond
364{
365 const char * template;
366 unsigned long value;
367};
368
369#define COND_ALWAYS 0xE
370
371struct asm_psr
372{
373 const char *template;
374 unsigned long field;
375};
376
377struct asm_barrier_opt
378{
379 const char *template;
380 unsigned long value;
381};
382
383/* The bit that distinguishes CPSR and SPSR. */
384#define SPSR_BIT (1 << 22)
385
386/* The individual PSR flag bits. */
387#define PSR_c (1 << 16)
388#define PSR_x (1 << 17)
389#define PSR_s (1 << 18)
390#define PSR_f (1 << 19)
391
392struct reloc_entry
393{
394 char *name;
395 bfd_reloc_code_real_type reloc;
396};
397
398enum vfp_reg_pos
399{
400 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
401 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
402};
403
404enum vfp_ldstm_type
405{
406 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
407};
408
409/* Bits for DEFINED field in neon_typed_alias. */
410#define NTA_HASTYPE 1
411#define NTA_HASINDEX 2
412
413struct neon_typed_alias
414{
415 unsigned char defined;
416 unsigned char index;
417 struct neon_type_el eltype;
418};
419
420/* ARM register categories. This includes coprocessor numbers and various
421 architecture extensions' registers. */
422enum arm_reg_type
423{
424 REG_TYPE_RN,
425 REG_TYPE_CP,
426 REG_TYPE_CN,
427 REG_TYPE_FN,
428 REG_TYPE_VFS,
429 REG_TYPE_VFD,
430 REG_TYPE_NQ,
431 REG_TYPE_NDQ,
432 REG_TYPE_VFC,
433 REG_TYPE_MVF,
434 REG_TYPE_MVD,
435 REG_TYPE_MVFX,
436 REG_TYPE_MVDX,
437 REG_TYPE_MVAX,
438 REG_TYPE_DSPSC,
439 REG_TYPE_MMXWR,
440 REG_TYPE_MMXWC,
441 REG_TYPE_MMXWCG,
442 REG_TYPE_XSCALE,
443};
444
445/* Structure for a hash table entry for a register.
446 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
447 information which states whether a vector type or index is specified (for a
448 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
449struct reg_entry
450{
451 const char *name;
452 unsigned char number;
453 unsigned char type;
454 unsigned char builtin;
455 struct neon_typed_alias *neon;
456};
457
458/* Diagnostics used when we don't get a register of the expected type. */
459const char *const reg_expected_msgs[] =
460{
461 N_("ARM register expected"),
462 N_("bad or missing co-processor number"),
463 N_("co-processor register expected"),
464 N_("FPA register expected"),
465 N_("VFP single precision register expected"),
466 N_("VFP/Neon double precision register expected"),
467 N_("Neon quad precision register expected"),
468 N_("Neon double or quad precision register expected"),
469 N_("VFP system register expected"),
470 N_("Maverick MVF register expected"),
471 N_("Maverick MVD register expected"),
472 N_("Maverick MVFX register expected"),
473 N_("Maverick MVDX register expected"),
474 N_("Maverick MVAX register expected"),
475 N_("Maverick DSPSC register expected"),
476 N_("iWMMXt data register expected"),
477 N_("iWMMXt control register expected"),
478 N_("iWMMXt scalar register expected"),
479 N_("XScale accumulator register expected"),
480};
481
482/* Some well known registers that we refer to directly elsewhere. */
483#define REG_SP 13
484#define REG_LR 14
485#define REG_PC 15
486
487/* ARM instructions take 4 bytes in the object file, Thumb instructions
488 take 2: */
489#define INSN_SIZE 4
490
491struct asm_opcode
492{
493 /* Basic string to match. */
494 const char *template;
495
496 /* Parameters to instruction. */
497 unsigned char operands[8];
498
499 /* Conditional tag - see opcode_lookup. */
500 unsigned int tag : 4;
501
502 /* Basic instruction code. */
503 unsigned int avalue : 28;
504
505 /* Thumb-format instruction code. */
506 unsigned int tvalue;
507
508 /* Which architecture variant provides this instruction. */
509 const arm_feature_set *avariant;
510 const arm_feature_set *tvariant;
511
512 /* Function to call to encode instruction in ARM format. */
513 void (* aencode) (void);
514
515 /* Function to call to encode instruction in Thumb format. */
516 void (* tencode) (void);
517};
518
519/* Defines for various bits that we will want to toggle. */
520#define INST_IMMEDIATE 0x02000000
521#define OFFSET_REG 0x02000000
522#define HWOFFSET_IMM 0x00400000
523#define SHIFT_BY_REG 0x00000010
524#define PRE_INDEX 0x01000000
525#define INDEX_UP 0x00800000
526#define WRITE_BACK 0x00200000
527#define LDM_TYPE_2_OR_3 0x00400000
528
529#define LITERAL_MASK 0xf000f000
530#define OPCODE_MASK 0xfe1fffff
531#define V4_STR_BIT 0x00000020
532
533#define DATA_OP_SHIFT 21
534
535#define T2_OPCODE_MASK 0xfe1fffff
536#define T2_DATA_OP_SHIFT 21
537
538/* Codes to distinguish the arithmetic instructions. */
539#define OPCODE_AND 0
540#define OPCODE_EOR 1
541#define OPCODE_SUB 2
542#define OPCODE_RSB 3
543#define OPCODE_ADD 4
544#define OPCODE_ADC 5
545#define OPCODE_SBC 6
546#define OPCODE_RSC 7
547#define OPCODE_TST 8
548#define OPCODE_TEQ 9
549#define OPCODE_CMP 10
550#define OPCODE_CMN 11
551#define OPCODE_ORR 12
552#define OPCODE_MOV 13
553#define OPCODE_BIC 14
554#define OPCODE_MVN 15
555
556#define T2_OPCODE_AND 0
557#define T2_OPCODE_BIC 1
558#define T2_OPCODE_ORR 2
559#define T2_OPCODE_ORN 3
560#define T2_OPCODE_EOR 4
561#define T2_OPCODE_ADD 8
562#define T2_OPCODE_ADC 10
563#define T2_OPCODE_SBC 11
564#define T2_OPCODE_SUB 13
565#define T2_OPCODE_RSB 14
566
567#define T_OPCODE_MUL 0x4340
568#define T_OPCODE_TST 0x4200
569#define T_OPCODE_CMN 0x42c0
570#define T_OPCODE_NEG 0x4240
571#define T_OPCODE_MVN 0x43c0
572
573#define T_OPCODE_ADD_R3 0x1800
574#define T_OPCODE_SUB_R3 0x1a00
575#define T_OPCODE_ADD_HI 0x4400
576#define T_OPCODE_ADD_ST 0xb000
577#define T_OPCODE_SUB_ST 0xb080
578#define T_OPCODE_ADD_SP 0xa800
579#define T_OPCODE_ADD_PC 0xa000
580#define T_OPCODE_ADD_I8 0x3000
581#define T_OPCODE_SUB_I8 0x3800
582#define T_OPCODE_ADD_I3 0x1c00
583#define T_OPCODE_SUB_I3 0x1e00
584
585#define T_OPCODE_ASR_R 0x4100
586#define T_OPCODE_LSL_R 0x4080
587#define T_OPCODE_LSR_R 0x40c0
588#define T_OPCODE_ROR_R 0x41c0
589#define T_OPCODE_ASR_I 0x1000
590#define T_OPCODE_LSL_I 0x0000
591#define T_OPCODE_LSR_I 0x0800
592
593#define T_OPCODE_MOV_I8 0x2000
594#define T_OPCODE_CMP_I8 0x2800
595#define T_OPCODE_CMP_LR 0x4280
596#define T_OPCODE_MOV_HR 0x4600
597#define T_OPCODE_CMP_HR 0x4500
598
599#define T_OPCODE_LDR_PC 0x4800
600#define T_OPCODE_LDR_SP 0x9800
601#define T_OPCODE_STR_SP 0x9000
602#define T_OPCODE_LDR_IW 0x6800
603#define T_OPCODE_STR_IW 0x6000
604#define T_OPCODE_LDR_IH 0x8800
605#define T_OPCODE_STR_IH 0x8000
606#define T_OPCODE_LDR_IB 0x7800
607#define T_OPCODE_STR_IB 0x7000
608#define T_OPCODE_LDR_RW 0x5800
609#define T_OPCODE_STR_RW 0x5000
610#define T_OPCODE_LDR_RH 0x5a00
611#define T_OPCODE_STR_RH 0x5200
612#define T_OPCODE_LDR_RB 0x5c00
613#define T_OPCODE_STR_RB 0x5400
614
615#define T_OPCODE_PUSH 0xb400
616#define T_OPCODE_POP 0xbc00
617
618#define T_OPCODE_BRANCH 0xe000
619
620#define THUMB_SIZE 2 /* Size of thumb instruction. */
621#define THUMB_PP_PC_LR 0x0100
622#define THUMB_LOAD_BIT 0x0800
623#define THUMB2_LOAD_BIT 0x00100000
624
625#define BAD_ARGS _("bad arguments to instruction")
626#define BAD_PC _("r15 not allowed here")
627#define BAD_COND _("instruction cannot be conditional")
628#define BAD_OVERLAP _("registers may not be the same")
629#define BAD_HIREG _("lo register required")
630#define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
631#define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
632#define BAD_BRANCH _("branch must be last instruction in IT block")
633#define BAD_NOT_IT _("instruction not allowed in IT block")
634
635static struct hash_control *arm_ops_hsh;
636static struct hash_control *arm_cond_hsh;
637static struct hash_control *arm_shift_hsh;
638static struct hash_control *arm_psr_hsh;
639static struct hash_control *arm_v7m_psr_hsh;
640static struct hash_control *arm_reg_hsh;
641static struct hash_control *arm_reloc_hsh;
642static struct hash_control *arm_barrier_opt_hsh;
643
644/* Stuff needed to resolve the label ambiguity
645 As:
646 ...
647 label: <insn>
648 may differ from:
649 ...
650 label:
651 <insn>
652*/
653
654symbolS * last_label_seen;
655static int label_is_thumb_function_name = FALSE;
656\f
657/* Literal pool structure. Held on a per-section
658 and per-sub-section basis. */
659
660#define MAX_LITERAL_POOL_SIZE 1024
661typedef struct literal_pool
662{
663 expressionS literals [MAX_LITERAL_POOL_SIZE];
664 unsigned int next_free_entry;
665 unsigned int id;
666 symbolS * symbol;
667 segT section;
668 subsegT sub_section;
669 struct literal_pool * next;
670} literal_pool;
671
672/* Pointer to a linked list of literal pools. */
673literal_pool * list_of_pools = NULL;
674
675/* State variables for IT block handling. */
676static bfd_boolean current_it_mask = 0;
677static int current_cc;
678
679\f
680/* Pure syntax. */
681
682/* This array holds the chars that always start a comment. If the
683 pre-processor is disabled, these aren't very useful. */
684const char comment_chars[] = "@";
685
686/* This array holds the chars that only start a comment at the beginning of
687 a line. If the line seems to have the form '# 123 filename'
688 .line and .file directives will appear in the pre-processed output. */
689/* Note that input_file.c hand checks for '#' at the beginning of the
690 first line of the input file. This is because the compiler outputs
691 #NO_APP at the beginning of its output. */
692/* Also note that comments like this one will always work. */
693const char line_comment_chars[] = "#";
694
695const char line_separator_chars[] = ";";
696
697/* Chars that can be used to separate mant
698 from exp in floating point numbers. */
699const char EXP_CHARS[] = "eE";
700
701/* Chars that mean this number is a floating point constant. */
702/* As in 0f12.456 */
703/* or 0d1.2345e12 */
704
705const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
706
707/* Prefix characters that indicate the start of an immediate
708 value. */
709#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
710
711/* Separator character handling. */
712
713#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
714
715static inline int
716skip_past_char (char ** str, char c)
717{
718 if (**str == c)
719 {
720 (*str)++;
721 return SUCCESS;
722 }
723 else
724 return FAIL;
725}
726#define skip_past_comma(str) skip_past_char (str, ',')
727
728/* Arithmetic expressions (possibly involving symbols). */
729
730/* Return TRUE if anything in the expression is a bignum. */
731
732static int
733walk_no_bignums (symbolS * sp)
734{
735 if (symbol_get_value_expression (sp)->X_op == O_big)
736 return 1;
737
738 if (symbol_get_value_expression (sp)->X_add_symbol)
739 {
740 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
741 || (symbol_get_value_expression (sp)->X_op_symbol
742 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
743 }
744
745 return 0;
746}
747
748static int in_my_get_expression = 0;
749
750/* Third argument to my_get_expression. */
751#define GE_NO_PREFIX 0
752#define GE_IMM_PREFIX 1
753#define GE_OPT_PREFIX 2
754/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
755 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
756#define GE_OPT_PREFIX_BIG 3
757
758static int
759my_get_expression (expressionS * ep, char ** str, int prefix_mode)
760{
761 char * save_in;
762 segT seg;
763
764 /* In unified syntax, all prefixes are optional. */
765 if (unified_syntax)
766 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
767 : GE_OPT_PREFIX;
768
769 switch (prefix_mode)
770 {
771 case GE_NO_PREFIX: break;
772 case GE_IMM_PREFIX:
773 if (!is_immediate_prefix (**str))
774 {
775 inst.error = _("immediate expression requires a # prefix");
776 return FAIL;
777 }
778 (*str)++;
779 break;
780 case GE_OPT_PREFIX:
781 case GE_OPT_PREFIX_BIG:
782 if (is_immediate_prefix (**str))
783 (*str)++;
784 break;
785 default: abort ();
786 }
787
788 memset (ep, 0, sizeof (expressionS));
789
790 save_in = input_line_pointer;
791 input_line_pointer = *str;
792 in_my_get_expression = 1;
793 seg = expression (ep);
794 in_my_get_expression = 0;
795
796 if (ep->X_op == O_illegal)
797 {
798 /* We found a bad expression in md_operand(). */
799 *str = input_line_pointer;
800 input_line_pointer = save_in;
801 if (inst.error == NULL)
802 inst.error = _("bad expression");
803 return 1;
804 }
805
806#ifdef OBJ_AOUT
807 if (seg != absolute_section
808 && seg != text_section
809 && seg != data_section
810 && seg != bss_section
811 && seg != undefined_section)
812 {
813 inst.error = _("bad segment");
814 *str = input_line_pointer;
815 input_line_pointer = save_in;
816 return 1;
817 }
818#endif
819
820 /* Get rid of any bignums now, so that we don't generate an error for which
821 we can't establish a line number later on. Big numbers are never valid
822 in instructions, which is where this routine is always called. */
823 if (prefix_mode != GE_OPT_PREFIX_BIG
824 && (ep->X_op == O_big
825 || (ep->X_add_symbol
826 && (walk_no_bignums (ep->X_add_symbol)
827 || (ep->X_op_symbol
828 && walk_no_bignums (ep->X_op_symbol))))))
829 {
830 inst.error = _("invalid constant");
831 *str = input_line_pointer;
832 input_line_pointer = save_in;
833 return 1;
834 }
835
836 *str = input_line_pointer;
837 input_line_pointer = save_in;
838 return 0;
839}
840
841/* Turn a string in input_line_pointer into a floating point constant
842 of type TYPE, and store the appropriate bytes in *LITP. The number
843 of LITTLENUMS emitted is stored in *SIZEP. An error message is
844 returned, or NULL on OK.
845
846 Note that fp constants aren't represented in the normal way on the ARM.
847 In big endian mode, things are as expected. However, in little endian
848 mode fp constants are big-endian word-wise, and little-endian byte-wise
849 within the words. For example, (double) 1.1 in big endian mode is
850 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
851 the byte sequence 99 99 f1 3f 9a 99 99 99.
852
853 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
854
855char *
856md_atof (int type, char * litP, int * sizeP)
857{
858 int prec;
859 LITTLENUM_TYPE words[MAX_LITTLENUMS];
860 char *t;
861 int i;
862
863 switch (type)
864 {
865 case 'f':
866 case 'F':
867 case 's':
868 case 'S':
869 prec = 2;
870 break;
871
872 case 'd':
873 case 'D':
874 case 'r':
875 case 'R':
876 prec = 4;
877 break;
878
879 case 'x':
880 case 'X':
881 prec = 6;
882 break;
883
884 case 'p':
885 case 'P':
886 prec = 6;
887 break;
888
889 default:
890 *sizeP = 0;
891 return _("bad call to MD_ATOF()");
892 }
893
894 t = atof_ieee (input_line_pointer, type, words);
895 if (t)
896 input_line_pointer = t;
897 *sizeP = prec * 2;
898
899 if (target_big_endian)
900 {
901 for (i = 0; i < prec; i++)
902 {
903 md_number_to_chars (litP, (valueT) words[i], 2);
904 litP += 2;
905 }
906 }
907 else
908 {
909 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
910 for (i = prec - 1; i >= 0; i--)
911 {
912 md_number_to_chars (litP, (valueT) words[i], 2);
913 litP += 2;
914 }
915 else
916 /* For a 4 byte float the order of elements in `words' is 1 0.
917 For an 8 byte float the order is 1 0 3 2. */
918 for (i = 0; i < prec; i += 2)
919 {
920 md_number_to_chars (litP, (valueT) words[i + 1], 2);
921 md_number_to_chars (litP + 2, (valueT) words[i], 2);
922 litP += 4;
923 }
924 }
925
926 return 0;
927}
928
929/* We handle all bad expressions here, so that we can report the faulty
930 instruction in the error message. */
931void
932md_operand (expressionS * expr)
933{
934 if (in_my_get_expression)
935 expr->X_op = O_illegal;
936}
937
938/* Immediate values. */
939
940/* Generic immediate-value read function for use in directives.
941 Accepts anything that 'expression' can fold to a constant.
942 *val receives the number. */
943#ifdef OBJ_ELF
944static int
945immediate_for_directive (int *val)
946{
947 expressionS exp;
948 exp.X_op = O_illegal;
949
950 if (is_immediate_prefix (*input_line_pointer))
951 {
952 input_line_pointer++;
953 expression (&exp);
954 }
955
956 if (exp.X_op != O_constant)
957 {
958 as_bad (_("expected #constant"));
959 ignore_rest_of_line ();
960 return FAIL;
961 }
962 *val = exp.X_add_number;
963 return SUCCESS;
964}
965#endif
966
967/* Register parsing. */
968
969/* Generic register parser. CCP points to what should be the
970 beginning of a register name. If it is indeed a valid register
971 name, advance CCP over it and return the reg_entry structure;
972 otherwise return NULL. Does not issue diagnostics. */
973
974static struct reg_entry *
975arm_reg_parse_multi (char **ccp)
976{
977 char *start = *ccp;
978 char *p;
979 struct reg_entry *reg;
980
981#ifdef REGISTER_PREFIX
982 if (*start != REGISTER_PREFIX)
983 return NULL;
984 start++;
985#endif
986#ifdef OPTIONAL_REGISTER_PREFIX
987 if (*start == OPTIONAL_REGISTER_PREFIX)
988 start++;
989#endif
990
991 p = start;
992 if (!ISALPHA (*p) || !is_name_beginner (*p))
993 return NULL;
994
995 do
996 p++;
997 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
998
999 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1000
1001 if (!reg)
1002 return NULL;
1003
1004 *ccp = p;
1005 return reg;
1006}
1007
1008static int
1009arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1010 enum arm_reg_type type)
1011{
1012 /* Alternative syntaxes are accepted for a few register classes. */
1013 switch (type)
1014 {
1015 case REG_TYPE_MVF:
1016 case REG_TYPE_MVD:
1017 case REG_TYPE_MVFX:
1018 case REG_TYPE_MVDX:
1019 /* Generic coprocessor register names are allowed for these. */
1020 if (reg && reg->type == REG_TYPE_CN)
1021 return reg->number;
1022 break;
1023
1024 case REG_TYPE_CP:
1025 /* For backward compatibility, a bare number is valid here. */
1026 {
1027 unsigned long processor = strtoul (start, ccp, 10);
1028 if (*ccp != start && processor <= 15)
1029 return processor;
1030 }
1031
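      /* Note: when no acceptable bare number is found, control falls
	 through to the REG_TYPE_MMXWC case below.  */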
1032 case REG_TYPE_MMXWC:
1033 /* WC includes WCG. ??? I'm not sure this is true for all
1034 instructions that take WC registers. */
1035 if (reg && reg->type == REG_TYPE_MMXWCG)
1036 return reg->number;
1037 break;
1038
1039 default:
1040 break;
1041 }
1042
1043 return FAIL;
1044}
1045
1046/* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1047 return value is the register number or FAIL. */
1048
1049static int
1050arm_reg_parse (char **ccp, enum arm_reg_type type)
1051{
1052 char *start = *ccp;
1053 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1054 int ret;
1055
1056 /* Do not allow a scalar (reg+index) to parse as a register. */
1057 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1058 return FAIL;
1059
1060 if (reg && reg->type == type)
1061 return reg->number;
1062
1063 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1064 return ret;
1065
1066 *ccp = start;
1067 return FAIL;
1068}
1069
1070/* Parse a Neon type specifier. *STR should point at the leading '.'
1071 character. Does no verification at this stage that the type fits the opcode
1072 properly. E.g.,
1073
1074 .i32.i32.s16
1075 .s32.f32
1076 .u16
1077
1078 Can all be legally parsed by this function.
1079
1080 Fills in neon_type struct pointer with parsed information, and updates STR
1081 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1082 type, FAIL if not. */
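/* For instance (a sketch of the expected result), parsing ".s32.f32" fills in
   type->elems == 2 with el[0] = { NT_signed, 32 } and el[1] = { NT_float, 32 },
   and leaves *STR pointing just past the final "32".  */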
1083
1084static int
1085parse_neon_type (struct neon_type *type, char **str)
1086{
1087 char *ptr = *str;
1088
1089 if (type)
1090 type->elems = 0;
1091
1092 while (type->elems < NEON_MAX_TYPE_ELS)
1093 {
1094 enum neon_el_type thistype = NT_untyped;
1095 unsigned thissize = -1u;
1096
1097 if (*ptr != '.')
1098 break;
1099
1100 ptr++;
1101
1102 /* Just a size without an explicit type. */
1103 if (ISDIGIT (*ptr))
1104 goto parsesize;
1105
1106 switch (TOLOWER (*ptr))
1107 {
1108 case 'i': thistype = NT_integer; break;
1109 case 'f': thistype = NT_float; break;
1110 case 'p': thistype = NT_poly; break;
1111 case 's': thistype = NT_signed; break;
1112 case 'u': thistype = NT_unsigned; break;
1113 default:
1114 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1115 return FAIL;
1116 }
1117
1118 ptr++;
1119
1120 /* .f is an abbreviation for .f32. */
1121 if (thistype == NT_float && !ISDIGIT (*ptr))
1122 thissize = 32;
1123 else
1124 {
1125 parsesize:
1126 thissize = strtoul (ptr, &ptr, 10);
1127
1128 if (thissize != 8 && thissize != 16 && thissize != 32
1129 && thissize != 64)
1130 {
1131 as_bad (_("bad size %d in type specifier"), thissize);
1132 return FAIL;
1133 }
1134 }
1135
1136 if (type)
1137 {
1138 type->el[type->elems].type = thistype;
1139 type->el[type->elems].size = thissize;
1140 type->elems++;
1141 }
1142 }
1143
1144 /* Empty/missing type is not a successful parse. */
1145 if (type->elems == 0)
1146 return FAIL;
1147
1148 *str = ptr;
1149
1150 return SUCCESS;
1151}
1152
1153/* Errors may be set multiple times during parsing or bit encoding
1154 (particularly in the Neon bits), but usually the earliest error which is set
1155 will be the most meaningful. Avoid overwriting it with later (cascading)
1156 errors by calling this function. */
1157
1158static void
1159first_error (const char *err)
1160{
1161 if (!inst.error)
1162 inst.error = err;
1163}
1164
1165/* Parse a single type, e.g. ".s32", leading period included. */
1166static int
1167parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1168{
1169 char *str = *ccp;
1170 struct neon_type optype;
1171
1172 if (*str == '.')
1173 {
1174 if (parse_neon_type (&optype, &str) == SUCCESS)
1175 {
1176 if (optype.elems == 1)
1177 *vectype = optype.el[0];
1178 else
1179 {
1180 first_error (_("only one type should be specified for operand"));
1181 return FAIL;
1182 }
1183 }
1184 else
1185 {
1186 first_error (_("vector type expected"));
1187 return FAIL;
1188 }
1189 }
1190 else
1191 return FAIL;
1192
1193 *ccp = str;
1194
1195 return SUCCESS;
1196}
1197
1198/* Special meanings for indices. Ordinary indices have a range of 0-7, so
1199 these special values still fit into a 4-bit field. */
1200
1201#define NEON_ALL_LANES 15
1202#define NEON_INTERLEAVE_LANES 14
1203
1204/* Parse either a register or a scalar, with an optional type. Return the
1205 register number, and optionally fill in the actual type of the register
1206 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1207 type/index information in *TYPEINFO. */
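/* Illustrative example, derived from the parsing code below: given "d3.s16[2]"
   with TYPE == REG_TYPE_NDQ, this returns 3, sets *RTYPE to REG_TYPE_VFD, and
   fills *TYPEINFO with defined == NTA_HASTYPE | NTA_HASINDEX,
   eltype == { NT_signed, 16 } and index == 2.  */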
1208
1209static int
1210parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1211 enum arm_reg_type *rtype,
1212 struct neon_typed_alias *typeinfo)
1213{
1214 char *str = *ccp;
1215 struct reg_entry *reg = arm_reg_parse_multi (&str);
1216 struct neon_typed_alias atype;
1217 struct neon_type_el parsetype;
1218
1219 atype.defined = 0;
1220 atype.index = -1;
1221 atype.eltype.type = NT_invtype;
1222 atype.eltype.size = -1;
1223
1224 /* Try alternate syntax for some types of register. Note these are mutually
1225 exclusive with the Neon syntax extensions. */
1226 if (reg == NULL)
1227 {
1228 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1229 if (altreg != FAIL)
1230 *ccp = str;
1231 if (typeinfo)
1232 *typeinfo = atype;
1233 return altreg;
1234 }
1235
1236 /* Undo polymorphism for Neon D and Q registers. */
1237 if (type == REG_TYPE_NDQ
1238 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1239 type = reg->type;
1240
1241 if (type != reg->type)
1242 return FAIL;
1243
1244 if (reg->neon)
1245 atype = *reg->neon;
1246
1247 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1248 {
1249 if ((atype.defined & NTA_HASTYPE) != 0)
1250 {
1251 first_error (_("can't redefine type for operand"));
1252 return FAIL;
1253 }
1254 atype.defined |= NTA_HASTYPE;
1255 atype.eltype = parsetype;
1256 }
1257
1258 if (skip_past_char (&str, '[') == SUCCESS)
1259 {
1260 if (type != REG_TYPE_VFD)
1261 {
1262 first_error (_("only D registers may be indexed"));
1263 return FAIL;
1264 }
1265
1266 if ((atype.defined & NTA_HASINDEX) != 0)
1267 {
1268 first_error (_("can't change index for operand"));
1269 return FAIL;
1270 }
1271
1272 atype.defined |= NTA_HASINDEX;
1273
1274 if (skip_past_char (&str, ']') == SUCCESS)
1275 atype.index = NEON_ALL_LANES;
1276 else
1277 {
1278 expressionS exp;
1279
1280 my_get_expression (&exp, &str, GE_NO_PREFIX);
1281
1282 if (exp.X_op != O_constant)
1283 {
1284 first_error (_("constant expression required"));
1285 return FAIL;
1286 }
1287
1288 if (skip_past_char (&str, ']') == FAIL)
1289 return FAIL;
1290
1291 atype.index = exp.X_add_number;
1292 }
1293 }
1294
1295 if (typeinfo)
1296 *typeinfo = atype;
1297
1298 if (rtype)
1299 *rtype = type;
1300
1301 *ccp = str;
1302
1303 return reg->number;
1304}
1305
1306/* Like arm_reg_parse, but allow the following extra features:
1307 - If RTYPE is non-zero, return the (possibly restricted) type of the
1308 register (e.g. Neon double or quad reg when either has been requested).
1309 - If this is a Neon vector type with additional type information, fill
1310 in the struct pointed to by VECTYPE (if non-NULL).
1311 This function will fault on encountering a scalar.
1312*/
1313
1314static int
1315arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1316 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1317{
1318 struct neon_typed_alias atype;
1319 char *str = *ccp;
1320 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1321
1322 if (reg == FAIL)
1323 return FAIL;
1324
1325 /* Do not allow a scalar (reg+index) to parse as a register. */
1326 if ((atype.defined & NTA_HASINDEX) != 0)
1327 {
1328 first_error (_("register operand expected, but got scalar"));
1329 return FAIL;
1330 }
1331
1332 if (vectype)
1333 *vectype = atype.eltype;
1334
1335 *ccp = str;
1336
1337 return reg;
1338}
1339
1340#define NEON_SCALAR_REG(X) ((X) >> 4)
1341#define NEON_SCALAR_INDEX(X) ((X) & 15)
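/* For example, a scalar written d5[2] is encoded by parse_scalar below as
   REG * 16 + INDEX == 82, so NEON_SCALAR_REG yields 5 and NEON_SCALAR_INDEX
   yields 2 (assuming the index is in range for the element size).  */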
1342
1343/* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1344 have enough information to be able to do a good job bounds-checking. So, we
1345 just do easy checks here, and do further checks later. */
1346
1347static int
1348parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1349{
1350 int reg;
1351 char *str = *ccp;
1352 struct neon_typed_alias atype;
1353
1354 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1355
1356 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1357 return FAIL;
1358
1359 if (atype.index == NEON_ALL_LANES)
1360 {
1361 first_error (_("scalar must have an index"));
1362 return FAIL;
1363 }
1364 else if (atype.index >= 64 / elsize)
1365 {
1366 first_error (_("scalar index out of range"));
1367 return FAIL;
1368 }
1369
1370 if (type)
1371 *type = atype.eltype;
1372
1373 *ccp = str;
1374
1375 return reg * 16 + atype.index;
1376}
1377
1378/* Parse an ARM register list. Returns the bitmask, or FAIL. */
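/* For example (a rough sketch of the expected result), "{r0, r2-r4, lr}"
   yields the bitmask 0x401d: bit 0 for r0, bits 2-4 for r2-r4 and bit 14
   for lr.  */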
1379static long
1380parse_reg_list (char ** strp)
1381{
1382 char * str = * strp;
1383 long range = 0;
1384 int another_range;
1385
1386 /* We come back here if we get ranges concatenated by '+' or '|'. */
1387 do
1388 {
1389 another_range = 0;
1390
1391 if (*str == '{')
1392 {
1393 int in_range = 0;
1394 int cur_reg = -1;
1395
1396 str++;
1397 do
1398 {
1399 int reg;
1400
1401 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1402 {
1403 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1404 return FAIL;
1405 }
1406
1407 if (in_range)
1408 {
1409 int i;
1410
1411 if (reg <= cur_reg)
1412 {
1413 first_error (_("bad range in register list"));
1414 return FAIL;
1415 }
1416
1417 for (i = cur_reg + 1; i < reg; i++)
1418 {
1419 if (range & (1 << i))
1420 as_tsktsk
1421 (_("Warning: duplicated register (r%d) in register list"),
1422 i);
1423 else
1424 range |= 1 << i;
1425 }
1426 in_range = 0;
1427 }
1428
1429 if (range & (1 << reg))
1430 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1431 reg);
1432 else if (reg <= cur_reg)
1433 as_tsktsk (_("Warning: register range not in ascending order"));
1434
1435 range |= 1 << reg;
1436 cur_reg = reg;
1437 }
1438 while (skip_past_comma (&str) != FAIL
1439 || (in_range = 1, *str++ == '-'));
1440 str--;
1441
1442 if (*str++ != '}')
1443 {
1444 first_error (_("missing `}'"));
1445 return FAIL;
1446 }
1447 }
1448 else
1449 {
1450 expressionS expr;
1451
1452 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1453 return FAIL;
1454
1455 if (expr.X_op == O_constant)
1456 {
1457 if (expr.X_add_number
1458 != (expr.X_add_number & 0x0000ffff))
1459 {
1460 inst.error = _("invalid register mask");
1461 return FAIL;
1462 }
1463
1464 if ((range & expr.X_add_number) != 0)
1465 {
1466 int regno = 0;
1467 long dups = range & expr.X_add_number;
1468 /* Find the lowest duplicated register number. */
1469 while ((dups & 1) == 0) dups >>= 1, regno++;
1470 as_tsktsk
1471 (_("Warning: duplicated register (r%d) in register list"),
1472 regno);
1473 }
1474
1475 range |= expr.X_add_number;
1476 }
1477 else
1478 {
1479 if (inst.reloc.type != 0)
1480 {
1481 inst.error = _("expression too complex");
1482 return FAIL;
1483 }
1484
1485 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1486 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1487 inst.reloc.pc_rel = 0;
1488 }
1489 }
1490
1491 if (*str == '|' || *str == '+')
1492 {
1493 str++;
1494 another_range = 1;
1495 }
1496 }
1497 while (another_range);
1498
1499 *strp = str;
1500 return range;
1501}
1502
1503/* Types of registers in a list. */
1504
1505enum reg_list_els
1506{
1507 REGLIST_VFP_S,
1508 REGLIST_VFP_D,
1509 REGLIST_NEON_D
1510};
1511
1512/* Parse a VFP register list. If the string is invalid return FAIL.
1513 Otherwise return the number of registers, and set PBASE to the first
1514 register. Parses registers of type ETYPE.
1515 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1516 - Q registers can be used to specify pairs of D registers
1517 - { } can be omitted from around a singleton register list
1518 FIXME: This is not implemented, as it would require backtracking in
1519 some cases, e.g.:
1520 vtbl.8 d3,d4,d5
1521 This could be done (the meaning isn't really ambiguous), but doesn't
1522 fit in well with the current parsing framework.
1523 - 32 D registers may be used (also true for VFPv3).
1524 FIXME: Types are ignored in these register lists, which is probably a
1525 bug. */
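/* For instance (assuming well-formed input), "{s4, s5, s6}" with REGLIST_VFP_S
   returns 3 and sets *PBASE to 4, while "{d0-d3}" with REGLIST_VFP_D returns 4
   and sets *PBASE to 0.  */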
1526
1527static int
1528parse_vfp_reg_list (char **str, unsigned int *pbase, enum reg_list_els etype)
1529{
1530 int base_reg;
1531 int new_base;
1532 enum arm_reg_type regtype = 0;
1533 int max_regs = 0;
1534 int count = 0;
1535 int warned = 0;
1536 unsigned long mask = 0;
1537 int i;
1538
1539 if (**str != '{')
1540 {
1541 inst.error = _("expecting {");
1542 return FAIL;
1543 }
1544
1545 (*str)++;
1546
1547 switch (etype)
1548 {
1549 case REGLIST_VFP_S:
1550 regtype = REG_TYPE_VFS;
1551 max_regs = 32;
1552 break;
1553
1554 case REGLIST_VFP_D:
1555 regtype = REG_TYPE_VFD;
1556 /* VFPv3 allows 32 D registers. */
1557 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
1558 {
1559 max_regs = 32;
1560 if (thumb_mode)
1561 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1562 fpu_vfp_ext_v3);
1563 else
1564 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1565 fpu_vfp_ext_v3);
1566 }
1567 else
1568 max_regs = 16;
1569 break;
1570
1571 case REGLIST_NEON_D:
1572 regtype = REG_TYPE_NDQ;
1573 max_regs = 32;
1574 break;
1575 }
1576
1577 base_reg = max_regs;
1578
1579 do
1580 {
1581 int setmask = 1, addregs = 1;
1582
1583 new_base = arm_typed_reg_parse (str, regtype, &regtype, NULL);
1584
1585 if (new_base == FAIL)
1586 {
1587 first_error (_(reg_expected_msgs[regtype]));
1588 return FAIL;
1589 }
1590
1591 /* Note: a value of 2 * n is returned for the register Q<n>. */
1592 if (regtype == REG_TYPE_NQ)
1593 {
1594 setmask = 3;
1595 addregs = 2;
1596 }
1597
1598 if (new_base < base_reg)
1599 base_reg = new_base;
1600
1601 if (mask & (setmask << new_base))
1602 {
1603 first_error (_("invalid register list"));
1604 return FAIL;
1605 }
1606
1607 if ((mask >> new_base) != 0 && ! warned)
1608 {
1609 as_tsktsk (_("register list not in ascending order"));
1610 warned = 1;
1611 }
1612
1613 mask |= setmask << new_base;
1614 count += addregs;
1615
1616 if (**str == '-') /* We have the start of a range expression */
1617 {
1618 int high_range;
1619
1620 (*str)++;
1621
1622 if ((high_range = arm_typed_reg_parse (str, regtype, NULL, NULL))
1623 == FAIL)
1624 {
1625 inst.error = gettext (reg_expected_msgs[regtype]);
1626 return FAIL;
1627 }
1628
1629 if (regtype == REG_TYPE_NQ)
1630 high_range = high_range + 1;
1631
1632 if (high_range <= new_base)
1633 {
1634 inst.error = _("register range not in ascending order");
1635 return FAIL;
1636 }
1637
1638 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1639 {
1640 if (mask & (setmask << new_base))
1641 {
1642 inst.error = _("invalid register list");
1643 return FAIL;
1644 }
1645
1646 mask |= setmask << new_base;
1647 count += addregs;
1648 }
1649 }
1650 }
1651 while (skip_past_comma (str) != FAIL);
1652
1653 (*str)++;
1654
1655 /* Sanity check -- should have raised a parse error above. */
1656 if (count == 0 || count > max_regs)
1657 abort ();
1658
1659 *pbase = base_reg;
1660
1661 /* Final test -- the registers must be consecutive. */
1662 mask >>= base_reg;
1663 for (i = 0; i < count; i++)
1664 {
1665 if ((mask & (1u << i)) == 0)
1666 {
1667 inst.error = _("non-contiguous register range");
1668 return FAIL;
1669 }
1670 }
1671
1672 return count;
1673}
1674
1675/* True if two alias types are the same. */
1676
1677static int
1678neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1679{
1680 if (!a && !b)
1681 return 1;
1682
1683 if (!a || !b)
1684 return 0;
1685
1686 if (a->defined != b->defined)
1687 return 0;
1688
1689 if ((a->defined & NTA_HASTYPE) != 0
1690 && (a->eltype.type != b->eltype.type
1691 || a->eltype.size != b->eltype.size))
1692 return 0;
1693
1694 if ((a->defined & NTA_HASINDEX) != 0
1695 && (a->index != b->index))
1696 return 0;
1697
1698 return 1;
1699}
1700
1701/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1702 The base register is put in *PBASE.
1703 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1704 the return value.
1705 The register stride (minus one) is put in bit 4 of the return value.
1706 Bits [6:5] encode the list length (minus one).
1707 The type of the list elements is put in *ELTYPE, if non-NULL. */
1708
1709#define NEON_LANE(X) ((X) & 0xf)
1710#define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1711#define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
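/* As a worked example of the encoding above, "{d0-d3}" (with no lane index)
   parses to base_reg 0, count 4, stride 1 and lane NEON_INTERLEAVE_LANES,
   giving a return value of 14 | (0 << 4) | (3 << 5) == 0x6e.  */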
1712
1713static int
1714parse_neon_el_struct_list (char **str, unsigned *pbase,
1715 struct neon_type_el *eltype)
1716{
1717 char *ptr = *str;
1718 int base_reg = -1;
1719 int reg_incr = -1;
1720 int count = 0;
1721 int lane = -1;
1722 int leading_brace = 0;
1723 enum arm_reg_type rtype = REG_TYPE_NDQ;
1724 int addregs = 1;
1725 const char *const incr_error = "register stride must be 1 or 2";
1726 const char *const type_error = "mismatched element/structure types in list";
1727 struct neon_typed_alias firsttype;
1728
1729 if (skip_past_char (&ptr, '{') == SUCCESS)
1730 leading_brace = 1;
1731
1732 do
1733 {
1734 struct neon_typed_alias atype;
1735 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1736
1737 if (getreg == FAIL)
1738 {
1739 first_error (_(reg_expected_msgs[rtype]));
1740 return FAIL;
1741 }
1742
1743 if (base_reg == -1)
1744 {
1745 base_reg = getreg;
1746 if (rtype == REG_TYPE_NQ)
1747 {
1748 reg_incr = 1;
1749 addregs = 2;
1750 }
1751 firsttype = atype;
1752 }
1753 else if (reg_incr == -1)
1754 {
1755 reg_incr = getreg - base_reg;
1756 if (reg_incr < 1 || reg_incr > 2)
1757 {
1758 first_error (_(incr_error));
1759 return FAIL;
1760 }
1761 }
1762 else if (getreg != base_reg + reg_incr * count)
1763 {
1764 first_error (_(incr_error));
1765 return FAIL;
1766 }
1767
1768 if (!neon_alias_types_same (&atype, &firsttype))
1769 {
1770 first_error (_(type_error));
1771 return FAIL;
1772 }
1773
1774 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1775 modes. */
1776 if (ptr[0] == '-')
1777 {
1778 struct neon_typed_alias htype;
1779 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1780 if (lane == -1)
1781 lane = NEON_INTERLEAVE_LANES;
1782 else if (lane != NEON_INTERLEAVE_LANES)
1783 {
1784 first_error (_(type_error));
1785 return FAIL;
1786 }
1787 if (reg_incr == -1)
1788 reg_incr = 1;
1789 else if (reg_incr != 1)
1790 {
1791 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1792 return FAIL;
1793 }
1794 ptr++;
1795 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1796 if (hireg == FAIL)
1797 {
1798 first_error (_(reg_expected_msgs[rtype]));
1799 return FAIL;
1800 }
1801 if (!neon_alias_types_same (&htype, &firsttype))
1802 {
1803 first_error (_(type_error));
1804 return FAIL;
1805 }
1806 count += hireg + dregs - getreg;
1807 continue;
1808 }
1809
1810 /* If we're using Q registers, we can't use [] or [n] syntax. */
1811 if (rtype == REG_TYPE_NQ)
1812 {
1813 count += 2;
1814 continue;
1815 }
1816
1817 if ((atype.defined & NTA_HASINDEX) != 0)
1818 {
1819 if (lane == -1)
1820 lane = atype.index;
1821 else if (lane != atype.index)
1822 {
1823 first_error (_(type_error));
1824 return FAIL;
1825 }
1826 }
1827 else if (lane == -1)
1828 lane = NEON_INTERLEAVE_LANES;
1829 else if (lane != NEON_INTERLEAVE_LANES)
1830 {
1831 first_error (_(type_error));
1832 return FAIL;
1833 }
1834 count++;
1835 }
1836 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
1837
1838 /* No lane set by [x]. We must be interleaving structures. */
1839 if (lane == -1)
1840 lane = NEON_INTERLEAVE_LANES;
1841
1842 /* Sanity check. */
1843 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
1844 || (count > 1 && reg_incr == -1))
1845 {
1846 first_error (_("error parsing element/structure list"));
1847 return FAIL;
1848 }
1849
1850 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
1851 {
1852 first_error (_("expected }"));
1853 return FAIL;
1854 }
1855
1856 if (reg_incr == -1)
1857 reg_incr = 1;
1858
1859 if (eltype)
1860 *eltype = firsttype.eltype;
1861
1862 *pbase = base_reg;
1863 *str = ptr;
1864
1865 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
1866}
1867
1868/* Parse an explicit relocation suffix on an expression. This is
1869 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1870 arm_reloc_hsh contains no entries, so this function can only
1871 succeed if there is no () after the word. Returns -1 on error,
1872 BFD_RELOC_UNUSED if there wasn't any suffix. */
1873static int
1874parse_reloc (char **str)
1875{
1876 struct reloc_entry *r;
1877 char *p, *q;
1878
1879 if (**str != '(')
1880 return BFD_RELOC_UNUSED;
1881
1882 p = *str + 1;
1883 q = p;
1884
1885 while (*q && *q != ')' && *q != ',')
1886 q++;
1887 if (*q != ')')
1888 return -1;
1889
1890 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1891 return -1;
1892
1893 *str = q + 1;
1894 return r->reloc;
1895}
1896
1897/* Directives: register aliases. */
1898
1899static struct reg_entry *
1900insert_reg_alias (char *str, int number, int type)
1901{
1902 struct reg_entry *new;
1903 const char *name;
1904
1905 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1906 {
1907 if (new->builtin)
1908 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1909
1910 /* Only warn about a redefinition if it's not defined as the
1911 same register. */
1912 else if (new->number != number || new->type != type)
1913 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1914
1915 return 0;
1916 }
1917
1918 name = xstrdup (str);
1919 new = xmalloc (sizeof (struct reg_entry));
1920
1921 new->name = name;
1922 new->number = number;
1923 new->type = type;
1924 new->builtin = FALSE;
1925 new->neon = NULL;
1926
1927 if (hash_insert (arm_reg_hsh, name, (PTR) new))
1928 abort ();
1929
1930 return new;
1931}
1932
1933static void
1934insert_neon_reg_alias (char *str, int number, int type,
1935 struct neon_typed_alias *atype)
1936{
1937 struct reg_entry *reg = insert_reg_alias (str, number, type);
1938
1939 if (!reg)
1940 {
1941 first_error (_("attempt to redefine typed alias"));
1942 return;
1943 }
1944
1945 if (atype)
1946 {
1947 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
1948 *reg->neon = *atype;
1949 }
1950}
1951
1952/* Look for the .req directive. This is of the form:
1953
1954 new_register_name .req existing_register_name
1955
1956 If we find one, or if it looks sufficiently like one that we want to
1957 handle any error here, return non-zero. Otherwise return zero. */
1958
1959static int
1960create_register_alias (char * newname, char *p)
1961{
1962 struct reg_entry *old;
1963 char *oldname, *nbuf;
1964 size_t nlen;
1965
1966 /* The input scrubber ensures that whitespace after the mnemonic is
1967 collapsed to single spaces. */
1968 oldname = p;
1969 if (strncmp (oldname, " .req ", 6) != 0)
1970 return 0;
1971
1972 oldname += 6;
1973 if (*oldname == '\0')
1974 return 0;
1975
1976 old = hash_find (arm_reg_hsh, oldname);
1977 if (!old)
1978 {
1979 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1980 return 1;
1981 }
1982
1983 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1984 the desired alias name, and p points to its end. If not, then
1985 the desired alias name is in the global original_case_string. */
1986#ifdef TC_CASE_SENSITIVE
1987 nlen = p - newname;
1988#else
1989 newname = original_case_string;
1990 nlen = strlen (newname);
1991#endif
1992
1993 nbuf = alloca (nlen + 1);
1994 memcpy (nbuf, newname, nlen);
1995 nbuf[nlen] = '\0';
1996
1997 /* Create aliases under the new name as stated; an all-lowercase
1998 version of the new name; and an all-uppercase version of the new
1999 name. */
2000 insert_reg_alias (nbuf, old->number, old->type);
2001
2002 for (p = nbuf; *p; p++)
2003 *p = TOUPPER (*p);
2004
2005 if (strncmp (nbuf, newname, nlen))
2006 insert_reg_alias (nbuf, old->number, old->type);
2007
2008 for (p = nbuf; *p; p++)
2009 *p = TOLOWER (*p);
2010
2011 if (strncmp (nbuf, newname, nlen))
2012 insert_reg_alias (nbuf, old->number, old->type);
2013
2014 return 1;
2015}
2016
2017/* Create a Neon typed/indexed register alias using directives, e.g.:
2018 X .dn d5.s32[1]
2019 Y .qn 6.s16
2020 Z .dn d7
2021 T .dn Z[0]
2022 These typed registers can be used instead of the types specified after the
2023 Neon mnemonic, so long as all operands given have types. Types can also be
2024 specified directly, e.g.:
2025 vadd d0.s32, d1.s32, d2.s32
2026*/
2027
2028static int
2029create_neon_reg_alias (char *newname, char *p)
2030{
2031 enum arm_reg_type basetype;
2032 struct reg_entry *basereg;
2033 struct reg_entry mybasereg;
2034 struct neon_type ntype;
2035 struct neon_typed_alias typeinfo;
2036 char *namebuf, *nameend;
2037 int namelen;
2038
2039 typeinfo.defined = 0;
2040 typeinfo.eltype.type = NT_invtype;
2041 typeinfo.eltype.size = -1;
2042 typeinfo.index = -1;
2043
2044 nameend = p;
2045
2046 if (strncmp (p, " .dn ", 5) == 0)
2047 basetype = REG_TYPE_VFD;
2048 else if (strncmp (p, " .qn ", 5) == 0)
2049 basetype = REG_TYPE_NQ;
2050 else
2051 return 0;
2052
2053 p += 5;
2054
2055 if (*p == '\0')
2056 return 0;
2057
2058 basereg = arm_reg_parse_multi (&p);
2059
2060 if (basereg && basereg->type != basetype)
2061 {
2062 as_bad (_("bad type for register"));
2063 return 0;
2064 }
2065
2066 if (basereg == NULL)
2067 {
2068 expressionS exp;
2069 /* Try parsing as an integer. */
2070 my_get_expression (&exp, &p, GE_NO_PREFIX);
2071 if (exp.X_op != O_constant)
2072 {
2073 as_bad (_("expression must be constant"));
2074 return 0;
2075 }
2076 basereg = &mybasereg;
2077 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2078 : exp.X_add_number;
2079 basereg->neon = 0;
2080 }
2081
2082 if (basereg->neon)
2083 typeinfo = *basereg->neon;
2084
2085 if (parse_neon_type (&ntype, &p) == SUCCESS)
2086 {
2087 /* We got a type. */
2088 if (typeinfo.defined & NTA_HASTYPE)
2089 {
2090 as_bad (_("can't redefine the type of a register alias"));
2091 return 0;
2092 }
2093
2094 typeinfo.defined |= NTA_HASTYPE;
2095 if (ntype.elems != 1)
2096 {
2097 as_bad (_("you must specify a single type only"));
2098 return 0;
2099 }
2100 typeinfo.eltype = ntype.el[0];
2101 }
2102
2103 if (skip_past_char (&p, '[') == SUCCESS)
2104 {
2105 expressionS exp;
2106 /* We got a scalar index. */
2107
2108 if (typeinfo.defined & NTA_HASINDEX)
2109 {
2110 as_bad (_("can't redefine the index of a scalar alias"));
2111 return 0;
2112 }
2113
2114 my_get_expression (&exp, &p, GE_NO_PREFIX);
2115
2116 if (exp.X_op != O_constant)
2117 {
2118 as_bad (_("scalar index must be constant"));
2119 return 0;
2120 }
2121
2122 typeinfo.defined |= NTA_HASINDEX;
2123 typeinfo.index = exp.X_add_number;
2124
2125 if (skip_past_char (&p, ']') == FAIL)
2126 {
2127 as_bad (_("expecting ]"));
2128 return 0;
2129 }
2130 }
2131
2132 namelen = nameend - newname;
2133 namebuf = alloca (namelen + 1);
2134 strncpy (namebuf, newname, namelen);
2135 namebuf[namelen] = '\0';
2136
2137 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2138 typeinfo.defined != 0 ? &typeinfo : NULL);
2139
2140 /* Insert name in all uppercase. */
2141 for (p = namebuf; *p; p++)
2142 *p = TOUPPER (*p);
2143
2144 if (strncmp (namebuf, newname, namelen))
2145 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2146 typeinfo.defined != 0 ? &typeinfo : NULL);
2147
2148 /* Insert name in all lowercase. */
2149 for (p = namebuf; *p; p++)
2150 *p = TOLOWER (*p);
2151
2152 if (strncmp (namebuf, newname, namelen))
2153 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2154 typeinfo.defined != 0 ? &typeinfo : NULL);
2155
2156 return 1;
2157}
2158
2159/* Should never be called, as .req goes between the alias and the
2160 register name, not at the beginning of the line. */
2161static void
2162s_req (int a ATTRIBUTE_UNUSED)
2163{
2164 as_bad (_("invalid syntax for .req directive"));
2165}
2166
2167static void
2168s_dn (int a ATTRIBUTE_UNUSED)
2169{
2170 as_bad (_("invalid syntax for .dn directive"));
2171}
2172
2173static void
2174s_qn (int a ATTRIBUTE_UNUSED)
2175{
2176 as_bad (_("invalid syntax for .qn directive"));
2177}
2178
2179/* The .unreq directive deletes an alias which was previously defined
2180 by .req. For example:
2181
2182 my_alias .req r11
2183 .unreq my_alias */
2184
2185static void
2186s_unreq (int a ATTRIBUTE_UNUSED)
2187{
2188 char * name;
2189 char saved_char;
2190
2191 name = input_line_pointer;
2192
2193 while (*input_line_pointer != 0
2194 && *input_line_pointer != ' '
2195 && *input_line_pointer != '\n')
2196 ++input_line_pointer;
2197
2198 saved_char = *input_line_pointer;
2199 *input_line_pointer = 0;
2200
2201 if (!*name)
2202 as_bad (_("invalid syntax for .unreq directive"));
2203 else
2204 {
2205 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2206
2207 if (!reg)
2208 as_bad (_("unknown register alias '%s'"), name);
2209 else if (reg->builtin)
2210 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2211 name);
2212 else
2213 {
2214 hash_delete (arm_reg_hsh, name);
2215 free ((char *) reg->name);
2216 if (reg->neon)
2217 free (reg->neon);
2218 free (reg);
2219 }
2220 }
2221
2222 *input_line_pointer = saved_char;
2223 demand_empty_rest_of_line ();
2224}
2225
2226/* Directives: Instruction set selection. */
2227
2228#ifdef OBJ_ELF
2229/* This code is to handle mapping symbols as defined in the ARM ELF spec.
2230 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2231 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
2232 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2233
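/* Illustration only (not lifted from any testcase): assembling a fragment
   such as

	.arm
	add	r0, r0, r1	@ "$a" emitted before this instruction
	.thumb
	adds	r0, r0, r1	@ "$t" emitted here
	.ltorg			@ "$d" emitted for the pool data

   produces one untyped local mapping symbol at each state change; see
   mapping_state below.  */
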
2234static enum mstate mapstate = MAP_UNDEFINED;
2235
2236static void
2237mapping_state (enum mstate state)
2238{
2239 symbolS * symbolP;
2240 const char * symname;
2241 int type;
2242
2243 if (mapstate == state)
2244 /* The mapping symbol has already been emitted.
2245 There is nothing else to do. */
2246 return;
2247
2248 mapstate = state;
2249
2250 switch (state)
2251 {
2252 case MAP_DATA:
2253 symname = "$d";
2254 type = BSF_NO_FLAGS;
2255 break;
2256 case MAP_ARM:
2257 symname = "$a";
2258 type = BSF_NO_FLAGS;
2259 break;
2260 case MAP_THUMB:
2261 symname = "$t";
2262 type = BSF_NO_FLAGS;
2263 break;
2264 case MAP_UNDEFINED:
2265 return;
2266 default:
2267 abort ();
2268 }
2269
2270 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2271
2272 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
2273 symbol_table_insert (symbolP);
2274 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2275
2276 switch (state)
2277 {
2278 case MAP_ARM:
2279 THUMB_SET_FUNC (symbolP, 0);
2280 ARM_SET_THUMB (symbolP, 0);
2281 ARM_SET_INTERWORK (symbolP, support_interwork);
2282 break;
2283
2284 case MAP_THUMB:
2285 THUMB_SET_FUNC (symbolP, 1);
2286 ARM_SET_THUMB (symbolP, 1);
2287 ARM_SET_INTERWORK (symbolP, support_interwork);
2288 break;
2289
2290 case MAP_DATA:
2291 default:
2292 return;
2293 }
2294}
2295#else
2296#define mapping_state(x) /* nothing */
2297#endif
2298
2299/* Find the real, Thumb encoded start of a Thumb function. */
2300
2301static symbolS *
2302find_real_start (symbolS * symbolP)
2303{
2304 char * real_start;
2305 const char * name = S_GET_NAME (symbolP);
2306 symbolS * new_target;
2307
2308 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2309#define STUB_NAME ".real_start_of"
2310
2311 if (name == NULL)
2312 abort ();
2313
2314 /* The compiler may generate BL instructions to local labels because
2315 it needs to perform a branch to a far away location. These labels
2316 do not have a corresponding ".real_start_of" label. We check
2317 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2318 the ".real_start_of" convention for nonlocal branches. */
2319 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2320 return symbolP;
2321
2322 real_start = ACONCAT ((STUB_NAME, name, NULL));
2323 new_target = symbol_find (real_start);
2324
2325 if (new_target == NULL)
2326 {
2327 as_warn (_("failed to find real start of function: %s"), name);
2328 new_target = symbolP;
2329 }
2330
2331 return new_target;
2332}
2333
2334static void
2335opcode_select (int width)
2336{
2337 switch (width)
2338 {
2339 case 16:
2340 if (! thumb_mode)
2341 {
2342 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2343 as_bad (_("selected processor does not support THUMB opcodes"));
2344
2345 thumb_mode = 1;
2346 /* No need to force the alignment, since we will have been
2347 coming from ARM mode, which is word-aligned. */
2348 record_alignment (now_seg, 1);
2349 }
2350 mapping_state (MAP_THUMB);
2351 break;
2352
2353 case 32:
2354 if (thumb_mode)
2355 {
2356 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2357 as_bad (_("selected processor does not support ARM opcodes"));
2358
2359 thumb_mode = 0;
2360
2361 if (!need_pass_2)
2362 frag_align (2, 0, 0);
2363
2364 record_alignment (now_seg, 1);
2365 }
2366 mapping_state (MAP_ARM);
2367 break;
2368
2369 default:
2370 as_bad (_("invalid instruction size selected (%d)"), width);
2371 }
2372}
2373
2374static void
2375s_arm (int ignore ATTRIBUTE_UNUSED)
2376{
2377 opcode_select (32);
2378 demand_empty_rest_of_line ();
2379}
2380
2381static void
2382s_thumb (int ignore ATTRIBUTE_UNUSED)
2383{
2384 opcode_select (16);
2385 demand_empty_rest_of_line ();
2386}
2387
2388static void
2389s_code (int unused ATTRIBUTE_UNUSED)
2390{
2391 int temp;
2392
2393 temp = get_absolute_expression ();
2394 switch (temp)
2395 {
2396 case 16:
2397 case 32:
2398 opcode_select (temp);
2399 break;
2400
2401 default:
2402 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2403 }
2404}
2405
2406static void
2407s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2408{
2409 /* If we are not already in Thumb mode, switch into it, EVEN if
2410 the target processor does not support Thumb instructions.
2411 This is used by gcc/config/arm/lib1funcs.asm, for example,
2412 to compile interworking support functions even if the
2413 target processor does not support interworking. */
2414 if (! thumb_mode)
2415 {
2416 thumb_mode = 2;
2417 record_alignment (now_seg, 1);
2418 }
2419
2420 demand_empty_rest_of_line ();
2421}
2422
2423static void
2424s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2425{
2426 s_thumb (0);
2427
2428 /* The following label is the name/address of the start of a Thumb function.
2429 We need to know this for the interworking support. */
2430 label_is_thumb_function_name = TRUE;
2431}
2432
2433/* Perform a .set directive, but also mark the alias as
2434 being a thumb function. */
2435
2436static void
2437s_thumb_set (int equiv)
2438{
2439 /* XXX the following is a duplicate of the code for s_set() in read.c.
2440 We cannot just call that code as we need to get at the symbol that
2441 is created. */
2442 char * name;
2443 char delim;
2444 char * end_name;
2445 symbolS * symbolP;
2446
2447 /* Especial apologies for the random logic:
2448 This just grew, and could be parsed much more simply!
2449 Dean - in haste. */
2450 name = input_line_pointer;
2451 delim = get_symbol_end ();
2452 end_name = input_line_pointer;
2453 *end_name = delim;
2454
2455 if (*input_line_pointer != ',')
2456 {
2457 *end_name = 0;
2458 as_bad (_("expected comma after name \"%s\""), name);
2459 *end_name = delim;
2460 ignore_rest_of_line ();
2461 return;
2462 }
2463
2464 input_line_pointer++;
2465 *end_name = 0;
2466
2467 if (name[0] == '.' && name[1] == '\0')
2468 {
2469 /* XXX - this should not happen to .thumb_set. */
2470 abort ();
2471 }
2472
2473 if ((symbolP = symbol_find (name)) == NULL
2474 && (symbolP = md_undefined_symbol (name)) == NULL)
2475 {
2476#ifndef NO_LISTING
2477 /* When doing symbol listings, play games with dummy fragments living
2478 outside the normal fragment chain to record the file and line info
2479 for this symbol. */
2480 if (listing & LISTING_SYMBOLS)
2481 {
2482 extern struct list_info_struct * listing_tail;
2483 fragS * dummy_frag = xmalloc (sizeof (fragS));
2484
2485 memset (dummy_frag, 0, sizeof (fragS));
2486 dummy_frag->fr_type = rs_fill;
2487 dummy_frag->line = listing_tail;
2488 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2489 dummy_frag->fr_symbol = symbolP;
2490 }
2491 else
2492#endif
2493 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2494
2495#ifdef OBJ_COFF
2496 /* "set" symbols are local unless otherwise specified. */
2497 SF_SET_LOCAL (symbolP);
2498#endif /* OBJ_COFF */
2499 } /* Make a new symbol. */
2500
2501 symbol_table_insert (symbolP);
2502
2503 * end_name = delim;
2504
2505 if (equiv
2506 && S_IS_DEFINED (symbolP)
2507 && S_GET_SEGMENT (symbolP) != reg_section)
2508 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2509
2510 pseudo_set (symbolP);
2511
2512 demand_empty_rest_of_line ();
2513
2514 /* XXX Now we come to the Thumb specific bit of code. */
2515
2516 THUMB_SET_FUNC (symbolP, 1);
2517 ARM_SET_THUMB (symbolP, 1);
2518#if defined OBJ_ELF || defined OBJ_COFF
2519 ARM_SET_INTERWORK (symbolP, support_interwork);
2520#endif
2521}
2522
2523/* Directives: Mode selection. */
2524
2525/* .syntax [unified|divided] - choose the new unified syntax
2526 (same for Arm and Thumb encoding, modulo slight differences in what
2527 can be represented) or the old divergent syntax for each mode. */
2528static void
2529s_syntax (int unused ATTRIBUTE_UNUSED)
2530{
2531 char *name, delim;
2532
2533 name = input_line_pointer;
2534 delim = get_symbol_end ();
2535
2536 if (!strcasecmp (name, "unified"))
2537 unified_syntax = TRUE;
2538 else if (!strcasecmp (name, "divided"))
2539 unified_syntax = FALSE;
2540 else
2541 {
2542 as_bad (_("unrecognized syntax mode \"%s\""), name);
2543 return;
2544 }
2545 *input_line_pointer = delim;
2546 demand_empty_rest_of_line ();
2547}
2548
2549/* Directives: sectioning and alignment. */
2550
2551/* Same as s_align_ptwo but align 0 => align 2. */
2552
2553static void
2554s_align (int unused ATTRIBUTE_UNUSED)
2555{
2556 int temp;
2557 long temp_fill;
2558 long max_alignment = 15;
2559
2560 temp = get_absolute_expression ();
2561 if (temp > max_alignment)
2562 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2563 else if (temp < 0)
2564 {
2565 as_bad (_("alignment negative. 0 assumed."));
2566 temp = 0;
2567 }
2568
2569 if (*input_line_pointer == ',')
2570 {
2571 input_line_pointer++;
2572 temp_fill = get_absolute_expression ();
2573 }
2574 else
2575 temp_fill = 0;
2576
2577 if (!temp)
2578 temp = 2;
2579
2580 /* Only make a frag if we HAVE to. */
2581 if (temp && !need_pass_2)
2582 frag_align (temp, (int) temp_fill, 0);
2583 demand_empty_rest_of_line ();
2584
2585 record_alignment (now_seg, temp);
2586}
2587
2588static void
2589s_bss (int ignore ATTRIBUTE_UNUSED)
2590{
2591 /* We don't support putting frags in the BSS segment; we fake it by
2592 marking in_bss, then looking at s_skip for clues. */
2593 subseg_set (bss_section, 0);
2594 demand_empty_rest_of_line ();
2595 mapping_state (MAP_DATA);
2596}
2597
2598static void
2599s_even (int ignore ATTRIBUTE_UNUSED)
2600{
2601 /* Never make frag if expect extra pass. */
2602 if (!need_pass_2)
2603 frag_align (1, 0, 0);
2604
2605 record_alignment (now_seg, 1);
2606
2607 demand_empty_rest_of_line ();
2608}
2609
2610/* Directives: Literal pools. */
2611
2612static literal_pool *
2613find_literal_pool (void)
2614{
2615 literal_pool * pool;
2616
2617 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2618 {
2619 if (pool->section == now_seg
2620 && pool->sub_section == now_subseg)
2621 break;
2622 }
2623
2624 return pool;
2625}
2626
2627static literal_pool *
2628find_or_make_literal_pool (void)
2629{
2630 /* Next literal pool ID number. */
2631 static unsigned int latest_pool_num = 1;
2632 literal_pool * pool;
2633
2634 pool = find_literal_pool ();
2635
2636 if (pool == NULL)
2637 {
2638 /* Create a new pool. */
2639 pool = xmalloc (sizeof (* pool));
2640 if (! pool)
2641 return NULL;
2642
2643 pool->next_free_entry = 0;
2644 pool->section = now_seg;
2645 pool->sub_section = now_subseg;
2646 pool->next = list_of_pools;
2647 pool->symbol = NULL;
2648
2649 /* Add it to the list. */
2650 list_of_pools = pool;
2651 }
2652
2653 /* New pools, and emptied pools, will have a NULL symbol. */
2654 if (pool->symbol == NULL)
2655 {
2656 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2657 (valueT) 0, &zero_address_frag);
2658 pool->id = latest_pool_num ++;
2659 }
2660
2661 /* Done. */
2662 return pool;
2663}
2664
2665/* Add the literal in the global 'inst'
2666 structure to the relevant literal pool. */
2667
2668static int
2669add_to_lit_pool (void)
2670{
2671 literal_pool * pool;
2672 unsigned int entry;
2673
2674 pool = find_or_make_literal_pool ();
2675
2676 /* Check if this literal value is already in the pool. */
2677 for (entry = 0; entry < pool->next_free_entry; entry ++)
2678 {
2679 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2680 && (inst.reloc.exp.X_op == O_constant)
2681 && (pool->literals[entry].X_add_number
2682 == inst.reloc.exp.X_add_number)
2683 && (pool->literals[entry].X_unsigned
2684 == inst.reloc.exp.X_unsigned))
2685 break;
2686
2687 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2688 && (inst.reloc.exp.X_op == O_symbol)
2689 && (pool->literals[entry].X_add_number
2690 == inst.reloc.exp.X_add_number)
2691 && (pool->literals[entry].X_add_symbol
2692 == inst.reloc.exp.X_add_symbol)
2693 && (pool->literals[entry].X_op_symbol
2694 == inst.reloc.exp.X_op_symbol))
2695 break;
2696 }
2697
2698 /* Do we need to create a new entry? */
2699 if (entry == pool->next_free_entry)
2700 {
2701 if (entry >= MAX_LITERAL_POOL_SIZE)
2702 {
2703 inst.error = _("literal pool overflow");
2704 return FAIL;
2705 }
2706
2707 pool->literals[entry] = inst.reloc.exp;
2708 pool->next_free_entry += 1;
2709 }
2710
2711 inst.reloc.exp.X_op = O_symbol;
2712 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2713 inst.reloc.exp.X_add_symbol = pool->symbol;
2714
2715 return SUCCESS;
2716}
2717
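/* Sketch of how the pool is used (the "ldr rX, =imm" handling that calls
   add_to_lit_pool lives elsewhere in this file):

	ldr	r0, =0xcafebabe		@ operand becomes <pool symbol> + 4 * entry
	...
	.ltorg				@ s_ltorg (below) emits one .word per entry

   Identical constants share a single entry, and a pool is limited to
   MAX_LITERAL_POOL_SIZE entries.  */
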
2718/* Can't use symbol_new here, so have to create a symbol and then at
2719 a later date assign it a value. That's what these functions do. */
2720
2721static void
2722symbol_locate (symbolS * symbolP,
2723 const char * name, /* It is copied, the caller can modify. */
2724 segT segment, /* Segment identifier (SEG_<something>). */
2725 valueT valu, /* Symbol value. */
2726 fragS * frag) /* Associated fragment. */
2727{
2728 unsigned int name_length;
2729 char * preserved_copy_of_name;
2730
2731 name_length = strlen (name) + 1; /* +1 for \0. */
2732 obstack_grow (&notes, name, name_length);
2733 preserved_copy_of_name = obstack_finish (&notes);
2734
2735#ifdef tc_canonicalize_symbol_name
2736 preserved_copy_of_name =
2737 tc_canonicalize_symbol_name (preserved_copy_of_name);
2738#endif
2739
2740 S_SET_NAME (symbolP, preserved_copy_of_name);
2741
2742 S_SET_SEGMENT (symbolP, segment);
2743 S_SET_VALUE (symbolP, valu);
2744 symbol_clear_list_pointers (symbolP);
2745
2746 symbol_set_frag (symbolP, frag);
2747
2748 /* Link to end of symbol chain. */
2749 {
2750 extern int symbol_table_frozen;
2751
2752 if (symbol_table_frozen)
2753 abort ();
2754 }
2755
2756 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
2757
2758 obj_symbol_new_hook (symbolP);
2759
2760#ifdef tc_symbol_new_hook
2761 tc_symbol_new_hook (symbolP);
2762#endif
2763
2764#ifdef DEBUG_SYMS
2765 verify_symbol_chain (symbol_rootP, symbol_lastP);
2766#endif /* DEBUG_SYMS */
2767}
2768
2769
2770static void
2771s_ltorg (int ignored ATTRIBUTE_UNUSED)
2772{
2773 unsigned int entry;
2774 literal_pool * pool;
2775 char sym_name[20];
2776
2777 pool = find_literal_pool ();
2778 if (pool == NULL
2779 || pool->symbol == NULL
2780 || pool->next_free_entry == 0)
2781 return;
2782
2783 mapping_state (MAP_DATA);
2784
2785 /* Align pool for word accesses.
2786 Only make a frag if we have to. */
2787 if (!need_pass_2)
2788 frag_align (2, 0, 0);
2789
2790 record_alignment (now_seg, 2);
2791
2792 sprintf (sym_name, "$$lit_\002%x", pool->id);
2793
2794 symbol_locate (pool->symbol, sym_name, now_seg,
2795 (valueT) frag_now_fix (), frag_now);
2796 symbol_table_insert (pool->symbol);
2797
2798 ARM_SET_THUMB (pool->symbol, thumb_mode);
2799
2800#if defined OBJ_COFF || defined OBJ_ELF
2801 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2802#endif
2803
2804 for (entry = 0; entry < pool->next_free_entry; entry ++)
2805 /* First output the expression in the instruction to the pool. */
2806 emit_expr (&(pool->literals[entry]), 4); /* .word */
2807
2808 /* Mark the pool as empty. */
2809 pool->next_free_entry = 0;
2810 pool->symbol = NULL;
2811}
2812
2813#ifdef OBJ_ELF
2814/* Forward declarations for functions below, in the MD interface
2815 section. */
2816static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2817static valueT create_unwind_entry (int);
2818static void start_unwind_section (const segT, int);
2819static void add_unwind_opcode (valueT, int);
2820static void flush_pending_unwind (void);
2821
2822/* Directives: Data. */
2823
2824static void
2825s_arm_elf_cons (int nbytes)
2826{
2827 expressionS exp;
2828
2829#ifdef md_flush_pending_output
2830 md_flush_pending_output ();
2831#endif
2832
2833 if (is_it_end_of_statement ())
2834 {
2835 demand_empty_rest_of_line ();
2836 return;
2837 }
2838
2839#ifdef md_cons_align
2840 md_cons_align (nbytes);
2841#endif
2842
2843 mapping_state (MAP_DATA);
2844 do
2845 {
2846 int reloc;
2847 char *base = input_line_pointer;
2848
2849 expression (& exp);
2850
2851 if (exp.X_op != O_symbol)
2852 emit_expr (&exp, (unsigned int) nbytes);
2853 else
2854 {
2855 char *before_reloc = input_line_pointer;
2856 reloc = parse_reloc (&input_line_pointer);
2857 if (reloc == -1)
2858 {
2859 as_bad (_("unrecognized relocation suffix"));
2860 ignore_rest_of_line ();
2861 return;
2862 }
2863 else if (reloc == BFD_RELOC_UNUSED)
2864 emit_expr (&exp, (unsigned int) nbytes);
2865 else
2866 {
2867 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
2868 int size = bfd_get_reloc_size (howto);
2869
2870 if (reloc == BFD_RELOC_ARM_PLT32)
2871 {
2872 as_bad (_("(plt) is only valid on branch targets"));
2873 reloc = BFD_RELOC_UNUSED;
2874 size = 0;
2875 }
2876
2877 if (size > nbytes)
2878 as_bad (_("%s relocations do not fit in %d bytes"),
2879 howto->name, nbytes);
2880 else
2881 {
2882 /* We've parsed an expression stopping at O_symbol.
2883 But there may be more expression left now that we
2884 have parsed the relocation marker. Parse it again.
2885 XXX Surely there is a cleaner way to do this. */
2886 char *p = input_line_pointer;
2887 int offset;
2888 char *save_buf = alloca (input_line_pointer - base);
2889 memcpy (save_buf, base, input_line_pointer - base);
2890 memmove (base + (input_line_pointer - before_reloc),
2891 base, before_reloc - base);
2892
2893 input_line_pointer = base + (input_line_pointer-before_reloc);
2894 expression (&exp);
2895 memcpy (base, save_buf, p - base);
2896
2897 offset = nbytes - size;
2898 p = frag_more ((int) nbytes);
2899 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
2900 size, &exp, 0, reloc);
2901 }
2902 }
2903 }
2904 }
2905 while (*input_line_pointer++ == ',');
2906
2907 /* Put terminator back into stream. */
2908 input_line_pointer --;
2909 demand_empty_rest_of_line ();
2910}
2911
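/* Illustration of the relocation-suffix syntax handled above.  The set of
   recognized suffixes is defined by parse_reloc (elsewhere in this file);
   assuming it accepts "(GOT)"-style names, a line such as

	.word	some_symbol(GOT)

   emits a 4-byte field carrying the corresponding data relocation instead
   of a plain absolute word.  "(plt)" is rejected here, as above.  */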
2912
2913/* Parse a .rel31 directive. */
2914
2915static void
2916s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
2917{
2918 expressionS exp;
2919 char *p;
2920 valueT highbit;
2921
2922 highbit = 0;
2923 if (*input_line_pointer == '1')
2924 highbit = 0x80000000;
2925 else if (*input_line_pointer != '0')
2926 as_bad (_("expected 0 or 1"));
2927
2928 input_line_pointer++;
2929 if (*input_line_pointer != ',')
2930 as_bad (_("missing comma"));
2931 input_line_pointer++;
2932
2933#ifdef md_flush_pending_output
2934 md_flush_pending_output ();
2935#endif
2936
2937#ifdef md_cons_align
2938 md_cons_align (4);
2939#endif
2940
2941 mapping_state (MAP_DATA);
2942
2943 expression (&exp);
2944
2945 p = frag_more (4);
2946 md_number_to_chars (p, highbit, 4);
2947 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
2948 BFD_RELOC_ARM_PREL31);
2949
2950 demand_empty_rest_of_line ();
2951}
2952
2953/* Directives: AEABI stack-unwind tables. */
2954
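/* Illustrative (hand-written) use of the directives in this group, for a
   function that pushes r4 and lr and then reserves 8 bytes of locals:

	.fnstart
	push	{r4, lr}
	.save	{r4, lr}
	sub	sp, sp, #8
	.pad	#8
	...
	.fnend

   .fnstart records the start address, .save/.pad accumulate unwind
   opcodes, and .fnend emits the index table entry.  */
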
2955/* Parse an unwind_fnstart directive. Simply records the current location. */
2956
2957static void
2958s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
2959{
2960 demand_empty_rest_of_line ();
2961 /* Mark the start of the function. */
2962 unwind.proc_start = expr_build_dot ();
2963
2964 /* Reset the rest of the unwind info. */
2965 unwind.opcode_count = 0;
2966 unwind.table_entry = NULL;
2967 unwind.personality_routine = NULL;
2968 unwind.personality_index = -1;
2969 unwind.frame_size = 0;
2970 unwind.fp_offset = 0;
2971 unwind.fp_reg = 13;
2972 unwind.fp_used = 0;
2973 unwind.sp_restored = 0;
2974}
2975
2976
2977/* Parse a handlerdata directive. Creates the exception handling table entry
2978 for the function. */
2979
2980static void
2981s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
2982{
2983 demand_empty_rest_of_line ();
2984 if (unwind.table_entry)
2985 as_bad (_("duplicate .handlerdata directive"));
2986
2987 create_unwind_entry (1);
2988}
2989
2990/* Parse an unwind_fnend directive. Generates the index table entry. */
2991
2992static void
2993s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
2994{
2995 long where;
2996 char *ptr;
2997 valueT val;
2998
2999 demand_empty_rest_of_line ();
3000
3001 /* Add eh table entry. */
3002 if (unwind.table_entry == NULL)
3003 val = create_unwind_entry (0);
3004 else
3005 val = 0;
3006
3007 /* Add index table entry. This is two words. */
3008 start_unwind_section (unwind.saved_seg, 1);
3009 frag_align (2, 0, 0);
3010 record_alignment (now_seg, 2);
3011
3012 ptr = frag_more (8);
3013 where = frag_now_fix () - 8;
3014
3015 /* Self relative offset of the function start. */
3016 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3017 BFD_RELOC_ARM_PREL31);
3018
3019 /* Indicate dependency on EHABI-defined personality routines to the
3020 linker, if it hasn't been done already. */
3021 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3022 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3023 {
3024 static const char *const name[] = {
3025 "__aeabi_unwind_cpp_pr0",
3026 "__aeabi_unwind_cpp_pr1",
3027 "__aeabi_unwind_cpp_pr2"
3028 };
3029 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3030 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3031 marked_pr_dependency |= 1 << unwind.personality_index;
3032 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3033 = marked_pr_dependency;
3034 }
3035
3036 if (val)
3037 /* Inline exception table entry. */
3038 md_number_to_chars (ptr + 4, val, 4);
3039 else
3040 /* Self relative offset of the table entry. */
3041 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3042 BFD_RELOC_ARM_PREL31);
3043
3044 /* Restore the original section. */
3045 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3046}
3047
3048
3049/* Parse an unwind_cantunwind directive. */
3050
3051static void
3052s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3053{
3054 demand_empty_rest_of_line ();
3055 if (unwind.personality_routine || unwind.personality_index != -1)
3056 as_bad (_("personality routine specified for cantunwind frame"));
3057
3058 unwind.personality_index = -2;
3059}
3060
3061
3062/* Parse a personalityindex directive. */
3063
3064static void
3065s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3066{
3067 expressionS exp;
3068
3069 if (unwind.personality_routine || unwind.personality_index != -1)
3070 as_bad (_("duplicate .personalityindex directive"));
3071
3072 expression (&exp);
3073
3074 if (exp.X_op != O_constant
3075 || exp.X_add_number < 0 || exp.X_add_number > 15)
3076 {
3077 as_bad (_("bad personality routine number"));
3078 ignore_rest_of_line ();
3079 return;
3080 }
3081
3082 unwind.personality_index = exp.X_add_number;
3083
3084 demand_empty_rest_of_line ();
3085}
3086
3087
3088/* Parse a personality directive. */
3089
3090static void
3091s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3092{
3093 char *name, *p, c;
3094
3095 if (unwind.personality_routine || unwind.personality_index != -1)
3096 as_bad (_("duplicate .personality directive"));
3097
3098 name = input_line_pointer;
3099 c = get_symbol_end ();
3100 p = input_line_pointer;
3101 unwind.personality_routine = symbol_find_or_make (name);
3102 *p = c;
3103 demand_empty_rest_of_line ();
3104}
3105
3106
3107/* Parse a directive saving core registers. */
3108
3109static void
3110s_arm_unwind_save_core (void)
3111{
3112 valueT op;
3113 long range;
3114 int n;
3115
3116 range = parse_reg_list (&input_line_pointer);
3117 if (range == FAIL)
3118 {
3119 as_bad (_("expected register list"));
3120 ignore_rest_of_line ();
3121 return;
3122 }
3123
3124 demand_empty_rest_of_line ();
3125
3126 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3127 into .unwind_save {..., sp...}. We aren't bothered about the value of
3128 ip because it is clobbered by calls. */
3129 if (unwind.sp_restored && unwind.fp_reg == 12
3130 && (range & 0x3000) == 0x1000)
3131 {
3132 unwind.opcode_count--;
3133 unwind.sp_restored = 0;
3134 range = (range | 0x2000) & ~0x1000;
3135 unwind.pending_offset = 0;
3136 }
3137
3138 /* Pop r4-r15. */
3139 if (range & 0xfff0)
3140 {
3141 /* See if we can use the short opcodes. These pop a block of up to 8
3142 registers starting with r4, plus maybe r14. */
3143 for (n = 0; n < 8; n++)
3144 {
3145 /* Break at the first non-saved register. */
3146 if ((range & (1 << (n + 4))) == 0)
3147 break;
3148 }
3149 /* See if there are any other bits set. */
3150 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3151 {
3152 /* Use the long form. */
3153 op = 0x8000 | ((range >> 4) & 0xfff);
3154 add_unwind_opcode (op, 2);
3155 }
3156 else
3157 {
3158 /* Use the short form. */
3159 if (range & 0x4000)
3160 op = 0xa8; /* Pop r14. */
3161 else
3162 op = 0xa0; /* Do not pop r14. */
3163 op |= (n - 1);
3164 add_unwind_opcode (op, 1);
3165 }
3166 }
3167
3168 /* Pop r0-r3. */
3169 if (range & 0xf)
3170 {
3171 op = 0xb100 | (range & 0xf);
3172 add_unwind_opcode (op, 2);
3173 }
3174
3175 /* Record the number of bytes pushed. */
3176 for (n = 0; n < 16; n++)
3177 {
3178 if (range & (1 << n))
3179 unwind.frame_size += 4;
3180 }
3181}
3182
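/* Worked example, derived from the encoding above: ".save {r4-r6, lr}"
   has three consecutive registers starting at r4 plus r14, so the short
   form 0xa8 | (3 - 1) = 0xaa is used and unwind.frame_size grows by
   4 * 4 = 16 bytes.  */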
3183
3184/* Parse a directive saving FPA registers. */
3185
3186static void
3187s_arm_unwind_save_fpa (int reg)
3188{
3189 expressionS exp;
3190 int num_regs;
3191 valueT op;
3192
3193 /* Get Number of registers to transfer. */
3194 if (skip_past_comma (&input_line_pointer) != FAIL)
3195 expression (&exp);
3196 else
3197 exp.X_op = O_illegal;
3198
3199 if (exp.X_op != O_constant)
3200 {
3201 as_bad (_("expected , <constant>"));
3202 ignore_rest_of_line ();
3203 return;
3204 }
3205
3206 num_regs = exp.X_add_number;
3207
3208 if (num_regs < 1 || num_regs > 4)
3209 {
3210 as_bad (_("number of registers must be in the range [1:4]"));
3211 ignore_rest_of_line ();
3212 return;
3213 }
3214
3215 demand_empty_rest_of_line ();
3216
3217 if (reg == 4)
3218 {
3219 /* Short form. */
3220 op = 0xb4 | (num_regs - 1);
3221 add_unwind_opcode (op, 1);
3222 }
3223 else
3224 {
3225 /* Long form. */
3226 op = 0xc800 | (reg << 4) | (num_regs - 1);
3227 add_unwind_opcode (op, 2);
3228 }
3229 unwind.frame_size += num_regs * 12;
3230}
3231
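/* Example of the form accepted here (reached via ".save" with an FPA
   register operand): ".save f4, 3" transfers three registers starting at
   f4, giving the short-form opcode 0xb4 | (3 - 1) = 0xb6 and adding
   3 * 12 = 36 bytes to the frame size.  */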
3232
3233/* Parse a directive saving VFP registers. */
3234
3235static void
3236s_arm_unwind_save_vfp (void)
3237{
3238 int count;
3239 unsigned int reg;
3240 valueT op;
3241
3242 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3243 if (count == FAIL)
3244 {
3245 as_bad (_("expected register list"));
3246 ignore_rest_of_line ();
3247 return;
3248 }
3249
3250 demand_empty_rest_of_line ();
3251
3252 if (reg == 8)
3253 {
3254 /* Short form. */
3255 op = 0xb8 | (count - 1);
3256 add_unwind_opcode (op, 1);
3257 }
3258 else
3259 {
3260 /* Long form. */
3261 op = 0xb300 | (reg << 4) | (count - 1);
3262 add_unwind_opcode (op, 2);
3263 }
3264 unwind.frame_size += count * 8 + 4;
3265}
3266
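/* Example: ".save {d8-d11}" parses as four D registers starting at d8, so
   the short form applies: opcode 0xb8 | (4 - 1) = 0xbb, and the frame
   size grows by 4 * 8 + 4 = 36 bytes.  */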
3267
3268/* Parse a directive saving iWMMXt data registers. */
3269
3270static void
3271s_arm_unwind_save_mmxwr (void)
3272{
3273 int reg;
3274 int hi_reg;
3275 int i;
3276 unsigned mask = 0;
3277 valueT op;
3278
3279 if (*input_line_pointer == '{')
3280 input_line_pointer++;
3281
3282 do
3283 {
3284 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3285
3286 if (reg == FAIL)
3287 {
3288 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3289 goto error;
3290 }
3291
3292 if (mask >> reg)
3293 as_tsktsk (_("register list not in ascending order"));
3294 mask |= 1 << reg;
3295
3296 if (*input_line_pointer == '-')
3297 {
3298 input_line_pointer++;
3299 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3300 if (hi_reg == FAIL)
3301 {
3302 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3303 goto error;
3304 }
3305 else if (reg >= hi_reg)
3306 {
3307 as_bad (_("bad register range"));
3308 goto error;
3309 }
3310 for (; reg < hi_reg; reg++)
3311 mask |= 1 << reg;
3312 }
3313 }
3314 while (skip_past_comma (&input_line_pointer) != FAIL);
3315
3316 if (*input_line_pointer == '}')
3317 input_line_pointer++;
3318
3319 demand_empty_rest_of_line ();
3320
3321 /* Generate any deferred opcodes because we're going to be looking at
3322 the list. */
3323 flush_pending_unwind ();
3324
3325 for (i = 0; i < 16; i++)
3326 {
3327 if (mask & (1 << i))
3328 unwind.frame_size += 8;
3329 }
3330
3331 /* Attempt to combine with a previous opcode. We do this because gcc
3332 likes to output separate unwind directives for a single block of
3333 registers. */
3334 if (unwind.opcode_count > 0)
3335 {
3336 i = unwind.opcodes[unwind.opcode_count - 1];
3337 if ((i & 0xf8) == 0xc0)
3338 {
3339 i &= 7;
3340 /* Only merge if the blocks are contiguous. */
3341 if (i < 6)
3342 {
3343 if ((mask & 0xfe00) == (1 << 9))
3344 {
3345 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3346 unwind.opcode_count--;
3347 }
3348 }
3349 else if (i == 6 && unwind.opcode_count >= 2)
3350 {
3351 i = unwind.opcodes[unwind.opcode_count - 2];
3352 reg = i >> 4;
3353 i &= 0xf;
3354
3355 op = 0xffff << (reg - 1);
3356 if (reg > 0
3357 && ((mask & op) == (1u << (reg - 1))))
3358 {
3359 op = (1 << (reg + i + 1)) - 1;
3360 op &= ~((1 << reg) - 1);
3361 mask |= op;
3362 unwind.opcode_count -= 2;
3363 }
3364 }
3365 }
3366 }
3367
3368 hi_reg = 15;
3369 /* We want to generate opcodes in the order the registers have been
3370 saved, i.e. descending order. */
3371 for (reg = 15; reg >= -1; reg--)
3372 {
3373 /* Save registers in blocks. */
3374 if (reg < 0
3375 || !(mask & (1 << reg)))
3376 {
3377 /* We found an unsaved reg. Generate opcodes to save the
3378 preceding block. */
3379 if (reg != hi_reg)
3380 {
3381 if (reg == 9)
3382 {
3383 /* Short form. */
3384 op = 0xc0 | (hi_reg - 10);
3385 add_unwind_opcode (op, 1);
3386 }
3387 else
3388 {
3389 /* Long form. */
3390 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3391 add_unwind_opcode (op, 2);
3392 }
3393 }
3394 hi_reg = reg - 1;
3395 }
3396 }
3397
3398 return;
3399error:
3400 ignore_rest_of_line ();
3401}
3402
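/* Example, derived from the descending scan above: ".save {wr10-wr12}"
   leaves wr13-wr15 unsaved, so a single short opcode
   0xc0 | (12 - 10) = 0xc2 is emitted and the frame grows by
   3 * 8 = 24 bytes.  */
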
3403static void
3404s_arm_unwind_save_mmxwcg (void)
3405{
3406 int reg;
3407 int hi_reg;
3408 unsigned mask = 0;
3409 valueT op;
3410
3411 if (*input_line_pointer == '{')
3412 input_line_pointer++;
3413
3414 do
3415 {
3416 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3417
3418 if (reg == FAIL)
3419 {
3420 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3421 goto error;
3422 }
3423
3424 reg -= 8;
3425 if (mask >> reg)
3426 as_tsktsk (_("register list not in ascending order"));
3427 mask |= 1 << reg;
3428
3429 if (*input_line_pointer == '-')
3430 {
3431 input_line_pointer++;
3432 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3433 if (hi_reg == FAIL)
3434 {
3435 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3436 goto error;
3437 }
3438 else if (reg >= hi_reg)
3439 {
3440 as_bad (_("bad register range"));
3441 goto error;
3442 }
3443 for (; reg < hi_reg; reg++)
3444 mask |= 1 << reg;
3445 }
3446 }
3447 while (skip_past_comma (&input_line_pointer) != FAIL);
3448
3449 if (*input_line_pointer == '}')
3450 input_line_pointer++;
3451
3452 demand_empty_rest_of_line ();
3453
3454 /* Generate any deferred opcodes because we're going to be looking at
3455 the list. */
3456 flush_pending_unwind ();
3457
3458 for (reg = 0; reg < 16; reg++)
3459 {
3460 if (mask & (1 << reg))
3461 unwind.frame_size += 4;
3462 }
3463 op = 0xc700 | mask;
3464 add_unwind_opcode (op, 2);
3465 return;
3466error:
3467 ignore_rest_of_line ();
3468}
3469
3470
3471/* Parse an unwind_save directive. */
3472
3473static void
3474s_arm_unwind_save (int ignored ATTRIBUTE_UNUSED)
3475{
3476 char *peek;
3477 struct reg_entry *reg;
3478 bfd_boolean had_brace = FALSE;
3479
3480 /* Figure out what sort of save we have. */
3481 peek = input_line_pointer;
3482
3483 if (*peek == '{')
3484 {
3485 had_brace = TRUE;
3486 peek++;
3487 }
3488
3489 reg = arm_reg_parse_multi (&peek);
3490
3491 if (!reg)
3492 {
3493 as_bad (_("register expected"));
3494 ignore_rest_of_line ();
3495 return;
3496 }
3497
3498 switch (reg->type)
3499 {
3500 case REG_TYPE_FN:
3501 if (had_brace)
3502 {
3503 as_bad (_("FPA .unwind_save does not take a register list"));
3504 ignore_rest_of_line ();
3505 return;
3506 }
3507 s_arm_unwind_save_fpa (reg->number);
3508 return;
3509
3510 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3511 case REG_TYPE_VFD: s_arm_unwind_save_vfp (); return;
3512 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3513 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3514
3515 default:
3516 as_bad (_(".unwind_save does not support this kind of register"));
3517 ignore_rest_of_line ();
3518 }
3519}
3520
3521
3522/* Parse an unwind_movsp directive. */
3523
3524static void
3525s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3526{
3527 int reg;
3528 valueT op;
3529
3530 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3531 if (reg == FAIL)
3532 {
3533 as_bad (_(reg_expected_msgs[REG_TYPE_RN]));
3534 ignore_rest_of_line ();
3535 return;
3536 }
3537 demand_empty_rest_of_line ();
3538
3539 if (reg == REG_SP || reg == REG_PC)
3540 {
3541 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3542 return;
3543 }
3544
3545 if (unwind.fp_reg != REG_SP)
3546 as_bad (_("unexpected .unwind_movsp directive"));
3547
3548 /* Generate opcode to restore the value. */
3549 op = 0x90 | reg;
3550 add_unwind_opcode (op, 1);
3551
3552 /* Record the information for later. */
3553 unwind.fp_reg = reg;
3554 unwind.fp_offset = unwind.frame_size;
3555 unwind.sp_restored = 1;
3556}
3557
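/* Example: ".movsp ip" records that sp was copied from ip (r12); the
   opcode emitted is 0x90 | 12 = 0x9c, and fp_reg/fp_offset are updated so
   that a later ".save" of ip can be rewritten in terms of sp (see
   s_arm_unwind_save_core above).  */
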
3558/* Parse an unwind_pad directive. */
3559
3560static void
3561s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3562{
3563 int offset;
3564
3565 if (immediate_for_directive (&offset) == FAIL)
3566 return;
3567
3568 if (offset & 3)
3569 {
3570 as_bad (_("stack increment must be multiple of 4"));
3571 ignore_rest_of_line ();
3572 return;
3573 }
3574
3575 /* Don't generate any opcodes, just record the details for later. */
3576 unwind.frame_size += offset;
3577 unwind.pending_offset += offset;
3578
3579 demand_empty_rest_of_line ();
3580}
3581
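/* Example: ".pad #8" (the form typically emitted by gcc) simply adds 8 to
   unwind.frame_size and unwind.pending_offset; the opcode itself is
   produced later by flush_pending_unwind, declared above.  */
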
3582/* Parse an unwind_setfp directive. */
3583
3584static void
3585s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3586{
3587 int sp_reg;
3588 int fp_reg;
3589 int offset;
3590
3591 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3592 if (skip_past_comma (&input_line_pointer) == FAIL)
3593 sp_reg = FAIL;
3594 else
3595 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3596
3597 if (fp_reg == FAIL || sp_reg == FAIL)
3598 {
3599 as_bad (_("expected <reg>, <reg>"));
3600 ignore_rest_of_line ();
3601 return;
3602 }
3603
3604 /* Optional constant. */
3605 if (skip_past_comma (&input_line_pointer) != FAIL)
3606 {
3607 if (immediate_for_directive (&offset) == FAIL)
3608 return;
3609 }
3610 else
3611 offset = 0;
3612
3613 demand_empty_rest_of_line ();
3614
3615 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
3616 {
3617 as_bad (_("register must be either sp or set by a previous "
3618 "unwind_movsp directive"));
3619 return;
3620 }
3621
3622 /* Don't generate any opcodes, just record the information for later. */
3623 unwind.fp_reg = fp_reg;
3624 unwind.fp_used = 1;
3625 if (sp_reg == 13)
3626 unwind.fp_offset = unwind.frame_size - offset;
3627 else
3628 unwind.fp_offset -= offset;
3629}
3630
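/* Example: ".setfp fp, sp, #8" records fp_reg = 11 (fp) and
   fp_offset = unwind.frame_size - 8; as noted above, no opcode is
   emitted at this point.  */
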
3631/* Parse an unwind_raw directive. */
3632
3633static void
3634s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3635{
3636 expressionS exp;
3637 /* This is an arbitrary limit. */
3638 unsigned char op[16];
3639 int count;
3640
3641 expression (&exp);
3642 if (exp.X_op == O_constant
3643 && skip_past_comma (&input_line_pointer) != FAIL)
3644 {
3645 unwind.frame_size += exp.X_add_number;
3646 expression (&exp);
3647 }
3648 else
3649 exp.X_op = O_illegal;
3650
3651 if (exp.X_op != O_constant)
3652 {
3653 as_bad (_("expected <offset>, <opcode>"));
3654 ignore_rest_of_line ();
3655 return;
3656 }
3657
3658 count = 0;
3659
3660 /* Parse the opcode. */
3661 for (;;)
3662 {
3663 if (count >= 16)
3664 {
3665 as_bad (_("unwind opcode too long"));
3666 ignore_rest_of_line ();
 return;
3667 }
3668 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3669 {
3670 as_bad (_("invalid unwind opcode"));
3671 ignore_rest_of_line ();
3672 return;
3673 }
3674 op[count++] = exp.X_add_number;
3675
3676 /* Parse the next byte. */
3677 if (skip_past_comma (&input_line_pointer) == FAIL)
3678 break;
3679
3680 expression (&exp);
3681 }
3682
3683 /* Add the opcode bytes in reverse order. */
3684 while (count--)
3685 add_unwind_opcode (op[count], 1);
3686
3687 demand_empty_rest_of_line ();
3688}
3689
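/* Example of the accepted form: ".unwind_raw 4, 0xb1, 0x01" adds 4 to the
   frame size and records the raw opcode bytes 0xb1 0x01 as given (their
   meaning is defined by the EHABI and is not checked here).  At most 16
   opcode bytes are accepted per directive.  */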
3690
3691/* Parse a .eabi_attribute directive. */
3692
3693static void
3694s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
3695{
3696 expressionS exp;
3697 bfd_boolean is_string;
3698 int tag;
3699 unsigned int i = 0;
3700 char *s = NULL;
3701 char saved_char;
3702
3703 expression (& exp);
3704 if (exp.X_op != O_constant)
3705 goto bad;
3706
3707 tag = exp.X_add_number;
3708 if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
3709 is_string = 1;
3710 else
3711 is_string = 0;
3712
3713 if (skip_past_comma (&input_line_pointer) == FAIL)
3714 goto bad;
3715 if (tag == 32 || !is_string)
3716 {
3717 expression (& exp);
3718 if (exp.X_op != O_constant)
3719 {
3720 as_bad (_("expected numeric constant"));
3721 ignore_rest_of_line ();
3722 return;
3723 }
3724 i = exp.X_add_number;
3725 }
3726 if (tag == Tag_compatibility
3727 && skip_past_comma (&input_line_pointer) == FAIL)
3728 {
3729 as_bad (_("expected comma"));
3730 ignore_rest_of_line ();
3731 return;
3732 }
3733 if (is_string)
3734 {
3735 skip_whitespace(input_line_pointer);
3736 if (*input_line_pointer != '"')
3737 goto bad_string;
3738 input_line_pointer++;
3739 s = input_line_pointer;
3740 while (*input_line_pointer && *input_line_pointer != '"')
3741 input_line_pointer++;
3742 if (*input_line_pointer != '"')
3743 goto bad_string;
3744 saved_char = *input_line_pointer;
3745 *input_line_pointer = 0;
3746 }
3747 else
3748 {
3749 s = NULL;
3750 saved_char = 0;
3751 }
3752
3753 if (tag == Tag_compatibility)
3754 elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
3755 else if (is_string)
3756 elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
3757 else
3758 elf32_arm_add_eabi_attr_int (stdoutput, tag, i);
3759
3760 if (s)
3761 {
3762 *input_line_pointer = saved_char;
3763 input_line_pointer++;
3764 }
3765 demand_empty_rest_of_line ();
3766 return;
3767bad_string:
3768 as_bad (_("bad string constant"));
3769 ignore_rest_of_line ();
3770 return;
3771bad:
3772 as_bad (_("expected <tag> , <value>"));
3773 ignore_rest_of_line ();
3774}
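
/* Examples of the two accepted forms (the meaning of each tag number is
   defined by the ARM EABI, not by this parser):

	.eabi_attribute 20, 1		@ integer-valued tag
	.eabi_attribute 5, "arm926ej-s"	@ string-valued tag (4, 5, 32 and
					@ odd tags above 32 take strings)

   Tag_compatibility (32) takes an integer followed by a string.  */
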
3775#endif /* OBJ_ELF */
3776
3777static void s_arm_arch (int);
3778static void s_arm_cpu (int);
3779static void s_arm_fpu (int);
3780
3781/* This table describes all the machine specific pseudo-ops the assembler
3782 has to support. The fields are:
3783 pseudo-op name without dot
3784 function to call to execute this pseudo-op
3785 Integer arg to pass to the function. */
3786
3787const pseudo_typeS md_pseudo_table[] =
3788{
3789 /* Never called because '.req' does not start a line. */
3790 { "req", s_req, 0 },
3791 /* Following two are likewise never called. */
3792 { "dn", s_dn, 0 },
3793 { "qn", s_qn, 0 },
3794 { "unreq", s_unreq, 0 },
3795 { "bss", s_bss, 0 },
3796 { "align", s_align, 0 },
3797 { "arm", s_arm, 0 },
3798 { "thumb", s_thumb, 0 },
3799 { "code", s_code, 0 },
3800 { "force_thumb", s_force_thumb, 0 },
3801 { "thumb_func", s_thumb_func, 0 },
3802 { "thumb_set", s_thumb_set, 0 },
3803 { "even", s_even, 0 },
3804 { "ltorg", s_ltorg, 0 },
3805 { "pool", s_ltorg, 0 },
3806 { "syntax", s_syntax, 0 },
3807 { "cpu", s_arm_cpu, 0 },
3808 { "arch", s_arm_arch, 0 },
3809 { "fpu", s_arm_fpu, 0 },
3810#ifdef OBJ_ELF
3811 { "word", s_arm_elf_cons, 4 },
3812 { "long", s_arm_elf_cons, 4 },
3813 { "rel31", s_arm_rel31, 0 },
3814 { "fnstart", s_arm_unwind_fnstart, 0 },
3815 { "fnend", s_arm_unwind_fnend, 0 },
3816 { "cantunwind", s_arm_unwind_cantunwind, 0 },
3817 { "personality", s_arm_unwind_personality, 0 },
3818 { "personalityindex", s_arm_unwind_personalityindex, 0 },
3819 { "handlerdata", s_arm_unwind_handlerdata, 0 },
3820 { "save", s_arm_unwind_save, 0 },
3821 { "movsp", s_arm_unwind_movsp, 0 },
3822 { "pad", s_arm_unwind_pad, 0 },
3823 { "setfp", s_arm_unwind_setfp, 0 },
3824 { "unwind_raw", s_arm_unwind_raw, 0 },
3825 { "eabi_attribute", s_arm_eabi_attribute, 0 },
3826#else
3827 { "word", cons, 4},
3828#endif
3829 { "extend", float_cons, 'x' },
3830 { "ldouble", float_cons, 'x' },
3831 { "packed", float_cons, 'p' },
3832 { 0, 0, 0 }
3833};
3834\f
3835/* Parser functions used exclusively in instruction operands. */
3836
3837/* Generic immediate-value read function for use in insn parsing.
3838 STR points to the beginning of the immediate (the leading #);
3839 VAL receives the value; if the value is outside [MIN, MAX]
3840 issue an error. PREFIX_OPT is true if the immediate prefix is
3841 optional. */
3842
3843static int
3844parse_immediate (char **str, int *val, int min, int max,
3845 bfd_boolean prefix_opt)
3846{
3847 expressionS exp;
3848 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
3849 if (exp.X_op != O_constant)
3850 {
3851 inst.error = _("constant expression required");
3852 return FAIL;
3853 }
3854
3855 if (exp.X_add_number < min || exp.X_add_number > max)
3856 {
3857 inst.error = _("immediate value out of range");
3858 return FAIL;
3859 }
3860
3861 *val = exp.X_add_number;
3862 return SUCCESS;
3863}
3864
3865/* Less-generic immediate-value read function with the possibility of loading a
3866 big (64-bit) immediate, as required by Neon VMOV and VMVN immediate
3867 instructions. Puts the result directly in inst.operands[i]. */
3868
3869static int
3870parse_big_immediate (char **str, int i)
3871{
3872 expressionS exp;
3873 char *ptr = *str;
3874
3875 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
3876
3877 if (exp.X_op == O_constant)
3878 inst.operands[i].imm = exp.X_add_number;
3879 else if (exp.X_op == O_big
3880 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
3881 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
3882 {
3883 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
3884 /* Bignums have their least significant bits in
3885 generic_bignum[0]. Make sure we put 32 bits in imm and
3886 32 bits in reg, in a (hopefully) portable way. */
3887 assert (parts != 0);
3888 inst.operands[i].imm = 0;
3889 for (j = 0; j < parts; j++, idx++)
3890 inst.operands[i].imm |= generic_bignum[idx]
3891 << (LITTLENUM_NUMBER_OF_BITS * j);
3892 inst.operands[i].reg = 0;
3893 for (j = 0; j < parts; j++, idx++)
3894 inst.operands[i].reg |= generic_bignum[idx]
3895 << (LITTLENUM_NUMBER_OF_BITS * j);
3896 inst.operands[i].regisimm = 1;
3897 }
3898 else
3899 return FAIL;
3900
3901 *str = ptr;
3902
3903 return SUCCESS;
3904}
3905
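/* For instance, when a Neon immediate such as #0x0123456789abcdef comes
   through as an O_big value, the low 32 bits (0x89abcdef) end up in
   inst.operands[i].imm and the high 32 bits (0x01234567) in
   inst.operands[i].reg, with regisimm set.  */
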
3906/* Returns the pseudo-register number of an FPA immediate constant,
3907 or FAIL if there isn't a valid constant here. */
3908
3909static int
3910parse_fpa_immediate (char ** str)
3911{
3912 LITTLENUM_TYPE words[MAX_LITTLENUMS];
3913 char * save_in;
3914 expressionS exp;
3915 int i;
3916 int j;
3917
3918 /* First try to match exact strings; this guarantees
3919 that some formats will work even for cross assembly. */
3920
3921 for (i = 0; fp_const[i]; i++)
3922 {
3923 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
3924 {
3925 char *start = *str;
3926
3927 *str += strlen (fp_const[i]);
3928 if (is_end_of_line[(unsigned char) **str])
3929 return i + 8;
3930 *str = start;
3931 }
3932 }
3933
3934 /* Just because we didn't get a match doesn't mean that the constant
3935 isn't valid, just that it is in a format that we don't
3936 automatically recognize. Try parsing it with the standard
3937 expression routines. */
3938
3939 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
3940
3941 /* Look for a raw floating point number. */
3942 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
3943 && is_end_of_line[(unsigned char) *save_in])
3944 {
3945 for (i = 0; i < NUM_FLOAT_VALS; i++)
3946 {
3947 for (j = 0; j < MAX_LITTLENUMS; j++)
3948 {
3949 if (words[j] != fp_values[i][j])
3950 break;
3951 }
3952
3953 if (j == MAX_LITTLENUMS)
3954 {
3955 *str = save_in;
3956 return i + 8;
3957 }
3958 }
3959 }
3960
3961 /* Try to parse a more complex expression; this will probably fail
3962 unless the code uses a floating point prefix (e.g. "0f").
3963 save_in = input_line_pointer;
3964 input_line_pointer = *str;
3965 if (expression (&exp) == absolute_section
3966 && exp.X_op == O_big
3967 && exp.X_add_number < 0)
3968 {
3969 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
3970 Ditto for 15. */
3971 if (gen_to_words (words, 5, (long) 15) == 0)
3972 {
3973 for (i = 0; i < NUM_FLOAT_VALS; i++)
3974 {
3975 for (j = 0; j < MAX_LITTLENUMS; j++)
3976 {
3977 if (words[j] != fp_values[i][j])
3978 break;
3979 }
3980
3981 if (j == MAX_LITTLENUMS)
3982 {
3983 *str = input_line_pointer;
3984 input_line_pointer = save_in;
3985 return i + 8;
3986 }
3987 }
3988 }
3989 }
3990
3991 *str = input_line_pointer;
3992 input_line_pointer = save_in;
3993 inst.error = _("invalid FPA immediate expression");
3994 return FAIL;
3995}
3996
3997/* Returns 1 if a number has "quarter-precision" float format
3998 0baBbbbbbc defgh000 00000000 00000000. */
3999
4000static int
4001is_quarter_float (unsigned imm)
4002{
4003 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4004 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4005}
4006
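/* For instance, 1.0f (0x3f800000) matches this pattern, while 0.1f
   (0x3dcccccd) does not, because its low 19 bits are non-zero.  */
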
4007/* Parse an 8-bit "quarter-precision" floating point number of the form:
4008 0baBbbbbbc defgh000 00000000 00000000.
4009 The minus-zero case needs special handling, since it can't be encoded in the
4010 "quarter-precision" float format, but can nonetheless be loaded as an integer
4011 constant. */
4012
4013static unsigned
4014parse_qfloat_immediate (char **ccp, int *immed)
4015{
4016 char *str = *ccp;
4017 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4018
4019 skip_past_char (&str, '#');
4020
4021 if ((str = atof_ieee (str, 's', words)) != NULL)
4022 {
4023 unsigned fpword = 0;
4024 int i;
4025
4026 /* Our FP word must be 32 bits (single-precision FP). */
4027 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4028 {
4029 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4030 fpword |= words[i];
4031 }
4032
4033 if (is_quarter_float (fpword) || fpword == 0x80000000)
4034 *immed = fpword;
4035 else
4036 return FAIL;
4037
4038 *ccp = str;
4039
4040 return SUCCESS;
4041 }
4042
4043 return FAIL;
4044}
4045
4046/* Shift operands. */
4047enum shift_kind
4048{
4049 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4050};
4051
4052struct asm_shift_name
4053{
4054 const char *name;
4055 enum shift_kind kind;
4056};
4057
4058/* Third argument to parse_shift. */
4059enum parse_shift_mode
4060{
4061 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4062 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4063 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4064 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4065 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4066};
4067
4068/* Parse a <shift> specifier on an ARM data processing instruction.
4069 This has three forms:
4070
4071 (LSL|LSR|ASL|ASR|ROR) Rs
4072 (LSL|LSR|ASL|ASR|ROR) #imm
4073 RRX
4074
4075 Note that ASL is assimilated to LSL in the instruction encoding, and
4076 RRX to ROR #0 (which cannot be written as such). */
4077
4078static int
4079parse_shift (char **str, int i, enum parse_shift_mode mode)
4080{
4081 const struct asm_shift_name *shift_name;
4082 enum shift_kind shift;
4083 char *s = *str;
4084 char *p = s;
4085 int reg;
4086
4087 for (p = *str; ISALPHA (*p); p++)
4088 ;
4089
4090 if (p == *str)
4091 {
4092 inst.error = _("shift expression expected");
4093 return FAIL;
4094 }
4095
4096 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4097
4098 if (shift_name == NULL)
4099 {
4100 inst.error = _("shift expression expected");
4101 return FAIL;
4102 }
4103
4104 shift = shift_name->kind;
4105
4106 switch (mode)
4107 {
4108 case NO_SHIFT_RESTRICT:
4109 case SHIFT_IMMEDIATE: break;
4110
4111 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4112 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4113 {
4114 inst.error = _("'LSL' or 'ASR' required");
4115 return FAIL;
4116 }
4117 break;
4118
4119 case SHIFT_LSL_IMMEDIATE:
4120 if (shift != SHIFT_LSL)
4121 {
4122 inst.error = _("'LSL' required");
4123 return FAIL;
4124 }
4125 break;
4126
4127 case SHIFT_ASR_IMMEDIATE:
4128 if (shift != SHIFT_ASR)
4129 {
4130 inst.error = _("'ASR' required");
4131 return FAIL;
4132 }
4133 break;
4134
4135 default: abort ();
4136 }
4137
4138 if (shift != SHIFT_RRX)
4139 {
4140 /* Whitespace can appear here if the next thing is a bare digit. */
4141 skip_whitespace (p);
4142
4143 if (mode == NO_SHIFT_RESTRICT
4144 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4145 {
4146 inst.operands[i].imm = reg;
4147 inst.operands[i].immisreg = 1;
4148 }
4149 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4150 return FAIL;
4151 }
4152 inst.operands[i].shift_kind = shift;
4153 inst.operands[i].shifted = 1;
4154 *str = p;
4155 return SUCCESS;
4156}
4157
4158/* Parse a <shifter_operand> for an ARM data processing instruction:
4159
4160 #<immediate>
4161 #<immediate>, <rotate>
4162 <Rm>
4163 <Rm>, <shift>
4164
4165 where <shift> is defined by parse_shift above, and <rotate> is a
4166 multiple of 2 between 0 and 30. Validation of immediate operands
4167 is deferred to md_apply_fix. */
4168
4169static int
4170parse_shifter_operand (char **str, int i)
4171{
4172 int value;
4173 expressionS expr;
4174
4175 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4176 {
4177 inst.operands[i].reg = value;
4178 inst.operands[i].isreg = 1;
4179
4180 /* parse_shift will override this if appropriate */
4181 inst.reloc.exp.X_op = O_constant;
4182 inst.reloc.exp.X_add_number = 0;
4183
4184 if (skip_past_comma (str) == FAIL)
4185 return SUCCESS;
4186
4187 /* Shift operation on register. */
4188 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4189 }
4190
4191 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4192 return FAIL;
4193
4194 if (skip_past_comma (str) == SUCCESS)
4195 {
4196 /* #x, y -- ie explicit rotation by Y. */
4197 if (my_get_expression (&expr, str, GE_NO_PREFIX))
4198 return FAIL;
4199
4200 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4201 {
4202 inst.error = _("constant expression expected");
4203 return FAIL;
4204 }
4205
4206 value = expr.X_add_number;
4207 if (value < 0 || value > 30 || value % 2 != 0)
4208 {
4209 inst.error = _("invalid rotation");
4210 return FAIL;
4211 }
4212 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4213 {
4214 inst.error = _("invalid constant");
4215 return FAIL;
4216 }
4217
4218 /* Convert to decoded value. md_apply_fix will put it back. */
4219 inst.reloc.exp.X_add_number
4220 = (((inst.reloc.exp.X_add_number << (32 - value))
4221 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
4222 }
4223
4224 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4225 inst.reloc.pc_rel = 0;
4226 return SUCCESS;
4227}
4228
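/* Example of the explicit-rotation form above: "#0xff, 8" stores the
   decoded value 0xff rotated right by 8, i.e. 0xff000000, in
   inst.reloc.exp; md_apply_fix later puts it back into the instruction's
   8-bit-immediate-plus-rotation field.  */
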
4229/* Parse all forms of an ARM address expression. Information is written
4230 to inst.operands[i] and/or inst.reloc.
4231
4232 Preindexed addressing (.preind=1):
4233
4234 [Rn, #offset] .reg=Rn .reloc.exp=offset
4235 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4236 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4237 .shift_kind=shift .reloc.exp=shift_imm
4238
4239 These three may have a trailing ! which causes .writeback to be set also.
4240
4241 Postindexed addressing (.postind=1, .writeback=1):
4242
4243 [Rn], #offset .reg=Rn .reloc.exp=offset
4244 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4245 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4246 .shift_kind=shift .reloc.exp=shift_imm
4247
4248 Unindexed addressing (.preind=0, .postind=0):
4249
4250 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4251
4252 Other:
4253
4254 [Rn]{!} shorthand for [Rn,#0]{!}
4255 =immediate .isreg=0 .reloc.exp=immediate
4256 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4257
4258 It is the caller's responsibility to check for addressing modes not
4259 supported by the instruction, and to set inst.reloc.type. */
4260
4261static int
4262parse_address (char **str, int i)
4263{
4264 char *p = *str;
4265 int reg;
4266
4267 if (skip_past_char (&p, '[') == FAIL)
4268 {
4269 if (skip_past_char (&p, '=') == FAIL)
4270 {
4271 /* bare address - translate to PC-relative offset */
4272 inst.reloc.pc_rel = 1;
4273 inst.operands[i].reg = REG_PC;
4274 inst.operands[i].isreg = 1;
4275 inst.operands[i].preind = 1;
4276 }
4277 /* else a load-constant pseudo op, no special treatment needed here */
4278
4279 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4280 return FAIL;
4281
4282 *str = p;
4283 return SUCCESS;
4284 }
4285
4286 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4287 {
4288 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4289 return FAIL;
4290 }
4291 inst.operands[i].reg = reg;
4292 inst.operands[i].isreg = 1;
4293
4294 if (skip_past_comma (&p) == SUCCESS)
4295 {
4296 inst.operands[i].preind = 1;
4297
4298 if (*p == '+') p++;
4299 else if (*p == '-') p++, inst.operands[i].negative = 1;
4300
4301 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4302 {
4303 inst.operands[i].imm = reg;
4304 inst.operands[i].immisreg = 1;
4305
4306 if (skip_past_comma (&p) == SUCCESS)
4307 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4308 return FAIL;
4309 }
4310 else if (skip_past_char (&p, ':') == SUCCESS)
4311 {
4312 /* FIXME: '@' should be used here, but it's filtered out by generic
4313 code before we get to see it here. This may be subject to
4314 change. */
4315 expressionS exp;
4316 my_get_expression (&exp, &p, GE_NO_PREFIX);
4317 if (exp.X_op != O_constant)
4318 {
4319 inst.error = _("alignment must be constant");
4320 return FAIL;
4321 }
4322 inst.operands[i].imm = exp.X_add_number << 8;
4323 inst.operands[i].immisalign = 1;
4324 /* Alignments are not pre-indexes. */
4325 inst.operands[i].preind = 0;
4326 }
4327 else
4328 {
4329 if (inst.operands[i].negative)
4330 {
4331 inst.operands[i].negative = 0;
4332 p--;
4333 }
4334 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4335 return FAIL;
4336 }
4337 }
4338
4339 if (skip_past_char (&p, ']') == FAIL)
4340 {
4341 inst.error = _("']' expected");
4342 return FAIL;
4343 }
4344
4345 if (skip_past_char (&p, '!') == SUCCESS)
4346 inst.operands[i].writeback = 1;
4347
4348 else if (skip_past_comma (&p) == SUCCESS)
4349 {
4350 if (skip_past_char (&p, '{') == SUCCESS)
4351 {
4352 /* [Rn], {expr} - unindexed, with option */
4353 if (parse_immediate (&p, &inst.operands[i].imm,
4354 0, 255, TRUE) == FAIL)
4355 return FAIL;
4356
4357 if (skip_past_char (&p, '}') == FAIL)
4358 {
4359 inst.error = _("'}' expected at end of 'option' field");
4360 return FAIL;
4361 }
4362 if (inst.operands[i].preind)
4363 {
4364 inst.error = _("cannot combine index with option");
4365 return FAIL;
4366 }
4367 *str = p;
4368 return SUCCESS;
4369 }
4370 else
4371 {
4372 inst.operands[i].postind = 1;
4373 inst.operands[i].writeback = 1;
4374
4375 if (inst.operands[i].preind)
4376 {
4377 inst.error = _("cannot combine pre- and post-indexing");
4378 return FAIL;
4379 }
4380
4381 if (*p == '+') p++;
4382 else if (*p == '-') p++, inst.operands[i].negative = 1;
4383
4384 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4385 {
4386 /* We might be using the immediate for alignment already. If we
4387 are, OR the register number into the low-order bits. */
4388 if (inst.operands[i].immisalign)
4389 inst.operands[i].imm |= reg;
4390 else
4391 inst.operands[i].imm = reg;
4392 inst.operands[i].immisreg = 1;
4393
4394 if (skip_past_comma (&p) == SUCCESS)
4395 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4396 return FAIL;
4397 }
4398 else
4399 {
4400 if (inst.operands[i].negative)
4401 {
4402 inst.operands[i].negative = 0;
4403 p--;
4404 }
4405 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4406 return FAIL;
4407 }
4408 }
4409 }
4410
4411 /* If at this point neither .preind nor .postind is set, we have a
4412 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4413 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
4414 {
4415 inst.operands[i].preind = 1;
4416 inst.reloc.exp.X_op = O_constant;
4417 inst.reloc.exp.X_add_number = 0;
4418 }
4419 *str = p;
4420 return SUCCESS;
4421}
4422
4423/* Miscellaneous. */
4424
4425/* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4426 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4427static int
4428parse_psr (char **str)
4429{
4430 char *p;
4431 unsigned long psr_field;
4432 const struct asm_psr *psr;
4433 char *start;
4434
4435 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4436 feature for ease of use and backwards compatibility. */
4437 p = *str;
4438 if (strncasecmp (p, "SPSR", 4) == 0)
4439 psr_field = SPSR_BIT;
4440 else if (strncasecmp (p, "CPSR", 4) == 0)
4441 psr_field = 0;
4442 else
4443 {
4444 start = p;
4445 do
4446 p++;
4447 while (ISALNUM (*p) || *p == '_');
4448
4449 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
4450 if (!psr)
4451 return FAIL;
4452
4453 *str = p;
4454 return psr->field;
4455 }
4456
4457 p += 4;
4458 if (*p == '_')
4459 {
4460 /* A suffix follows. */
4461 p++;
4462 start = p;
4463
4464 do
4465 p++;
4466 while (ISALNUM (*p) || *p == '_');
4467
4468 psr = hash_find_n (arm_psr_hsh, start, p - start);
4469 if (!psr)
4470 goto error;
4471
4472 psr_field |= psr->field;
4473 }
4474 else
4475 {
4476 if (ISALNUM (*p))
4477 goto error; /* Garbage after "[CS]PSR". */
4478
4479 psr_field |= (PSR_c | PSR_f);
4480 }
4481 *str = p;
4482 return psr_field;
4483
4484 error:
4485 inst.error = _("flag for {c}psr instruction expected");
4486 return FAIL;
4487}
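
/* For example, "msr CPSR_fc, r0" reaches this point with *str at
   "CPSR_fc"; the "fc" suffix is looked up in arm_psr_hsh (which should
   yield PSR_c | PSR_f for that entry), while a bare "CPSR" or "SPSR"
   with no suffix defaults to the same PSR_c | PSR_f mask.  */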
4488
4489/* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4490 value suitable for splatting into the AIF field of the instruction. */
4491
4492static int
4493parse_cps_flags (char **str)
4494{
4495 int val = 0;
4496 int saw_a_flag = 0;
4497 char *s = *str;
4498
4499 for (;;)
4500 switch (*s++)
4501 {
4502 case '\0': case ',':
4503 goto done;
4504
4505 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4506 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4507 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4508
4509 default:
4510 inst.error = _("unrecognized CPS flag");
4511 return FAIL;
4512 }
4513
4514 done:
4515 if (saw_a_flag == 0)
4516 {
4517 inst.error = _("missing CPS flags");
4518 return FAIL;
4519 }
4520
4521 *str = s - 1;
4522 return val;
4523}
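
/* For example, "cpsie if" arrives here with *str pointing at "if":
   'i' contributes 0x2, 'f' contributes 0x1, and the function returns
   0x3 with *str left on the terminating NUL or the following comma.  */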
4524
4525/* Parse an endian specifier ("BE" or "LE", case insensitive); returns
4526 FAIL on error, otherwise nonzero for "BE" and zero for "LE". */
4527
4528static int
4529parse_endian_specifier (char **str)
4530{
4531 int little_endian;
4532 char *s = *str;
4533
4534 if (strncasecmp (s, "BE", 2))
4535 little_endian = 0;
4536 else if (strncasecmp (s, "LE", 2))
4537 little_endian = 1;
4538 else
4539 {
4540 inst.error = _("valid endian specifiers are be or le");
4541 return FAIL;
4542 }
4543
4544 if (ISALNUM (s[2]) || s[2] == '_')
4545 {
4546 inst.error = _("valid endian specifiers are be or le");
4547 return FAIL;
4548 }
4549
4550 *str = s + 2;
4551 return little_endian;
4552}
4553
4554/* Parse a rotation specifier: ROR #0, #8, #16, #24. Returns a value
4555 suitable for poking into the rotate field of an sxt or sxta
4556 instruction, or FAIL on error. */
4557
4558static int
4559parse_ror (char **str)
4560{
4561 int rot;
4562 char *s = *str;
4563
4564 if (strncasecmp (s, "ROR", 3) == 0)
4565 s += 3;
4566 else
4567 {
4568 inst.error = _("missing rotation field after comma");
4569 return FAIL;
4570 }
4571
4572 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
4573 return FAIL;
4574
4575 switch (rot)
4576 {
4577 case 0: *str = s; return 0x0;
4578 case 8: *str = s; return 0x1;
4579 case 16: *str = s; return 0x2;
4580 case 24: *str = s; return 0x3;
4581
4582 default:
4583 inst.error = _("rotation can only be 0, 8, 16, or 24");
4584 return FAIL;
4585 }
4586}
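
/* For example, the third operand of "sxtb r0, r1, ror #16" is parsed
   here; the immediate 16 maps to the encoded rotate value 0x2.  */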
4587
4588/* Parse a conditional code (from conds[] below). The value returned is in the
4589 range 0 .. 14, or FAIL. */
4590static int
4591parse_cond (char **str)
4592{
4593 char *p, *q;
4594 const struct asm_cond *c;
4595
4596 p = q = *str;
4597 while (ISALPHA (*q))
4598 q++;
4599
4600 c = hash_find_n (arm_cond_hsh, p, q - p);
4601 if (!c)
4602 {
4603 inst.error = _("condition required");
4604 return FAIL;
4605 }
4606
4607 *str = q;
4608 return c->value;
4609}
4610
4611/* Parse an option for a barrier instruction. Returns the encoding for the
4612 option, or FAIL. */
4613static int
4614parse_barrier (char **str)
4615{
4616 char *p, *q;
4617 const struct asm_barrier_opt *o;
4618
4619 p = q = *str;
4620 while (ISALPHA (*q))
4621 q++;
4622
4623 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
4624 if (!o)
4625 return FAIL;
4626
4627 *str = q;
4628 return o->value;
4629}
4630
4631/* Parse the operands of a table branch instruction. Similar to a memory
4632 operand. */
4633static int
4634parse_tb (char **str)
4635{
4636 char * p = *str;
4637 int reg;
4638
4639 if (skip_past_char (&p, '[') == FAIL)
4640 {
4641 inst.error = _("'[' expected");
4642 return FAIL;
4643 }
4644
4645 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4646 {
4647 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4648 return FAIL;
4649 }
4650 inst.operands[0].reg = reg;
4651
4652 if (skip_past_comma (&p) == FAIL)
4653 {
4654 inst.error = _("',' expected");
4655 return FAIL;
4656 }
4657
4658 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4659 {
4660 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4661 return FAIL;
4662 }
4663 inst.operands[0].imm = reg;
4664
4665 if (skip_past_comma (&p) == SUCCESS)
4666 {
4667 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
4668 return FAIL;
4669 if (inst.reloc.exp.X_add_number != 1)
4670 {
4671 inst.error = _("invalid shift");
4672 return FAIL;
4673 }
4674 inst.operands[0].shifted = 1;
4675 }
4676
4677 if (skip_past_char (&p, ']') == FAIL)
4678 {
4679 inst.error = _("']' expected");
4680 return FAIL;
4681 }
4682 *str = p;
4683 return SUCCESS;
4684}
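
/* For example, "tbh [r0, r1, lsl #1]" parses to operands[0].reg = 0,
   operands[0].imm = 1 and operands[0].shifted = 1, while
   "tbb [r0, r1]" follows the same path without the shift.  */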
4685
4686/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
4687 information on the types the operands can take and how they are encoded.
4688 Note particularly the abuse of ".regisimm" to signify a Neon register.
4689 Up to three operands may be read; this function handles setting the
4690 ".present" field for each operand itself.
4691 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
4692 else returns FAIL. */
4693
4694static int
4695parse_neon_mov (char **str, int *which_operand)
4696{
4697 int i = *which_operand, val;
4698 enum arm_reg_type rtype;
4699 char *ptr = *str;
4700 struct neon_type_el optype;
4701
4702 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
4703 {
4704 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
4705 inst.operands[i].reg = val;
4706 inst.operands[i].isscalar = 1;
4707 inst.operands[i].vectype = optype;
4708 inst.operands[i++].present = 1;
4709
4710 if (skip_past_comma (&ptr) == FAIL)
4711 goto wanted_comma;
4712
4713 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
4714 goto wanted_arm;
4715
4716 inst.operands[i].reg = val;
4717 inst.operands[i].isreg = 1;
4718 inst.operands[i].present = 1;
4719 }
4720 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NDQ, &rtype, &optype))
4721 != FAIL)
4722 {
4723 /* Cases 0, 1, 2, 3, 5 (D only). */
4724 if (skip_past_comma (&ptr) == FAIL)
4725 goto wanted_comma;
4726
4727 inst.operands[i].reg = val;
4728 inst.operands[i].isreg = 1;
4729 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
4730 inst.operands[i].vectype = optype;
4731 inst.operands[i++].present = 1;
4732
4733 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4734 {
4735 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>. */
4736 inst.operands[i-1].regisimm = 1;
4737 inst.operands[i].reg = val;
4738 inst.operands[i].isreg = 1;
4739 inst.operands[i++].present = 1;
4740
4741 if (rtype == REG_TYPE_NQ)
4742 {
4743 first_error (_("can't use Neon quad register here"));
4744 return FAIL;
4745 }
4746 if (skip_past_comma (&ptr) == FAIL)
4747 goto wanted_comma;
4748 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
4749 goto wanted_arm;
4750 inst.operands[i].reg = val;
4751 inst.operands[i].isreg = 1;
4752 inst.operands[i].present = 1;
4753 }
4754 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
4755 {
4756 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
4757 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm> */
4758 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4759 goto bad_cond;
4760 }
4761 else if (parse_big_immediate (&ptr, i) == SUCCESS)
4762 {
4763 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
4764 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
4765 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4766 goto bad_cond;
4767 }
4768 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NDQ, &rtype, &optype))
4769 != FAIL)
4770 {
4771 /* Case 0: VMOV<c><q> <Qd>, <Qm>
4772 Case 1: VMOV<c><q> <Dd>, <Dm> */
4773 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4774 goto bad_cond;
4775
4776 inst.operands[i].reg = val;
4777 inst.operands[i].isreg = 1;
4778 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
4779 inst.operands[i].vectype = optype;
4780 inst.operands[i].present = 1;
4781 }
4782 else
4783 {
4784 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
4785 return FAIL;
4786 }
4787 }
4788 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4789 {
4790 /* Cases 6, 7. */
4791 inst.operands[i].reg = val;
4792 inst.operands[i].isreg = 1;
4793 inst.operands[i++].present = 1;
4794
4795 if (skip_past_comma (&ptr) == FAIL)
4796 goto wanted_comma;
4797
4798 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
4799 {
4800 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
4801 inst.operands[i].reg = val;
4802 inst.operands[i].isscalar = 1;
4803 inst.operands[i].present = 1;
4804 inst.operands[i].vectype = optype;
4805 }
4806 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4807 {
4808 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
4809 inst.operands[i].reg = val;
4810 inst.operands[i].isreg = 1;
4811 inst.operands[i++].present = 1;
4812
4813 if (skip_past_comma (&ptr) == FAIL)
4814 goto wanted_comma;
4815
4816 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFD, NULL, &optype))
4817 == FAIL)
4818 {
4819 first_error (_(reg_expected_msgs[REG_TYPE_VFD]));
4820 return FAIL;
4821 }
4822
4823 inst.operands[i].reg = val;
4824 inst.operands[i].isreg = 1;
4825 inst.operands[i].regisimm = 1;
4826 inst.operands[i].vectype = optype;
4827 inst.operands[i].present = 1;
4828 }
4829 }
4830 else
4831 {
4832 first_error (_("parse error"));
4833 return FAIL;
4834 }
4835
4836 /* Successfully parsed the operands. Update args. */
4837 *which_operand = i;
4838 *str = ptr;
4839 return SUCCESS;
4840
4841 wanted_comma:
4842 first_error (_("expected comma"));
4843 return FAIL;
4844
4845 wanted_arm:
4846 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
4847 return FAIL;
4848
4849 bad_cond:
4850 first_error (_("instruction cannot be conditionalized"));
4851 return FAIL;
4852}
4853
4854/* Matcher codes for parse_operands. */
4855enum operand_parse_code
4856{
4857 OP_stop, /* end of line */
4858
4859 OP_RR, /* ARM register */
4860 OP_RRnpc, /* ARM register, not r15 */
4861 OP_RRnpcb, /* ARM register, not r15, in square brackets */
4862 OP_RRw, /* ARM register, not r15, optional trailing ! */
4863 OP_RCP, /* Coprocessor number */
4864 OP_RCN, /* Coprocessor register */
4865 OP_RF, /* FPA register */
4866 OP_RVS, /* VFP single precision register */
4867 OP_RVD, /* VFP double precision register (0..15) */
4868 OP_RND, /* Neon double precision register (0..31) */
4869 OP_RNQ, /* Neon quad precision register */
4870 OP_RNDQ, /* Neon double or quad precision register */
4871 OP_RNSC, /* Neon scalar D[X] */
4872 OP_RVC, /* VFP control register */
4873 OP_RMF, /* Maverick F register */
4874 OP_RMD, /* Maverick D register */
4875 OP_RMFX, /* Maverick FX register */
4876 OP_RMDX, /* Maverick DX register */
4877 OP_RMAX, /* Maverick AX register */
4878 OP_RMDS, /* Maverick DSPSC register */
4879 OP_RIWR, /* iWMMXt wR register */
4880 OP_RIWC, /* iWMMXt wC register */
4881 OP_RIWG, /* iWMMXt wCG register */
4882 OP_RXA, /* XScale accumulator register */
4883
4884 OP_REGLST, /* ARM register list */
4885 OP_VRSLST, /* VFP single-precision register list */
4886 OP_VRDLST, /* VFP double-precision register list */
4887 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
4888 OP_NSTRLST, /* Neon element/structure list */
4889
4890 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
4891 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
4892 OP_RR_RNSC, /* ARM reg or Neon scalar. */
4893 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
4894 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
4895 OP_VMOV, /* Neon VMOV operands. */
4896 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
4897 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
4898
4899 OP_I0, /* immediate zero */
4900 OP_I7, /* immediate value 0 .. 7 */
4901 OP_I15, /* 0 .. 15 */
4902 OP_I16, /* 1 .. 16 */
4903 OP_I16z, /* 0 .. 16 */
4904 OP_I31, /* 0 .. 31 */
4905 OP_I31w, /* 0 .. 31, optional trailing ! */
4906 OP_I32, /* 1 .. 32 */
4907 OP_I32z, /* 0 .. 32 */
4908 OP_I63, /* 0 .. 63 */
4909 OP_I63s, /* -64 .. 63 */
4910 OP_I64, /* 1 .. 64 */
4911 OP_I64z, /* 0 .. 64 */
4912 OP_I255, /* 0 .. 255 */
4913 OP_Iffff, /* 0 .. 65535 */
4914
4915 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
4916 OP_I7b, /* 0 .. 7 */
4917 OP_I15b, /* 0 .. 15 */
4918 OP_I31b, /* 0 .. 31 */
4919
4920 OP_SH, /* shifter operand */
4921 OP_ADDR, /* Memory address expression (any mode) */
4922 OP_EXP, /* arbitrary expression */
4923 OP_EXPi, /* same, with optional immediate prefix */
4924 OP_EXPr, /* same, with optional relocation suffix */
4925
4926 OP_CPSF, /* CPS flags */
4927 OP_ENDI, /* Endianness specifier */
4928 OP_PSR, /* CPSR/SPSR mask for msr */
4929 OP_COND, /* conditional code */
4930 OP_TB, /* Table branch. */
4931
4932 OP_RRnpc_I0, /* ARM register or literal 0 */
4933 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
4934 OP_RR_EXi, /* ARM register or expression with imm prefix */
4935 OP_RF_IF, /* FPA register or immediate */
4936 OP_RIWR_RIWC, /* iWMMXt R or C reg */
4937
4938 /* Optional operands. */
4939 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
4940 OP_oI31b, /* 0 .. 31 */
4941 OP_oI32b, /* 1 .. 32 */
4942 OP_oIffffb, /* 0 .. 65535 */
4943 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
4944
4945 OP_oRR, /* ARM register */
4946 OP_oRRnpc, /* ARM register, not the PC */
4947 OP_oRND, /* Optional Neon double precision register */
4948 OP_oRNQ, /* Optional Neon quad precision register */
4949 OP_oRNDQ, /* Optional Neon double or quad precision register */
4950 OP_oSHll, /* LSL immediate */
4951 OP_oSHar, /* ASR immediate */
4952 OP_oSHllar, /* LSL or ASR immediate */
4953 OP_oROR, /* ROR 0/8/16/24 */
4954 OP_oBARRIER, /* Option argument for a barrier instruction. */
4955
4956 OP_FIRST_OPTIONAL = OP_oI7b
4957};
4958
4959/* Generic instruction operand parser. This does no encoding and no
4960 semantic validation; it merely squirrels values away in the inst
4961 structure. Returns SUCCESS or FAIL depending on whether the
4962 specified grammar matched. */
4963static int
4964parse_operands (char *str, const unsigned char *pattern)
4965{
4966 unsigned const char *upat = pattern;
4967 char *backtrack_pos = 0;
4968 const char *backtrack_error = 0;
4969 int i, val, backtrack_index = 0;
4970 enum arm_reg_type rtype;
4971
4972#define po_char_or_fail(chr) do { \
4973 if (skip_past_char (&str, chr) == FAIL) \
4974 goto bad_args; \
4975} while (0)
4976
4977#define po_reg_or_fail(regtype) do { \
4978 val = arm_typed_reg_parse (&str, regtype, &rtype, \
4979 &inst.operands[i].vectype); \
4980 if (val == FAIL) \
4981 { \
4982 first_error (_(reg_expected_msgs[regtype])); \
4983 goto failure; \
4984 } \
4985 inst.operands[i].reg = val; \
4986 inst.operands[i].isreg = 1; \
4987 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
4988} while (0)
4989
4990#define po_reg_or_goto(regtype, label) do { \
4991 val = arm_typed_reg_parse (&str, regtype, &rtype, \
4992 &inst.operands[i].vectype); \
4993 if (val == FAIL) \
4994 goto label; \
4995 \
4996 inst.operands[i].reg = val; \
4997 inst.operands[i].isreg = 1; \
4998 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
4999} while (0)
5000
5001#define po_imm_or_fail(min, max, popt) do { \
5002 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5003 goto failure; \
5004 inst.operands[i].imm = val; \
5005} while (0)
5006
5007#define po_scalar_or_goto(elsz, label) do { \
5008 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5009 if (val == FAIL) \
5010 goto label; \
5011 inst.operands[i].reg = val; \
5012 inst.operands[i].isscalar = 1; \
5013} while (0)
5014
5015#define po_misc_or_fail(expr) do { \
5016 if (expr) \
5017 goto failure; \
5018} while (0)
5019
5020 skip_whitespace (str);
5021
5022 for (i = 0; upat[i] != OP_stop; i++)
5023 {
5024 if (upat[i] >= OP_FIRST_OPTIONAL)
5025 {
5026 /* Remember where we are in case we need to backtrack. */
5027 assert (!backtrack_pos);
5028 backtrack_pos = str;
5029 backtrack_error = inst.error;
5030 backtrack_index = i;
5031 }
5032
5033 if (i > 0)
5034 po_char_or_fail (',');
5035
5036 switch (upat[i])
5037 {
5038 /* Registers */
5039 case OP_oRRnpc:
5040 case OP_RRnpc:
5041 case OP_oRR:
5042 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5043 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5044 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5045 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5046 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5047 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5048 case OP_oRND:
5049 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5050 case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break;
5051 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5052 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5053 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5054 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5055 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5056 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5057 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5058 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5059 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5060 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5061 case OP_oRNQ:
5062 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5063 case OP_oRNDQ:
5064 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5065
5066 /* Neon scalar. Using an element size of 8 means that some invalid
5067 scalars are accepted here, so deal with those in later code. */
5068 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5069
5070 /* WARNING: We can expand to two operands here. This has the potential
5071 to totally confuse the backtracking mechanism! It will be OK at
5072 least as long as we don't try to use optional args as well,
5073 though. */
5074 case OP_NILO:
5075 {
5076 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5077 i++;
5078 skip_past_comma (&str);
5079 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5080 break;
5081 one_reg_only:
5082 /* Optional register operand was omitted. Unfortunately, it's in
5083 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5084 here (this is a bit grotty). */
5085 inst.operands[i] = inst.operands[i-1];
5086 inst.operands[i-1].present = 0;
5087 break;
5088 try_imm:
5089 /* Immediate gets verified properly later, so accept any now. */
5090 po_imm_or_fail (INT_MIN, INT_MAX, TRUE);
5091 }
5092 break;
5093
5094 case OP_RNDQ_I0:
5095 {
5096 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5097 break;
5098 try_imm0:
5099 po_imm_or_fail (0, 0, TRUE);
5100 }
5101 break;
5102
5103 case OP_RR_RNSC:
5104 {
5105 po_scalar_or_goto (8, try_rr);
5106 break;
5107 try_rr:
5108 po_reg_or_fail (REG_TYPE_RN);
5109 }
5110 break;
5111
5112 case OP_RNDQ_RNSC:
5113 {
5114 po_scalar_or_goto (8, try_ndq);
5115 break;
5116 try_ndq:
5117 po_reg_or_fail (REG_TYPE_NDQ);
5118 }
5119 break;
5120
5121 case OP_RND_RNSC:
5122 {
5123 po_scalar_or_goto (8, try_vfd);
5124 break;
5125 try_vfd:
5126 po_reg_or_fail (REG_TYPE_VFD);
5127 }
5128 break;
5129
5130 case OP_VMOV:
5131 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5132 not careful then bad things might happen. */
5133 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5134 break;
5135
5136 case OP_RNDQ_IMVNb:
5137 {
5138 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5139 break;
5140 try_mvnimm:
5141 /* There's a possibility of getting a 64-bit immediate here, so
5142 we need special handling. */
5143 if (parse_big_immediate (&str, i) == FAIL)
5144 {
5145 inst.error = _("immediate value is out of range");
5146 goto failure;
5147 }
5148 }
5149 break;
5150
5151 case OP_RNDQ_I63b:
5152 {
5153 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5154 break;
5155 try_shimm:
5156 po_imm_or_fail (0, 63, TRUE);
5157 }
5158 break;
5159
5160 case OP_RRnpcb:
5161 po_char_or_fail ('[');
5162 po_reg_or_fail (REG_TYPE_RN);
5163 po_char_or_fail (']');
5164 break;
5165
5166 case OP_RRw:
5167 po_reg_or_fail (REG_TYPE_RN);
5168 if (skip_past_char (&str, '!') == SUCCESS)
5169 inst.operands[i].writeback = 1;
5170 break;
5171
5172 /* Immediates */
5173 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5174 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5175 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5176 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5177 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5178 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5179 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5180 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5181 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5182 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5183 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5184 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5185 case OP_Iffff: po_imm_or_fail ( 0, 0xffff, FALSE); break;
5186
5187 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5188 case OP_oI7b:
5189 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5190 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5191 case OP_oI31b:
5192 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5193 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5194 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5195
5196 /* Immediate variants */
5197 case OP_oI255c:
5198 po_char_or_fail ('{');
5199 po_imm_or_fail (0, 255, TRUE);
5200 po_char_or_fail ('}');
5201 break;
5202
5203 case OP_I31w:
5204 /* The expression parser chokes on a trailing !, so we have
5205 to find it first and zap it. */
5206 {
5207 char *s = str;
5208 while (*s && *s != ',')
5209 s++;
5210 if (s[-1] == '!')
5211 {
5212 s[-1] = '\0';
5213 inst.operands[i].writeback = 1;
5214 }
5215 po_imm_or_fail (0, 31, TRUE);
5216 if (str == s - 1)
5217 str = s;
5218 }
5219 break;
5220
5221 /* Expressions */
5222 case OP_EXPi: EXPi:
5223 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5224 GE_OPT_PREFIX));
5225 break;
5226
5227 case OP_EXP:
5228 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5229 GE_NO_PREFIX));
5230 break;
5231
5232 case OP_EXPr: EXPr:
5233 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5234 GE_NO_PREFIX));
5235 if (inst.reloc.exp.X_op == O_symbol)
5236 {
5237 val = parse_reloc (&str);
5238 if (val == -1)
5239 {
5240 inst.error = _("unrecognized relocation suffix");
5241 goto failure;
5242 }
5243 else if (val != BFD_RELOC_UNUSED)
5244 {
5245 inst.operands[i].imm = val;
5246 inst.operands[i].hasreloc = 1;
5247 }
5248 }
5249 break;
5250
5251 /* Register or expression */
5252 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5253 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5254
5255 /* Register or immediate */
5256 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5257 I0: po_imm_or_fail (0, 0, FALSE); break;
5258
5259 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5260 IF:
5261 if (!is_immediate_prefix (*str))
5262 goto bad_args;
5263 str++;
5264 val = parse_fpa_immediate (&str);
5265 if (val == FAIL)
5266 goto failure;
5267 /* FPA immediates are encoded as registers 8-15.
5268 parse_fpa_immediate has already applied the offset. */
5269 inst.operands[i].reg = val;
5270 inst.operands[i].isreg = 1;
5271 break;
5272
5273 /* Two kinds of register */
5274 case OP_RIWR_RIWC:
5275 {
5276 struct reg_entry *rege = arm_reg_parse_multi (&str);
5277 if (!rege || (rege->type != REG_TYPE_MMXWR
5278 && rege->type != REG_TYPE_MMXWC
5279 && rege->type != REG_TYPE_MMXWCG))
5280 {
5281 inst.error = _("iWMMXt data or control register expected");
5282 goto failure;
5283 }
5284 inst.operands[i].reg = rege->number;
5285 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5286 }
5287 break;
5288
5289 /* Misc */
5290 case OP_CPSF: val = parse_cps_flags (&str); break;
5291 case OP_ENDI: val = parse_endian_specifier (&str); break;
5292 case OP_oROR: val = parse_ror (&str); break;
5293 case OP_PSR: val = parse_psr (&str); break;
5294 case OP_COND: val = parse_cond (&str); break;
5295 case OP_oBARRIER:val = parse_barrier (&str); break;
5296
5297 case OP_TB:
5298 po_misc_or_fail (parse_tb (&str));
5299 break;
5300
5301 /* Register lists */
5302 case OP_REGLST:
5303 val = parse_reg_list (&str);
5304 if (*str == '^')
5305 {
5306 inst.operands[1].writeback = 1;
5307 str++;
5308 }
5309 break;
5310
5311 case OP_VRSLST:
5312 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5313 break;
5314
5315 case OP_VRDLST:
5316 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5317 break;
5318
5319 case OP_NRDLST:
5320 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5321 REGLIST_NEON_D);
5322 break;
5323
5324 case OP_NSTRLST:
5325 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5326 &inst.operands[i].vectype);
5327 break;
5328
5329 /* Addressing modes */
5330 case OP_ADDR:
5331 po_misc_or_fail (parse_address (&str, i));
5332 break;
5333
5334 case OP_SH:
5335 po_misc_or_fail (parse_shifter_operand (&str, i));
5336 break;
5337
5338 case OP_oSHll:
5339 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
5340 break;
5341
5342 case OP_oSHar:
5343 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
5344 break;
5345
5346 case OP_oSHllar:
5347 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
5348 break;
5349
5350 default:
5351 as_fatal (_("unhandled operand code %d"), upat[i]);
5352 }
5353
5354 /* Various value-based sanity checks and shared operations. We
5355 do not signal immediate failures for the register constraints;
5356 this allows a syntax error to take precedence. */
5357 switch (upat[i])
5358 {
5359 case OP_oRRnpc:
5360 case OP_RRnpc:
5361 case OP_RRnpcb:
5362 case OP_RRw:
5363 case OP_RRnpc_I0:
5364 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
5365 inst.error = BAD_PC;
5366 break;
5367
5368 case OP_CPSF:
5369 case OP_ENDI:
5370 case OP_oROR:
5371 case OP_PSR:
5372 case OP_COND:
5373 case OP_oBARRIER:
5374 case OP_REGLST:
5375 case OP_VRSLST:
5376 case OP_VRDLST:
5377 case OP_NRDLST:
5378 case OP_NSTRLST:
5379 if (val == FAIL)
5380 goto failure;
5381 inst.operands[i].imm = val;
5382 break;
5383
5384 default:
5385 break;
5386 }
5387
5388 /* If we get here, this operand was successfully parsed. */
5389 inst.operands[i].present = 1;
5390 continue;
5391
5392 bad_args:
5393 inst.error = BAD_ARGS;
5394
5395 failure:
5396 if (!backtrack_pos)
5397 {
5398 /* The parse routine should already have set inst.error, but set a
5399 default here just in case. */
5400 if (!inst.error)
5401 inst.error = _("syntax error");
5402 return FAIL;
5403 }
5404
5405 /* Do not backtrack over a trailing optional argument that
5406 absorbed some text. We will only fail again, with the
5407 'garbage following instruction' error message, which is
5408 probably less helpful than the current one. */
5409 if (backtrack_index == i && backtrack_pos != str
5410 && upat[i+1] == OP_stop)
5411 {
5412 if (!inst.error)
5413 inst.error = _("syntax error");
5414 return FAIL;
5415 }
5416
5417 /* Try again, skipping the optional argument at backtrack_pos. */
5418 str = backtrack_pos;
5419 inst.error = backtrack_error;
5420 inst.operands[backtrack_index].present = 0;
5421 i = backtrack_index;
5422 backtrack_pos = 0;
5423 }
5424
5425 /* Check that we have parsed all the arguments. */
5426 if (*str != '\0' && !inst.error)
5427 inst.error = _("garbage following instruction");
5428
5429 return inst.error ? FAIL : SUCCESS;
5430}
5431
5432#undef po_char_or_fail
5433#undef po_reg_or_fail
5434#undef po_reg_or_goto
5435#undef po_imm_or_fail
5436#undef po_scalar_or_goto
5437\f
5438/* Shorthand macro for instruction encoding functions issuing errors. */
5439#define constraint(expr, err) do { \
5440 if (expr) \
5441 { \
5442 inst.error = err; \
5443 return; \
5444 } \
5445} while (0)
5446
5447/* Functions for operand encoding. ARM, then Thumb. */
5448
5449#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
5450
5451/* If VAL can be encoded in the immediate field of an ARM instruction,
5452 return the encoded form. Otherwise, return FAIL. */
5453
5454static unsigned int
5455encode_arm_immediate (unsigned int val)
5456{
5457 unsigned int a, i;
5458
5459 for (i = 0; i < 32; i += 2)
5460 if ((a = rotate_left (val, i)) <= 0xff)
5461 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
5462
5463 return FAIL;
5464}
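
/* Worked example: for VAL == 0xff000000 the loop succeeds at i == 8,
   since rotate_left (0xff000000, 8) == 0xff, and the function returns
   0xff | (8 << 7) == 0x4ff: an 8-bit constant of 0xff with a rotate
   field of 4 (a rotate right of 8 bits when decoded).  */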
5465
5466/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
5467 return the encoded form. Otherwise, return FAIL. */
5468static unsigned int
5469encode_thumb32_immediate (unsigned int val)
5470{
5471 unsigned int a, i;
5472
5473 if (val <= 0xff)
5474 return val;
5475
5476 for (i = 1; i <= 24; i++)
5477 {
5478 a = val >> i;
5479 if ((val & ~(0xff << i)) == 0)
5480 return ((val >> i) & 0x7f) | ((32 - i) << 7);
5481 }
5482
5483 a = val & 0xff;
5484 if (val == ((a << 16) | a))
5485 return 0x100 | a;
5486 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
5487 return 0x300 | a;
5488
5489 a = val & 0xff00;
5490 if (val == ((a << 16) | a))
5491 return 0x200 | (a >> 8);
5492
5493 return FAIL;
5494}
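
/* Worked examples: 0x00120012 matches the (a << 16) | a pattern and
   encodes as 0x112; 0x45454545 encodes as 0x345; 0x00008a00 is caught
   by the shift loop at i == 8, giving (0x8a & 0x7f) | ((32 - 8) << 7),
   i.e. 0xc0a.  */
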
5495/* Encode a VFP SP or DP register number into inst.instruction. */
5496
5497static void
5498encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
5499{
5500 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
5501 && reg > 15)
5502 {
5503 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
5504 {
5505 if (thumb_mode)
5506 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
5507 fpu_vfp_ext_v3);
5508 else
5509 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
5510 fpu_vfp_ext_v3);
5511 }
5512 else
5513 {
5514 first_error (_("D register out of range for selected VFP version"));
5515 return;
5516 }
5517 }
5518
5519 switch (pos)
5520 {
5521 case VFP_REG_Sd:
5522 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
5523 break;
5524
5525 case VFP_REG_Sn:
5526 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
5527 break;
5528
5529 case VFP_REG_Sm:
5530 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
5531 break;
5532
5533 case VFP_REG_Dd:
5534 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
5535 break;
5536
5537 case VFP_REG_Dn:
5538 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
5539 break;
5540
5541 case VFP_REG_Dm:
5542 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
5543 break;
5544
5545 default:
5546 abort ();
5547 }
5548}
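
/* For example, encoding d17 (REG == 17) in the Dd position puts the low
   four bits (1) at bit 12 and the remaining high bit at bit 22, which
   is why registers above d15 require the VFPv3 check above.  */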
5549
5550/* Encode a <shift> in an ARM-format instruction. The immediate,
5551 if any, is handled by md_apply_fix. */
5552static void
5553encode_arm_shift (int i)
5554{
5555 if (inst.operands[i].shift_kind == SHIFT_RRX)
5556 inst.instruction |= SHIFT_ROR << 5;
5557 else
5558 {
5559 inst.instruction |= inst.operands[i].shift_kind << 5;
5560 if (inst.operands[i].immisreg)
5561 {
5562 inst.instruction |= SHIFT_BY_REG;
5563 inst.instruction |= inst.operands[i].imm << 8;
5564 }
5565 else
5566 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
5567 }
5568}
5569
5570static void
5571encode_arm_shifter_operand (int i)
5572{
5573 if (inst.operands[i].isreg)
5574 {
5575 inst.instruction |= inst.operands[i].reg;
5576 encode_arm_shift (i);
5577 }
5578 else
5579 inst.instruction |= INST_IMMEDIATE;
5580}
5581
5582/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
5583static void
5584encode_arm_addr_mode_common (int i, bfd_boolean is_t)
5585{
5586 assert (inst.operands[i].isreg);
5587 inst.instruction |= inst.operands[i].reg << 16;
5588
5589 if (inst.operands[i].preind)
5590 {
5591 if (is_t)
5592 {
5593 inst.error = _("instruction does not accept preindexed addressing");
5594 return;
5595 }
5596 inst.instruction |= PRE_INDEX;
5597 if (inst.operands[i].writeback)
5598 inst.instruction |= WRITE_BACK;
5599
5600 }
5601 else if (inst.operands[i].postind)
5602 {
5603 assert (inst.operands[i].writeback);
5604 if (is_t)
5605 inst.instruction |= WRITE_BACK;
5606 }
5607 else /* unindexed - only for coprocessor */
5608 {
5609 inst.error = _("instruction does not accept unindexed addressing");
5610 return;
5611 }
5612
5613 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
5614 && (((inst.instruction & 0x000f0000) >> 16)
5615 == ((inst.instruction & 0x0000f000) >> 12)))
5616 as_warn ((inst.instruction & LOAD_BIT)
5617 ? _("destination register same as write-back base")
5618 : _("source register same as write-back base"));
5619}
5620
5621/* inst.operands[i] was set up by parse_address. Encode it into an
5622 ARM-format mode 2 load or store instruction. If is_t is true,
5623 reject forms that cannot be used with a T instruction (i.e. not
5624 post-indexed). */
5625static void
5626encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
5627{
5628 encode_arm_addr_mode_common (i, is_t);
5629
5630 if (inst.operands[i].immisreg)
5631 {
5632 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
5633 inst.instruction |= inst.operands[i].imm;
5634 if (!inst.operands[i].negative)
5635 inst.instruction |= INDEX_UP;
5636 if (inst.operands[i].shifted)
5637 {
5638 if (inst.operands[i].shift_kind == SHIFT_RRX)
5639 inst.instruction |= SHIFT_ROR << 5;
5640 else
5641 {
5642 inst.instruction |= inst.operands[i].shift_kind << 5;
5643 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
5644 }
5645 }
5646 }
5647 else /* immediate offset in inst.reloc */
5648 {
5649 if (inst.reloc.type == BFD_RELOC_UNUSED)
5650 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
5651 }
5652}
5653
5654/* inst.operands[i] was set up by parse_address. Encode it into an
5655 ARM-format mode 3 load or store instruction. Reject forms that
5656 cannot be used with such instructions. If is_t is true, reject
5657 forms that cannot be used with a T instruction (i.e. not
5658 post-indexed). */
5659static void
5660encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
5661{
5662 if (inst.operands[i].immisreg && inst.operands[i].shifted)
5663 {
5664 inst.error = _("instruction does not accept scaled register index");
5665 return;
5666 }
5667
5668 encode_arm_addr_mode_common (i, is_t);
5669
5670 if (inst.operands[i].immisreg)
5671 {
5672 inst.instruction |= inst.operands[i].imm;
5673 if (!inst.operands[i].negative)
5674 inst.instruction |= INDEX_UP;
5675 }
5676 else /* immediate offset in inst.reloc */
5677 {
5678 inst.instruction |= HWOFFSET_IMM;
5679 if (inst.reloc.type == BFD_RELOC_UNUSED)
5680 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
5681 }
5682}
5683
5684/* inst.operands[i] was set up by parse_address. Encode it into an
5685 ARM-format instruction. Reject all forms which cannot be encoded
5686 into a coprocessor load/store instruction. If wb_ok is false,
5687 reject use of writeback; if unind_ok is false, reject use of
5688 unindexed addressing. If reloc_override is not 0, use it instead
5689 of BFD_ARM_CP_OFF_IMM. */
5690
5691static int
5692encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
5693{
5694 inst.instruction |= inst.operands[i].reg << 16;
5695
5696 assert (!(inst.operands[i].preind && inst.operands[i].postind));
5697
5698 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
5699 {
5700 assert (!inst.operands[i].writeback);
5701 if (!unind_ok)
5702 {
5703 inst.error = _("instruction does not support unindexed addressing");
5704 return FAIL;
5705 }
5706 inst.instruction |= inst.operands[i].imm;
5707 inst.instruction |= INDEX_UP;
5708 return SUCCESS;
5709 }
5710
5711 if (inst.operands[i].preind)
5712 inst.instruction |= PRE_INDEX;
5713
5714 if (inst.operands[i].writeback)
5715 {
5716 if (inst.operands[i].reg == REG_PC)
5717 {
5718 inst.error = _("pc may not be used with write-back");
5719 return FAIL;
5720 }
5721 if (!wb_ok)
5722 {
5723 inst.error = _("instruction does not support writeback");
5724 return FAIL;
5725 }
5726 inst.instruction |= WRITE_BACK;
5727 }
5728
5729 if (reloc_override)
5730 inst.reloc.type = reloc_override;
5731 else if (thumb_mode)
5732 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
5733 else
5734 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
5735 return SUCCESS;
5736}
5737
5738/* inst.reloc.exp describes an "=expr" load pseudo-operation.
5739 Determine whether it can be performed with a move instruction; if
5740 it can, convert inst.instruction to that move instruction and
5741 return 1; if it can't, convert inst.instruction to a literal-pool
5742 load and return 0. If this is not a valid thing to do in the
5743 current context, set inst.error and return 1.
5744
5745 inst.operands[i] describes the destination register. */
5746
5747static int
5748move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
5749{
5750 unsigned long tbit;
5751
5752 if (thumb_p)
5753 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
5754 else
5755 tbit = LOAD_BIT;
5756
5757 if ((inst.instruction & tbit) == 0)
5758 {
5759 inst.error = _("invalid pseudo operation");
5760 return 1;
5761 }
5762 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
5763 {
5764 inst.error = _("constant expression expected");
5765 return 1;
5766 }
5767 if (inst.reloc.exp.X_op == O_constant)
5768 {
5769 if (thumb_p)
5770 {
5771 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
5772 {
5773 /* This can be done with a mov(1) instruction. */
5774 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
5775 inst.instruction |= inst.reloc.exp.X_add_number;
5776 return 1;
5777 }
5778 }
5779 else
5780 {
5781 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
5782 if (value != FAIL)
5783 {
5784 /* This can be done with a mov instruction. */
5785 inst.instruction &= LITERAL_MASK;
5786 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
5787 inst.instruction |= value & 0xfff;
5788 return 1;
5789 }
5790
5791 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
5792 if (value != FAIL)
5793 {
5794 /* This can be done with a mvn instruction. */
5795 inst.instruction &= LITERAL_MASK;
5796 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
5797 inst.instruction |= value & 0xfff;
5798 return 1;
5799 }
5800 }
5801 }
5802
5803 if (add_to_lit_pool () == FAIL)
5804 {
5805 inst.error = _("literal pool insertion failed");
5806 return 1;
5807 }
5808 inst.operands[1].reg = REG_PC;
5809 inst.operands[1].isreg = 1;
5810 inst.operands[1].preind = 1;
5811 inst.reloc.pc_rel = 1;
5812 inst.reloc.type = (thumb_p
5813 ? BFD_RELOC_ARM_THUMB_OFFSET
5814 : (mode_3
5815 ? BFD_RELOC_ARM_HWLITERAL
5816 : BFD_RELOC_ARM_LITERAL));
5817 return 0;
5818}
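
/* For example, "ldr r0, =0xff0000" can be turned into
   "mov r0, #0xff0000", since encode_arm_immediate succeeds, whereas
   "ldr r0, =0x12345678" is not encodable as a mov or mvn immediate, so
   the constant goes to the literal pool and the instruction becomes a
   pc-relative load with a BFD_RELOC_ARM_LITERAL fixup.  */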
5819
5820/* Functions for instruction encoding, sorted by subarchitecture.
5821 First some generics; their names are taken from the conventional
5822 bit positions for register arguments in ARM format instructions. */
5823
5824static void
5825do_noargs (void)
5826{
5827}
5828
5829static void
5830do_rd (void)
5831{
5832 inst.instruction |= inst.operands[0].reg << 12;
5833}
5834
5835static void
5836do_rd_rm (void)
5837{
5838 inst.instruction |= inst.operands[0].reg << 12;
5839 inst.instruction |= inst.operands[1].reg;
5840}
5841
5842static void
5843do_rd_rn (void)
5844{
5845 inst.instruction |= inst.operands[0].reg << 12;
5846 inst.instruction |= inst.operands[1].reg << 16;
5847}
5848
5849static void
5850do_rn_rd (void)
5851{
5852 inst.instruction |= inst.operands[0].reg << 16;
5853 inst.instruction |= inst.operands[1].reg << 12;
5854}
5855
5856static void
5857do_rd_rm_rn (void)
5858{
5859 unsigned Rn = inst.operands[2].reg;
5860 /* Enforce restrictions on SWP instruction. */
5861 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
5862 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
5863 _("Rn must not overlap other operands"));
5864 inst.instruction |= inst.operands[0].reg << 12;
5865 inst.instruction |= inst.operands[1].reg;
5866 inst.instruction |= Rn << 16;
5867}
5868
5869static void
5870do_rd_rn_rm (void)
5871{
5872 inst.instruction |= inst.operands[0].reg << 12;
5873 inst.instruction |= inst.operands[1].reg << 16;
5874 inst.instruction |= inst.operands[2].reg;
5875}
5876
5877static void
5878do_rm_rd_rn (void)
5879{
5880 inst.instruction |= inst.operands[0].reg;
5881 inst.instruction |= inst.operands[1].reg << 12;
5882 inst.instruction |= inst.operands[2].reg << 16;
5883}
5884
5885static void
5886do_imm0 (void)
5887{
5888 inst.instruction |= inst.operands[0].imm;
5889}
5890
5891static void
5892do_rd_cpaddr (void)
5893{
5894 inst.instruction |= inst.operands[0].reg << 12;
5895 encode_arm_cp_address (1, TRUE, TRUE, 0);
5896}
5897
5898/* ARM instructions, in alphabetical order by function name (except
5899 that wrapper functions appear immediately after the function they
5900 wrap). */
5901
5902/* This is a pseudo-op of the form "adr rd, label" to be converted
5903 into a relative address of the form "add rd, pc, #label-.-8". */
5904
5905static void
5906do_adr (void)
5907{
5908 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
5909
5910 /* Frag hacking will turn this into a sub instruction if the offset turns
5911 out to be negative. */
5912 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5913 inst.reloc.pc_rel = 1;
5914 inst.reloc.exp.X_add_number -= 8;
5915}
5916
5917/* This is a pseudo-op of the form "adrl rd, label" to be converted
5918 into a relative address of the form:
5919 add rd, pc, #low(label-.-8)
5920 add rd, rd, #high(label-.-8) */
5921
5922static void
5923do_adrl (void)
5924{
5925 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
5926
5927 /* Frag hacking will turn this into a sub instruction if the offset turns
5928 out to be negative. */
5929 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
5930 inst.reloc.pc_rel = 1;
5931 inst.size = INSN_SIZE * 2;
5932 inst.reloc.exp.X_add_number -= 8;
5933}
5934
5935static void
5936do_arit (void)
5937{
5938 if (!inst.operands[1].present)
5939 inst.operands[1].reg = inst.operands[0].reg;
5940 inst.instruction |= inst.operands[0].reg << 12;
5941 inst.instruction |= inst.operands[1].reg << 16;
5942 encode_arm_shifter_operand (2);
5943}
5944
5945static void
5946do_barrier (void)
5947{
5948 if (inst.operands[0].present)
5949 {
5950 constraint ((inst.instruction & 0xf0) != 0x40
5951 && inst.operands[0].imm != 0xf,
5952 _("bad barrier type"));
5953 inst.instruction |= inst.operands[0].imm;
5954 }
5955 else
5956 inst.instruction |= 0xf;
5957}
5958
5959static void
5960do_bfc (void)
5961{
5962 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
5963 constraint (msb > 32, _("bit-field extends past end of register"));
5964 /* The instruction encoding stores the LSB and MSB,
5965 not the LSB and width. */
5966 inst.instruction |= inst.operands[0].reg << 12;
5967 inst.instruction |= inst.operands[1].imm << 7;
5968 inst.instruction |= (msb - 1) << 16;
5969}
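
/* For example, "bfc r0, #8, #4" arrives with lsb == 8 and width == 4,
   so msb == 12; the encoding gets the lsb (8) at bit 7 and msb - 1
   (11) at bit 16.  */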
5970
5971static void
5972do_bfi (void)
5973{
5974 unsigned int msb;
5975
5976 /* #0 in second position is alternative syntax for bfc, which is
5977 the same instruction but with REG_PC in the Rm field. */
5978 if (!inst.operands[1].isreg)
5979 inst.operands[1].reg = REG_PC;
5980
5981 msb = inst.operands[2].imm + inst.operands[3].imm;
5982 constraint (msb > 32, _("bit-field extends past end of register"));
5983 /* The instruction encoding stores the LSB and MSB,
5984 not the LSB and width. */
5985 inst.instruction |= inst.operands[0].reg << 12;
5986 inst.instruction |= inst.operands[1].reg;
5987 inst.instruction |= inst.operands[2].imm << 7;
5988 inst.instruction |= (msb - 1) << 16;
5989}
5990
5991static void
5992do_bfx (void)
5993{
5994 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
5995 _("bit-field extends past end of register"));
5996 inst.instruction |= inst.operands[0].reg << 12;
5997 inst.instruction |= inst.operands[1].reg;
5998 inst.instruction |= inst.operands[2].imm << 7;
5999 inst.instruction |= (inst.operands[3].imm - 1) << 16;
6000}
6001
6002/* ARM V5 breakpoint instruction (argument parse)
6003 BKPT <16 bit unsigned immediate>
6004 Instruction is not conditional.
6005 The bit pattern given in insns[] has the COND_ALWAYS condition,
6006 and it is an error if the caller tried to override that. */
6007
6008static void
6009do_bkpt (void)
6010{
6011 /* Top 12 of 16 bits to bits 19:8. */
6012 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6013
6014 /* Bottom 4 of 16 bits to bits 3:0. */
6015 inst.instruction |= inst.operands[0].imm & 0xf;
6016}
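
/* For example, "bkpt 0xabcd" places 0xabc in bits 19:8 and 0xd in
   bits 3:0 of the instruction word.  */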
6017
6018static void
6019encode_branch (int default_reloc)
6020{
6021 if (inst.operands[0].hasreloc)
6022 {
6023 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6024 _("the only suffix valid here is '(plt)'"));
6025 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6026 }
6027 else
6028 {
6029 inst.reloc.type = default_reloc;
6030 }
6031 inst.reloc.pc_rel = 1;
6032}
6033
6034static void
6035do_branch (void)
6036{
6037#ifdef OBJ_ELF
6038 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6039 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6040 else
6041#endif
6042 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6043}
6044
6045static void
6046do_bl (void)
6047{
6048#ifdef OBJ_ELF
6049 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6050 {
6051 if (inst.cond == COND_ALWAYS)
6052 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6053 else
6054 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6055 }
6056 else
6057#endif
6058 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6059}
6060
6061/* ARM V5 branch-link-exchange instruction (argument parse)
6062 BLX <target_addr> ie BLX(1)
6063 BLX{<condition>} <Rm> ie BLX(2)
6064 Unfortunately, there are two different opcodes for this mnemonic.
6065 So, the insns[].value is not used, and the code here zaps values
6066 into inst.instruction.
6067 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6068
6069static void
6070do_blx (void)
6071{
6072 if (inst.operands[0].isreg)
6073 {
6074 /* Arg is a register; the opcode provided by insns[] is correct.
6075 It is not illegal to do "blx pc", just useless. */
6076 if (inst.operands[0].reg == REG_PC)
6077 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6078
6079 inst.instruction |= inst.operands[0].reg;
6080 }
6081 else
6082 {
6083 /* Arg is an address; this instruction cannot be executed
6084 conditionally, and the opcode must be adjusted. */
6085 constraint (inst.cond != COND_ALWAYS, BAD_COND);
6086 inst.instruction = 0xfa000000;
6087#ifdef OBJ_ELF
6088 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6089 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6090 else
6091#endif
6092 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
6093 }
6094}
6095
6096static void
6097do_bx (void)
6098{
6099 if (inst.operands[0].reg == REG_PC)
6100 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6101
6102 inst.instruction |= inst.operands[0].reg;
6103}
6104
6105
6106/* ARM v5TEJ. Jump to Jazelle code. */
6107
6108static void
6109do_bxj (void)
6110{
6111 if (inst.operands[0].reg == REG_PC)
6112 as_tsktsk (_("use of r15 in bxj is not really useful"));
6113
6114 inst.instruction |= inst.operands[0].reg;
6115}
6116
6117/* Co-processor data operation:
6118 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6119 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6120static void
6121do_cdp (void)
6122{
6123 inst.instruction |= inst.operands[0].reg << 8;
6124 inst.instruction |= inst.operands[1].imm << 20;
6125 inst.instruction |= inst.operands[2].reg << 12;
6126 inst.instruction |= inst.operands[3].reg << 16;
6127 inst.instruction |= inst.operands[4].reg;
6128 inst.instruction |= inst.operands[5].imm << 5;
6129}
6130
6131static void
6132do_cmp (void)
6133{
6134 inst.instruction |= inst.operands[0].reg << 16;
6135 encode_arm_shifter_operand (1);
6136}
6137
6138/* Transfer between coprocessor and ARM registers.
6139 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6140 MRC2
6141 MCR{cond}
6142 MCR2
6143
6144 No special properties. */
6145
6146static void
6147do_co_reg (void)
6148{
6149 inst.instruction |= inst.operands[0].reg << 8;
6150 inst.instruction |= inst.operands[1].imm << 21;
6151 inst.instruction |= inst.operands[2].reg << 12;
6152 inst.instruction |= inst.operands[3].reg << 16;
6153 inst.instruction |= inst.operands[4].reg;
6154 inst.instruction |= inst.operands[5].imm << 5;
6155}
6156
6157/* Transfer between coprocessor register and pair of ARM registers.
6158 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6159 MCRR2
6160 MRRC{cond}
6161 MRRC2
6162
6163 Two XScale instructions are special cases of these:
6164
6165 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6166 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6167
6168 Result unpredictable if Rd or Rn is R15. */
6169
6170static void
6171do_co_reg2c (void)
6172{
6173 inst.instruction |= inst.operands[0].reg << 8;
6174 inst.instruction |= inst.operands[1].imm << 4;
6175 inst.instruction |= inst.operands[2].reg << 12;
6176 inst.instruction |= inst.operands[3].reg << 16;
6177 inst.instruction |= inst.operands[4].reg;
6178}
6179
6180static void
6181do_cpsi (void)
6182{
6183 inst.instruction |= inst.operands[0].imm << 6;
6184 inst.instruction |= inst.operands[1].imm;
6185}
6186
6187static void
6188do_dbg (void)
6189{
6190 inst.instruction |= inst.operands[0].imm;
6191}
6192
6193static void
6194do_it (void)
6195{
6196 /* There is no IT instruction in ARM mode. We
6197 process it but do not generate code for it. */
6198 inst.size = 0;
6199}
6200
6201static void
6202do_ldmstm (void)
6203{
6204 int base_reg = inst.operands[0].reg;
6205 int range = inst.operands[1].imm;
6206
6207 inst.instruction |= base_reg << 16;
6208 inst.instruction |= range;
6209
6210 if (inst.operands[1].writeback)
6211 inst.instruction |= LDM_TYPE_2_OR_3;
6212
6213 if (inst.operands[0].writeback)
6214 {
6215 inst.instruction |= WRITE_BACK;
6216 /* Check for unpredictable uses of writeback. */
6217 if (inst.instruction & LOAD_BIT)
6218 {
6219 /* Not allowed in LDM type 2. */
6220 if ((inst.instruction & LDM_TYPE_2_OR_3)
6221 && ((range & (1 << REG_PC)) == 0))
6222 as_warn (_("writeback of base register is UNPREDICTABLE"));
6223 /* Only allowed if base reg not in list for other types. */
6224 else if (range & (1 << base_reg))
6225 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6226 }
6227 else /* STM. */
6228 {
6229 /* Not allowed for type 2. */
6230 if (inst.instruction & LDM_TYPE_2_OR_3)
6231 as_warn (_("writeback of base register is UNPREDICTABLE"));
6232 /* Only allowed if base reg not in list, or first in list. */
6233 else if ((range & (1 << base_reg))
6234 && (range & ((1 << base_reg) - 1)))
6235 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6236 }
6237 }
6238}
6239
6240/* ARMv5TE load-consecutive (argument parse)
6241 Mode is like LDRH.
6242
6243 LDRccD R, mode
6244 STRccD R, mode. */
6245
6246static void
6247do_ldrd (void)
6248{
6249 constraint (inst.operands[0].reg % 2 != 0,
6250 _("first destination register must be even"));
6251 constraint (inst.operands[1].present
6252 && inst.operands[1].reg != inst.operands[0].reg + 1,
6253 _("can only load two consecutive registers"));
6254 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6255 constraint (!inst.operands[2].isreg, _("'[' expected"));
6256
6257 if (!inst.operands[1].present)
6258 inst.operands[1].reg = inst.operands[0].reg + 1;
6259
6260 if (inst.instruction & LOAD_BIT)
6261 {
6262 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6263 register and the first register written; we have to diagnose
6264 overlap between the base and the second register written here. */
6265
6266 if (inst.operands[2].reg == inst.operands[1].reg
6267 && (inst.operands[2].writeback || inst.operands[2].postind))
6268 as_warn (_("base register written back, and overlaps "
6269 "second destination register"));
6270
6271 /* For an index-register load, the index register must not overlap the
6272 destination (even if not write-back). */
6273 else if (inst.operands[2].immisreg
6274 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
6275 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
6276 as_warn (_("index register overlaps destination register"));
6277 }
6278
6279 inst.instruction |= inst.operands[0].reg << 12;
6280 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
6281}
6282
6283static void
6284do_ldrex (void)
6285{
6286 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
6287 || inst.operands[1].postind || inst.operands[1].writeback
6288 || inst.operands[1].immisreg || inst.operands[1].shifted
6289 || inst.operands[1].negative
6290 /* This can arise if the programmer has written
6291 strex rN, rM, foo
6292 or if they have mistakenly used a register name as the last
6293	     operand, e.g.:
6294 strex rN, rM, rX
6295 It is very difficult to distinguish between these two cases
6296	     because "rX" might actually be a label, i.e. the register
6297 name has been occluded by a symbol of the same name. So we
6298 just generate a general 'bad addressing mode' type error
6299 message and leave it up to the programmer to discover the
6300 true cause and fix their mistake. */
6301 || (inst.operands[1].reg == REG_PC),
6302 BAD_ADDR_MODE);
6303
6304 constraint (inst.reloc.exp.X_op != O_constant
6305 || inst.reloc.exp.X_add_number != 0,
6306 _("offset must be zero in ARM encoding"));
6307
6308 inst.instruction |= inst.operands[0].reg << 12;
6309 inst.instruction |= inst.operands[1].reg << 16;
6310 inst.reloc.type = BFD_RELOC_UNUSED;
6311}
6312
6313static void
6314do_ldrexd (void)
6315{
6316 constraint (inst.operands[0].reg % 2 != 0,
6317 _("even register required"));
6318 constraint (inst.operands[1].present
6319 && inst.operands[1].reg != inst.operands[0].reg + 1,
6320 _("can only load two consecutive registers"));
6321 /* If op 1 were present and equal to PC, this function wouldn't
6322 have been called in the first place. */
6323 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6324
6325 inst.instruction |= inst.operands[0].reg << 12;
6326 inst.instruction |= inst.operands[2].reg << 16;
6327}
6328
6329static void
6330do_ldst (void)
6331{
6332 inst.instruction |= inst.operands[0].reg << 12;
6333 if (!inst.operands[1].isreg)
6334 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
6335 return;
6336 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
6337}
6338
6339static void
6340do_ldstt (void)
6341{
6342 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6343 reject [Rn,...]. */
6344 if (inst.operands[1].preind)
6345 {
6346      constraint (inst.reloc.exp.X_op != O_constant
6347		  || inst.reloc.exp.X_add_number != 0,
6348 _("this instruction requires a post-indexed address"));
6349
6350 inst.operands[1].preind = 0;
6351 inst.operands[1].postind = 1;
6352 inst.operands[1].writeback = 1;
6353 }
6354 inst.instruction |= inst.operands[0].reg << 12;
6355 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
6356}
6357
6358/* Halfword and signed-byte load/store operations. */
6359
6360static void
6361do_ldstv4 (void)
6362{
6363 inst.instruction |= inst.operands[0].reg << 12;
6364 if (!inst.operands[1].isreg)
6365 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
6366 return;
6367 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
6368}
6369
6370static void
6371do_ldsttv4 (void)
6372{
6373 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6374 reject [Rn,...]. */
6375 if (inst.operands[1].preind)
6376 {
6377      constraint (inst.reloc.exp.X_op != O_constant
6378		  || inst.reloc.exp.X_add_number != 0,
6379 _("this instruction requires a post-indexed address"));
6380
6381 inst.operands[1].preind = 0;
6382 inst.operands[1].postind = 1;
6383 inst.operands[1].writeback = 1;
6384 }
6385 inst.instruction |= inst.operands[0].reg << 12;
6386 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
6387}
6388
6389/* Co-processor register load/store.
6390 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
6391static void
6392do_lstc (void)
6393{
6394 inst.instruction |= inst.operands[0].reg << 8;
6395 inst.instruction |= inst.operands[1].reg << 12;
6396 encode_arm_cp_address (2, TRUE, TRUE, 0);
6397}
6398
6399static void
6400do_mlas (void)
6401{
6402 /* This restriction does not apply to mls (nor to mla in v6, but
6403 that's hard to detect at present). */
6404 if (inst.operands[0].reg == inst.operands[1].reg
6405 && !(inst.instruction & 0x00400000))
6406 as_tsktsk (_("rd and rm should be different in mla"));
6407
6408 inst.instruction |= inst.operands[0].reg << 16;
6409 inst.instruction |= inst.operands[1].reg;
6410 inst.instruction |= inst.operands[2].reg << 8;
6411 inst.instruction |= inst.operands[3].reg << 12;
6412
6413}
6414
6415static void
6416do_mov (void)
6417{
6418 inst.instruction |= inst.operands[0].reg << 12;
6419 encode_arm_shifter_operand (1);
6420}
6421
6422/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
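/* For example, #0x1234 is emitted with 0x234 in bits 0-11 and 0x1 in
   bits 16-19 of the opcode.  */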
6423static void
6424do_mov16 (void)
6425{
6426 inst.instruction |= inst.operands[0].reg << 12;
6427 /* The value is in two pieces: 0:11, 16:19. */
6428 inst.instruction |= (inst.operands[1].imm & 0x00000fff);
6429 inst.instruction |= (inst.operands[1].imm & 0x0000f000) << 4;
6430}
6431
6432static void
6433do_mrs (void)
6434{
6435 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
6436 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
6437 != (PSR_c|PSR_f),
6438 _("'CPSR' or 'SPSR' expected"));
6439 inst.instruction |= inst.operands[0].reg << 12;
6440 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
6441}
6442
6443/* Two possible forms:
6444 "{C|S}PSR_<field>, Rm",
6445 "{C|S}PSR_f, #expression". */
6446
6447static void
6448do_msr (void)
6449{
6450 inst.instruction |= inst.operands[0].imm;
6451 if (inst.operands[1].isreg)
6452 inst.instruction |= inst.operands[1].reg;
6453 else
6454 {
6455 inst.instruction |= INST_IMMEDIATE;
6456 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6457 inst.reloc.pc_rel = 0;
6458 }
6459}
6460
6461static void
6462do_mul (void)
6463{
6464 if (!inst.operands[2].present)
6465 inst.operands[2].reg = inst.operands[0].reg;
6466 inst.instruction |= inst.operands[0].reg << 16;
6467 inst.instruction |= inst.operands[1].reg;
6468 inst.instruction |= inst.operands[2].reg << 8;
6469
6470 if (inst.operands[0].reg == inst.operands[1].reg)
6471 as_tsktsk (_("rd and rm should be different in mul"));
6472}
6473
6474/* Long Multiply Parser
6475 UMULL RdLo, RdHi, Rm, Rs
6476 SMULL RdLo, RdHi, Rm, Rs
6477 UMLAL RdLo, RdHi, Rm, Rs
6478 SMLAL RdLo, RdHi, Rm, Rs. */
6479
6480static void
6481do_mull (void)
6482{
6483 inst.instruction |= inst.operands[0].reg << 12;
6484 inst.instruction |= inst.operands[1].reg << 16;
6485 inst.instruction |= inst.operands[2].reg;
6486 inst.instruction |= inst.operands[3].reg << 8;
6487
6488 /* rdhi, rdlo and rm must all be different. */
6489 if (inst.operands[0].reg == inst.operands[1].reg
6490 || inst.operands[0].reg == inst.operands[2].reg
6491 || inst.operands[1].reg == inst.operands[2].reg)
6492 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
6493}
6494
6495static void
6496do_nop (void)
6497{
6498 if (inst.operands[0].present)
6499 {
6500 /* Architectural NOP hints are CPSR sets with no bits selected. */
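      /* For example, hint #1 (YIELD) under condition AL assembles
	 to 0xe320f001.  */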
6501 inst.instruction &= 0xf0000000;
6502 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
6503 }
6504}
6505
6506/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
6507 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
6508 Condition defaults to COND_ALWAYS.
6509 Error if Rd, Rn or Rm are R15. */
6510
6511static void
6512do_pkhbt (void)
6513{
6514 inst.instruction |= inst.operands[0].reg << 12;
6515 inst.instruction |= inst.operands[1].reg << 16;
6516 inst.instruction |= inst.operands[2].reg;
6517 if (inst.operands[3].present)
6518 encode_arm_shift (3);
6519}
6520
6521/* ARM V6 PKHTB (Argument Parse). */
6522
6523static void
6524do_pkhtb (void)
6525{
6526 if (!inst.operands[3].present)
6527 {
6528 /* If the shift specifier is omitted, turn the instruction
6529 into pkhbt rd, rm, rn. */
6530 inst.instruction &= 0xfff00010;
6531 inst.instruction |= inst.operands[0].reg << 12;
6532 inst.instruction |= inst.operands[1].reg;
6533 inst.instruction |= inst.operands[2].reg << 16;
6534 }
6535 else
6536 {
6537 inst.instruction |= inst.operands[0].reg << 12;
6538 inst.instruction |= inst.operands[1].reg << 16;
6539 inst.instruction |= inst.operands[2].reg;
6540 encode_arm_shift (3);
6541 }
6542}
6543
6544/* ARMv5TE: Preload-Cache
6545
6546 PLD <addr_mode>
6547
6548 Syntactically, like LDR with B=1, W=0, L=1. */
6549
6550static void
6551do_pld (void)
6552{
6553 constraint (!inst.operands[0].isreg,
6554 _("'[' expected after PLD mnemonic"));
6555 constraint (inst.operands[0].postind,
6556 _("post-indexed expression used in preload instruction"));
6557 constraint (inst.operands[0].writeback,
6558 _("writeback used in preload instruction"));
6559 constraint (!inst.operands[0].preind,
6560 _("unindexed addressing used in preload instruction"));
6561 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
6562}
6563
6564/* ARMv7: PLI <addr_mode> */
6565static void
6566do_pli (void)
6567{
6568 constraint (!inst.operands[0].isreg,
6569 _("'[' expected after PLI mnemonic"));
6570 constraint (inst.operands[0].postind,
6571 _("post-indexed expression used in preload instruction"));
6572 constraint (inst.operands[0].writeback,
6573 _("writeback used in preload instruction"));
6574 constraint (!inst.operands[0].preind,
6575 _("unindexed addressing used in preload instruction"));
6576 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
6577 inst.instruction &= ~PRE_INDEX;
6578}
6579
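/* PUSH/POP: fake up an LDM/STM with SP as the written-back base
   register and hand the real encoding work to do_ldmstm.  */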
6580static void
6581do_push_pop (void)
6582{
6583 inst.operands[1] = inst.operands[0];
6584 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
6585 inst.operands[0].isreg = 1;
6586 inst.operands[0].writeback = 1;
6587 inst.operands[0].reg = REG_SP;
6588 do_ldmstm ();
6589}
6590
6591/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
6592 word at the specified address and the following word
6593 respectively.
6594 Unconditionally executed.
6595 Error if Rn is R15. */
6596
6597static void
6598do_rfe (void)
6599{
6600 inst.instruction |= inst.operands[0].reg << 16;
6601 if (inst.operands[0].writeback)
6602 inst.instruction |= WRITE_BACK;
6603}
6604
6605/* ARM V6 ssat (argument parse). */
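/* The sat_imm field holds the saturation width minus one; the unsigned
   variant (do_usat, below) stores the width unmodified.  */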
6606
6607static void
6608do_ssat (void)
6609{
6610 inst.instruction |= inst.operands[0].reg << 12;
6611 inst.instruction |= (inst.operands[1].imm - 1) << 16;
6612 inst.instruction |= inst.operands[2].reg;
6613
6614 if (inst.operands[3].present)
6615 encode_arm_shift (3);
6616}
6617
6618/* ARM V6 usat (argument parse). */
6619
6620static void
6621do_usat (void)
6622{
6623 inst.instruction |= inst.operands[0].reg << 12;
6624 inst.instruction |= inst.operands[1].imm << 16;
6625 inst.instruction |= inst.operands[2].reg;
6626
6627 if (inst.operands[3].present)
6628 encode_arm_shift (3);
6629}
6630
6631/* ARM V6 ssat16 (argument parse). */
6632
6633static void
6634do_ssat16 (void)
6635{
6636 inst.instruction |= inst.operands[0].reg << 12;
6637 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
6638 inst.instruction |= inst.operands[2].reg;
6639}
6640
6641static void
6642do_usat16 (void)
6643{
6644 inst.instruction |= inst.operands[0].reg << 12;
6645 inst.instruction |= inst.operands[1].imm << 16;
6646 inst.instruction |= inst.operands[2].reg;
6647}
6648
6649/* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
6650 preserving the other bits.
6651
6652 setend <endian_specifier>, where <endian_specifier> is either
6653 BE or LE. */
6654
6655static void
6656do_setend (void)
6657{
6658 if (inst.operands[0].imm)
6659 inst.instruction |= 0x200;
6660}
6661
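/* Shift mnemonics (LSL/LSR/ASR/ROR Rd, {Rm,} Rs|#imm).  A register
   count goes in bits 8-11 with SHIFT_BY_REG set; an immediate count is
   filled in later through the BFD_RELOC_ARM_SHIFT_IMM fixup.  */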
6662static void
6663do_shift (void)
6664{
6665 unsigned int Rm = (inst.operands[1].present
6666 ? inst.operands[1].reg
6667 : inst.operands[0].reg);
6668
6669 inst.instruction |= inst.operands[0].reg << 12;
6670 inst.instruction |= Rm;
6671 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
6672 {
6673 inst.instruction |= inst.operands[2].reg << 8;
6674 inst.instruction |= SHIFT_BY_REG;
6675 }
6676 else
6677 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6678}
6679
6680static void
6681do_smc (void)
6682{
6683 inst.reloc.type = BFD_RELOC_ARM_SMC;
6684 inst.reloc.pc_rel = 0;
6685}
6686
6687static void
6688do_swi (void)
6689{
6690 inst.reloc.type = BFD_RELOC_ARM_SWI;
6691 inst.reloc.pc_rel = 0;
6692}
6693
6694/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
6695 SMLAxy{cond} Rd,Rm,Rs,Rn
6696 SMLAWy{cond} Rd,Rm,Rs,Rn
6697 Error if any register is R15. */
6698
6699static void
6700do_smla (void)
6701{
6702 inst.instruction |= inst.operands[0].reg << 16;
6703 inst.instruction |= inst.operands[1].reg;
6704 inst.instruction |= inst.operands[2].reg << 8;
6705 inst.instruction |= inst.operands[3].reg << 12;
6706}
6707
6708/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
6709 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
6710 Error if any register is R15.
6711 Warning if Rdlo == Rdhi. */
6712
6713static void
6714do_smlal (void)
6715{
6716 inst.instruction |= inst.operands[0].reg << 12;
6717 inst.instruction |= inst.operands[1].reg << 16;
6718 inst.instruction |= inst.operands[2].reg;
6719 inst.instruction |= inst.operands[3].reg << 8;
6720
6721 if (inst.operands[0].reg == inst.operands[1].reg)
6722 as_tsktsk (_("rdhi and rdlo must be different"));
6723}
6724
6725/* ARM V5E (El Segundo) signed-multiply (argument parse)
6726 SMULxy{cond} Rd,Rm,Rs
6727 Error if any register is R15. */
6728
6729static void
6730do_smul (void)
6731{
6732 inst.instruction |= inst.operands[0].reg << 16;
6733 inst.instruction |= inst.operands[1].reg;
6734 inst.instruction |= inst.operands[2].reg << 8;
6735}
6736
6737/* ARM V6 srs (argument parse). */
6738
6739static void
6740do_srs (void)
6741{
6742 inst.instruction |= inst.operands[0].imm;
6743 if (inst.operands[0].writeback)
6744 inst.instruction |= WRITE_BACK;
6745}
6746
6747/* ARM V6 strex (argument parse). */
6748
6749static void
6750do_strex (void)
6751{
6752 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
6753 || inst.operands[2].postind || inst.operands[2].writeback
6754 || inst.operands[2].immisreg || inst.operands[2].shifted
6755 || inst.operands[2].negative
6756 /* See comment in do_ldrex(). */
6757 || (inst.operands[2].reg == REG_PC),
6758 BAD_ADDR_MODE);
6759
6760 constraint (inst.operands[0].reg == inst.operands[1].reg
6761 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
6762
6763 constraint (inst.reloc.exp.X_op != O_constant
6764 || inst.reloc.exp.X_add_number != 0,
6765 _("offset must be zero in ARM encoding"));
6766
6767 inst.instruction |= inst.operands[0].reg << 12;
6768 inst.instruction |= inst.operands[1].reg;
6769 inst.instruction |= inst.operands[2].reg << 16;
6770 inst.reloc.type = BFD_RELOC_UNUSED;
6771}
6772
6773static void
6774do_strexd (void)
6775{
6776 constraint (inst.operands[1].reg % 2 != 0,
6777 _("even register required"));
6778 constraint (inst.operands[2].present
6779 && inst.operands[2].reg != inst.operands[1].reg + 1,
6780 _("can only store two consecutive registers"));
6781 /* If op 2 were present and equal to PC, this function wouldn't
6782 have been called in the first place. */
6783 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
6784
6785 constraint (inst.operands[0].reg == inst.operands[1].reg
6786 || inst.operands[0].reg == inst.operands[1].reg + 1
6787 || inst.operands[0].reg == inst.operands[3].reg,
6788 BAD_OVERLAP);
6789
6790 inst.instruction |= inst.operands[0].reg << 12;
6791 inst.instruction |= inst.operands[1].reg;
6792 inst.instruction |= inst.operands[3].reg << 16;
6793}
6794
6795/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
6796 extends it to 32-bits, and adds the result to a value in another
6797 register. You can specify a rotation by 0, 8, 16, or 24 bits
6798 before extracting the 16-bit value.
6799 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
6800 Condition defaults to COND_ALWAYS.
6801   Error if any register is R15.  */
6802
6803static void
6804do_sxtah (void)
6805{
6806 inst.instruction |= inst.operands[0].reg << 12;
6807 inst.instruction |= inst.operands[1].reg << 16;
6808 inst.instruction |= inst.operands[2].reg;
6809 inst.instruction |= inst.operands[3].imm << 10;
6810}
6811
6812/* ARM V6 SXTH.
6813
6814 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
6815 Condition defaults to COND_ALWAYS.
6816   Error if any register is R15.  */
6817
6818static void
6819do_sxth (void)
6820{
6821 inst.instruction |= inst.operands[0].reg << 12;
6822 inst.instruction |= inst.operands[1].reg;
6823 inst.instruction |= inst.operands[2].imm << 10;
6824}
6825\f
6826/* VFP instructions. In a logical order: SP variant first, monad
6827 before dyad, arithmetic then move then load/store. */
6828
6829static void
6830do_vfp_sp_monadic (void)
6831{
6832 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6833 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
6834}
6835
6836static void
6837do_vfp_sp_dyadic (void)
6838{
6839 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6840 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
6841 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
6842}
6843
6844static void
6845do_vfp_sp_compare_z (void)
6846{
6847 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6848}
6849
6850static void
6851do_vfp_dp_sp_cvt (void)
6852{
6853 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6854 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
6855}
6856
6857static void
6858do_vfp_sp_dp_cvt (void)
6859{
6860 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6861 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
6862}
6863
6864static void
6865do_vfp_reg_from_sp (void)
6866{
6867 inst.instruction |= inst.operands[0].reg << 12;
6868 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
6869}
6870
6871static void
6872do_vfp_reg2_from_sp2 (void)
6873{
6874 constraint (inst.operands[2].imm != 2,
6875 _("only two consecutive VFP SP registers allowed here"));
6876 inst.instruction |= inst.operands[0].reg << 12;
6877 inst.instruction |= inst.operands[1].reg << 16;
6878 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
6879}
6880
6881static void
6882do_vfp_sp_from_reg (void)
6883{
6884 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
6885 inst.instruction |= inst.operands[1].reg << 12;
6886}
6887
6888static void
6889do_vfp_sp2_from_reg2 (void)
6890{
6891 constraint (inst.operands[0].imm != 2,
6892 _("only two consecutive VFP SP registers allowed here"));
6893 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
6894 inst.instruction |= inst.operands[1].reg << 12;
6895 inst.instruction |= inst.operands[2].reg << 16;
6896}
6897
6898static void
6899do_vfp_sp_ldst (void)
6900{
6901 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6902 encode_arm_cp_address (1, FALSE, TRUE, 0);
6903}
6904
6905static void
6906do_vfp_dp_ldst (void)
6907{
6908 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6909 encode_arm_cp_address (1, FALSE, TRUE, 0);
6910}
6911
6912
6913static void
6914vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
6915{
6916 if (inst.operands[0].writeback)
6917 inst.instruction |= WRITE_BACK;
6918 else
6919 constraint (ldstm_type != VFP_LDSTMIA,
6920 _("this addressing mode requires base-register writeback"));
6921 inst.instruction |= inst.operands[0].reg << 16;
6922 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
6923 inst.instruction |= inst.operands[1].imm;
6924}
6925
6926static void
6927vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
6928{
6929 int count;
6930
6931 if (inst.operands[0].writeback)
6932 inst.instruction |= WRITE_BACK;
6933 else
6934 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
6935 _("this addressing mode requires base-register writeback"));
6936
6937 inst.instruction |= inst.operands[0].reg << 16;
6938 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
6939
6940 count = inst.operands[1].imm << 1;
6941 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
6942 count += 1;
6943
6944 inst.instruction |= count;
6945}
6946
6947static void
6948do_vfp_sp_ldstmia (void)
6949{
6950 vfp_sp_ldstm (VFP_LDSTMIA);
6951}
6952
6953static void
6954do_vfp_sp_ldstmdb (void)
6955{
6956 vfp_sp_ldstm (VFP_LDSTMDB);
6957}
6958
6959static void
6960do_vfp_dp_ldstmia (void)
6961{
6962 vfp_dp_ldstm (VFP_LDSTMIA);
6963}
6964
6965static void
6966do_vfp_dp_ldstmdb (void)
6967{
6968 vfp_dp_ldstm (VFP_LDSTMDB);
6969}
6970
6971static void
6972do_vfp_xp_ldstmia (void)
6973{
6974 vfp_dp_ldstm (VFP_LDSTMIAX);
6975}
6976
6977static void
6978do_vfp_xp_ldstmdb (void)
6979{
6980 vfp_dp_ldstm (VFP_LDSTMDBX);
6981}
6982
6983static void
6984do_vfp_dp_rd_rm (void)
6985{
6986 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6987 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
6988}
6989
6990static void
6991do_vfp_dp_rn_rd (void)
6992{
6993 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
6994 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
6995}
6996
6997static void
6998do_vfp_dp_rd_rn (void)
6999{
7000 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7001 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7002}
7003
7004static void
7005do_vfp_dp_rd_rn_rm (void)
7006{
7007 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7008 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7009 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7010}
7011
7012static void
7013do_vfp_dp_rd (void)
7014{
7015 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7016}
7017
7018static void
7019do_vfp_dp_rm_rd_rn (void)
7020{
7021 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7022 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7023 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7024}
7025
7026/* VFPv3 instructions. */
7027static void
7028do_vfp_sp_const (void)
7029{
7030 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7031 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7032 inst.instruction |= (inst.operands[1].imm >> 4);
7033}
7034
7035static void
7036do_vfp_dp_const (void)
7037{
7038 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7039 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7040 inst.instruction |= (inst.operands[1].imm >> 4);
7041}
7042
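/* Encode the fraction-bits operand of the VFPv3 fixed-point conversions:
   the value stored is (srcsize - #fbits), with its low bit placed in
   bit 5 of the opcode and the remaining bits in bits 3:0.  */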
7043static void
7044vfp_conv (int srcsize)
7045{
7046 unsigned immbits = srcsize - inst.operands[1].imm;
7047 inst.instruction |= (immbits & 1) << 5;
7048 inst.instruction |= (immbits >> 1);
7049}
7050
7051static void
7052do_vfp_sp_conv_16 (void)
7053{
7054 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7055 vfp_conv (16);
7056}
7057
7058static void
7059do_vfp_dp_conv_16 (void)
7060{
7061 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7062 vfp_conv (16);
7063}
7064
7065static void
7066do_vfp_sp_conv_32 (void)
7067{
7068 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7069 vfp_conv (32);
7070}
7071
7072static void
7073do_vfp_dp_conv_32 (void)
7074{
7075 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7076 vfp_conv (32);
7077}
7078
7079\f
7080/* FPA instructions. Also in a logical order. */
7081
7082static void
7083do_fpa_cmp (void)
7084{
7085 inst.instruction |= inst.operands[0].reg << 16;
7086 inst.instruction |= inst.operands[1].reg;
7087}
7088
7089static void
7090do_fpa_ldmstm (void)
7091{
7092 inst.instruction |= inst.operands[0].reg << 12;
7093 switch (inst.operands[1].imm)
7094 {
7095 case 1: inst.instruction |= CP_T_X; break;
7096 case 2: inst.instruction |= CP_T_Y; break;
7097 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
7098 case 4: break;
7099 default: abort ();
7100 }
7101
7102 if (inst.instruction & (PRE_INDEX | INDEX_UP))
7103 {
7104 /* The instruction specified "ea" or "fd", so we can only accept
7105 [Rn]{!}. The instruction does not really support stacking or
7106 unstacking, so we have to emulate these by setting appropriate
7107 bits and offsets. */
7108 constraint (inst.reloc.exp.X_op != O_constant
7109 || inst.reloc.exp.X_add_number != 0,
7110 _("this instruction does not support indexing"));
7111
7112 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
7113 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
7114
7115 if (!(inst.instruction & INDEX_UP))
7116 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
7117
7118 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
7119 {
7120 inst.operands[2].preind = 0;
7121 inst.operands[2].postind = 1;
7122 }
7123 }
7124
7125 encode_arm_cp_address (2, TRUE, TRUE, 0);
7126}
7127\f
7128/* iWMMXt instructions: strictly in alphabetical order. */
7129
7130static void
7131do_iwmmxt_tandorc (void)
7132{
7133 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
7134}
7135
7136static void
7137do_iwmmxt_textrc (void)
7138{
7139 inst.instruction |= inst.operands[0].reg << 12;
7140 inst.instruction |= inst.operands[1].imm;
7141}
7142
7143static void
7144do_iwmmxt_textrm (void)
7145{
7146 inst.instruction |= inst.operands[0].reg << 12;
7147 inst.instruction |= inst.operands[1].reg << 16;
7148 inst.instruction |= inst.operands[2].imm;
7149}
7150
7151static void
7152do_iwmmxt_tinsr (void)
7153{
7154 inst.instruction |= inst.operands[0].reg << 16;
7155 inst.instruction |= inst.operands[1].reg << 12;
7156 inst.instruction |= inst.operands[2].imm;
7157}
7158
7159static void
7160do_iwmmxt_tmia (void)
7161{
7162 inst.instruction |= inst.operands[0].reg << 5;
7163 inst.instruction |= inst.operands[1].reg;
7164 inst.instruction |= inst.operands[2].reg << 12;
7165}
7166
7167static void
7168do_iwmmxt_waligni (void)
7169{
7170 inst.instruction |= inst.operands[0].reg << 12;
7171 inst.instruction |= inst.operands[1].reg << 16;
7172 inst.instruction |= inst.operands[2].reg;
7173 inst.instruction |= inst.operands[3].imm << 20;
7174}
7175
7176static void
7177do_iwmmxt_wmov (void)
7178{
7179 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7180 inst.instruction |= inst.operands[0].reg << 12;
7181 inst.instruction |= inst.operands[1].reg << 16;
7182 inst.instruction |= inst.operands[1].reg;
7183}
7184
7185static void
7186do_iwmmxt_wldstbh (void)
7187{
7188 int reloc;
7189 inst.instruction |= inst.operands[0].reg << 12;
7190 if (thumb_mode)
7191 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
7192 else
7193 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
7194 encode_arm_cp_address (1, TRUE, FALSE, reloc);
7195}
7196
7197static void
7198do_iwmmxt_wldstw (void)
7199{
7200 /* RIWR_RIWC clears .isreg for a control register. */
7201 if (!inst.operands[0].isreg)
7202 {
7203 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7204 inst.instruction |= 0xf0000000;
7205 }
7206
7207 inst.instruction |= inst.operands[0].reg << 12;
7208 encode_arm_cp_address (1, TRUE, TRUE, 0);
7209}
7210
7211static void
7212do_iwmmxt_wldstd (void)
7213{
7214 inst.instruction |= inst.operands[0].reg << 12;
7215 encode_arm_cp_address (1, TRUE, FALSE, 0);
7216}
7217
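/* WSHUFH: the 8-bit shuffle selector is split, high nibble into
   bits 23:20 and low nibble into bits 3:0.  */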
7218static void
7219do_iwmmxt_wshufh (void)
7220{
7221 inst.instruction |= inst.operands[0].reg << 12;
7222 inst.instruction |= inst.operands[1].reg << 16;
7223 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
7224 inst.instruction |= (inst.operands[2].imm & 0x0f);
7225}
7226
7227static void
7228do_iwmmxt_wzero (void)
7229{
7230 /* WZERO reg is an alias for WANDN reg, reg, reg. */
7231 inst.instruction |= inst.operands[0].reg;
7232 inst.instruction |= inst.operands[0].reg << 12;
7233 inst.instruction |= inst.operands[0].reg << 16;
7234}
7235\f
7236/* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
7237 operations first, then control, shift, and load/store. */
7238
7239/* Insns like "foo X,Y,Z". */
7240
7241static void
7242do_mav_triple (void)
7243{
7244 inst.instruction |= inst.operands[0].reg << 16;
7245 inst.instruction |= inst.operands[1].reg;
7246 inst.instruction |= inst.operands[2].reg << 12;
7247}
7248
7249/* Insns like "foo W,X,Y,Z",
7250 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
7251
7252static void
7253do_mav_quad (void)
7254{
7255 inst.instruction |= inst.operands[0].reg << 5;
7256 inst.instruction |= inst.operands[1].reg << 12;
7257 inst.instruction |= inst.operands[2].reg << 16;
7258 inst.instruction |= inst.operands[3].reg;
7259}
7260
7261/* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
7262static void
7263do_mav_dspsc (void)
7264{
7265 inst.instruction |= inst.operands[1].reg << 12;
7266}
7267
7268/* Maverick shift immediate instructions.
7269 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
7270 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
7271
7272static void
7273do_mav_shift (void)
7274{
7275 int imm = inst.operands[2].imm;
7276
7277 inst.instruction |= inst.operands[0].reg << 12;
7278 inst.instruction |= inst.operands[1].reg << 16;
7279
7280 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
7281 Bits 5-7 of the insn should have bits 4-6 of the immediate.
7282 Bit 4 should be 0. */
7283 imm = (imm & 0xf) | ((imm & 0x70) << 1);
7284
7285 inst.instruction |= imm;
7286}
7287\f
7288/* XScale instructions. Also sorted arithmetic before move. */
7289
7290/* Xscale multiply-accumulate (argument parse)
7291 MIAcc acc0,Rm,Rs
7292 MIAPHcc acc0,Rm,Rs
7293 MIAxycc acc0,Rm,Rs. */
7294
7295static void
7296do_xsc_mia (void)
7297{
7298 inst.instruction |= inst.operands[1].reg;
7299 inst.instruction |= inst.operands[2].reg << 12;
7300}
7301
7302/* Xscale move-accumulator-register (argument parse)
7303
7304 MARcc acc0,RdLo,RdHi. */
7305
7306static void
7307do_xsc_mar (void)
7308{
7309 inst.instruction |= inst.operands[1].reg << 12;
7310 inst.instruction |= inst.operands[2].reg << 16;
7311}
7312
7313/* Xscale move-register-accumulator (argument parse)
7314
7315 MRAcc RdLo,RdHi,acc0. */
7316
7317static void
7318do_xsc_mra (void)
7319{
7320 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
7321 inst.instruction |= inst.operands[0].reg << 12;
7322 inst.instruction |= inst.operands[1].reg << 16;
7323}
7324\f
7325/* Encoding functions relevant only to Thumb. */
7326
7327/* inst.operands[i] is a shifted-register operand; encode
7328 it into inst.instruction in the format used by Thumb32. */
7329
7330static void
7331encode_thumb32_shifted_operand (int i)
7332{
7333 unsigned int value = inst.reloc.exp.X_add_number;
7334 unsigned int shift = inst.operands[i].shift_kind;
7335
7336 constraint (inst.operands[i].immisreg,
7337 _("shift by register not allowed in thumb mode"));
7338 inst.instruction |= inst.operands[i].reg;
7339 if (shift == SHIFT_RRX)
7340 inst.instruction |= SHIFT_ROR << 4;
7341 else
7342 {
7343 constraint (inst.reloc.exp.X_op != O_constant,
7344 _("expression too complex"));
7345
7346 constraint (value > 32
7347 || (value == 32 && (shift == SHIFT_LSL
7348 || shift == SHIFT_ROR)),
7349 _("shift expression is too large"));
7350
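      /* LSR #32 and ASR #32 are encoded with a shift amount of zero;
	 an amount of zero itself degenerates to LSL #0 (no shift).  */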
7351 if (value == 0)
7352 shift = SHIFT_LSL;
7353 else if (value == 32)
7354 value = 0;
7355
7356 inst.instruction |= shift << 4;
7357 inst.instruction |= (value & 0x1c) << 10;
7358 inst.instruction |= (value & 0x03) << 6;
7359 }
7360}
7361
7362
7363/* inst.operands[i] was set up by parse_address. Encode it into a
7364 Thumb32 format load or store instruction. Reject forms that cannot
7365 be used with such instructions. If is_t is true, reject forms that
7366 cannot be used with a T instruction; if is_d is true, reject forms
7367 that cannot be used with a D instruction. */
7368
7369static void
7370encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
7371{
7372 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7373
7374 constraint (!inst.operands[i].isreg,
7375 _("Instruction does not support =N addresses"));
7376
7377 inst.instruction |= inst.operands[i].reg << 16;
7378 if (inst.operands[i].immisreg)
7379 {
7380 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
7381 constraint (is_t || is_d, _("cannot use register index with this instruction"));
7382 constraint (inst.operands[i].negative,
7383 _("Thumb does not support negative register indexing"));
7384 constraint (inst.operands[i].postind,
7385 _("Thumb does not support register post-indexing"));
7386 constraint (inst.operands[i].writeback,
7387 _("Thumb does not support register indexing with writeback"));
7388 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
7389 _("Thumb supports only LSL in shifted register indexing"));
7390
7391 inst.instruction |= inst.operands[i].imm;
7392 if (inst.operands[i].shifted)
7393 {
7394 constraint (inst.reloc.exp.X_op != O_constant,
7395 _("expression too complex"));
7396 constraint (inst.reloc.exp.X_add_number < 0
7397 || inst.reloc.exp.X_add_number > 3,
7398 _("shift out of range"));
7399 inst.instruction |= inst.reloc.exp.X_add_number << 4;
7400 }
7401 inst.reloc.type = BFD_RELOC_UNUSED;
7402 }
7403 else if (inst.operands[i].preind)
7404 {
7405 constraint (is_pc && inst.operands[i].writeback,
7406 _("cannot use writeback with PC-relative addressing"));
7407 constraint (is_t && inst.operands[i].writeback,
7408 _("cannot use writeback with this instruction"));
7409
7410 if (is_d)
7411 {
7412 inst.instruction |= 0x01000000;
7413 if (inst.operands[i].writeback)
7414 inst.instruction |= 0x00200000;
7415 }
7416 else
7417 {
7418 inst.instruction |= 0x00000c00;
7419 if (inst.operands[i].writeback)
7420 inst.instruction |= 0x00000100;
7421 }
7422 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
7423 }
7424 else if (inst.operands[i].postind)
7425 {
7426 assert (inst.operands[i].writeback);
7427 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
7428 constraint (is_t, _("cannot use post-indexing with this instruction"));
7429
7430 if (is_d)
7431 inst.instruction |= 0x00200000;
7432 else
7433 inst.instruction |= 0x00000900;
7434 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
7435 }
7436 else /* unindexed - only for coprocessor */
7437 inst.error = _("instruction does not accept unindexed addressing");
7438}
7439
7440/* Table of Thumb instructions which exist in both 16- and 32-bit
7441 encodings (the latter only in post-V6T2 cores). The index is the
7442 value used in the insns table below. When there is more than one
7443 possible 16-bit encoding for the instruction, this table always
7444 holds variant (1).
7445 Also contains several pseudo-instructions used during relaxation. */
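/* The table is expanded three times through the X macro: once for the
   T_MNEM_xxx enumeration, once for the 16-bit opcodes (thumb_op16) and
   once for the 32-bit opcodes (thumb_op32).  For instance
   X(adc, 4140, eb400000) produces T_MNEM_adc, a thumb_op16[] entry of
   0x4140 and a thumb_op32[] entry of 0xeb400000.  */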
7446#define T16_32_TAB \
7447 X(adc, 4140, eb400000), \
7448 X(adcs, 4140, eb500000), \
7449 X(add, 1c00, eb000000), \
7450 X(adds, 1c00, eb100000), \
7451 X(addi, 0000, f1000000), \
7452 X(addis, 0000, f1100000), \
7453 X(add_pc,000f, f20f0000), \
7454 X(add_sp,000d, f10d0000), \
7455 X(adr, 000f, f20f0000), \
7456 X(and, 4000, ea000000), \
7457 X(ands, 4000, ea100000), \
7458 X(asr, 1000, fa40f000), \
7459 X(asrs, 1000, fa50f000), \
7460 X(b, e000, f000b000), \
7461 X(bcond, d000, f0008000), \
7462 X(bic, 4380, ea200000), \
7463 X(bics, 4380, ea300000), \
7464 X(cmn, 42c0, eb100f00), \
7465 X(cmp, 2800, ebb00f00), \
7466 X(cpsie, b660, f3af8400), \
7467 X(cpsid, b670, f3af8600), \
7468 X(cpy, 4600, ea4f0000), \
7469 X(dec_sp,80dd, f1bd0d00), \
7470 X(eor, 4040, ea800000), \
7471 X(eors, 4040, ea900000), \
7472 X(inc_sp,00dd, f10d0d00), \
7473 X(ldmia, c800, e8900000), \
7474 X(ldr, 6800, f8500000), \
7475 X(ldrb, 7800, f8100000), \
7476 X(ldrh, 8800, f8300000), \
7477 X(ldrsb, 5600, f9100000), \
7478 X(ldrsh, 5e00, f9300000), \
7479 X(ldr_pc,4800, f85f0000), \
7480 X(ldr_pc2,4800, f85f0000), \
7481 X(ldr_sp,9800, f85d0000), \
7482 X(lsl, 0000, fa00f000), \
7483 X(lsls, 0000, fa10f000), \
7484 X(lsr, 0800, fa20f000), \
7485 X(lsrs, 0800, fa30f000), \
7486 X(mov, 2000, ea4f0000), \
7487 X(movs, 2000, ea5f0000), \
7488 X(mul, 4340, fb00f000), \
7489 X(muls, 4340, ffffffff), /* no 32b muls */ \
7490 X(mvn, 43c0, ea6f0000), \
7491 X(mvns, 43c0, ea7f0000), \
7492 X(neg, 4240, f1c00000), /* rsb #0 */ \
7493 X(negs, 4240, f1d00000), /* rsbs #0 */ \
7494 X(orr, 4300, ea400000), \
7495 X(orrs, 4300, ea500000), \
7496 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
7497 X(push, b400, e92d0000), /* stmdb sp!,... */ \
7498 X(rev, ba00, fa90f080), \
7499 X(rev16, ba40, fa90f090), \
7500 X(revsh, bac0, fa90f0b0), \
7501 X(ror, 41c0, fa60f000), \
7502 X(rors, 41c0, fa70f000), \
7503 X(sbc, 4180, eb600000), \
7504 X(sbcs, 4180, eb700000), \
7505 X(stmia, c000, e8800000), \
7506 X(str, 6000, f8400000), \
7507 X(strb, 7000, f8000000), \
7508 X(strh, 8000, f8200000), \
7509 X(str_sp,9000, f84d0000), \
7510 X(sub, 1e00, eba00000), \
7511 X(subs, 1e00, ebb00000), \
7512 X(subi, 8000, f1a00000), \
7513 X(subis, 8000, f1b00000), \
7514 X(sxtb, b240, fa4ff080), \
7515 X(sxth, b200, fa0ff080), \
7516 X(tst, 4200, ea100f00), \
7517 X(uxtb, b2c0, fa5ff080), \
7518 X(uxth, b280, fa1ff080), \
7519 X(nop, bf00, f3af8000), \
7520 X(yield, bf10, f3af8001), \
7521 X(wfe, bf20, f3af8002), \
7522 X(wfi, bf30, f3af8003), \
7523       X(sev,   bf40, f3af8004),
7524
7525/* To catch errors in encoding functions, the codes are all offset by
7526 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
7527 as 16-bit instructions. */
7528#define X(a,b,c) T_MNEM_##a
7529enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
7530#undef X
7531
7532#define X(a,b,c) 0x##b
7533static const unsigned short thumb_op16[] = { T16_32_TAB };
7534#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
7535#undef X
7536
7537#define X(a,b,c) 0x##c
7538static const unsigned int thumb_op32[] = { T16_32_TAB };
7539#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
7540#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
7541#undef X
7542#undef T16_32_TAB
7543
7544/* Thumb instruction encoders, in alphabetical order. */
7545
7546/* ADDW or SUBW. */
7547static void
7548do_t_add_sub_w (void)
7549{
7550 int Rd, Rn;
7551
7552 Rd = inst.operands[0].reg;
7553 Rn = inst.operands[1].reg;
7554
7555 constraint (Rd == 15, _("PC not allowed as destination"));
7556 inst.instruction |= (Rn << 16) | (Rd << 8);
7557 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
7558}
7559
7560/* Parse an add or subtract instruction. We get here with inst.instruction
7561   equalling any of T_MNEM_add, adds, sub, or subs.  */
7562
7563static void
7564do_t_add_sub (void)
7565{
7566 int Rd, Rs, Rn;
7567
7568 Rd = inst.operands[0].reg;
7569 Rs = (inst.operands[1].present
7570 ? inst.operands[1].reg /* Rd, Rs, foo */
7571 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7572
7573 if (unified_syntax)
7574 {
7575 bfd_boolean flags;
7576 bfd_boolean narrow;
7577 int opcode;
7578
7579 flags = (inst.instruction == T_MNEM_adds
7580 || inst.instruction == T_MNEM_subs);
7581 if (flags)
7582 narrow = (current_it_mask == 0);
7583 else
7584 narrow = (current_it_mask != 0);
7585 if (!inst.operands[2].isreg)
7586 {
7587 opcode = 0;
7588 if (inst.size_req != 4)
7589 {
7590 int add;
7591
7592 add = (inst.instruction == T_MNEM_add
7593 || inst.instruction == T_MNEM_adds);
7594 /* Attempt to use a narrow opcode, with relaxation if
7595 appropriate. */
7596 if (Rd == REG_SP && Rs == REG_SP && !flags)
7597 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
7598 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
7599 opcode = T_MNEM_add_sp;
7600 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
7601 opcode = T_MNEM_add_pc;
7602 else if (Rd <= 7 && Rs <= 7 && narrow)
7603 {
7604 if (flags)
7605 opcode = add ? T_MNEM_addis : T_MNEM_subis;
7606 else
7607 opcode = add ? T_MNEM_addi : T_MNEM_subi;
7608 }
7609 if (opcode)
7610 {
7611 inst.instruction = THUMB_OP16(opcode);
7612 inst.instruction |= (Rd << 4) | Rs;
7613 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7614 if (inst.size_req != 2)
7615 inst.relax = opcode;
7616 }
7617 else
7618 constraint (inst.size_req == 2, BAD_HIREG);
7619 }
7620 if (inst.size_req == 4
7621 || (inst.size_req != 2 && !opcode))
7622 {
7623 /* ??? Convert large immediates to addw/subw. */
7624 inst.instruction = THUMB_OP32 (inst.instruction);
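	  /* Flip the T32 register-form opcode (0xea../0xeb..) over to the
	     corresponding immediate form (0xf0../0xf1..).  */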
7625 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7626 inst.instruction |= inst.operands[0].reg << 8;
7627 inst.instruction |= inst.operands[1].reg << 16;
7628 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7629 }
7630 }
7631 else
7632 {
7633 Rn = inst.operands[2].reg;
7634 /* See if we can do this with a 16-bit instruction. */
7635 if (!inst.operands[2].shifted && inst.size_req != 4)
7636 {
7637 if (Rd > 7 || Rs > 7 || Rn > 7)
7638 narrow = FALSE;
7639
7640 if (narrow)
7641 {
7642 inst.instruction = ((inst.instruction == T_MNEM_adds
7643 || inst.instruction == T_MNEM_add)
7644 ? T_OPCODE_ADD_R3
7645 : T_OPCODE_SUB_R3);
7646 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
7647 return;
7648 }
7649
7650 if (inst.instruction == T_MNEM_add)
7651 {
7652 if (Rd == Rs)
7653 {
7654 inst.instruction = T_OPCODE_ADD_HI;
7655 inst.instruction |= (Rd & 8) << 4;
7656 inst.instruction |= (Rd & 7);
7657 inst.instruction |= Rn << 3;
7658 return;
7659 }
7660 /* ... because addition is commutative! */
7661 else if (Rd == Rn)
7662 {
7663 inst.instruction = T_OPCODE_ADD_HI;
7664 inst.instruction |= (Rd & 8) << 4;
7665 inst.instruction |= (Rd & 7);
7666 inst.instruction |= Rs << 3;
7667 return;
7668 }
7669 }
7670 }
7671 /* If we get here, it can't be done in 16 bits. */
7672 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
7673 _("shift must be constant"));
7674 inst.instruction = THUMB_OP32 (inst.instruction);
7675 inst.instruction |= Rd << 8;
7676 inst.instruction |= Rs << 16;
7677 encode_thumb32_shifted_operand (2);
7678 }
7679 }
7680 else
7681 {
7682 constraint (inst.instruction == T_MNEM_adds
7683 || inst.instruction == T_MNEM_subs,
7684 BAD_THUMB32);
7685
7686 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
7687 {
7688 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
7689 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
7690 BAD_HIREG);
7691
7692 inst.instruction = (inst.instruction == T_MNEM_add
7693 ? 0x0000 : 0x8000);
7694 inst.instruction |= (Rd << 4) | Rs;
7695 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7696 return;
7697 }
7698
7699 Rn = inst.operands[2].reg;
7700 constraint (inst.operands[2].shifted, _("unshifted register required"));
7701
7702 /* We now have Rd, Rs, and Rn set to registers. */
7703 if (Rd > 7 || Rs > 7 || Rn > 7)
7704 {
7705 /* Can't do this for SUB. */
7706 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
7707 inst.instruction = T_OPCODE_ADD_HI;
7708 inst.instruction |= (Rd & 8) << 4;
7709 inst.instruction |= (Rd & 7);
7710 if (Rs == Rd)
7711 inst.instruction |= Rn << 3;
7712 else if (Rn == Rd)
7713 inst.instruction |= Rs << 3;
7714 else
7715 constraint (1, _("dest must overlap one source register"));
7716 }
7717 else
7718 {
7719 inst.instruction = (inst.instruction == T_MNEM_add
7720 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
7721 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
7722 }
7723 }
7724}
7725
7726static void
7727do_t_adr (void)
7728{
7729 if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
7730 {
7731 /* Defer to section relaxation. */
7732 inst.relax = inst.instruction;
7733 inst.instruction = THUMB_OP16 (inst.instruction);
7734 inst.instruction |= inst.operands[0].reg << 4;
7735 }
7736 else if (unified_syntax && inst.size_req != 2)
7737 {
7738 /* Generate a 32-bit opcode. */
7739 inst.instruction = THUMB_OP32 (inst.instruction);
7740 inst.instruction |= inst.operands[0].reg << 8;
7741 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
7742 inst.reloc.pc_rel = 1;
7743 }
7744 else
7745 {
7746 /* Generate a 16-bit opcode. */
7747 inst.instruction = THUMB_OP16 (inst.instruction);
7748 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7749 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
7750 inst.reloc.pc_rel = 1;
7751
7752 inst.instruction |= inst.operands[0].reg << 4;
7753 }
7754}
7755
7756/* Arithmetic instructions for which there is just one 16-bit
7757 instruction encoding, and it allows only two low registers.
7758 For maximal compatibility with ARM syntax, we allow three register
7759 operands even when Thumb-32 instructions are not available, as long
7760 as the first two are identical. For instance, both "sbc r0,r1" and
7761 "sbc r0,r0,r1" are allowed. */
7762static void
7763do_t_arit3 (void)
7764{
7765 int Rd, Rs, Rn;
7766
7767 Rd = inst.operands[0].reg;
7768 Rs = (inst.operands[1].present
7769 ? inst.operands[1].reg /* Rd, Rs, foo */
7770 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7771 Rn = inst.operands[2].reg;
7772
7773 if (unified_syntax)
7774 {
7775 if (!inst.operands[2].isreg)
7776 {
7777 /* For an immediate, we always generate a 32-bit opcode;
7778 section relaxation will shrink it later if possible. */
7779 inst.instruction = THUMB_OP32 (inst.instruction);
7780 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7781 inst.instruction |= Rd << 8;
7782 inst.instruction |= Rs << 16;
7783 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7784 }
7785 else
7786 {
7787 bfd_boolean narrow;
7788
7789 /* See if we can do this with a 16-bit instruction. */
7790 if (THUMB_SETS_FLAGS (inst.instruction))
7791 narrow = current_it_mask == 0;
7792 else
7793 narrow = current_it_mask != 0;
7794
7795 if (Rd > 7 || Rn > 7 || Rs > 7)
7796 narrow = FALSE;
7797 if (inst.operands[2].shifted)
7798 narrow = FALSE;
7799 if (inst.size_req == 4)
7800 narrow = FALSE;
7801
7802 if (narrow
7803 && Rd == Rs)
7804 {
7805 inst.instruction = THUMB_OP16 (inst.instruction);
7806 inst.instruction |= Rd;
7807 inst.instruction |= Rn << 3;
7808 return;
7809 }
7810
7811 /* If we get here, it can't be done in 16 bits. */
7812 constraint (inst.operands[2].shifted
7813 && inst.operands[2].immisreg,
7814 _("shift must be constant"));
7815 inst.instruction = THUMB_OP32 (inst.instruction);
7816 inst.instruction |= Rd << 8;
7817 inst.instruction |= Rs << 16;
7818 encode_thumb32_shifted_operand (2);
7819 }
7820 }
7821 else
7822 {
7823 /* On its face this is a lie - the instruction does set the
7824 flags. However, the only supported mnemonic in this mode
7825 says it doesn't. */
7826 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
7827
7828 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
7829 _("unshifted register required"));
7830 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
7831 constraint (Rd != Rs,
7832 _("dest and source1 must be the same register"));
7833
7834 inst.instruction = THUMB_OP16 (inst.instruction);
7835 inst.instruction |= Rd;
7836 inst.instruction |= Rn << 3;
7837 }
7838}
7839
7840/* Similarly, but for instructions where the arithmetic operation is
7841 commutative, so we can allow either of them to be different from
7842 the destination operand in a 16-bit instruction. For instance, all
7843 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
7844 accepted. */
7845static void
7846do_t_arit3c (void)
7847{
7848 int Rd, Rs, Rn;
7849
7850 Rd = inst.operands[0].reg;
7851 Rs = (inst.operands[1].present
7852 ? inst.operands[1].reg /* Rd, Rs, foo */
7853 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7854 Rn = inst.operands[2].reg;
7855
7856 if (unified_syntax)
7857 {
7858 if (!inst.operands[2].isreg)
7859 {
7860 /* For an immediate, we always generate a 32-bit opcode;
7861 section relaxation will shrink it later if possible. */
7862 inst.instruction = THUMB_OP32 (inst.instruction);
7863 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7864 inst.instruction |= Rd << 8;
7865 inst.instruction |= Rs << 16;
7866 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7867 }
7868 else
7869 {
7870 bfd_boolean narrow;
7871
7872 /* See if we can do this with a 16-bit instruction. */
7873 if (THUMB_SETS_FLAGS (inst.instruction))
7874 narrow = current_it_mask == 0;
7875 else
7876 narrow = current_it_mask != 0;
7877
7878 if (Rd > 7 || Rn > 7 || Rs > 7)
7879 narrow = FALSE;
7880 if (inst.operands[2].shifted)
7881 narrow = FALSE;
7882 if (inst.size_req == 4)
7883 narrow = FALSE;
7884
7885 if (narrow)
7886 {
7887 if (Rd == Rs)
7888 {
7889 inst.instruction = THUMB_OP16 (inst.instruction);
7890 inst.instruction |= Rd;
7891 inst.instruction |= Rn << 3;
7892 return;
7893 }
7894 if (Rd == Rn)
7895 {
7896 inst.instruction = THUMB_OP16 (inst.instruction);
7897 inst.instruction |= Rd;
7898 inst.instruction |= Rs << 3;
7899 return;
7900 }
7901 }
7902
7903 /* If we get here, it can't be done in 16 bits. */
7904 constraint (inst.operands[2].shifted
7905 && inst.operands[2].immisreg,
7906 _("shift must be constant"));
7907 inst.instruction = THUMB_OP32 (inst.instruction);
7908 inst.instruction |= Rd << 8;
7909 inst.instruction |= Rs << 16;
7910 encode_thumb32_shifted_operand (2);
7911 }
7912 }
7913 else
7914 {
7915 /* On its face this is a lie - the instruction does set the
7916 flags. However, the only supported mnemonic in this mode
7917 says it doesn't. */
7918 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
7919
7920 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
7921 _("unshifted register required"));
7922 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
7923
7924 inst.instruction = THUMB_OP16 (inst.instruction);
7925 inst.instruction |= Rd;
7926
7927 if (Rd == Rs)
7928 inst.instruction |= Rn << 3;
7929 else if (Rd == Rn)
7930 inst.instruction |= Rs << 3;
7931 else
7932 constraint (1, _("dest must overlap one source register"));
7933 }
7934}
7935
7936static void
7937do_t_barrier (void)
7938{
7939 if (inst.operands[0].present)
7940 {
7941 constraint ((inst.instruction & 0xf0) != 0x40
7942 && inst.operands[0].imm != 0xf,
7943		  _("bad barrier type"));
7944 inst.instruction |= inst.operands[0].imm;
7945 }
7946 else
7947 inst.instruction |= 0xf;
7948}
7949
7950static void
7951do_t_bfc (void)
7952{
7953 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
7954 constraint (msb > 32, _("bit-field extends past end of register"));
7955 /* The instruction encoding stores the LSB and MSB,
7956 not the LSB and width. */
7957 inst.instruction |= inst.operands[0].reg << 8;
7958 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
7959 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
7960 inst.instruction |= msb - 1;
7961}
7962
7963static void
7964do_t_bfi (void)
7965{
7966 unsigned int msb;
7967
7968 /* #0 in second position is alternative syntax for bfc, which is
7969 the same instruction but with REG_PC in the Rm field. */
7970 if (!inst.operands[1].isreg)
7971 inst.operands[1].reg = REG_PC;
7972
7973 msb = inst.operands[2].imm + inst.operands[3].imm;
7974 constraint (msb > 32, _("bit-field extends past end of register"));
7975 /* The instruction encoding stores the LSB and MSB,
7976 not the LSB and width. */
7977 inst.instruction |= inst.operands[0].reg << 8;
7978 inst.instruction |= inst.operands[1].reg << 16;
7979 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
7980 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
7981 inst.instruction |= msb - 1;
7982}
7983
7984static void
7985do_t_bfx (void)
7986{
7987 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
7988 _("bit-field extends past end of register"));
7989 inst.instruction |= inst.operands[0].reg << 8;
7990 inst.instruction |= inst.operands[1].reg << 16;
7991 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
7992 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
7993 inst.instruction |= inst.operands[3].imm - 1;
7994}
7995
7996/* ARM V5 Thumb BLX (argument parse)
7997 BLX <target_addr> which is BLX(1)
7998 BLX <Rm> which is BLX(2)
7999 Unfortunately, there are two different opcodes for this mnemonic.
8000 So, the insns[].value is not used, and the code here zaps values
8001 into inst.instruction.
8002
8003 ??? How to take advantage of the additional two bits of displacement
8004 available in Thumb32 mode? Need new relocation? */
8005
8006static void
8007do_t_blx (void)
8008{
8009 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8010 if (inst.operands[0].isreg)
8011 /* We have a register, so this is BLX(2). */
8012 inst.instruction |= inst.operands[0].reg << 3;
8013 else
8014 {
8015 /* No register. This must be BLX(1). */
8016 inst.instruction = 0xf000e800;
8017#ifdef OBJ_ELF
8018 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8019 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8020 else
8021#endif
8022 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
8023 inst.reloc.pc_rel = 1;
8024 }
8025}
8026
8027static void
8028do_t_branch (void)
8029{
8030 int opcode;
8031 int cond;
8032
8033 if (current_it_mask)
8034 {
8035 /* Conditional branches inside IT blocks are encoded as unconditional
8036 branches. */
8037 cond = COND_ALWAYS;
8038 /* A branch must be the last instruction in an IT block. */
8039 constraint (current_it_mask != 0x10, BAD_BRANCH);
8040 }
8041 else
8042 cond = inst.cond;
8043
8044 if (cond != COND_ALWAYS)
8045 opcode = T_MNEM_bcond;
8046 else
8047 opcode = inst.instruction;
8048
8049 if (unified_syntax && inst.size_req == 4)
8050 {
8051 inst.instruction = THUMB_OP32(opcode);
8052 if (cond == COND_ALWAYS)
8053 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
8054 else
8055 {
8056 assert (cond != 0xF);
8057 inst.instruction |= cond << 22;
8058 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
8059 }
8060 }
8061 else
8062 {
8063 inst.instruction = THUMB_OP16(opcode);
8064 if (cond == COND_ALWAYS)
8065 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
8066 else
8067 {
8068 inst.instruction |= cond << 8;
8069 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
8070 }
8071 /* Allow section relaxation. */
8072 if (unified_syntax && inst.size_req != 2)
8073 inst.relax = opcode;
8074 }
8075
8076 inst.reloc.pc_rel = 1;
8077}
8078
8079static void
8080do_t_bkpt (void)
8081{
8082 constraint (inst.cond != COND_ALWAYS,
8083 _("instruction is always unconditional"));
8084 if (inst.operands[0].present)
8085 {
8086 constraint (inst.operands[0].imm > 255,
8087 _("immediate value out of range"));
8088 inst.instruction |= inst.operands[0].imm;
8089 }
8090}
8091
8092static void
8093do_t_branch23 (void)
8094{
8095 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8096 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8097 inst.reloc.pc_rel = 1;
8098
8099 /* If the destination of the branch is a defined symbol which does not have
8100 the THUMB_FUNC attribute, then we must be calling a function which has
8101 the (interfacearm) attribute. We look for the Thumb entry point to that
8102 function and change the branch to refer to that function instead. */
8103 if ( inst.reloc.exp.X_op == O_symbol
8104 && inst.reloc.exp.X_add_symbol != NULL
8105 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8106 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8107 inst.reloc.exp.X_add_symbol =
8108 find_real_start (inst.reloc.exp.X_add_symbol);
8109}
8110
8111static void
8112do_t_bx (void)
8113{
8114 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8115 inst.instruction |= inst.operands[0].reg << 3;
8116 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8117 should cause the alignment to be checked once it is known. This is
8118 because BX PC only works if the instruction is word aligned. */
8119}
8120
8121static void
8122do_t_bxj (void)
8123{
8124 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8125 if (inst.operands[0].reg == REG_PC)
8126 as_tsktsk (_("use of r15 in bxj is not really useful"));
8127
8128 inst.instruction |= inst.operands[0].reg << 16;
8129}
8130
8131static void
8132do_t_clz (void)
8133{
8134 inst.instruction |= inst.operands[0].reg << 8;
8135 inst.instruction |= inst.operands[1].reg << 16;
8136 inst.instruction |= inst.operands[1].reg;
8137}
8138
8139static void
8140do_t_cps (void)
8141{
8142 constraint (current_it_mask, BAD_NOT_IT);
8143 inst.instruction |= inst.operands[0].imm;
8144}
8145
8146static void
8147do_t_cpsi (void)
8148{
8149 constraint (current_it_mask, BAD_NOT_IT);
8150 if (unified_syntax
8151 && (inst.operands[1].present || inst.size_req == 4)
8152 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
8153 {
8154 unsigned int imod = (inst.instruction & 0x0030) >> 4;
8155 inst.instruction = 0xf3af8000;
8156 inst.instruction |= imod << 9;
8157 inst.instruction |= inst.operands[0].imm << 5;
8158 if (inst.operands[1].present)
8159 inst.instruction |= 0x100 | inst.operands[1].imm;
8160 }
8161 else
8162 {
8163 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
8164 && (inst.operands[0].imm & 4),
8165 _("selected processor does not support 'A' form "
8166 "of this instruction"));
8167 constraint (inst.operands[1].present || inst.size_req == 4,
8168 _("Thumb does not support the 2-argument "
8169 "form of this instruction"));
8170 inst.instruction |= inst.operands[0].imm;
8171 }
8172}
8173
8174/* THUMB CPY instruction (argument parse). */
8175
8176static void
8177do_t_cpy (void)
8178{
8179 if (inst.size_req == 4)
8180 {
8181 inst.instruction = THUMB_OP32 (T_MNEM_mov);
8182 inst.instruction |= inst.operands[0].reg << 8;
8183 inst.instruction |= inst.operands[1].reg;
8184 }
8185 else
8186 {
8187 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8188 inst.instruction |= (inst.operands[0].reg & 0x7);
8189 inst.instruction |= inst.operands[1].reg << 3;
8190 }
8191}
8192
8193static void
8194do_t_czb (void)
8195{
8196 constraint (current_it_mask, BAD_NOT_IT);
8197 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8198 inst.instruction |= inst.operands[0].reg;
8199 inst.reloc.pc_rel = 1;
8200 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
8201}
8202
8203static void
8204do_t_dbg (void)
8205{
8206 inst.instruction |= inst.operands[0].imm;
8207}
8208
8209static void
8210do_t_div (void)
8211{
8212 if (!inst.operands[1].present)
8213 inst.operands[1].reg = inst.operands[0].reg;
8214 inst.instruction |= inst.operands[0].reg << 8;
8215 inst.instruction |= inst.operands[1].reg << 16;
8216 inst.instruction |= inst.operands[2].reg;
8217}
8218
8219static void
8220do_t_hint (void)
8221{
8222 if (unified_syntax && inst.size_req == 4)
8223 inst.instruction = THUMB_OP32 (inst.instruction);
8224 else
8225 inst.instruction = THUMB_OP16 (inst.instruction);
8226}
8227
8228static void
8229do_t_it (void)
8230{
8231 unsigned int cond = inst.operands[0].imm;
8232
8233 constraint (current_it_mask, BAD_NOT_IT);
8234 current_it_mask = (inst.instruction & 0xf) | 0x10;
8235 current_cc = cond;
8236
8237 /* If the condition is a negative condition, invert the mask. */
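  /* The opcode table encodes the mask as if the first condition were one of
     the odd (xx1) codes; for "ite" it presumably carries mask 0x4.  For an
     even first condition such as EQ, the t/e bits above the terminating 1
     must be flipped: 0x4 becomes 0xC, so "ite eq" assembles to 0xbf0c.  */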
8238 if ((cond & 0x1) == 0x0)
8239 {
8240 unsigned int mask = inst.instruction & 0x000f;
8241
8242 if ((mask & 0x7) == 0)
8243 /* no conversion needed */;
8244 else if ((mask & 0x3) == 0)
8245 mask ^= 0x8;
8246 else if ((mask & 0x1) == 0)
8247 mask ^= 0xC;
8248 else
8249 mask ^= 0xE;
8250
8251 inst.instruction &= 0xfff0;
8252 inst.instruction |= mask;
8253 }
8254
8255 inst.instruction |= cond << 4;
8256}
8257
8258static void
8259do_t_ldmstm (void)
8260{
8261 /* This really doesn't seem worth it. */
8262 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
8263 _("expression too complex"));
8264 constraint (inst.operands[1].writeback,
8265 _("Thumb load/store multiple does not support {reglist}^"));
8266
8267 if (unified_syntax)
8268 {
8269 /* See if we can use a 16-bit instruction. */
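      /* That requires a low base register, a register list confined to
	 r0-r7, and the right writeback behaviour: STMIA must write back,
	 while LDMIA must write back exactly when the base register is not
	 also being loaded.  */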
8270 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
8271 && inst.size_req != 4
8272 && inst.operands[0].reg <= 7
8273 && !(inst.operands[1].imm & ~0xff)
8274 && (inst.instruction == T_MNEM_stmia
8275 ? inst.operands[0].writeback
8276 : (inst.operands[0].writeback
8277 == !(inst.operands[1].imm & (1 << inst.operands[0].reg)))))
8278 {
8279 if (inst.instruction == T_MNEM_stmia
8280 && (inst.operands[1].imm & (1 << inst.operands[0].reg))
8281 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
8282 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8283 inst.operands[0].reg);
8284
8285 inst.instruction = THUMB_OP16 (inst.instruction);
8286 inst.instruction |= inst.operands[0].reg << 8;
8287 inst.instruction |= inst.operands[1].imm;
8288 }
8289 else
8290 {
8291 if (inst.operands[1].imm & (1 << 13))
8292 as_warn (_("SP should not be in register list"));
8293 if (inst.instruction == T_MNEM_stmia)
8294 {
8295 if (inst.operands[1].imm & (1 << 15))
8296 as_warn (_("PC should not be in register list"));
8297 if (inst.operands[1].imm & (1 << inst.operands[0].reg))
8298 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8299 inst.operands[0].reg);
8300 }
8301 else
8302 {
8303 if (inst.operands[1].imm & (1 << 14)
8304 && inst.operands[1].imm & (1 << 15))
8305 as_warn (_("LR and PC should not both be in register list"));
8306 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
8307 && inst.operands[0].writeback)
8308 as_warn (_("base register should not be in register list "
8309 "when written back"));
8310 }
8311 if (inst.instruction < 0xffff)
8312 inst.instruction = THUMB_OP32 (inst.instruction);
8313 inst.instruction |= inst.operands[0].reg << 16;
8314 inst.instruction |= inst.operands[1].imm;
8315 if (inst.operands[0].writeback)
8316 inst.instruction |= WRITE_BACK;
8317 }
8318 }
8319 else
8320 {
8321 constraint (inst.operands[0].reg > 7
8322 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
8323 if (inst.instruction == T_MNEM_stmia)
8324 {
8325 if (!inst.operands[0].writeback)
8326 as_warn (_("this instruction will write back the base register"));
8327 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
8328 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
8329 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8330 inst.operands[0].reg);
8331 }
8332 else
8333 {
8334 if (!inst.operands[0].writeback
8335 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
8336 as_warn (_("this instruction will write back the base register"));
8337 else if (inst.operands[0].writeback
8338 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
8339 as_warn (_("this instruction will not write back the base register"));
8340 }
8341
8342 inst.instruction = THUMB_OP16 (inst.instruction);
8343 inst.instruction |= inst.operands[0].reg << 8;
8344 inst.instruction |= inst.operands[1].imm;
8345 }
8346}
8347
8348static void
8349do_t_ldrex (void)
8350{
8351 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8352 || inst.operands[1].postind || inst.operands[1].writeback
8353 || inst.operands[1].immisreg || inst.operands[1].shifted
8354 || inst.operands[1].negative,
8355 BAD_ADDR_MODE);
8356
8357 inst.instruction |= inst.operands[0].reg << 12;
8358 inst.instruction |= inst.operands[1].reg << 16;
8359 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
8360}
8361
8362static void
8363do_t_ldrexd (void)
8364{
8365 if (!inst.operands[1].present)
8366 {
8367 constraint (inst.operands[0].reg == REG_LR,
8368 _("r14 not allowed as first register "
8369 "when second register is omitted"));
8370 inst.operands[1].reg = inst.operands[0].reg + 1;
8371 }
8372 constraint (inst.operands[0].reg == inst.operands[1].reg,
8373 BAD_OVERLAP);
8374
8375 inst.instruction |= inst.operands[0].reg << 12;
8376 inst.instruction |= inst.operands[1].reg << 8;
8377 inst.instruction |= inst.operands[2].reg << 16;
8378}
8379
8380static void
8381do_t_ldst (void)
8382{
8383 unsigned long opcode;
8384 int Rn;
8385
8386 opcode = inst.instruction;
8387 if (unified_syntax)
8388 {
8389 if (!inst.operands[1].isreg)
8390 {
8391 if (opcode <= 0xffff)
8392 inst.instruction = THUMB_OP32 (opcode);
8393 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
8394 return;
8395 }
8396 if (inst.operands[1].isreg
8397 && !inst.operands[1].writeback
8398 && !inst.operands[1].shifted && !inst.operands[1].postind
8399 && !inst.operands[1].negative && inst.operands[0].reg <= 7
8400 && opcode <= 0xffff
8401 && inst.size_req != 4)
8402 {
8403 /* Insn may have a 16-bit form. */
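	  /* E.g. a low-register [Rn, #imm] load/store uses the generic
	     16-bit immediate form, while ldr rX, [pc, #imm] and
	     ldr/str rX, [sp, #imm] are rewritten below to the dedicated
	     PC/SP-relative opcodes.  */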
8404 Rn = inst.operands[1].reg;
8405 if (inst.operands[1].immisreg)
8406 {
8407 inst.instruction = THUMB_OP16 (opcode);
8408 /* [Rn, Ri] */
8409 if (Rn <= 7 && inst.operands[1].imm <= 7)
8410 goto op16;
8411 }
8412 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
8413 && opcode != T_MNEM_ldrsb)
8414 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
8415 || (Rn == REG_SP && opcode == T_MNEM_str))
8416 {
8417 /* [Rn, #const] */
8418 if (Rn > 7)
8419 {
8420 if (Rn == REG_PC)
8421 {
8422 if (inst.reloc.pc_rel)
8423 opcode = T_MNEM_ldr_pc2;
8424 else
8425 opcode = T_MNEM_ldr_pc;
8426 }
8427 else
8428 {
8429 if (opcode == T_MNEM_ldr)
8430 opcode = T_MNEM_ldr_sp;
8431 else
8432 opcode = T_MNEM_str_sp;
8433 }
8434 inst.instruction = inst.operands[0].reg << 8;
8435 }
8436 else
8437 {
8438 inst.instruction = inst.operands[0].reg;
8439 inst.instruction |= inst.operands[1].reg << 3;
8440 }
8441 inst.instruction |= THUMB_OP16 (opcode);
8442 if (inst.size_req == 2)
8443 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8444 else
8445 inst.relax = opcode;
8446 return;
8447 }
8448 }
8449 /* Definitely a 32-bit variant. */
8450 inst.instruction = THUMB_OP32 (opcode);
8451 inst.instruction |= inst.operands[0].reg << 12;
8452 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
8453 return;
8454 }
8455
8456 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8457
8458 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
8459 {
8460 /* Only [Rn,Rm] is acceptable. */
8461 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
8462 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
8463 || inst.operands[1].postind || inst.operands[1].shifted
8464 || inst.operands[1].negative,
8465 _("Thumb does not support this addressing mode"));
8466 inst.instruction = THUMB_OP16 (inst.instruction);
8467 goto op16;
8468 }
8469
8470 inst.instruction = THUMB_OP16 (inst.instruction);
8471 if (!inst.operands[1].isreg)
8472 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
8473 return;
8474
8475 constraint (!inst.operands[1].preind
8476 || inst.operands[1].shifted
8477 || inst.operands[1].writeback,
8478 _("Thumb does not support this addressing mode"));
8479 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
8480 {
8481 constraint (inst.instruction & 0x0600,
8482 _("byte or halfword not valid for base register"));
8483 constraint (inst.operands[1].reg == REG_PC
8484 && !(inst.instruction & THUMB_LOAD_BIT),
8485 _("r15 based store not allowed"));
8486 constraint (inst.operands[1].immisreg,
8487 _("invalid base register for register offset"));
8488
8489 if (inst.operands[1].reg == REG_PC)
8490 inst.instruction = T_OPCODE_LDR_PC;
8491 else if (inst.instruction & THUMB_LOAD_BIT)
8492 inst.instruction = T_OPCODE_LDR_SP;
8493 else
8494 inst.instruction = T_OPCODE_STR_SP;
8495
8496 inst.instruction |= inst.operands[0].reg << 8;
8497 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8498 return;
8499 }
8500
8501 constraint (inst.operands[1].reg > 7, BAD_HIREG);
8502 if (!inst.operands[1].immisreg)
8503 {
8504 /* Immediate offset. */
8505 inst.instruction |= inst.operands[0].reg;
8506 inst.instruction |= inst.operands[1].reg << 3;
8507 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8508 return;
8509 }
8510
8511 /* Register offset. */
8512 constraint (inst.operands[1].imm > 7, BAD_HIREG);
8513 constraint (inst.operands[1].negative,
8514 _("Thumb does not support this addressing mode"));
8515
8516 op16:
8517 switch (inst.instruction)
8518 {
8519 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
8520 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
8521 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
8522 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
8523 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
8524 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
8525 case 0x5600 /* ldrsb */:
8526 case 0x5e00 /* ldrsh */: break;
8527 default: abort ();
8528 }
8529
8530 inst.instruction |= inst.operands[0].reg;
8531 inst.instruction |= inst.operands[1].reg << 3;
8532 inst.instruction |= inst.operands[1].imm << 6;
8533}
8534
8535static void
8536do_t_ldstd (void)
8537{
8538 if (!inst.operands[1].present)
8539 {
8540 inst.operands[1].reg = inst.operands[0].reg + 1;
8541 constraint (inst.operands[0].reg == REG_LR,
8542 _("r14 not allowed here"));
8543 }
8544 inst.instruction |= inst.operands[0].reg << 12;
8545 inst.instruction |= inst.operands[1].reg << 8;
8546 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
 8548}
8549
8550static void
8551do_t_ldstt (void)
8552{
8553 inst.instruction |= inst.operands[0].reg << 12;
8554 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
8555}
8556
8557static void
8558do_t_mla (void)
8559{
8560 inst.instruction |= inst.operands[0].reg << 8;
8561 inst.instruction |= inst.operands[1].reg << 16;
8562 inst.instruction |= inst.operands[2].reg;
8563 inst.instruction |= inst.operands[3].reg << 12;
8564}
8565
8566static void
8567do_t_mlal (void)
8568{
8569 inst.instruction |= inst.operands[0].reg << 12;
8570 inst.instruction |= inst.operands[1].reg << 8;
8571 inst.instruction |= inst.operands[2].reg << 16;
8572 inst.instruction |= inst.operands[3].reg;
8573}
8574
8575static void
8576do_t_mov_cmp (void)
8577{
8578 if (unified_syntax)
8579 {
8580 int r0off = (inst.instruction == T_MNEM_mov
8581 || inst.instruction == T_MNEM_movs) ? 8 : 16;
8582 unsigned long opcode;
8583 bfd_boolean narrow;
8584 bfd_boolean low_regs;
8585
8586 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
8587 opcode = inst.instruction;
8588 if (current_it_mask)
8589 narrow = opcode != T_MNEM_movs;
8590 else
8591 narrow = opcode != T_MNEM_movs || low_regs;
8592 if (inst.size_req == 4
8593 || inst.operands[1].shifted)
8594 narrow = FALSE;
8595
8596 if (!inst.operands[1].isreg)
8597 {
8598 /* Immediate operand. */
8599 if (current_it_mask == 0 && opcode == T_MNEM_mov)
8600 narrow = 0;
8601 if (low_regs && narrow)
8602 {
8603 inst.instruction = THUMB_OP16 (opcode);
8604 inst.instruction |= inst.operands[0].reg << 8;
8605 if (inst.size_req == 2)
8606 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
8607 else
8608 inst.relax = opcode;
8609 }
8610 else
8611 {
8612 inst.instruction = THUMB_OP32 (inst.instruction);
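	      /* Switch from the register form to the modified-immediate
		 form of the 32-bit opcode: clear bits 25-28 and set bit 28,
		 turning e.g. an 0xeaxxxxxx data-processing opcode into the
		 matching 0xf0xxxxxx one.  */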
8613 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8614 inst.instruction |= inst.operands[0].reg << r0off;
8615 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8616 }
8617 }
8618 else if (!narrow)
8619 {
8620 inst.instruction = THUMB_OP32 (inst.instruction);
8621 inst.instruction |= inst.operands[0].reg << r0off;
8622 encode_thumb32_shifted_operand (1);
8623 }
8624 else
8625 switch (inst.instruction)
8626 {
8627 case T_MNEM_mov:
8628 inst.instruction = T_OPCODE_MOV_HR;
8629 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8630 inst.instruction |= (inst.operands[0].reg & 0x7);
8631 inst.instruction |= inst.operands[1].reg << 3;
8632 break;
8633
8634 case T_MNEM_movs:
8635 /* We know we have low registers at this point.
8636 Generate ADD Rd, Rs, #0. */
8637 inst.instruction = T_OPCODE_ADD_I3;
8638 inst.instruction |= inst.operands[0].reg;
8639 inst.instruction |= inst.operands[1].reg << 3;
8640 break;
8641
8642 case T_MNEM_cmp:
8643 if (low_regs)
8644 {
8645 inst.instruction = T_OPCODE_CMP_LR;
8646 inst.instruction |= inst.operands[0].reg;
8647 inst.instruction |= inst.operands[1].reg << 3;
8648 }
8649 else
8650 {
8651 inst.instruction = T_OPCODE_CMP_HR;
8652 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8653 inst.instruction |= (inst.operands[0].reg & 0x7);
8654 inst.instruction |= inst.operands[1].reg << 3;
8655 }
8656 break;
8657 }
8658 return;
8659 }
8660
8661 inst.instruction = THUMB_OP16 (inst.instruction);
8662 if (inst.operands[1].isreg)
8663 {
8664 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
8665 {
8666 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
8667 since a MOV instruction produces unpredictable results. */
8668 if (inst.instruction == T_OPCODE_MOV_I8)
8669 inst.instruction = T_OPCODE_ADD_I3;
8670 else
8671 inst.instruction = T_OPCODE_CMP_LR;
8672
8673 inst.instruction |= inst.operands[0].reg;
8674 inst.instruction |= inst.operands[1].reg << 3;
8675 }
8676 else
8677 {
8678 if (inst.instruction == T_OPCODE_MOV_I8)
8679 inst.instruction = T_OPCODE_MOV_HR;
8680 else
8681 inst.instruction = T_OPCODE_CMP_HR;
8682 do_t_cpy ();
8683 }
8684 }
8685 else
8686 {
8687 constraint (inst.operands[0].reg > 7,
8688 _("only lo regs allowed with immediate"));
8689 inst.instruction |= inst.operands[0].reg << 8;
8690 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
8691 }
8692}
8693
8694static void
8695do_t_mov16 (void)
8696{
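  /* Scatter the 16-bit value into the T32 movw/movt fields: imm4 (bits
     15-12 of the value) -> bits 19-16, i (bit 11) -> bit 26, imm3 (bits
     10-8) -> bits 14-12, imm8 -> bits 7-0.  E.g. 0xABCD splits into
     imm4 = 0xA, i = 1, imm3 = 0x3, imm8 = 0xCD.  */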
8697 inst.instruction |= inst.operands[0].reg << 8;
8698 inst.instruction |= (inst.operands[1].imm & 0xf000) << 4;
8699 inst.instruction |= (inst.operands[1].imm & 0x0800) << 15;
8700 inst.instruction |= (inst.operands[1].imm & 0x0700) << 4;
8701 inst.instruction |= (inst.operands[1].imm & 0x00ff);
8702}
8703
8704static void
8705do_t_mvn_tst (void)
8706{
8707 if (unified_syntax)
8708 {
8709 int r0off = (inst.instruction == T_MNEM_mvn
8710 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
8711 bfd_boolean narrow;
8712
8713 if (inst.size_req == 4
8714 || inst.instruction > 0xffff
8715 || inst.operands[1].shifted
8716 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
8717 narrow = FALSE;
8718 else if (inst.instruction == T_MNEM_cmn)
8719 narrow = TRUE;
8720 else if (THUMB_SETS_FLAGS (inst.instruction))
8721 narrow = (current_it_mask == 0);
8722 else
8723 narrow = (current_it_mask != 0);
8724
8725 if (!inst.operands[1].isreg)
8726 {
8727 /* For an immediate, we always generate a 32-bit opcode;
8728 section relaxation will shrink it later if possible. */
8729 if (inst.instruction < 0xffff)
8730 inst.instruction = THUMB_OP32 (inst.instruction);
8731 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8732 inst.instruction |= inst.operands[0].reg << r0off;
8733 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8734 }
8735 else
8736 {
8737 /* See if we can do this with a 16-bit instruction. */
8738 if (narrow)
8739 {
8740 inst.instruction = THUMB_OP16 (inst.instruction);
8741 inst.instruction |= inst.operands[0].reg;
8742 inst.instruction |= inst.operands[1].reg << 3;
8743 }
8744 else
8745 {
8746 constraint (inst.operands[1].shifted
8747 && inst.operands[1].immisreg,
8748 _("shift must be constant"));
8749 if (inst.instruction < 0xffff)
8750 inst.instruction = THUMB_OP32 (inst.instruction);
8751 inst.instruction |= inst.operands[0].reg << r0off;
8752 encode_thumb32_shifted_operand (1);
8753 }
8754 }
8755 }
8756 else
8757 {
8758 constraint (inst.instruction > 0xffff
8759 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
8760 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
8761 _("unshifted register required"));
8762 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8763 BAD_HIREG);
8764
8765 inst.instruction = THUMB_OP16 (inst.instruction);
8766 inst.instruction |= inst.operands[0].reg;
8767 inst.instruction |= inst.operands[1].reg << 3;
8768 }
8769}
8770
8771static void
8772do_t_mrs (void)
8773{
8774 int flags;
8775 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
8776 if (flags == 0)
8777 {
8778 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
8779 _("selected processor does not support "
8780 "requested special purpose register"));
8781 }
8782 else
8783 {
8784 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
8785 _("selected processor does not support "
8786 "requested special purpose register %x"));
8787 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
8788 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
8789 _("'CPSR' or 'SPSR' expected"));
8790 }
8791
8792 inst.instruction |= inst.operands[0].reg << 8;
8793 inst.instruction |= (flags & SPSR_BIT) >> 2;
8794 inst.instruction |= inst.operands[1].imm & 0xff;
8795}
8796
8797static void
8798do_t_msr (void)
8799{
8800 int flags;
8801
8802 constraint (!inst.operands[1].isreg,
8803 _("Thumb encoding does not support an immediate here"));
8804 flags = inst.operands[0].imm;
8805 if (flags & ~0xff)
8806 {
8807 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
8808 _("selected processor does not support "
8809 "requested special purpose register"));
8810 }
8811 else
8812 {
8813 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
8814 _("selected processor does not support "
8815 "requested special purpose register"));
8816 flags |= PSR_f;
8817 }
8818 inst.instruction |= (flags & SPSR_BIT) >> 2;
8819 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
8820 inst.instruction |= (flags & 0xff);
8821 inst.instruction |= inst.operands[1].reg << 16;
8822}
8823
8824static void
8825do_t_mul (void)
8826{
8827 if (!inst.operands[2].present)
8828 inst.operands[2].reg = inst.operands[0].reg;
8829
8830 /* There is no 32-bit MULS and no 16-bit MUL. */
8831 if (unified_syntax && inst.instruction == T_MNEM_mul)
8832 {
8833 inst.instruction = THUMB_OP32 (inst.instruction);
8834 inst.instruction |= inst.operands[0].reg << 8;
8835 inst.instruction |= inst.operands[1].reg << 16;
8836 inst.instruction |= inst.operands[2].reg << 0;
8837 }
8838 else
8839 {
8840 constraint (!unified_syntax
8841 && inst.instruction == T_MNEM_muls, BAD_THUMB32);
8842 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8843 BAD_HIREG);
8844
8845 inst.instruction = THUMB_OP16 (inst.instruction);
8846 inst.instruction |= inst.operands[0].reg;
8847
8848 if (inst.operands[0].reg == inst.operands[1].reg)
8849 inst.instruction |= inst.operands[2].reg << 3;
8850 else if (inst.operands[0].reg == inst.operands[2].reg)
8851 inst.instruction |= inst.operands[1].reg << 3;
8852 else
8853 constraint (1, _("dest must overlap one source register"));
8854 }
8855}
8856
8857static void
8858do_t_mull (void)
8859{
8860 inst.instruction |= inst.operands[0].reg << 12;
8861 inst.instruction |= inst.operands[1].reg << 8;
8862 inst.instruction |= inst.operands[2].reg << 16;
8863 inst.instruction |= inst.operands[3].reg;
8864
8865 if (inst.operands[0].reg == inst.operands[1].reg)
8866 as_tsktsk (_("rdhi and rdlo must be different"));
8867}
8868
8869static void
8870do_t_nop (void)
8871{
8872 if (unified_syntax)
8873 {
8874 if (inst.size_req == 4 || inst.operands[0].imm > 15)
8875 {
8876 inst.instruction = THUMB_OP32 (inst.instruction);
8877 inst.instruction |= inst.operands[0].imm;
8878 }
8879 else
8880 {
8881 inst.instruction = THUMB_OP16 (inst.instruction);
8882 inst.instruction |= inst.operands[0].imm << 4;
8883 }
8884 }
8885 else
8886 {
8887 constraint (inst.operands[0].present,
8888 _("Thumb does not support NOP with hints"));
8889 inst.instruction = 0x46c0;
8890 }
8891}
8892
8893static void
8894do_t_neg (void)
8895{
8896 if (unified_syntax)
8897 {
8898 bfd_boolean narrow;
8899
8900 if (THUMB_SETS_FLAGS (inst.instruction))
8901 narrow = (current_it_mask == 0);
8902 else
8903 narrow = (current_it_mask != 0);
8904 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
8905 narrow = FALSE;
8906 if (inst.size_req == 4)
8907 narrow = FALSE;
8908
8909 if (!narrow)
8910 {
8911 inst.instruction = THUMB_OP32 (inst.instruction);
8912 inst.instruction |= inst.operands[0].reg << 8;
8913 inst.instruction |= inst.operands[1].reg << 16;
8914 }
8915 else
8916 {
8917 inst.instruction = THUMB_OP16 (inst.instruction);
8918 inst.instruction |= inst.operands[0].reg;
8919 inst.instruction |= inst.operands[1].reg << 3;
8920 }
8921 }
8922 else
8923 {
8924 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8925 BAD_HIREG);
8926 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8927
8928 inst.instruction = THUMB_OP16 (inst.instruction);
8929 inst.instruction |= inst.operands[0].reg;
8930 inst.instruction |= inst.operands[1].reg << 3;
8931 }
8932}
8933
8934static void
8935do_t_pkhbt (void)
8936{
8937 inst.instruction |= inst.operands[0].reg << 8;
8938 inst.instruction |= inst.operands[1].reg << 16;
8939 inst.instruction |= inst.operands[2].reg;
8940 if (inst.operands[3].present)
8941 {
8942 unsigned int val = inst.reloc.exp.X_add_number;
8943 constraint (inst.reloc.exp.X_op != O_constant,
8944 _("expression too complex"));
8945 inst.instruction |= (val & 0x1c) << 10;
8946 inst.instruction |= (val & 0x03) << 6;
8947 }
8948}
8949
8950static void
8951do_t_pkhtb (void)
8952{
8953 if (!inst.operands[3].present)
8954 inst.instruction &= ~0x00000020;
8955 do_t_pkhbt ();
8956}
8957
8958static void
8959do_t_pld (void)
8960{
8961 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
8962}
8963
8964static void
8965do_t_push_pop (void)
8966{
8967 unsigned mask;
8968
8969 constraint (inst.operands[0].writeback,
8970 _("push/pop do not support {reglist}^"));
8971 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
8972 _("expression too complex"));
8973
8974 mask = inst.operands[0].imm;
8975 if ((mask & ~0xff) == 0)
8976 inst.instruction = THUMB_OP16 (inst.instruction);
8977 else if ((inst.instruction == T_MNEM_push
8978 && (mask & ~0xff) == 1 << REG_LR)
8979 || (inst.instruction == T_MNEM_pop
8980 && (mask & ~0xff) == 1 << REG_PC))
8981 {
8982 inst.instruction = THUMB_OP16 (inst.instruction);
8983 inst.instruction |= THUMB_PP_PC_LR;
8984 mask &= 0xff;
8985 }
8986 else if (unified_syntax)
8987 {
8988 if (mask & (1 << 13))
8989 inst.error = _("SP not allowed in register list");
8990 if (inst.instruction == T_MNEM_push)
8991 {
8992 if (mask & (1 << 15))
8993 inst.error = _("PC not allowed in register list");
8994 }
8995 else
8996 {
8997 if (mask & (1 << 14)
8998 && mask & (1 << 15))
8999 inst.error = _("LR and PC should not both be in register list");
9000 }
9001 if ((mask & (mask - 1)) == 0)
9002 {
9003 /* Single register push/pop implemented as str/ldr. */
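	  /* E.g. "push {r4}": mask == 0x10, so below we emit
	     str r4, [sp, #-4]! as 0xf84d0d04 | (4 << 12) == 0xf84d4d04;
	     "pop {r4}" likewise becomes ldr r4, [sp], #4.  */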
9004 if (inst.instruction == T_MNEM_push)
9005 inst.instruction = 0xf84d0d04; /* str reg, [sp, #-4]! */
9006 else
9007 inst.instruction = 0xf85d0b04; /* ldr reg, [sp], #4 */
 9008 mask = ffs (mask) - 1;
9009 mask <<= 12;
9010 }
9011 else
9012 inst.instruction = THUMB_OP32 (inst.instruction);
9013 }
9014 else
9015 {
9016 inst.error = _("invalid register list to push/pop instruction");
9017 return;
9018 }
9019
9020 inst.instruction |= mask;
9021}
9022
9023static void
9024do_t_rbit (void)
9025{
9026 inst.instruction |= inst.operands[0].reg << 8;
 9027 inst.instruction |= inst.operands[1].reg << 16;
 inst.instruction |= inst.operands[1].reg;	/* Rm is encoded twice, in bits 19-16 and 3-0.  */
9028}
9029
9030static void
9031do_t_rev (void)
9032{
9033 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9034 && inst.size_req != 4)
9035 {
9036 inst.instruction = THUMB_OP16 (inst.instruction);
9037 inst.instruction |= inst.operands[0].reg;
9038 inst.instruction |= inst.operands[1].reg << 3;
9039 }
9040 else if (unified_syntax)
9041 {
9042 inst.instruction = THUMB_OP32 (inst.instruction);
9043 inst.instruction |= inst.operands[0].reg << 8;
9044 inst.instruction |= inst.operands[1].reg << 16;
9045 inst.instruction |= inst.operands[1].reg;
9046 }
9047 else
9048 inst.error = BAD_HIREG;
9049}
9050
9051static void
9052do_t_rsb (void)
9053{
9054 int Rd, Rs;
9055
9056 Rd = inst.operands[0].reg;
9057 Rs = (inst.operands[1].present
9058 ? inst.operands[1].reg /* Rd, Rs, foo */
9059 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9060
9061 inst.instruction |= Rd << 8;
9062 inst.instruction |= Rs << 16;
9063 if (!inst.operands[2].isreg)
9064 {
9065 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9066 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9067 }
9068 else
9069 encode_thumb32_shifted_operand (2);
9070}
9071
9072static void
9073do_t_setend (void)
9074{
9075 constraint (current_it_mask, BAD_NOT_IT);
9076 if (inst.operands[0].imm)
9077 inst.instruction |= 0x8;
9078}
9079
9080static void
9081do_t_shift (void)
9082{
9083 if (!inst.operands[1].present)
9084 inst.operands[1].reg = inst.operands[0].reg;
9085
9086 if (unified_syntax)
9087 {
9088 bfd_boolean narrow;
9089 int shift_kind;
9090
9091 switch (inst.instruction)
9092 {
9093 case T_MNEM_asr:
9094 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
9095 case T_MNEM_lsl:
9096 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
9097 case T_MNEM_lsr:
9098 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
9099 case T_MNEM_ror:
9100 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
9101 default: abort ();
9102 }
9103
9104 if (THUMB_SETS_FLAGS (inst.instruction))
9105 narrow = (current_it_mask == 0);
9106 else
9107 narrow = (current_it_mask != 0);
9108 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9109 narrow = FALSE;
9110 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
9111 narrow = FALSE;
9112 if (inst.operands[2].isreg
9113 && (inst.operands[1].reg != inst.operands[0].reg
9114 || inst.operands[2].reg > 7))
9115 narrow = FALSE;
9116 if (inst.size_req == 4)
9117 narrow = FALSE;
9118
9119 if (!narrow)
9120 {
9121 if (inst.operands[2].isreg)
9122 {
9123 inst.instruction = THUMB_OP32 (inst.instruction);
9124 inst.instruction |= inst.operands[0].reg << 8;
9125 inst.instruction |= inst.operands[1].reg << 16;
9126 inst.instruction |= inst.operands[2].reg;
9127 }
9128 else
9129 {
9130 inst.operands[1].shifted = 1;
9131 inst.operands[1].shift_kind = shift_kind;
9132 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
9133 ? T_MNEM_movs : T_MNEM_mov);
9134 inst.instruction |= inst.operands[0].reg << 8;
9135 encode_thumb32_shifted_operand (1);
9136 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
9137 inst.reloc.type = BFD_RELOC_UNUSED;
9138 }
9139 }
9140 else
9141 {
9142 if (inst.operands[2].isreg)
9143 {
9144 switch (shift_kind)
9145 {
9146 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
9147 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
9148 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
9149 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
9150 default: abort ();
9151 }
9152
9153 inst.instruction |= inst.operands[0].reg;
9154 inst.instruction |= inst.operands[2].reg << 3;
9155 }
9156 else
9157 {
9158 switch (shift_kind)
9159 {
9160 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
9161 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
9162 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
9163 default: abort ();
9164 }
9165 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9166 inst.instruction |= inst.operands[0].reg;
9167 inst.instruction |= inst.operands[1].reg << 3;
9168 }
9169 }
9170 }
9171 else
9172 {
9173 constraint (inst.operands[0].reg > 7
9174 || inst.operands[1].reg > 7, BAD_HIREG);
9175 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9176
9177 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
9178 {
9179 constraint (inst.operands[2].reg > 7, BAD_HIREG);
9180 constraint (inst.operands[0].reg != inst.operands[1].reg,
9181 _("source1 and dest must be same register"));
9182
9183 switch (inst.instruction)
9184 {
9185 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
9186 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
9187 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
9188 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
9189 default: abort ();
9190 }
9191
9192 inst.instruction |= inst.operands[0].reg;
9193 inst.instruction |= inst.operands[2].reg << 3;
9194 }
9195 else
9196 {
9197 switch (inst.instruction)
9198 {
9199 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
9200 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
9201 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
9202 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
9203 default: abort ();
9204 }
9205 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9206 inst.instruction |= inst.operands[0].reg;
9207 inst.instruction |= inst.operands[1].reg << 3;
9208 }
9209 }
9210}
9211
9212static void
9213do_t_simd (void)
9214{
9215 inst.instruction |= inst.operands[0].reg << 8;
9216 inst.instruction |= inst.operands[1].reg << 16;
9217 inst.instruction |= inst.operands[2].reg;
9218}
9219
9220static void
9221do_t_smc (void)
9222{
9223 unsigned int value = inst.reloc.exp.X_add_number;
9224 constraint (inst.reloc.exp.X_op != O_constant,
9225 _("expression too complex"));
9226 inst.reloc.type = BFD_RELOC_UNUSED;
9227 inst.instruction |= (value & 0xf000) >> 12;
9228 inst.instruction |= (value & 0x0ff0);
9229 inst.instruction |= (value & 0x000f) << 16;
9230}
9231
9232static void
9233do_t_ssat (void)
9234{
9235 inst.instruction |= inst.operands[0].reg << 8;
9236 inst.instruction |= inst.operands[1].imm - 1;
9237 inst.instruction |= inst.operands[2].reg << 16;
9238
9239 if (inst.operands[3].present)
9240 {
9241 constraint (inst.reloc.exp.X_op != O_constant,
9242 _("expression too complex"));
9243
9244 if (inst.reloc.exp.X_add_number != 0)
9245 {
9246 if (inst.operands[3].shift_kind == SHIFT_ASR)
9247 inst.instruction |= 0x00200000; /* sh bit */
9248 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
9249 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
9250 }
9251 inst.reloc.type = BFD_RELOC_UNUSED;
9252 }
9253}
9254
9255static void
9256do_t_ssat16 (void)
9257{
9258 inst.instruction |= inst.operands[0].reg << 8;
9259 inst.instruction |= inst.operands[1].imm - 1;
9260 inst.instruction |= inst.operands[2].reg << 16;
9261}
9262
9263static void
9264do_t_strex (void)
9265{
9266 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9267 || inst.operands[2].postind || inst.operands[2].writeback
9268 || inst.operands[2].immisreg || inst.operands[2].shifted
9269 || inst.operands[2].negative,
9270 BAD_ADDR_MODE);
9271
9272 inst.instruction |= inst.operands[0].reg << 8;
9273 inst.instruction |= inst.operands[1].reg << 12;
9274 inst.instruction |= inst.operands[2].reg << 16;
9275 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9276}
9277
9278static void
9279do_t_strexd (void)
9280{
9281 if (!inst.operands[2].present)
9282 inst.operands[2].reg = inst.operands[1].reg + 1;
9283
9284 constraint (inst.operands[0].reg == inst.operands[1].reg
9285 || inst.operands[0].reg == inst.operands[2].reg
9286 || inst.operands[0].reg == inst.operands[3].reg
9287 || inst.operands[1].reg == inst.operands[2].reg,
9288 BAD_OVERLAP);
9289
9290 inst.instruction |= inst.operands[0].reg;
9291 inst.instruction |= inst.operands[1].reg << 12;
9292 inst.instruction |= inst.operands[2].reg << 8;
9293 inst.instruction |= inst.operands[3].reg << 16;
9294}
9295
9296static void
9297do_t_sxtah (void)
9298{
9299 inst.instruction |= inst.operands[0].reg << 8;
9300 inst.instruction |= inst.operands[1].reg << 16;
9301 inst.instruction |= inst.operands[2].reg;
9302 inst.instruction |= inst.operands[3].imm << 4;
9303}
9304
9305static void
9306do_t_sxth (void)
9307{
9308 if (inst.instruction <= 0xffff && inst.size_req != 4
9309 && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9310 && (!inst.operands[2].present || inst.operands[2].imm == 0))
9311 {
9312 inst.instruction = THUMB_OP16 (inst.instruction);
9313 inst.instruction |= inst.operands[0].reg;
9314 inst.instruction |= inst.operands[1].reg << 3;
9315 }
9316 else if (unified_syntax)
9317 {
9318 if (inst.instruction <= 0xffff)
9319 inst.instruction = THUMB_OP32 (inst.instruction);
9320 inst.instruction |= inst.operands[0].reg << 8;
9321 inst.instruction |= inst.operands[1].reg;
9322 inst.instruction |= inst.operands[2].imm << 4;
9323 }
9324 else
9325 {
9326 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
9327 _("Thumb encoding does not support rotation"));
9328 constraint (1, BAD_HIREG);
9329 }
9330}
9331
9332static void
9333do_t_swi (void)
9334{
9335 inst.reloc.type = BFD_RELOC_ARM_SWI;
9336}
9337
9338static void
9339do_t_tb (void)
9340{
9341 int half;
9342
9343 half = (inst.instruction & 0x10) != 0;
9344 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
9345 constraint (inst.operands[0].immisreg,
9346 _("instruction requires register index"));
9347 constraint (inst.operands[0].imm == 15,
9348 _("PC is not a valid index register"));
9349 constraint (!half && inst.operands[0].shifted,
9350 _("instruction does not allow shifted index"));
9351 inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
9352}
9353
9354static void
9355do_t_usat (void)
9356{
9357 inst.instruction |= inst.operands[0].reg << 8;
9358 inst.instruction |= inst.operands[1].imm;
9359 inst.instruction |= inst.operands[2].reg << 16;
9360
9361 if (inst.operands[3].present)
9362 {
9363 constraint (inst.reloc.exp.X_op != O_constant,
9364 _("expression too complex"));
9365 if (inst.reloc.exp.X_add_number != 0)
9366 {
9367 if (inst.operands[3].shift_kind == SHIFT_ASR)
9368 inst.instruction |= 0x00200000; /* sh bit */
9369
9370 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
9371 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
9372 }
9373 inst.reloc.type = BFD_RELOC_UNUSED;
9374 }
9375}
9376
9377static void
9378do_t_usat16 (void)
9379{
9380 inst.instruction |= inst.operands[0].reg << 8;
9381 inst.instruction |= inst.operands[1].imm;
9382 inst.instruction |= inst.operands[2].reg << 16;
9383}
9384
9385/* Neon instruction encoder helpers. */
9386
9387/* Encodings for the different types for various Neon opcodes. */
9388
9389/* An "invalid" code for the following tables. */
9390#define N_INV -1u
9391
9392struct neon_tab_entry
9393{
9394 unsigned integer;
9395 unsigned float_or_poly;
9396 unsigned scalar_or_imm;
9397};
9398
9399/* Map overloaded Neon opcodes to their respective encodings. */
9400#define NEON_ENC_TAB \
9401 X(vabd, 0x0000700, 0x1200d00, N_INV), \
9402 X(vmax, 0x0000600, 0x0000f00, N_INV), \
9403 X(vmin, 0x0000610, 0x0200f00, N_INV), \
9404 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
9405 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
9406 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
9407 X(vadd, 0x0000800, 0x0000d00, N_INV), \
9408 X(vsub, 0x1000800, 0x0200d00, N_INV), \
9409 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
9410 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
9411 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
9412 /* Register variants of the following two instructions are encoded as
9413 vcge / vcgt with the operands reversed. */ \
9414 X(vclt, 0x0000310, 0x1000e00, 0x1b10200), \
9415 X(vcle, 0x0000300, 0x1200e00, 0x1b10180), \
9416 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
9417 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
9418 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
9419 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
9420 X(vmlal, 0x0800800, N_INV, 0x0800240), \
9421 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
9422 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
9423 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
9424 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
9425 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
9426 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
9427 X(vshl, 0x0000400, N_INV, 0x0800510), \
9428 X(vqshl, 0x0000410, N_INV, 0x0800710), \
9429 X(vand, 0x0000110, N_INV, 0x0800030), \
9430 X(vbic, 0x0100110, N_INV, 0x0800030), \
9431 X(veor, 0x1000110, N_INV, N_INV), \
9432 X(vorn, 0x0300110, N_INV, 0x0800010), \
9433 X(vorr, 0x0200110, N_INV, 0x0800010), \
9434 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
9435 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
9436 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
9437 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
9438 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
9439 X(vst1, 0x0000000, 0x0800000, N_INV), \
9440 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
9441 X(vst2, 0x0000100, 0x0800100, N_INV), \
9442 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
9443 X(vst3, 0x0000200, 0x0800200, N_INV), \
9444 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
9445 X(vst4, 0x0000300, 0x0800300, N_INV), \
9446 X(vmovn, 0x1b20200, N_INV, N_INV), \
9447 X(vtrn, 0x1b20080, N_INV, N_INV), \
9448 X(vqmovn, 0x1b20200, N_INV, N_INV), \
9449 X(vqmovun, 0x1b20240, N_INV, N_INV)
9450
9451enum neon_opc
9452{
9453#define X(OPC,I,F,S) N_MNEM_##OPC
9454NEON_ENC_TAB
9455#undef X
9456};
9457
9458static const struct neon_tab_entry neon_enc_tab[] =
9459{
9460#define X(OPC,I,F,S) { (I), (F), (S) }
9461NEON_ENC_TAB
9462#undef X
9463};
9464
9465#define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9466#define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9467#define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9468#define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9469#define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9470#define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9471#define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9472#define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9473#define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
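/* The table is indexed by the N_MNEM_* value, which is presumably what
   inst.instruction holds for a Neon mnemonic before encoding; the
   & 0x0fffffff strips any flag bits kept in the top nibble.  E.g.
   NEON_ENC_INTEGER picks the .integer column of a row while
   NEON_ENC_FLOAT picks the .float_or_poly column of the same row.  */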
9474
9475/* Shapes for instruction operands. Some (e.g. NS_DDD_QQQ) represent multiple
9476 shapes which an instruction can accept. The following mnemonic characters
9477 are used in the tag names for this enumeration:
9478
9479 D - Neon D<n> register
9480 Q - Neon Q<n> register
9481 I - Immediate
9482 S - Scalar
9483 R - ARM register
9484 L - D<n> register list
9485*/
9486
9487enum neon_shape
9488{
9489 NS_DDD_QQQ,
9490 NS_DDD,
9491 NS_QQQ,
9492 NS_DDI_QQI,
9493 NS_DDI,
9494 NS_QQI,
9495 NS_DDS_QQS,
9496 NS_DDS,
9497 NS_QQS,
9498 NS_DD_QQ,
9499 NS_DD,
9500 NS_QQ,
9501 NS_DS_QS,
9502 NS_DS,
9503 NS_QS,
9504 NS_DR_QR,
9505 NS_DR,
9506 NS_QR,
9507 NS_DI_QI,
9508 NS_DI,
9509 NS_QI,
9510 NS_DLD,
9511 NS_DQ,
9512 NS_QD,
9513 NS_DQI,
9514 NS_QDI,
9515 NS_QDD,
9516 NS_QDS,
9517 NS_QQD,
9518 NS_DQQ,
9519 NS_DDDI_QQQI,
9520 NS_DDDI,
9521 NS_QQQI,
9522 NS_IGNORE
9523};
9524
9525/* Bit masks used in type checking given instructions.
9526 'N_EQK' means the type must be the same as (or based on in some way) the key
9527 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
9528 set, various other bits can be set as well in order to modify the meaning of
9529 the type constraint. */
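/* For example, N_EQK | N_DBL marks an operand that must have the key's base
   type at twice the element size (an .s16 key would force .s32 there); see
   neon_modify_type_size below.  */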
9530
9531enum neon_type_mask
9532{
9533 N_S8 = 0x000001,
9534 N_S16 = 0x000002,
9535 N_S32 = 0x000004,
9536 N_S64 = 0x000008,
9537 N_U8 = 0x000010,
9538 N_U16 = 0x000020,
9539 N_U32 = 0x000040,
9540 N_U64 = 0x000080,
9541 N_I8 = 0x000100,
9542 N_I16 = 0x000200,
9543 N_I32 = 0x000400,
9544 N_I64 = 0x000800,
9545 N_8 = 0x001000,
9546 N_16 = 0x002000,
9547 N_32 = 0x004000,
9548 N_64 = 0x008000,
9549 N_P8 = 0x010000,
9550 N_P16 = 0x020000,
9551 N_F32 = 0x040000,
9552 N_KEY = 0x080000, /* key element (main type specifier). */
9553 N_EQK = 0x100000, /* given operand has the same type & size as the key. */
9554 N_DBL = 0x000001, /* if N_EQK, this operand is twice the size. */
9555 N_HLF = 0x000002, /* if N_EQK, this operand is half the size. */
9556 N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed. */
9557 N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
9558 N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer. */
9559 N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float. */
9560 N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only. */
9561 N_UTYP = 0,
9562 N_MAX_NONSPECIAL = N_F32
9563};
9564
9565#define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
9566
9567#define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
9568#define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
9569#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
9570#define N_SUF_32 (N_SU_32 | N_F32)
9571#define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
9572#define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
9573
9574/* Pass this as the first type argument to neon_check_type to ignore types
9575 altogether. */
9576#define N_IGNORE_TYPE (N_KEY | N_EQK)
9577
9578/* Check the shape of a Neon instruction (sizes of registers). Returns the more
9579 specific shape when there are two alternatives. For non-polymorphic shapes,
9580 checking is done during operand parsing, so is not implemented here. */
9581
9582static enum neon_shape
9583neon_check_shape (enum neon_shape req)
9584{
9585#define RR(X) (inst.operands[(X)].isreg)
9586#define RD(X) (inst.operands[(X)].isreg && !inst.operands[(X)].isquad)
9587#define RQ(X) (inst.operands[(X)].isreg && inst.operands[(X)].isquad)
9588#define IM(X) (!inst.operands[(X)].isreg && !inst.operands[(X)].isscalar)
9589#define SC(X) (!inst.operands[(X)].isreg && inst.operands[(X)].isscalar)
9590
9591 /* Fix missing optional operands. FIXME: we don't know at this point how
9592 many arguments we should have, so this makes the assumption that we have
9593 > 1. This is true of all current Neon opcodes, I think, but may not be
9594 true in the future. */
9595 if (!inst.operands[1].present)
9596 inst.operands[1] = inst.operands[0];
9597
9598 switch (req)
9599 {
9600 case NS_DDD_QQQ:
9601 {
9602 if (RD(0) && RD(1) && RD(2))
9603 return NS_DDD;
9604 else if (RQ(0) && RQ(1) && RQ(2))
9605 return NS_QQQ;
9606 else
9607 first_error (_("expected <Qd>, <Qn>, <Qm> or <Dd>, <Dn>, <Dm> "
9608 "operands"));
9609 }
9610 break;
9611
9612 case NS_DDI_QQI:
9613 {
9614 if (RD(0) && RD(1) && IM(2))
9615 return NS_DDI;
9616 else if (RQ(0) && RQ(1) && IM(2))
9617 return NS_QQI;
9618 else
9619 first_error (_("expected <Qd>, <Qn>, #<imm> or <Dd>, <Dn>, #<imm> "
9620 "operands"));
9621 }
9622 break;
9623
9624 case NS_DDDI_QQQI:
9625 {
9626 if (RD(0) && RD(1) && RD(2) && IM(3))
9627 return NS_DDDI;
9628 if (RQ(0) && RQ(1) && RQ(2) && IM(3))
9629 return NS_QQQI;
9630 else
9631 first_error (_("expected <Qd>, <Qn>, <Qm>, #<imm> or "
9632 "<Dd>, <Dn>, <Dm>, #<imm> operands"));
9633 }
9634 break;
9635
9636 case NS_DDS_QQS:
9637 {
9638 if (RD(0) && RD(1) && SC(2))
9639 return NS_DDS;
9640 else if (RQ(0) && RQ(1) && SC(2))
9641 return NS_QQS;
9642 else
9643 first_error (_("expected <Qd>, <Qn>, <Dm[x]> or <Dd>, <Dn>, <Dm[x]> "
9644 "operands"));
9645 }
9646 break;
9647
9648 case NS_DD_QQ:
9649 {
9650 if (RD(0) && RD(1))
9651 return NS_DD;
9652 else if (RQ(0) && RQ(1))
9653 return NS_QQ;
9654 else
9655 first_error (_("expected <Qd>, <Qm> or <Dd>, <Dm> operands"));
9656 }
9657 break;
9658
9659 case NS_DS_QS:
9660 {
9661 if (RD(0) && SC(1))
9662 return NS_DS;
9663 else if (RQ(0) && SC(1))
9664 return NS_QS;
9665 else
9666 first_error (_("expected <Qd>, <Dm[x]> or <Dd>, <Dm[x]> operands"));
9667 }
9668 break;
9669
9670 case NS_DR_QR:
9671 {
9672 if (RD(0) && RR(1))
9673 return NS_DR;
9674 else if (RQ(0) && RR(1))
9675 return NS_QR;
9676 else
9677 first_error (_("expected <Qd>, <Rm> or <Dd>, <Rm> operands"));
9678 }
9679 break;
9680
9681 case NS_DI_QI:
9682 {
9683 if (RD(0) && IM(1))
9684 return NS_DI;
9685 else if (RQ(0) && IM(1))
9686 return NS_QI;
9687 else
9688 first_error (_("expected <Qd>, #<imm> or <Dd>, #<imm> operands"));
9689 }
9690 break;
9691
9692 default:
9693 abort ();
9694 }
9695
9696 return req;
9697#undef RR
9698#undef RD
9699#undef RQ
9700#undef IM
9701#undef SC
9702}
9703
9704static void
9705neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
9706 unsigned *g_size)
9707{
9708 /* Allow modification to be made to types which are constrained to be
9709 based on the key element, based on bits set alongside N_EQK. */
9710 if ((typebits & N_EQK) != 0)
9711 {
9712 if ((typebits & N_HLF) != 0)
9713 *g_size /= 2;
9714 else if ((typebits & N_DBL) != 0)
9715 *g_size *= 2;
9716 if ((typebits & N_SGN) != 0)
9717 *g_type = NT_signed;
9718 else if ((typebits & N_UNS) != 0)
9719 *g_type = NT_unsigned;
9720 else if ((typebits & N_INT) != 0)
9721 *g_type = NT_integer;
9722 else if ((typebits & N_FLT) != 0)
9723 *g_type = NT_float;
9724 else if ((typebits & N_SIZ) != 0)
9725 *g_type = NT_untyped;
9726 }
9727}
9728
9729/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
9730 operand type, i.e. the single type specified in a Neon instruction when it
9731 is the only one given. */
9732
9733static struct neon_type_el
9734neon_type_promote (struct neon_type_el *key, unsigned thisarg)
9735{
9736 struct neon_type_el dest = *key;
9737
9738 assert ((thisarg & N_EQK) != 0);
9739
9740 neon_modify_type_size (thisarg, &dest.type, &dest.size);
9741
9742 return dest;
9743}
9744
9745/* Convert Neon type and size into compact bitmask representation. */
9746
9747static enum neon_type_mask
9748type_chk_of_el_type (enum neon_el_type type, unsigned size)
9749{
9750 switch (type)
9751 {
9752 case NT_untyped:
9753 switch (size)
9754 {
9755 case 8: return N_8;
9756 case 16: return N_16;
9757 case 32: return N_32;
9758 case 64: return N_64;
9759 default: ;
9760 }
9761 break;
9762
9763 case NT_integer:
9764 switch (size)
9765 {
9766 case 8: return N_I8;
9767 case 16: return N_I16;
9768 case 32: return N_I32;
9769 case 64: return N_I64;
9770 default: ;
9771 }
9772 break;
9773
9774 case NT_float:
9775 if (size == 32)
9776 return N_F32;
9777 break;
9778
9779 case NT_poly:
9780 switch (size)
9781 {
9782 case 8: return N_P8;
9783 case 16: return N_P16;
9784 default: ;
9785 }
9786 break;
9787
9788 case NT_signed:
9789 switch (size)
9790 {
9791 case 8: return N_S8;
9792 case 16: return N_S16;
9793 case 32: return N_S32;
9794 case 64: return N_S64;
9795 default: ;
9796 }
9797 break;
9798
9799 case NT_unsigned:
9800 switch (size)
9801 {
9802 case 8: return N_U8;
9803 case 16: return N_U16;
9804 case 32: return N_U32;
9805 case 64: return N_U64;
9806 default: ;
9807 }
9808 break;
9809
9810 default: ;
9811 }
9812
9813 return N_UTYP;
9814}
9815
9816/* Convert compact Neon bitmask type representation to a type and size. Only
9817 handles the case where a single bit is set in the mask. */
9818
9819static int
9820el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
9821 enum neon_type_mask mask)
9822{
9823 if ((mask & N_EQK) != 0)
9824 return FAIL;
9825
9826 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
9827 *size = 8;
9828 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
9829 *size = 16;
9830 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
9831 *size = 32;
9832 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64)) != 0)
9833 *size = 64;
9834 else
9835 return FAIL;
9836
9837 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
9838 *type = NT_signed;
9839 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
9840 *type = NT_unsigned;
9841 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
9842 *type = NT_integer;
9843 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
9844 *type = NT_untyped;
9845 else if ((mask & (N_P8 | N_P16)) != 0)
9846 *type = NT_poly;
9847 else if ((mask & N_F32) != 0)
9848 *type = NT_float;
9849 else
9850 return FAIL;
9851
9852 return SUCCESS;
9853}
9854
9855/* Modify a bitmask of allowed types. This is only needed for type
9856 relaxation. */
9857
9858static unsigned
9859modify_types_allowed (unsigned allowed, unsigned mods)
9860{
9861 unsigned size;
9862 enum neon_el_type type;
9863 unsigned destmask;
9864 int i;
9865
9866 destmask = 0;
9867
9868 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
9869 {
9870 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
9871 {
9872 neon_modify_type_size (mods, &type, &size);
9873 destmask |= type_chk_of_el_type (type, size);
9874 }
9875 }
9876
9877 return destmask;
9878}
9879
9880/* Check type and return type classification.
9881 The manual states (paraphrase): If one datatype is given, it indicates the
9882 type given in:
9883 - the second operand, if there is one
9884 - the operand, if there is no second operand
9885 - the result, if there are no operands.
9886 This isn't quite good enough though, so we use a concept of a "key" datatype
9887 which is set on a per-instruction basis, which is the one which matters when
9888 only one data type is written.
9889 Note: this function has side-effects (e.g. filling in missing operands). All
9890 Neon instructions should call it before performing bit encoding.
9891*/
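/* For instance, do_neon_dyadic_i_su below passes (N_EQK, N_EQK,
   N_SU_32 | N_KEY): the key is the third argument, so a single ".u16"
   written on the mnemonic must be one of the 8/16/32-bit signed or unsigned
   types and is then propagated to both N_EQK operands.  */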
9892
9893static struct neon_type_el
9894neon_check_type (unsigned els, enum neon_shape ns, ...)
9895{
9896 va_list ap;
9897 unsigned i, pass, key_el = 0;
9898 unsigned types[NEON_MAX_TYPE_ELS];
9899 enum neon_el_type k_type = NT_invtype;
9900 unsigned k_size = -1u;
9901 struct neon_type_el badtype = {NT_invtype, -1};
9902 unsigned key_allowed = 0;
9903
9904 /* Optional registers in Neon instructions are always (not) in operand 1.
9905 Fill in the missing operand here, if it was omitted. */
9906 if (els > 1 && !inst.operands[1].present)
9907 inst.operands[1] = inst.operands[0];
9908
9909 /* Suck up all the varargs. */
9910 va_start (ap, ns);
9911 for (i = 0; i < els; i++)
9912 {
9913 unsigned thisarg = va_arg (ap, unsigned);
9914 if (thisarg == N_IGNORE_TYPE)
9915 {
9916 va_end (ap);
9917 return badtype;
9918 }
9919 types[i] = thisarg;
9920 if ((thisarg & N_KEY) != 0)
9921 key_el = i;
9922 }
9923 va_end (ap);
9924
9925 if (inst.vectype.elems > 0)
9926 for (i = 0; i < els; i++)
9927 if (inst.operands[i].vectype.type != NT_invtype)
9928 {
9929 first_error (_("types specified in both the mnemonic and operands"));
9930 return badtype;
9931 }
9932
9933 /* Duplicate inst.vectype elements here as necessary.
9934 FIXME: No idea if this is exactly the same as the ARM assembler,
9935 particularly when an insn takes one register and one non-register
9936 operand. */
9937 if (inst.vectype.elems == 1 && els > 1)
9938 {
9939 unsigned j;
9940 inst.vectype.elems = els;
9941 inst.vectype.el[key_el] = inst.vectype.el[0];
9942 for (j = 0; j < els; j++)
9943 if (j != key_el)
9944 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
9945 types[j]);
9946 }
9947 else if (inst.vectype.elems == 0 && els > 0)
9948 {
9949 unsigned j;
9950 /* No types were given after the mnemonic, so look for types specified
9951 after each operand. We allow some flexibility here; as long as the
9952 "key" operand has a type, we can infer the others. */
9953 for (j = 0; j < els; j++)
9954 if (inst.operands[j].vectype.type != NT_invtype)
9955 inst.vectype.el[j] = inst.operands[j].vectype;
9956
9957 if (inst.operands[key_el].vectype.type != NT_invtype)
9958 {
9959 for (j = 0; j < els; j++)
9960 if (inst.operands[j].vectype.type == NT_invtype)
9961 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
9962 types[j]);
9963 }
9964 else
9965 {
9966 first_error (_("operand types can't be inferred"));
9967 return badtype;
9968 }
9969 }
9970 else if (inst.vectype.elems != els)
9971 {
9972 first_error (_("type specifier has the wrong number of parts"));
9973 return badtype;
9974 }
9975
9976 for (pass = 0; pass < 2; pass++)
9977 {
9978 for (i = 0; i < els; i++)
9979 {
9980 unsigned thisarg = types[i];
9981 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
9982 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
9983 enum neon_el_type g_type = inst.vectype.el[i].type;
9984 unsigned g_size = inst.vectype.el[i].size;
9985
9986 /* Decay more-specific signed & unsigned types to sign-insensitive
9987 integer types if sign-specific variants are unavailable. */
9988 if ((g_type == NT_signed || g_type == NT_unsigned)
9989 && (types_allowed & N_SU_ALL) == 0)
9990 g_type = NT_integer;
9991
9992 /* If only untyped args are allowed, decay any more specific types to
9993 them. Some instructions only care about signs for some element
9994 sizes, so handle that properly. */
9995 if ((g_size == 8 && (types_allowed & N_8) != 0)
9996 || (g_size == 16 && (types_allowed & N_16) != 0)
9997 || (g_size == 32 && (types_allowed & N_32) != 0)
9998 || (g_size == 64 && (types_allowed & N_64) != 0))
9999 g_type = NT_untyped;
10000
10001 if (pass == 0)
10002 {
10003 if ((thisarg & N_KEY) != 0)
10004 {
10005 k_type = g_type;
10006 k_size = g_size;
10007 key_allowed = thisarg & ~N_KEY;
10008 }
10009 }
10010 else
10011 {
10012 if ((thisarg & N_EQK) == 0)
10013 {
10014 unsigned given_type = type_chk_of_el_type (g_type, g_size);
10015
10016 if ((given_type & types_allowed) == 0)
10017 {
10018 first_error (_("bad type in Neon instruction"));
10019 return badtype;
10020 }
10021 }
10022 else
10023 {
10024 enum neon_el_type mod_k_type = k_type;
10025 unsigned mod_k_size = k_size;
10026 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
10027 if (g_type != mod_k_type || g_size != mod_k_size)
10028 {
10029 first_error (_("inconsistent types in Neon instruction"));
10030 return badtype;
10031 }
10032 }
10033 }
10034 }
10035 }
10036
10037 return inst.vectype.el[key_el];
10038}
10039
10040/* Fix up Neon data-processing instructions, ORing in the correct bits for
10041 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
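/* E.g. an encoding with the U bit set such as 0x01000900 becomes 0xf3000900
   in ARM mode, while in Thumb mode bit 24 is first moved to bit 28, giving
   0xff000900.  */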
10042
10043static unsigned
10044neon_dp_fixup (unsigned i)
10045{
10046 if (thumb_mode)
10047 {
10048 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
10049 if (i & (1 << 24))
10050 i |= 1 << 28;
10051
10052 i &= ~(1 << 24);
10053
10054 i |= 0xef000000;
10055 }
10056 else
10057 i |= 0xf2000000;
10058
10059 return i;
10060}
10061
10062/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
10063 (0, 1, 2, 3). */
10064
10065static unsigned
10066neon_logbits (unsigned x)
10067{
10068 return ffs (x) - 4;
10069}
10070
10071#define LOW4(R) ((R) & 0xf)
10072#define HI1(R) (((R) >> 4) & 1)
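/* E.g. d17 is encoded with LOW4 (17) == 1 in the 4-bit register field and
   HI1 (17) == 1 in the corresponding D/N/M extension bit.  */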
10073
10074/* Encode insns with bit pattern:
10075
10076 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
10077 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
10078
10079 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
10080 different meaning for some instruction. */
10081
10082static void
10083neon_three_same (int isquad, int ubit, int size)
10084{
10085 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10086 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10087 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10088 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10089 inst.instruction |= LOW4 (inst.operands[2].reg);
10090 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
10091 inst.instruction |= (isquad != 0) << 6;
10092 inst.instruction |= (ubit != 0) << 24;
10093 if (size != -1)
10094 inst.instruction |= neon_logbits (size) << 20;
10095
10096 inst.instruction = neon_dp_fixup (inst.instruction);
10097}
10098
10099/* Encode instructions of the form:
10100
10101 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
10102 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
10103
10104 Don't write size if SIZE == -1. */
10105
10106static void
10107neon_two_same (int qbit, int ubit, int size)
10108{
10109 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10110 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10111 inst.instruction |= LOW4 (inst.operands[1].reg);
10112 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10113 inst.instruction |= (qbit != 0) << 6;
10114 inst.instruction |= (ubit != 0) << 24;
10115
10116 if (size != -1)
10117 inst.instruction |= neon_logbits (size) << 18;
10118
10119 inst.instruction = neon_dp_fixup (inst.instruction);
10120}
10121
10122/* Neon instruction encoders, in approximate order of appearance. */
10123
10124static void
10125do_neon_dyadic_i_su (void)
10126{
10127 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10128 struct neon_type_el et = neon_check_type (3, rs,
10129 N_EQK, N_EQK, N_SU_32 | N_KEY);
10130 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10131}
10132
10133static void
10134do_neon_dyadic_i64_su (void)
10135{
10136 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10137 struct neon_type_el et = neon_check_type (3, rs,
10138 N_EQK, N_EQK, N_SU_ALL | N_KEY);
10139 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10140}
10141
10142static void
10143neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
10144 unsigned immbits)
10145{
10146 unsigned size = et.size >> 3;
10147 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10148 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10149 inst.instruction |= LOW4 (inst.operands[1].reg);
10150 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10151 inst.instruction |= (isquad != 0) << 6;
10152 inst.instruction |= immbits << 16;
10153 inst.instruction |= (size >> 3) << 7;
10154 inst.instruction |= (size & 0x7) << 19;
10155 if (write_ubit)
10156 inst.instruction |= (uval != 0) << 24;
10157
10158 inst.instruction = neon_dp_fixup (inst.instruction);
10159}
10160
10161static void
10162do_neon_shl_imm (void)
10163{
10164 if (!inst.operands[2].isreg)
10165 {
10166 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10167 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
10168 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10169 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, inst.operands[2].imm);
10170 }
10171 else
10172 {
10173 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10174 struct neon_type_el et = neon_check_type (3, rs,
10175 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
10176 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10177 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10178 }
10179}
10180
10181static void
10182do_neon_qshl_imm (void)
10183{
10184 if (!inst.operands[2].isreg)
10185 {
10186 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10187 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
10188 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10189 neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
10190 inst.operands[2].imm);
10191 }
10192 else
10193 {
10194 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10195 struct neon_type_el et = neon_check_type (3, rs,
10196 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
10197 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10198 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10199 }
10200}
10201
10202static int
10203neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
10204{
10205 /* Handle .I8 and .I64 as pseudo-instructions. */
10206 switch (size)
10207 {
10208 case 8:
10209 /* Unfortunately, this will make everything apart from zero out-of-range.
10210 FIXME is this the intended semantics? There doesn't seem much point in
10211 accepting .I8 if so. */
10212 immediate |= immediate << 8;
10213 size = 16;
10214 break;
10215 case 64:
10216 /* Similarly, anything other than zero will be replicated in bits [63:32],
10217 which probably isn't what we want if we specified .I64. */
10218 if (immediate != 0)
10219 goto bad_immediate;
10220 size = 32;
10221 break;
10222 default: ;
10223 }
10224
10225 if (immediate == (immediate & 0x000000ff))
10226 {
10227 *immbits = immediate;
10228 return (size == 16) ? 0x9 : 0x1;
10229 }
10230 else if (immediate == (immediate & 0x0000ff00))
10231 {
10232 *immbits = immediate >> 8;
10233 return (size == 16) ? 0xb : 0x3;
10234 }
10235 else if (immediate == (immediate & 0x00ff0000))
10236 {
10237 *immbits = immediate >> 16;
10238 return 0x5;
10239 }
10240 else if (immediate == (immediate & 0xff000000))
10241 {
10242 *immbits = immediate >> 24;
10243 return 0x7;
10244 }
10245
10246 bad_immediate:
10247 first_error (_("immediate value out of range"));
10248 return FAIL;
10249}
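
/* Two illustrative inputs for the function above: an immediate of 0x4300
   with size 16 yields *IMMBITS == 0x43 and cmode 0xb, and an immediate of
   0x00ab0000 with size 32 yields *IMMBITS == 0xab and cmode 0x5.  */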
10250
10251/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
10252 A, B, C, D. */
10253
10254static int
10255neon_bits_same_in_bytes (unsigned imm)
10256{
10257 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
10258 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
10259 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
10260 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
10261}
10262
10263/* For immediate of above form, return 0bABCD. */
10264
10265static unsigned
10266neon_squash_bits (unsigned imm)
10267{
10268 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
10269 | ((imm & 0x01000000) >> 21);
10270}
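
/* As an example of the two helpers above, 0xff00ff00 has every byte either
   all-ones or all-zeroes, so neon_bits_same_in_bytes returns nonzero, and
   neon_squash_bits reduces it to 0b1010 (0xa).  */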
10271
10272/* Compress quarter-float representation to 0b...000 abcdefgh. */
10273
10274static unsigned
10275neon_qfloat_bits (unsigned imm)
10276{
10277 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
10278}
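
/* The compression above moves bit 31 of the single-precision value to bit 7
   and bits [25:19] to bits [6:0]; e.g. 0x3fc00000 (1.5f) compresses to
   0x78.  */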
10279
10280/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
10281 the instruction. *OP is passed as the initial value of the op field, and
10282 may be set to a different value depending on the constant (i.e.
10283 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
10284 MVN). */
10285
10286static int
10287neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, unsigned *immbits,
10288 int *op, int size, enum neon_el_type type)
10289{
10290 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
10291 {
10292 if (size != 32 || *op == 1)
10293 return FAIL;
10294 *immbits = neon_qfloat_bits (immlo);
10295 return 0xf;
10296 }
10297 else if (size == 64 && neon_bits_same_in_bytes (immhi)
10298 && neon_bits_same_in_bytes (immlo))
10299 {
10300 /* Check this one first so we don't have to bother with immhi in later
10301 tests. */
10302 if (*op == 1)
10303 return FAIL;
10304 *immbits = (neon_squash_bits (immhi) << 4) | neon_squash_bits (immlo);
10305 *op = 1;
10306 return 0xe;
10307 }
10308 else if (immhi != 0)
10309 return FAIL;
10310 else if (immlo == (immlo & 0x000000ff))
10311 {
10312 /* 64-bit case was already handled. Don't allow MVN with 8-bit
10313 immediate. */
10314 if ((size != 8 && size != 16 && size != 32)
10315 || (size == 8 && *op == 1))
10316 return FAIL;
10317 *immbits = immlo;
10318 return (size == 8) ? 0xe : (size == 16) ? 0x8 : 0x0;
10319 }
10320 else if (immlo == (immlo & 0x0000ff00))
10321 {
10322 if (size != 16 && size != 32)
10323 return FAIL;
10324 *immbits = immlo >> 8;
10325 return (size == 16) ? 0xa : 0x2;
10326 }
10327 else if (immlo == (immlo & 0x00ff0000))
10328 {
10329 if (size != 32)
10330 return FAIL;
10331 *immbits = immlo >> 16;
10332 return 0x4;
10333 }
10334 else if (immlo == (immlo & 0xff000000))
10335 {
10336 if (size != 32)
10337 return FAIL;
10338 *immbits = immlo >> 24;
10339 return 0x6;
10340 }
10341 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
10342 {
10343 if (size != 32)
10344 return FAIL;
10345 *immbits = (immlo >> 8) & 0xff;
10346 return 0xc;
10347 }
10348 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
10349 {
10350 if (size != 32)
10351 return FAIL;
10352 *immbits = (immlo >> 16) & 0xff;
10353 return 0xd;
10354 }
10355
10356 return FAIL;
10357}
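
/* For instance (integer case), IMMLO == 0x0000ab00 with IMMHI == 0 and
   SIZE == 32 sets *IMMBITS to 0xab and returns cmode 0x2, leaving *OP
   untouched.  */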
10358
10359/* Write immediate bits [7:0] to the following locations:
10360
10361 |28/24|23 19|18 16|15 4|3 0|
10362 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
10363
10364 This function is used by VMOV/VMVN/VORR/VBIC. */
10365
10366static void
10367neon_write_immbits (unsigned immbits)
10368{
10369 inst.instruction |= immbits & 0xf;
10370 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
10371 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
10372}
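
/* E.g. IMMBITS == 0x95 (0b10010101) places 0x5 in bits [3:0], 0x1 in
   bits [18:16] and sets bit 24, matching the a/bcd/efgh fields in the
   diagram above.  */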
10373
10374/* Invert low-order SIZE bits of XHI:XLO. */
10375
10376static void
10377neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
10378{
10379 unsigned immlo = xlo ? *xlo : 0;
10380 unsigned immhi = xhi ? *xhi : 0;
10381
10382 switch (size)
10383 {
10384 case 8:
10385 immlo = (~immlo) & 0xff;
10386 break;
10387
10388 case 16:
10389 immlo = (~immlo) & 0xffff;
10390 break;
10391
10392 case 64:
10393 immhi = (~immhi) & 0xffffffff;
10394 /* fall through. */
10395
10396 case 32:
10397 immlo = (~immlo) & 0xffffffff;
10398 break;
10399
10400 default:
10401 abort ();
10402 }
10403
10404 if (xlo)
10405 *xlo = immlo;
10406
10407 if (xhi)
10408 *xhi = immhi;
10409}
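
/* For example, with SIZE == 16 and *XLO == 0x00ff the result is
   *XLO == 0xff00; with SIZE == 64 both words are inverted, so zero in
   XLO and XHI becomes 0xffffffff in each.  */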
10410
10411static void
10412do_neon_logic (void)
10413{
10414 if (inst.operands[2].present && inst.operands[2].isreg)
10415 {
10416 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10417 neon_check_type (3, rs, N_IGNORE_TYPE);
10418 /* U bit and size field were set as part of the bitmask. */
10419 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10420 neon_three_same (rs == NS_QQQ, 0, -1);
10421 }
10422 else
10423 {
10424 enum neon_shape rs = neon_check_shape (NS_DI_QI);
10425 struct neon_type_el et = neon_check_type (1, rs, N_I8 | N_I16 | N_I32
10426 | N_I64 | N_F32);
10427 enum neon_opc opcode = inst.instruction & 0x0fffffff;
10428 unsigned immbits;
10429 int cmode;
10430
10431 if (et.type == NT_invtype)
10432 return;
10433
10434 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10435
10436 switch (opcode)
10437 {
10438 case N_MNEM_vbic:
10439 cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
10440 et.size);
10441 break;
10442
10443 case N_MNEM_vorr:
10444 cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
10445 et.size);
10446 break;
10447
10448 case N_MNEM_vand:
10449 /* Pseudo-instruction for VBIC. */
10450 immbits = inst.operands[1].imm;
10451 neon_invert_size (&immbits, 0, et.size);
10452 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
10453 break;
10454
10455 case N_MNEM_vorn:
10456 /* Pseudo-instruction for VORR. */
10457 immbits = inst.operands[1].imm;
10458 neon_invert_size (&immbits, 0, et.size);
10459 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
10460 break;
10461
10462 default:
10463 abort ();
10464 }
10465
10466 if (cmode == FAIL)
10467 return;
10468
10469 inst.instruction |= (rs == NS_QI) << 6;
10470 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10471 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10472 inst.instruction |= cmode << 8;
10473 neon_write_immbits (immbits);
10474
10475 inst.instruction = neon_dp_fixup (inst.instruction);
10476 }
10477}
10478
10479static void
10480do_neon_bitfield (void)
10481{
10482 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10483 neon_check_type (3, rs, N_IGNORE_TYPE);
10484 neon_three_same (rs == NS_QQQ, 0, -1);
10485}
10486
10487static void
10488neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
10489 unsigned destbits)
10490{
10491 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10492 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
10493 types | N_KEY);
10494 if (et.type == NT_float)
10495 {
10496 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
10497 neon_three_same (rs == NS_QQQ, 0, -1);
10498 }
10499 else
10500 {
10501 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10502 neon_three_same (rs == NS_QQQ, et.type == ubit_meaning, et.size);
10503 }
10504}
10505
10506static void
10507do_neon_dyadic_if_su (void)
10508{
10509 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
10510}
10511
10512static void
10513do_neon_dyadic_if_su_d (void)
10514{
10515 /* This version only allows D registers, but that constraint is enforced during
10516 operand parsing so we don't need to do anything extra here. */
10517 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
10518}
10519
10520static void
10521do_neon_dyadic_if_i (void)
10522{
10523 neon_dyadic_misc (NT_unsigned, N_IF_32, 0);
10524}
10525
10526static void
10527do_neon_dyadic_if_i_d (void)
10528{
10529 neon_dyadic_misc (NT_unsigned, N_IF_32, 0);
10530}
10531
10532static void
10533do_neon_addsub_if_i (void)
10534{
10535 /* The "untyped" case can't happen. Do this to stop the "U" bit being
10536 affected if we specify unsigned args. */
10537 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
10538}
10539
10540/* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
10541 result to be:
10542 V<op> A,B (A is operand 0, B is operand 2)
10543 to mean:
10544 V<op> A,B,A
10545 not:
10546 V<op> A,B,B
10547 so handle that case specially. */
10548
10549static void
10550neon_exchange_operands (void)
10551{
10552 void *scratch = alloca (sizeof (inst.operands[0]));
10553 if (inst.operands[1].present)
10554 {
10555 /* Swap operands[1] and operands[2]. */
10556 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
10557 inst.operands[1] = inst.operands[2];
10558 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
10559 }
10560 else
10561 {
10562 inst.operands[1] = inst.operands[2];
10563 inst.operands[2] = inst.operands[0];
10564 }
10565}
10566
10567static void
10568neon_compare (unsigned regtypes, unsigned immtypes, int invert)
10569{
10570 if (inst.operands[2].isreg)
10571 {
10572 if (invert)
10573 neon_exchange_operands ();
10574 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
10575 }
10576 else
10577 {
10578 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10579 struct neon_type_el et = neon_check_type (2, rs,
10580 N_EQK | N_SIZ, immtypes | N_KEY);
10581
10582 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10583 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10584 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10585 inst.instruction |= LOW4 (inst.operands[1].reg);
10586 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10587 inst.instruction |= (rs == NS_QQI) << 6;
10588 inst.instruction |= (et.type == NT_float) << 10;
10589 inst.instruction |= neon_logbits (et.size) << 18;
10590
10591 inst.instruction = neon_dp_fixup (inst.instruction);
10592 }
10593}
10594
10595static void
10596do_neon_cmp (void)
10597{
10598 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
10599}
10600
10601static void
10602do_neon_cmp_inv (void)
10603{
10604 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
10605}
10606
10607static void
10608do_neon_ceq (void)
10609{
10610 neon_compare (N_IF_32, N_IF_32, FALSE);
10611}
10612
10613/* For multiply instructions, we have the possibility of 16-bit or 32-bit
10614 scalars, which are encoded in 5 bits, M : Rm.
10615 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
10616 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
10617 index in M. */
10618
10619static unsigned
10620neon_scalar_for_mul (unsigned scalar, unsigned elsize)
10621{
10622 unsigned regno = NEON_SCALAR_REG (scalar);
10623 unsigned elno = NEON_SCALAR_INDEX (scalar);
10624
10625 switch (elsize)
10626 {
10627 case 16:
10628 if (regno > 7 || elno > 3)
10629 goto bad_scalar;
10630 return regno | (elno << 3);
10631
10632 case 32:
10633 if (regno > 15 || elno > 1)
10634 goto bad_scalar;
10635 return regno | (elno << 4);
10636
10637 default:
10638 bad_scalar:
10639 first_error (_("scalar out of range for multiply instruction"));
10640 }
10641
10642 return 0;
10643}
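
/* As a small example, a 16-bit scalar with register number 3 and element
   index 2 encodes as 3 | (2 << 3) == 0x13, i.e. the register in Rm[2:0] and
   the index in M:Rm[3] as described above.  */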
10644
10645/* Encode multiply / multiply-accumulate scalar instructions. */
10646
10647static void
10648neon_mul_mac (struct neon_type_el et, int ubit)
10649{
10650 unsigned scalar;
10651
10652 /* Give a more helpful error message if we have an invalid type. */
10653 if (et.type == NT_invtype)
10654 return;
10655
10656 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
10657 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10658 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10659 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10660 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10661 inst.instruction |= LOW4 (scalar);
10662 inst.instruction |= HI1 (scalar) << 5;
10663 inst.instruction |= (et.type == NT_float) << 8;
10664 inst.instruction |= neon_logbits (et.size) << 20;
10665 inst.instruction |= (ubit != 0) << 24;
10666
10667 inst.instruction = neon_dp_fixup (inst.instruction);
10668}
10669
10670static void
10671do_neon_mac_maybe_scalar (void)
10672{
10673 if (inst.operands[2].isscalar)
10674 {
10675 enum neon_shape rs = neon_check_shape (NS_DDS_QQS);
10676 struct neon_type_el et = neon_check_type (3, rs,
10677 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
10678 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
10679 neon_mul_mac (et, rs == NS_QQS);
10680 }
10681 else
10682 do_neon_dyadic_if_i ();
10683}
10684
10685static void
10686do_neon_tst (void)
10687{
10688 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10689 struct neon_type_el et = neon_check_type (3, rs,
10690 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
10691 neon_three_same (rs == NS_QQQ, 0, et.size);
10692}
10693
10694/* VMUL with 3 registers allows the P8 type. The scalar version supports the
10695 same types as the MAC equivalents. The polynomial type for this instruction
10696 is encoded the same as the integer type. */
10697
10698static void
10699do_neon_mul (void)
10700{
10701 if (inst.operands[2].isscalar)
10702 do_neon_mac_maybe_scalar ();
10703 else
10704 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
10705}
10706
10707static void
10708do_neon_qdmulh (void)
10709{
10710 if (inst.operands[2].isscalar)
10711 {
10712 enum neon_shape rs = neon_check_shape (NS_DDS_QQS);
10713 struct neon_type_el et = neon_check_type (3, rs,
10714 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
10715 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
10716 neon_mul_mac (et, rs == NS_QQS);
10717 }
10718 else
10719 {
10720 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10721 struct neon_type_el et = neon_check_type (3, rs,
10722 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
10723 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10724 /* The U bit (rounding) comes from bit mask. */
10725 neon_three_same (rs == NS_QQQ, 0, et.size);
10726 }
10727}
10728
10729static void
10730do_neon_fcmp_absolute (void)
10731{
10732 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10733 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
10734 /* Size field comes from bit mask. */
10735 neon_three_same (rs == NS_QQQ, 1, -1);
10736}
10737
10738static void
10739do_neon_fcmp_absolute_inv (void)
10740{
10741 neon_exchange_operands ();
10742 do_neon_fcmp_absolute ();
10743}
10744
10745static void
10746do_neon_step (void)
10747{
10748 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10749 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
10750 neon_three_same (rs == NS_QQQ, 0, -1);
10751}
10752
10753static void
10754do_neon_abs_neg (void)
10755{
10756 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10757 struct neon_type_el et = neon_check_type (3, rs,
10758 N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
10759 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10760 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10761 inst.instruction |= LOW4 (inst.operands[1].reg);
10762 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10763 inst.instruction |= (rs == NS_QQ) << 6;
10764 inst.instruction |= (et.type == NT_float) << 10;
10765 inst.instruction |= neon_logbits (et.size) << 18;
10766
10767 inst.instruction = neon_dp_fixup (inst.instruction);
10768}
10769
10770static void
10771do_neon_sli (void)
10772{
10773 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10774 struct neon_type_el et = neon_check_type (2, rs,
10775 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
10776 int imm = inst.operands[2].imm;
10777 constraint (imm < 0 || (unsigned)imm >= et.size,
10778 _("immediate out of range for insert"));
10779 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, imm);
10780}
10781
10782static void
10783do_neon_sri (void)
10784{
10785 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10786 struct neon_type_el et = neon_check_type (2, rs,
10787 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
10788 int imm = inst.operands[2].imm;
10789 constraint (imm < 1 || (unsigned)imm > et.size,
10790 _("immediate out of range for insert"));
10791 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, et.size - imm);
10792}
10793
10794static void
10795do_neon_qshlu_imm (void)
10796{
10797 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10798 struct neon_type_el et = neon_check_type (2, rs,
10799 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
10800 int imm = inst.operands[2].imm;
10801 constraint (imm < 0 || (unsigned)imm >= et.size,
10802 _("immediate out of range for shift"));
10803 /* Only encodes the 'U present' variant of the instruction.
10804 In this case, signed types have OP (bit 8) set to 0.
10805 Unsigned types have OP set to 1. */
10806 inst.instruction |= (et.type == NT_unsigned) << 8;
10807 /* The rest of the bits are the same as other immediate shifts. */
10808 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, imm);
10809}
10810
10811static void
10812do_neon_qmovn (void)
10813{
10814 struct neon_type_el et = neon_check_type (2, NS_DQ,
10815 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
10816 /* Saturating move where operands can be signed or unsigned, and the
10817 destination has the same signedness. */
10818 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10819 if (et.type == NT_unsigned)
10820 inst.instruction |= 0xc0;
10821 else
10822 inst.instruction |= 0x80;
10823 neon_two_same (0, 1, et.size / 2);
10824}
10825
10826static void
10827do_neon_qmovun (void)
10828{
10829 struct neon_type_el et = neon_check_type (2, NS_DQ,
10830 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
10831 /* Saturating move with unsigned results. Operands must be signed. */
10832 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10833 neon_two_same (0, 1, et.size / 2);
10834}
10835
10836static void
10837do_neon_rshift_sat_narrow (void)
10838{
10839 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10840 or unsigned. If operands are unsigned, results must also be unsigned. */
10841 struct neon_type_el et = neon_check_type (2, NS_DQI,
10842 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
10843 int imm = inst.operands[2].imm;
10844 /* This gets the bounds check, size encoding and immediate bits calculation
10845 right. */
10846 et.size /= 2;
10847
10848 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
10849 VQMOVN.I<size> <Dd>, <Qm>. */
10850 if (imm == 0)
10851 {
10852 inst.operands[2].present = 0;
10853 inst.instruction = N_MNEM_vqmovn;
10854 do_neon_qmovn ();
10855 return;
10856 }
10857
10858 constraint (imm < 1 || (unsigned)imm > et.size,
10859 _("immediate out of range"));
10860 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
10861}
10862
10863static void
10864do_neon_rshift_sat_narrow_u (void)
10865{
10866 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10867 or unsigned. If operands are unsigned, results must also be unsigned. */
10868 struct neon_type_el et = neon_check_type (2, NS_DQI,
10869 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
10870 int imm = inst.operands[2].imm;
10871 /* This gets the bounds check, size encoding and immediate bits calculation
10872 right. */
10873 et.size /= 2;
10874
10875 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
10876 VQMOVUN.I<size> <Dd>, <Qm>. */
10877 if (imm == 0)
10878 {
10879 inst.operands[2].present = 0;
10880 inst.instruction = N_MNEM_vqmovun;
10881 do_neon_qmovun ();
10882 return;
10883 }
10884
10885 constraint (imm < 1 || (unsigned)imm > et.size,
10886 _("immediate out of range"));
10887 /* FIXME: The manual is kind of unclear about what value U should have in
10888 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
10889 must be 1. */
10890 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
10891}
10892
10893static void
10894do_neon_movn (void)
10895{
10896 struct neon_type_el et = neon_check_type (2, NS_DQ,
10897 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
10898 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10899 neon_two_same (0, 1, et.size / 2);
10900}
10901
10902static void
10903do_neon_rshift_narrow (void)
10904{
10905 struct neon_type_el et = neon_check_type (2, NS_DQI,
10906 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
10907 int imm = inst.operands[2].imm;
10908 /* This gets the bounds check, size encoding and immediate bits calculation
10909 right. */
10910 et.size /= 2;
10911
10912 /* If immediate is zero then we are a pseudo-instruction for
10913 VMOVN.I<size> <Dd>, <Qm>. */
10914 if (imm == 0)
10915 {
10916 inst.operands[2].present = 0;
10917 inst.instruction = N_MNEM_vmovn;
10918 do_neon_movn ();
10919 return;
10920 }
10921
10922 constraint (imm < 1 || (unsigned)imm > et.size,
10923 _("immediate out of range for narrowing operation"));
10924 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
10925}
10926
10927static void
10928do_neon_shll (void)
10929{
10930 /* FIXME: Type checking when lengthening. */
10931 struct neon_type_el et = neon_check_type (2, NS_QDI,
10932 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
10933 unsigned imm = inst.operands[2].imm;
10934
10935 if (imm == et.size)
10936 {
10937 /* Maximum shift variant. */
10938 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10939 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10940 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10941 inst.instruction |= LOW4 (inst.operands[1].reg);
10942 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10943 inst.instruction |= neon_logbits (et.size) << 18;
10944
10945 inst.instruction = neon_dp_fixup (inst.instruction);
10946 }
10947 else
10948 {
10949 /* A more-specific type check for non-max versions. */
10950 et = neon_check_type (2, NS_QDI,
10951 N_EQK | N_DBL, N_SU_32 | N_KEY);
10952 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10953 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
10954 }
10955}
10956
10957/* Check the various types for the VCVT instruction, and return the one that
10958 the current instruction is. */
10959
10960static int
10961neon_cvt_flavour (enum neon_shape rs)
10962{
10963#define CVT_VAR(C,X,Y) \
10964 et = neon_check_type (2, rs, (X), (Y)); \
10965 if (et.type != NT_invtype) \
10966 { \
10967 inst.error = NULL; \
10968 return (C); \
10969 }
10970 struct neon_type_el et;
10971
10972 CVT_VAR (0, N_S32, N_F32);
10973 CVT_VAR (1, N_U32, N_F32);
10974 CVT_VAR (2, N_F32, N_S32);
10975 CVT_VAR (3, N_F32, N_U32);
10976
10977 return -1;
10978#undef CVT_VAR
10979}
10980
10981static void
10982do_neon_cvt (void)
10983{
10984 /* Fixed-point conversion with #0 immediate is encoded as an integer
10985 conversion. */
10986 if (inst.operands[2].present && inst.operands[2].imm != 0)
10987 {
10988 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10989 int flavour = neon_cvt_flavour (rs);
10990 unsigned immbits = 32 - inst.operands[2].imm;
10991 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
10992 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10993 if (flavour != -1)
10994 inst.instruction |= enctab[flavour];
10995 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10996 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10997 inst.instruction |= LOW4 (inst.operands[1].reg);
10998 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10999 inst.instruction |= (rs == NS_QQI) << 6;
11000 inst.instruction |= 1 << 21;
11001 inst.instruction |= immbits << 16;
11002 }
11003 else
11004 {
11005 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11006 int flavour = neon_cvt_flavour (rs);
11007 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
11008 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11009 if (flavour != -1)
11010 inst.instruction |= enctab[flavour];
11011 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11012 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11013 inst.instruction |= LOW4 (inst.operands[1].reg);
11014 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11015 inst.instruction |= (rs == NS_QQ) << 6;
11016 inst.instruction |= 2 << 18;
11017 }
11018 inst.instruction = neon_dp_fixup (inst.instruction);
11019}
11020
11021static void
11022neon_move_immediate (void)
11023{
11024 enum neon_shape rs = neon_check_shape (NS_DI_QI);
11025 struct neon_type_el et = neon_check_type (1, rs,
11026 N_I8 | N_I16 | N_I32 | N_I64 | N_F32);
11027 unsigned immlo, immhi = 0, immbits;
11028 int op, cmode;
11029
11030 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
11031 op = (inst.instruction & (1 << 5)) != 0;
11032
11033 immlo = inst.operands[1].imm;
11034 if (inst.operands[1].regisimm)
11035 immhi = inst.operands[1].reg;
11036
11037 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
11038 _("immediate has bits set outside the operand size"));
11039
11040 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
11041 et.size, et.type)) == FAIL)
11042 {
11043 /* Invert relevant bits only. */
11044 neon_invert_size (&immlo, &immhi, et.size);
11045 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
11046 with one or the other; those cases are caught by
11047 neon_cmode_for_move_imm. */
11048 op = !op;
11049 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
11050 et.size, et.type)) == FAIL)
11051 {
11052 first_error (_("immediate out of range"));
11053 return;
11054 }
11055 }
11056
11057 inst.instruction &= ~(1 << 5);
11058 inst.instruction |= op << 5;
11059
11060 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11061 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11062 inst.instruction |= (rs == NS_QI) << 6;
11063 inst.instruction |= cmode << 8;
11064
11065 neon_write_immbits (immbits);
11066}
11067
11068static void
11069do_neon_mvn (void)
11070{
11071 if (inst.operands[1].isreg)
11072 {
11073 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11074
11075 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11076 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11077 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11078 inst.instruction |= LOW4 (inst.operands[1].reg);
11079 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11080 inst.instruction |= (rs == NS_QQ) << 6;
11081 }
11082 else
11083 {
11084 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11085 neon_move_immediate ();
11086 }
11087
11088 inst.instruction = neon_dp_fixup (inst.instruction);
11089}
11090
11091/* Encode instructions of the form:
11092
11093 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11094 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
11095
11096*/
11097
11098static void
11099neon_mixed_length (struct neon_type_el et, unsigned size)
11100{
11101 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11102 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11103 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11104 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11105 inst.instruction |= LOW4 (inst.operands[2].reg);
11106 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11107 inst.instruction |= (et.type == NT_unsigned) << 24;
11108 inst.instruction |= neon_logbits (size) << 20;
11109
11110 inst.instruction = neon_dp_fixup (inst.instruction);
11111}
11112
11113static void
11114do_neon_dyadic_long (void)
11115{
11116 /* FIXME: Type checking for lengthening op. */
11117 struct neon_type_el et = neon_check_type (3, NS_QDD,
11118 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
11119 neon_mixed_length (et, et.size);
11120}
11121
11122static void
11123do_neon_abal (void)
11124{
11125 struct neon_type_el et = neon_check_type (3, NS_QDD,
11126 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
11127 neon_mixed_length (et, et.size);
11128}
11129
11130static void
11131neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
11132{
11133 if (inst.operands[2].isscalar)
11134 {
11135 struct neon_type_el et = neon_check_type (3, NS_QDS,
11136 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
11137 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11138 neon_mul_mac (et, et.type == NT_unsigned);
11139 }
11140 else
11141 {
11142 struct neon_type_el et = neon_check_type (3, NS_QDD,
11143 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
11144 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11145 neon_mixed_length (et, et.size);
11146 }
11147}
11148
11149static void
11150do_neon_mac_maybe_scalar_long (void)
11151{
11152 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
11153}
11154
11155static void
11156do_neon_dyadic_wide (void)
11157{
11158 struct neon_type_el et = neon_check_type (3, NS_QQD,
11159 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
11160 neon_mixed_length (et, et.size);
11161}
11162
11163static void
11164do_neon_dyadic_narrow (void)
11165{
11166 struct neon_type_el et = neon_check_type (3, NS_QDD,
11167 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
11168 neon_mixed_length (et, et.size / 2);
11169}
11170
11171static void
11172do_neon_mul_sat_scalar_long (void)
11173{
11174 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
11175}
11176
11177static void
11178do_neon_vmull (void)
11179{
11180 if (inst.operands[2].isscalar)
11181 do_neon_mac_maybe_scalar_long ();
11182 else
11183 {
11184 struct neon_type_el et = neon_check_type (3, NS_QDD,
11185 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
11186 if (et.type == NT_poly)
11187 inst.instruction = NEON_ENC_POLY (inst.instruction);
11188 else
11189 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11190 /* For polynomial encoding, size field must be 0b00 and the U bit must be
11191 zero. Should be OK as-is. */
11192 neon_mixed_length (et, et.size);
11193 }
11194}
11195
11196static void
11197do_neon_ext (void)
11198{
11199 enum neon_shape rs = neon_check_shape (NS_DDDI_QQQI);
11200 struct neon_type_el et = neon_check_type (3, rs,
11201 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
11202 unsigned imm = (inst.operands[3].imm * et.size) / 8;
11203 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11204 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11205 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11206 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11207 inst.instruction |= LOW4 (inst.operands[2].reg);
11208 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11209 inst.instruction |= (rs == NS_QQQI) << 6;
11210 inst.instruction |= imm << 8;
11211
11212 inst.instruction = neon_dp_fixup (inst.instruction);
11213}
11214
11215static void
11216do_neon_rev (void)
11217{
11218 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11219 struct neon_type_el et = neon_check_type (2, rs,
11220 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11221 unsigned op = (inst.instruction >> 7) & 3;
11222 /* N (width of reversed regions) is encoded as part of the bitmask. We
11223 extract it here to check that the elements to be reversed are smaller than
11224 the reversal region; otherwise we'd get a reserved instruction. */
11225 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
11226 assert (elsize != 0);
11227 constraint (et.size >= elsize,
11228 _("elements must be smaller than reversal region"));
11229 neon_two_same (rs == NS_QQ, 1, et.size);
11230}
11231
11232static void
11233do_neon_dup (void)
11234{
11235 if (inst.operands[1].isscalar)
11236 {
11237 enum neon_shape rs = neon_check_shape (NS_DS_QS);
11238 struct neon_type_el et = neon_check_type (2, rs,
11239 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11240 unsigned sizebits = et.size >> 3;
11241 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
11242 int logsize = neon_logbits (et.size);
11243 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
11244 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11245 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11246 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11247 inst.instruction |= LOW4 (dm);
11248 inst.instruction |= HI1 (dm) << 5;
11249 inst.instruction |= (rs == NS_QS) << 6;
11250 inst.instruction |= x << 17;
11251 inst.instruction |= sizebits << 16;
11252
11253 inst.instruction = neon_dp_fixup (inst.instruction);
11254 }
11255 else
11256 {
11257 enum neon_shape rs = neon_check_shape (NS_DR_QR);
11258 struct neon_type_el et = neon_check_type (1, rs,
11259 N_8 | N_16 | N_32 | N_KEY);
11260 unsigned save_cond = inst.instruction & 0xf0000000;
11261 /* Duplicate ARM register to lanes of vector. */
11262 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
11263 switch (et.size)
11264 {
11265 case 8: inst.instruction |= 0x400000; break;
11266 case 16: inst.instruction |= 0x000020; break;
11267 case 32: inst.instruction |= 0x000000; break;
11268 default: break;
11269 }
11270 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
11271 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
11272 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
11273 inst.instruction |= (rs == NS_QR) << 21;
11274 /* The encoding for this instruction is identical for the ARM and Thumb
11275 variants, except for the condition field. */
11276 if (thumb_mode)
11277 inst.instruction |= 0xe0000000;
11278 else
11279 inst.instruction |= save_cond;
11280 }
11281}
11282
11283/* VMOV has particularly many variations. It can be one of:
11284 0. VMOV<c><q> <Qd>, <Qm>
11285 1. VMOV<c><q> <Dd>, <Dm>
11286 (Register operations, which are VORR with Rm = Rn.)
11287 2. VMOV<c><q>.<dt> <Qd>, #<imm>
11288 3. VMOV<c><q>.<dt> <Dd>, #<imm>
11289 (Immediate loads.)
11290 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
11291 (ARM register to scalar.)
11292 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
11293 (Two ARM registers to vector.)
11294 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
11295 (Scalar to ARM register.)
11296 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
11297 (Vector to two ARM registers.)
11298
11299 We should have just enough information to be able to disambiguate most of
11300 these, apart from "Two ARM registers to vector" and "Vector to two ARM
11301 registers" cases. For these, abuse the .regisimm operand field to signify a
11302 Neon register.
11303
11304 All the encoded bits are hardcoded by this function.
11305
11306 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
11307 can specify a type where it doesn't make sense to, and it is ignored).
11308*/
11309
11310static void
11311do_neon_mov (void)
11312{
11313 int nargs = inst.operands[0].present + inst.operands[1].present
11314 + inst.operands[2].present;
11315 unsigned save_cond = thumb_mode ? 0xe0000000 : inst.instruction & 0xf0000000;
11316
11317 switch (nargs)
11318 {
11319 case 2:
11320 /* Cases 0, 1, 2, 3, 4, 6. */
11321 if (inst.operands[1].isscalar)
11322 {
11323 /* Case 6. */
11324 struct neon_type_el et = neon_check_type (2, NS_IGNORE,
11325 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
11326 unsigned logsize = neon_logbits (et.size);
11327 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
11328 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
11329 unsigned abcdebits = 0;
11330
11331 constraint (et.type == NT_invtype, _("bad type for scalar"));
11332 constraint (x >= 64 / et.size, _("scalar index out of range"));
11333
11334 switch (et.size)
11335 {
11336 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
11337 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
11338 case 32: abcdebits = 0x00; break;
11339 default: ;
11340 }
11341
11342 abcdebits |= x << logsize;
11343 inst.instruction = save_cond;
11344 inst.instruction |= 0xe100b10;
11345 inst.instruction |= LOW4 (dn) << 16;
11346 inst.instruction |= HI1 (dn) << 7;
11347 inst.instruction |= inst.operands[0].reg << 12;
11348 inst.instruction |= (abcdebits & 3) << 5;
11349 inst.instruction |= (abcdebits >> 2) << 21;
11350 }
11351 else if (inst.operands[1].isreg)
11352 {
11353 /* Cases 0, 1, 4. */
11354 if (inst.operands[0].isscalar)
11355 {
11356 /* Case 4. */
11357 unsigned bcdebits = 0;
11358 struct neon_type_el et = neon_check_type (2, NS_IGNORE,
11359 N_8 | N_16 | N_32 | N_KEY, N_EQK);
11360 int logsize = neon_logbits (et.size);
11361 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
11362 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
11363
11364 constraint (et.type == NT_invtype, _("bad type for scalar"));
11365 constraint (x >= 64 / et.size, _("scalar index out of range"));
11366
11367 switch (et.size)
11368 {
11369 case 8: bcdebits = 0x8; break;
11370 case 16: bcdebits = 0x1; break;
11371 case 32: bcdebits = 0x0; break;
11372 default: ;
11373 }
11374
11375 bcdebits |= x << logsize;
11376 inst.instruction = save_cond;
11377 inst.instruction |= 0xe000b10;
11378 inst.instruction |= LOW4 (dn) << 16;
11379 inst.instruction |= HI1 (dn) << 7;
11380 inst.instruction |= inst.operands[1].reg << 12;
11381 inst.instruction |= (bcdebits & 3) << 5;
11382 inst.instruction |= (bcdebits >> 2) << 21;
11383 }
11384 else
11385 {
11386 /* Cases 0, 1. */
11387 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11388 /* The architecture manual I have doesn't explicitly state which
11389 value the U bit should have for register->register moves, but
11390 the equivalent VORR instruction has U = 0, so do that. */
11391 inst.instruction = 0x0200110;
11392 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11393 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11394 inst.instruction |= LOW4 (inst.operands[1].reg);
11395 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11396 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11397 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11398 inst.instruction |= (rs == NS_QQ) << 6;
11399
11400 inst.instruction = neon_dp_fixup (inst.instruction);
11401 }
11402 }
11403 else
11404 {
11405 /* Cases 2, 3. */
11406 inst.instruction = 0x0800010;
11407 neon_move_immediate ();
11408 inst.instruction = neon_dp_fixup (inst.instruction);
11409 }
11410 break;
11411
11412 case 3:
11413 /* Cases 5, 7. */
11414 if (inst.operands[0].regisimm)
11415 {
11416 /* Case 5. */
11417 inst.instruction = save_cond;
11418 inst.instruction |= 0xc400b10;
11419 inst.instruction |= LOW4 (inst.operands[0].reg);
11420 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
11421 inst.instruction |= inst.operands[1].reg << 12;
11422 inst.instruction |= inst.operands[2].reg << 16;
11423 }
11424 else
11425 {
11426 /* Case 7. */
11427 inst.instruction = save_cond;
11428 inst.instruction |= 0xc500b10;
11429 inst.instruction |= inst.operands[0].reg << 12;
11430 inst.instruction |= inst.operands[1].reg << 16;
11431 inst.instruction |= LOW4 (inst.operands[2].reg);
11432 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11433 }
11434 break;
11435
11436 default:
11437 abort ();
11438 }
11439}
11440
11441static void
11442do_neon_rshift_round_imm (void)
11443{
11444 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
11445 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
11446 int imm = inst.operands[2].imm;
11447
11448 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
11449 if (imm == 0)
11450 {
11451 inst.operands[2].present = 0;
11452 do_neon_mov ();
11453 return;
11454 }
11455
11456 constraint (imm < 1 || (unsigned)imm > et.size,
11457 _("immediate out of range for shift"));
11458 neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
11459 et.size - imm);
11460}
11461
11462static void
11463do_neon_movl (void)
11464{
11465 struct neon_type_el et = neon_check_type (2, NS_QD,
11466 N_EQK | N_DBL, N_SU_32 | N_KEY);
11467 unsigned sizebits = et.size >> 3;
11468 inst.instruction |= sizebits << 19;
11469 neon_two_same (0, et.type == NT_unsigned, -1);
11470}
11471
11472static void
11473do_neon_trn (void)
11474{
11475 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11476 struct neon_type_el et = neon_check_type (2, rs,
11477 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11478 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11479 neon_two_same (rs == NS_QQ, 1, et.size);
11480}
11481
11482static void
11483do_neon_zip_uzp (void)
11484{
11485 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11486 struct neon_type_el et = neon_check_type (2, rs,
11487 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11488 if (rs == NS_DD && et.size == 32)
11489 {
11490 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
11491 inst.instruction = N_MNEM_vtrn;
11492 do_neon_trn ();
11493 return;
11494 }
11495 neon_two_same (rs == NS_QQ, 1, et.size);
11496}
11497
11498static void
11499do_neon_sat_abs_neg (void)
11500{
11501 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11502 struct neon_type_el et = neon_check_type (2, rs,
11503 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
11504 neon_two_same (rs == NS_QQ, 1, et.size);
11505}
11506
11507static void
11508do_neon_pair_long (void)
11509{
11510 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11511 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
11512 /* Unsigned is encoded in the OP field (bit 7) for these instructions. */
11513 inst.instruction |= (et.type == NT_unsigned) << 7;
11514 neon_two_same (rs == NS_QQ, 1, et.size);
11515}
11516
11517static void
11518do_neon_recip_est (void)
11519{
11520 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11521 struct neon_type_el et = neon_check_type (2, rs,
11522 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
11523 inst.instruction |= (et.type == NT_float) << 8;
11524 neon_two_same (rs == NS_QQ, 1, et.size);
11525}
11526
11527static void
11528do_neon_cls (void)
11529{
11530 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11531 struct neon_type_el et = neon_check_type (2, rs,
11532 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
11533 neon_two_same (rs == NS_QQ, 1, et.size);
11534}
11535
11536static void
11537do_neon_clz (void)
11538{
11539 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11540 struct neon_type_el et = neon_check_type (2, rs,
11541 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
11542 neon_two_same (rs == NS_QQ, 1, et.size);
11543}
11544
11545static void
11546do_neon_cnt (void)
11547{
11548 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11549 struct neon_type_el et = neon_check_type (2, rs,
11550 N_EQK | N_INT, N_8 | N_KEY);
11551 neon_two_same (rs == NS_QQ, 1, et.size);
11552}
11553
11554static void
11555do_neon_swp (void)
11556{
11557 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11558 neon_two_same (rs == NS_QQ, 1, -1);
11559}
11560
11561static void
11562do_neon_tbl_tbx (void)
11563{
11564 unsigned listlenbits;
11565 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
11566
11567 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
11568 {
11569 first_error (_("bad list length for table lookup"));
11570 return;
11571 }
11572
11573 listlenbits = inst.operands[1].imm - 1;
11574 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11575 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11576 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11577 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11578 inst.instruction |= LOW4 (inst.operands[2].reg);
11579 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11580 inst.instruction |= listlenbits << 8;
11581
11582 inst.instruction = neon_dp_fixup (inst.instruction);
11583}
11584
11585static void
11586do_neon_ldm_stm (void)
11587{
11588 /* P, U and L bits are part of bitmask. */
11589 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
11590 unsigned offsetbits = inst.operands[1].imm * 2;
11591
11592 constraint (is_dbmode && !inst.operands[0].writeback,
11593 _("writeback (!) must be used for VLDMDB and VSTMDB"));
11594
11595 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
11596 _("register list must contain at least 1 and at most 16 "
11597 "registers"));
11598
11599 inst.instruction |= inst.operands[0].reg << 16;
11600 inst.instruction |= inst.operands[0].writeback << 21;
11601 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
11602 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
11603
11604 inst.instruction |= offsetbits;
11605
11606 if (thumb_mode)
11607 inst.instruction |= 0xe0000000;
11608}
11609
11610static void
11611do_neon_ldr_str (void)
11612{
11613 unsigned offsetbits;
11614 int offset_up = 1;
11615 int is_ldr = (inst.instruction & (1 << 20)) != 0;
11616
11617 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11618 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11619
11620 constraint (inst.reloc.pc_rel && !is_ldr,
11621 _("PC-relative addressing unavailable with VSTR"));
11622
11623 constraint (!inst.reloc.pc_rel && inst.reloc.exp.X_op != O_constant,
11624 _("Immediate value must be a constant"));
11625
11626 if (inst.reloc.exp.X_add_number < 0)
11627 {
11628 offset_up = 0;
11629 offsetbits = -inst.reloc.exp.X_add_number / 4;
11630 }
11631 else
11632 offsetbits = inst.reloc.exp.X_add_number / 4;
11633
11634 /* FIXME: Does this catch everything? */
11635 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11636 || inst.operands[1].postind || inst.operands[1].writeback
11637 || inst.operands[1].immisreg || inst.operands[1].shifted,
11638 BAD_ADDR_MODE);
11639 constraint ((inst.operands[1].imm & 3) != 0,
11640 _("Offset must be a multiple of 4"));
11641 constraint (offsetbits != (offsetbits & 0xff),
11642 _("Immediate offset out of range"));
11643
11644 inst.instruction |= inst.operands[1].reg << 16;
11645 inst.instruction |= offsetbits & 0xff;
11646 inst.instruction |= offset_up << 23;
11647
11648 if (thumb_mode)
11649 inst.instruction |= 0xe0000000;
11650
11651 if (inst.reloc.pc_rel)
11652 {
11653 if (thumb_mode)
11654 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
11655 else
11656 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
11657 }
11658 else
11659 inst.reloc.type = BFD_RELOC_UNUSED;
11660}
11661
11662/* "interleave" version also handles non-interleaving register VLD1/VST1
11663 instructions. */
11664
11665static void
11666do_neon_ld_st_interleave (void)
11667{
11668 struct neon_type_el et = neon_check_type (1, NS_IGNORE,
11669 N_8 | N_16 | N_32 | N_64);
11670 unsigned alignbits = 0;
11671 unsigned idx;
11672 /* The bits in this table go:
11673 0: register stride of one (0) or two (1)
11674 1,2: register list length, minus one (1, 2, 3, 4).
11675 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
11676 We use -1 for invalid entries. */
11677 const int typetable[] =
11678 {
11679 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
11680 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
11681 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
11682 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
11683 };
11684 int typebits;
11685
11686 if (et.type == NT_invtype)
11687 return;
11688
11689 if (inst.operands[1].immisalign)
11690 switch (inst.operands[1].imm >> 8)
11691 {
11692 case 64: alignbits = 1; break;
11693 case 128:
11694 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
11695 goto bad_alignment;
11696 alignbits = 2;
11697 break;
11698 case 256:
11699 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
11700 goto bad_alignment;
11701 alignbits = 3;
11702 break;
11703 default:
11704 bad_alignment:
11705 first_error (_("bad alignment"));
11706 return;
11707 }
11708
11709 inst.instruction |= alignbits << 4;
11710 inst.instruction |= neon_logbits (et.size) << 6;
11711
11712 /* Bits [4:6] of the immediate in a list specifier encode register stride
11713 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
11714 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
11715 up the right value for "type" in a table based on this value and the given
11716 list style, then stick it back. */
11717 idx = ((inst.operands[0].imm >> 4) & 7)
11718 | (((inst.instruction >> 8) & 3) << 3);
11719
11720 typebits = typetable[idx];
11721
11722 constraint (typebits == -1, _("bad list type for instruction"));
11723
11724 inst.instruction &= ~0xf00;
11725 inst.instruction |= typebits << 8;
11726}
11727
11728/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
11729 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
11730 otherwise. The variable arguments are a list of pairs of legal (size, align)
11731 values, terminated with -1. */
11732
11733static int
11734neon_alignment_bit (int size, int align, int *do_align, ...)
11735{
11736 va_list ap;
11737 int result = FAIL, thissize, thisalign;
11738
11739 if (!inst.operands[1].immisalign)
11740 {
11741 *do_align = 0;
11742 return SUCCESS;
11743 }
11744
11745 va_start (ap, do_align);
11746
11747 do
11748 {
11749 thissize = va_arg (ap, int);
11750 if (thissize == -1)
11751 break;
11752 thisalign = va_arg (ap, int);
11753
11754 if (size == thissize && align == thisalign)
11755 result = SUCCESS;
11756 }
11757 while (result != SUCCESS);
11758
11759 va_end (ap);
11760
11761 if (result == SUCCESS)
11762 *do_align = 1;
11763 else
11764 first_error (_("unsupported alignment for instruction"));
11765
11766 return result;
11767}
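
/* Typical usage, taken from the callers below: a call such as
   neon_alignment_bit (et.size, align, &do_align, 16, 16, 32, 32, -1)
   accepts only the (size, align) pairs (16, 16) and (32, 32), or any size
   when no alignment was specified.  */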
11768
11769static void
11770do_neon_ld_st_lane (void)
11771{
11772 struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
11773 int align_good, do_align = 0;
11774 int logsize = neon_logbits (et.size);
11775 int align = inst.operands[1].imm >> 8;
11776 int n = (inst.instruction >> 8) & 3;
11777 int max_el = 64 / et.size;
11778
11779 if (et.type == NT_invtype)
11780 return;
11781
11782 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
11783 _("bad list length"));
11784 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
11785 _("scalar index out of range"));
11786 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
11787 && et.size == 8,
11788 _("stride of 2 unavailable when element size is 8"));
11789
11790 switch (n)
11791 {
11792 case 0: /* VLD1 / VST1. */
11793 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
11794 32, 32, -1);
11795 if (align_good == FAIL)
11796 return;
11797 if (do_align)
11798 {
11799 unsigned alignbits = 0;
11800 switch (et.size)
11801 {
11802 case 16: alignbits = 0x1; break;
11803 case 32: alignbits = 0x3; break;
11804 default: ;
11805 }
11806 inst.instruction |= alignbits << 4;
11807 }
11808 break;
11809
11810 case 1: /* VLD2 / VST2. */
11811 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
11812 32, 64, -1);
11813 if (align_good == FAIL)
11814 return;
11815 if (do_align)
11816 inst.instruction |= 1 << 4;
11817 break;
11818
11819 case 2: /* VLD3 / VST3. */
11820 constraint (inst.operands[1].immisalign,
11821 _("can't use alignment with this instruction"));
11822 break;
11823
11824 case 3: /* VLD4 / VST4. */
11825 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
11826 16, 64, 32, 64, 32, 128, -1);
11827 if (align_good == FAIL)
11828 return;
11829 if (do_align)
11830 {
11831 unsigned alignbits = 0;
11832 switch (et.size)
11833 {
11834 case 8: alignbits = 0x1; break;
11835 case 16: alignbits = 0x1; break;
11836 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
11837 default: ;
11838 }
11839 inst.instruction |= alignbits << 4;
11840 }
11841 break;
11842
11843 default: ;
11844 }
11845
11846 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
11847 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11848 inst.instruction |= 1 << (4 + logsize);
11849
11850 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
11851 inst.instruction |= logsize << 10;
11852}
11853
11854/* Encode single n-element structure to all lanes VLD<n> instructions. */
11855
11856static void
11857do_neon_ld_dup (void)
11858{
11859 struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
11860 int align_good, do_align = 0;
11861
11862 if (et.type == NT_invtype)
11863 return;
11864
11865 switch ((inst.instruction >> 8) & 3)
11866 {
11867 case 0: /* VLD1. */
11868 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
11869 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
11870 &do_align, 16, 16, 32, 32, -1);
11871 if (align_good == FAIL)
11872 return;
11873 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
11874 {
11875 case 1: break;
11876 case 2: inst.instruction |= 1 << 5; break;
11877 default: first_error (_("bad list length")); return;
11878 }
11879 inst.instruction |= neon_logbits (et.size) << 6;
11880 break;
11881
11882 case 1: /* VLD2. */
11883 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
11884 &do_align, 8, 16, 16, 32, 32, 64, -1);
11885 if (align_good == FAIL)
11886 return;
11887 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
11888 _("bad list length"));
11889 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11890 inst.instruction |= 1 << 5;
11891 inst.instruction |= neon_logbits (et.size) << 6;
11892 break;
11893
11894 case 2: /* VLD3. */
11895 constraint (inst.operands[1].immisalign,
11896 _("can't use alignment with this instruction"));
11897 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
11898 _("bad list length"));
11899 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11900 inst.instruction |= 1 << 5;
11901 inst.instruction |= neon_logbits (et.size) << 6;
11902 break;
11903
11904 case 3: /* VLD4. */
11905 {
11906 int align = inst.operands[1].imm >> 8;
11907 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
11908 16, 64, 32, 64, 32, 128, -1);
11909 if (align_good == FAIL)
11910 return;
11911 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
11912 _("bad list length"));
11913 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11914 inst.instruction |= 1 << 5;
11915 if (et.size == 32 && align == 128)
11916 inst.instruction |= 0x3 << 6;
11917 else
11918 inst.instruction |= neon_logbits (et.size) << 6;
11919 }
11920 break;
11921
11922 default: ;
11923 }
11924
11925 inst.instruction |= do_align << 4;
11926}
11927
11928/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
11929 apart from bits [11:4]). */
11930
11931static void
11932do_neon_ldx_stx (void)
11933{
11934 switch (NEON_LANE (inst.operands[0].imm))
11935 {
11936 case NEON_INTERLEAVE_LANES:
11937 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
11938 do_neon_ld_st_interleave ();
11939 break;
11940
11941 case NEON_ALL_LANES:
11942 inst.instruction = NEON_ENC_DUP (inst.instruction);
11943 do_neon_ld_dup ();
11944 break;
11945
11946 default:
11947 inst.instruction = NEON_ENC_LANE (inst.instruction);
11948 do_neon_ld_st_lane ();
11949 }
11950
11951 /* L bit comes from bit mask. */
11952 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11953 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11954 inst.instruction |= inst.operands[1].reg << 16;
11955
11956 if (inst.operands[1].postind)
11957 {
11958 int postreg = inst.operands[1].imm & 0xf;
11959 constraint (!inst.operands[1].immisreg,
11960 _("post-index must be a register"));
11961 constraint (postreg == 0xd || postreg == 0xf,
11962 _("bad register for post-index"));
11963 inst.instruction |= postreg;
11964 }
11965 else if (inst.operands[1].writeback)
11966 {
11967 inst.instruction |= 0xd;
11968 }
11969 else
11970 inst.instruction |= 0xf;
11971
11972 if (thumb_mode)
11973 inst.instruction |= 0xf9000000;
11974 else
11975 inst.instruction |= 0xf4000000;
11976}
11977
11978\f
11979/* Overall per-instruction processing. */
11980
11981/* We need to be able to fix up arbitrary expressions in some statements.
11982 This is so that we can handle symbols that are an arbitrary distance from
11983 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
11984 which returns part of an address in a form which will be valid for
11985 a data instruction. We do this by pushing the expression into a symbol
11986 in the expr_section, and creating a fix for that. */
11987
11988static void
11989fix_new_arm (fragS * frag,
11990 int where,
11991 short int size,
11992 expressionS * exp,
11993 int pc_rel,
11994 int reloc)
11995{
11996 fixS * new_fix;
11997
11998 switch (exp->X_op)
11999 {
12000 case O_constant:
12001 case O_symbol:
12002 case O_add:
12003 case O_subtract:
12004 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
12005 break;
12006
12007 default:
12008 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
12009 pc_rel, reloc);
12010 break;
12011 }
12012
12013 /* Mark whether the fix is to a THUMB instruction, or an ARM
12014 instruction. */
12015 new_fix->tc_fix_data = thumb_mode;
12016}
12017
12018/* Create a frag for an instruction requiring relaxation. */
12019static void
12020output_relax_insn (void)
12021{
12022 char * to;
12023 symbolS *sym;
12024 int offset;
12025
12026#ifdef OBJ_ELF
12027 /* The size of the instruction is unknown, so tie the debug info to the
12028 start of the instruction. */
12029 dwarf2_emit_insn (0);
12030#endif
12031
12032 switch (inst.reloc.exp.X_op)
12033 {
12034 case O_symbol:
12035 sym = inst.reloc.exp.X_add_symbol;
12036 offset = inst.reloc.exp.X_add_number;
12037 break;
12038 case O_constant:
12039 sym = NULL;
12040 offset = inst.reloc.exp.X_add_number;
12041 break;
12042 default:
12043 sym = make_expr_symbol (&inst.reloc.exp);
12044 offset = 0;
12045 break;
12046 }
12047 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
12048 inst.relax, sym, offset, NULL/*offset, opcode*/);
12049 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
12050}
12051
12052/* Write a 32-bit thumb instruction to buf. */
12053static void
12054put_thumb32_insn (char * buf, unsigned long insn)
12055{
12056 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
12057 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
12058}
12059
12060static void
12061output_inst (const char * str)
12062{
12063 char * to = NULL;
12064
12065 if (inst.error)
12066 {
12067 as_bad ("%s -- `%s'", inst.error, str);
12068 return;
12069 }
12070 if (inst.relax) {
12071 output_relax_insn();
12072 return;
12073 }
12074 if (inst.size == 0)
12075 return;
12076
12077 to = frag_more (inst.size);
12078
12079 if (thumb_mode && (inst.size > THUMB_SIZE))
12080 {
12081 assert (inst.size == (2 * THUMB_SIZE));
12082 put_thumb32_insn (to, inst.instruction);
12083 }
12084 else if (inst.size > INSN_SIZE)
12085 {
12086 assert (inst.size == (2 * INSN_SIZE));
12087 md_number_to_chars (to, inst.instruction, INSN_SIZE);
12088 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
12089 }
12090 else
12091 md_number_to_chars (to, inst.instruction, inst.size);
12092
12093 if (inst.reloc.type != BFD_RELOC_UNUSED)
12094 fix_new_arm (frag_now, to - frag_now->fr_literal,
12095 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
12096 inst.reloc.type);
12097
12098#ifdef OBJ_ELF
12099 dwarf2_emit_insn (inst.size);
12100#endif
12101}
12102
12103/* Tag values used in struct asm_opcode's tag field. */
12104enum opcode_tag
12105{
12106 OT_unconditional, /* Instruction cannot be conditionalized.
12107 The ARM condition field is still 0xE. */
12108 OT_unconditionalF, /* Instruction cannot be conditionalized
12109 and carries 0xF in its ARM condition field. */
12110 OT_csuffix, /* Instruction takes a conditional suffix. */
12111 OT_cinfix3, /* Instruction takes a conditional infix,
12112 beginning at character index 3. (In
12113 unified mode, it becomes a suffix.) */
12114 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
12115 character index 3, even in unified mode. Used for
12116 legacy instructions where suffix and infix forms
12117 may be ambiguous. */
12118 OT_csuf_or_in3, /* Instruction takes either a conditional
12119 suffix or an infix at character index 3. */
12120 OT_odd_infix_unc, /* This is the unconditional variant of an
12121 instruction that takes a conditional infix
12122 at an unusual position. In unified mode,
12123 this variant will accept a suffix. */
12124 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
12125 are the conditional variants of instructions that
12126 take conditional infixes in unusual positions.
12127 The infix appears at character index
12128 (tag - OT_odd_infix_0). These are not accepted
12129 in unified mode. */
12130};
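/* For example, in the insns[] table below, "clz" is entered with
 OT_csuffix (so "clzeq" is accepted as a suffixed conditional form),
 "ands" with OT_cinfix3 (divided-syntax "andeqs"), and the tCM-generated
 "ldsh" variants carry OT_odd_infix_0 + 2, placing the infix at character
 index 2 (as in "ldeqsh"). Illustrative only; insns[] is authoritative. */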
12131
12132/* Subroutine of md_assemble, responsible for looking up the primary
12133 opcode from the mnemonic the user wrote. STR points to the
12134 beginning of the mnemonic.
12135
12136 This is not simply a hash table lookup, because of conditional
12137 variants. Most instructions have conditional variants, which are
12138 expressed with a _conditional affix_ to the mnemonic. If we were
12139 to encode each conditional variant as a literal string in the opcode
12140 table, it would have approximately 20,000 entries.
12141
12142 Most mnemonics take this affix as a suffix, and in unified syntax,
12143 'most' is upgraded to 'all'. However, in the divided syntax, some
12144 instructions take the affix as an infix, notably the s-variants of
12145 the arithmetic instructions. Of those instructions, all but six
12146 have the infix appear after the third character of the mnemonic.
12147
12148 Accordingly, the algorithm for looking up primary opcodes given
12149 an identifier is:
12150
12151 1. Look up the identifier in the opcode table.
12152 If we find a match, go to step U.
12153
12154 2. Look up the last two characters of the identifier in the
12155 conditions table. If we find a match, look up the first N-2
12156 characters of the identifier in the opcode table. If we
12157 find a match, go to step CE.
12158
12159 3. Look up the fourth and fifth characters of the identifier in
12160 the conditions table. If we find a match, extract those
12161 characters from the identifier, and look up the remaining
12162 characters in the opcode table. If we find a match, go
12163 to step CM.
12164
12165 4. Fail.
12166
12167 U. Examine the tag field of the opcode structure, in case this is
12168 one of the six instructions with its conditional infix in an
12169 unusual place. If it is, the tag tells us where to find the
12170 infix; look it up in the conditions table and set inst.cond
12171 accordingly. Otherwise, this is an unconditional instruction.
12172 Again set inst.cond accordingly. Return the opcode structure.
12173
12174 CE. Examine the tag field to make sure this is an instruction that
12175 should receive a conditional suffix. If it is not, fail.
12176 Otherwise, set inst.cond from the suffix we already looked up,
12177 and return the opcode structure.
12178
12179 CM. Examine the tag field to make sure this is an instruction that
12180 should receive a conditional infix after the third character.
12181 If it is not, fail. Otherwise, undo the edits to the current
12182 line of input and proceed as for case CE. */
12183
12184static const struct asm_opcode *
12185opcode_lookup (char **str)
12186{
12187 char *end, *base;
12188 char *affix;
12189 const struct asm_opcode *opcode;
12190 const struct asm_cond *cond;
12191 char save[2];
12192
12193 /* Scan up to the end of the mnemonic, which must end in white space,
12194 '.' (in unified mode only), or end of string. */
12195 for (base = end = *str; *end != '\0'; end++)
12196 if (*end == ' ' || (unified_syntax && *end == '.'))
12197 break;
12198
12199 if (end == base)
12200 return 0;
12201
12202 /* Handle a possible width suffix and/or Neon type suffix. */
12203 if (end[0] == '.')
12204 {
12205 int offset = 2;
12206
12207 if (end[1] == 'w')
12208 inst.size_req = 4;
12209 else if (end[1] == 'n')
12210 inst.size_req = 2;
12211 else
12212 offset = 0;
12213
12214 inst.vectype.elems = 0;
12215
12216 *str = end + offset;
12217
12218 if (end[offset] == '.')
12219 {
12220 /* See if we have a Neon type suffix. */
12221 if (parse_neon_type (&inst.vectype, str) == FAIL)
12222 return 0;
12223 }
12224 else if (end[offset] != '\0' && end[offset] != ' ')
12225 return 0;
12226 }
12227 else
12228 *str = end;
12229
12230 /* Look for unaffixed or special-case affixed mnemonic. */
12231 opcode = hash_find_n (arm_ops_hsh, base, end - base);
12232 if (opcode)
12233 {
12234 /* step U */
12235 if (opcode->tag < OT_odd_infix_0)
12236 {
12237 inst.cond = COND_ALWAYS;
12238 return opcode;
12239 }
12240
12241 if (unified_syntax)
12242 as_warn (_("conditional infixes are deprecated in unified syntax"));
12243 affix = base + (opcode->tag - OT_odd_infix_0);
12244 cond = hash_find_n (arm_cond_hsh, affix, 2);
12245 assert (cond);
12246
12247 inst.cond = cond->value;
12248 return opcode;
12249 }
12250
12251 /* Cannot have a conditional suffix on a mnemonic of less than three
12252 characters; the base must be at least one character long. */
12253 if (end - base < 3)
12254 return 0;
12255
12256 /* Look for suffixed mnemonic. */
12257 affix = end - 2;
12258 cond = hash_find_n (arm_cond_hsh, affix, 2);
12259 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
12260 if (opcode && cond)
12261 {
12262 /* step CE */
12263 switch (opcode->tag)
12264 {
12265 case OT_cinfix3_legacy:
12266 /* Ignore conditional suffixes matched on infix only mnemonics. */
12267 break;
12268
12269 case OT_cinfix3:
12270 case OT_odd_infix_unc:
12271 if (!unified_syntax)
12272 return 0;
12273 /* else fall through */
12274
12275 case OT_csuffix:
12276 case OT_csuf_or_in3:
12277 inst.cond = cond->value;
12278 return opcode;
12279
12280 case OT_unconditional:
12281 case OT_unconditionalF:
12282 if (thumb_mode)
12283 {
12284 inst.cond = cond->value;
12285 }
12286 else
12287 {
12288 /* delayed diagnostic */
12289 inst.error = BAD_COND;
12290 inst.cond = COND_ALWAYS;
12291 }
12292 return opcode;
12293
12294 default:
12295 return 0;
12296 }
12297 }
12298
12299 /* Cannot have a usual-position infix on a mnemonic of less than
12300 six characters (five would be a suffix). */
12301 if (end - base < 6)
12302 return 0;
12303
12304 /* Look for infixed mnemonic in the usual position. */
12305 affix = base + 3;
12306 cond = hash_find_n (arm_cond_hsh, affix, 2);
12307 if (!cond)
12308 return 0;
12309
12310 memcpy (save, affix, 2);
12311 memmove (affix, affix + 2, (end - affix) - 2);
12312 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
12313 memmove (affix + 2, affix, (end - affix) - 2);
12314 memcpy (affix, save, 2);
12315
12316 if (opcode && (opcode->tag == OT_cinfix3 || opcode->tag == OT_csuf_or_in3
12317 || opcode->tag == OT_cinfix3_legacy))
12318 {
12319 /* step CM */
12320 if (unified_syntax && opcode->tag == OT_cinfix3)
12321 as_warn (_("conditional infixes are deprecated in unified syntax"));
12322
12323 inst.cond = cond->value;
12324 return opcode;
12325 }
12326
12327 return 0;
12328}
12329
12330void
12331md_assemble (char *str)
12332{
12333 char *p = str;
12334 const struct asm_opcode * opcode;
12335
12336 /* Align the previous label if needed. */
12337 if (last_label_seen != NULL)
12338 {
12339 symbol_set_frag (last_label_seen, frag_now);
12340 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
12341 S_SET_SEGMENT (last_label_seen, now_seg);
12342 }
12343
12344 memset (&inst, '\0', sizeof (inst));
12345 inst.reloc.type = BFD_RELOC_UNUSED;
12346
12347 opcode = opcode_lookup (&p);
12348 if (!opcode)
12349 {
12350 /* It wasn't an instruction, but it might be a register alias of
12351 the form alias .req reg, or a Neon .dn/.qn directive. */
12352 if (!create_register_alias (str, p)
12353 && !create_neon_reg_alias (str, p))
12354 as_bad (_("bad instruction `%s'"), str);
12355
12356 return;
12357 }
12358
12359 if (thumb_mode)
12360 {
12361 arm_feature_set variant;
12362
12363 variant = cpu_variant;
12364 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
12365 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
12366 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
12367 /* Check that this instruction is supported for this CPU. */
12368 if (!opcode->tvariant
12369 || (thumb_mode == 1
12370 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
12371 {
12372 as_bad (_("selected processor does not support `%s'"), str);
12373 return;
12374 }
12375 if (inst.cond != COND_ALWAYS && !unified_syntax
12376 && opcode->tencode != do_t_branch)
12377 {
12378 as_bad (_("Thumb does not support conditional execution"));
12379 return;
12380 }
12381
12382 /* Check conditional suffixes. */
12383 if (current_it_mask)
12384 {
12385 int cond;
12386 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
12387 current_it_mask <<= 1;
12388 current_it_mask &= 0x1f;
12389 /* The BKPT instruction is unconditional even in an IT block. */
12390 if (!inst.error
12391 && cond != inst.cond && opcode->tencode != do_t_bkpt)
12392 {
12393 as_bad (_("incorrect condition in IT block"));
12394 return;
12395 }
12396 }
12397 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
12398 {
12399 as_bad (_("thumb conditional instrunction not in IT block"));
12400 return;
12401 }
12402
12403 mapping_state (MAP_THUMB);
12404 inst.instruction = opcode->tvalue;
12405
12406 if (!parse_operands (p, opcode->operands))
12407 opcode->tencode ();
12408
12409 /* Clear current_it_mask at the end of an IT block. */
12410 if (current_it_mask == 0x10)
12411 current_it_mask = 0;
12412
12413 if (!(inst.error || inst.relax))
12414 {
12415 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
12416 inst.size = (inst.instruction > 0xffff ? 4 : 2);
12417 if (inst.size_req && inst.size_req != inst.size)
12418 {
12419 as_bad (_("cannot honor width suffix -- `%s'"), str);
12420 return;
12421 }
12422 }
12423 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12424 *opcode->tvariant);
12425 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
12426 set those bits when Thumb-2 32-bit instructions are seen, i.e.
12427 anything other than bl/blx.
12428 This is overly pessimistic for relaxable instructions. */
12429 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
12430 || inst.relax)
12431 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12432 arm_ext_v6t2);
12433 }
12434 else
12435 {
12436 /* Check that this instruction is supported for this CPU. */
12437 if (!opcode->avariant ||
12438 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
12439 {
12440 as_bad (_("selected processor does not support `%s'"), str);
12441 return;
12442 }
12443 if (inst.size_req)
12444 {
12445 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
12446 return;
12447 }
12448
12449 mapping_state (MAP_ARM);
12450 inst.instruction = opcode->avalue;
12451 if (opcode->tag == OT_unconditionalF)
12452 inst.instruction |= 0xF << 28;
12453 else
12454 inst.instruction |= inst.cond << 28;
12455 inst.size = INSN_SIZE;
12456 if (!parse_operands (p, opcode->operands))
12457 opcode->aencode ();
12458 /* Arm mode bx is marked as both v4T and v5 because it's still required
12459 on a hypothetical non-thumb v5 core. */
12460 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
12461 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
12462 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
12463 else
12464 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
12465 *opcode->avariant);
12466 }
12467 output_inst (str);
12468}
12469
12470/* Various frobbings of labels and their addresses. */
12471
12472void
12473arm_start_line_hook (void)
12474{
12475 last_label_seen = NULL;
12476}
12477
12478void
12479arm_frob_label (symbolS * sym)
12480{
12481 last_label_seen = sym;
12482
12483 ARM_SET_THUMB (sym, thumb_mode);
12484
12485#if defined OBJ_COFF || defined OBJ_ELF
12486 ARM_SET_INTERWORK (sym, support_interwork);
12487#endif
12488
12489 /* Note - do not allow local symbols (.Lxxx) to be labeled
12490 as Thumb functions. This is because these labels, whilst
12491 they exist inside Thumb code, are not the entry points for
12492 possible ARM->Thumb calls. Also, these labels can be used
12493 as part of a computed goto or switch statement; e.g. gcc
12494 can generate code that looks like this:
12495
12496 ldr r2, [pc, .Laaa]
12497 lsl r3, r3, #2
12498 ldr r2, [r3, r2]
12499 mov pc, r2
12500
12501 .Lbbb: .word .Lxxx
12502 .Lccc: .word .Lyyy
12503 ..etc...
12504 .Laaa: .word .Lbbb
12505
12506 The first instruction loads the address of the jump table.
12507 The second instruction converts a table index into a byte offset.
12508 The third instruction gets the jump address out of the table.
12509 The fourth instruction performs the jump.
12510
12511 If the address stored at .Laaa is that of a symbol which has the
12512 Thumb_Func bit set, then the linker will arrange for this address
12513 to have the bottom bit set, which in turn would mean that the
12514 address computation performed by the third instruction would end
12515 up with the bottom bit set. Since the ARM is capable of unaligned
12516 word loads, the instruction would then load the incorrect address
12517 out of the jump table, and chaos would ensue. */
12518 if (label_is_thumb_function_name
12519 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
12520 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
12521 {
12522 /* When the address of a Thumb function is taken the bottom
12523 bit of that address should be set. This will allow
12524 interworking between Arm and Thumb functions to work
12525 correctly. */
12526
12527 THUMB_SET_FUNC (sym, 1);
12528
12529 label_is_thumb_function_name = FALSE;
12530 }
12531
12532#ifdef OBJ_ELF
12533 dwarf2_emit_label (sym);
12534#endif
12535}
12536
12537int
12538arm_data_in_code (void)
12539{
12540 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
12541 {
12542 *input_line_pointer = '/';
12543 input_line_pointer += 5;
12544 *input_line_pointer = 0;
12545 return 1;
12546 }
12547
12548 return 0;
12549}
12550
12551char *
12552arm_canonicalize_symbol_name (char * name)
12553{
12554 int len;
12555
12556 if (thumb_mode && (len = strlen (name)) > 5
12557 && streq (name + len - 5, "/data"))
12558 *(name + len - 5) = 0;
12559
12560 return name;
12561}
12562\f
12563/* Table of all register names defined by default. The user can
12564 define additional names with .req. Note that all register names
12565 should appear in both upper and lowercase variants. Some registers
12566 also have mixed-case names. */
12567
12568#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
12569#define REGNUM(p,n,t) REGDEF(p##n, n, t)
12570#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
12571#define REGSET(p,t) \
12572 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
12573 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
12574 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
12575 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
12576#define REGSETH(p,t) \
12577 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
12578 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
12579 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
12580 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
12581#define REGSET2(p,t) \
12582 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
12583 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
12584 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
12585 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
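/* As a concrete illustration of the macros above, REGDEF(fp,11,RN)
 expands to { "fp", 11, REG_TYPE_RN, TRUE, 0 }, and REGNUM2(q,1,NQ) to
 { "q1", 2, REG_TYPE_NQ, TRUE, 0 } -- each Neon Q register is recorded
 as the number of its low D register (Qn overlaps D2n and D2n+1). */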
12586
12587static const struct reg_entry reg_names[] =
12588{
12589 /* ARM integer registers. */
12590 REGSET(r, RN), REGSET(R, RN),
12591
12592 /* ATPCS synonyms. */
12593 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
12594 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
12595 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
12596
12597 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
12598 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
12599 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
12600
12601 /* Well-known aliases. */
12602 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
12603 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
12604
12605 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
12606 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
12607
12608 /* Coprocessor numbers. */
12609 REGSET(p, CP), REGSET(P, CP),
12610
12611 /* Coprocessor register numbers. The "cr" variants are for backward
12612 compatibility. */
12613 REGSET(c, CN), REGSET(C, CN),
12614 REGSET(cr, CN), REGSET(CR, CN),
12615
12616 /* FPA registers. */
12617 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
12618 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
12619
12620 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
12621 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
12622
12623 /* VFP SP registers. */
12624 REGSET(s,VFS), REGSET(S,VFS),
12625 REGSETH(s,VFS), REGSETH(S,VFS),
12626
12627 /* VFP DP Registers. */
12628 REGSET(d,VFD), REGSET(D,VFD),
12629 /* Extra Neon DP registers. */
12630 REGSETH(d,VFD), REGSETH(D,VFD),
12631
12632 /* Neon QP registers. */
12633 REGSET2(q,NQ), REGSET2(Q,NQ),
12634
12635 /* VFP control registers. */
12636 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
12637 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
12638
12639 /* Maverick DSP coprocessor registers. */
12640 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
12641 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
12642
12643 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
12644 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
12645 REGDEF(dspsc,0,DSPSC),
12646
12647 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
12648 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
12649 REGDEF(DSPSC,0,DSPSC),
12650
12651 /* iWMMXt data registers - p0, c0-15. */
12652 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
12653
12654 /* iWMMXt control registers - p1, c0-3. */
12655 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
12656 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
12657 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
12658 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
12659
12660 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
12661 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
12662 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
12663 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
12664 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
12665
12666 /* XScale accumulator registers. */
12667 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
12668};
12669#undef REGDEF
12670#undef REGNUM
12671#undef REGSET
12672
12673/* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
12674 within psr_required_here. */
12675static const struct asm_psr psrs[] =
12676{
12677 /* Backward compatibility notation. Note that "all" is no longer
12678 truly all possible PSR bits. */
12679 {"all", PSR_c | PSR_f},
12680 {"flg", PSR_f},
12681 {"ctl", PSR_c},
12682
12683 /* Individual flags. */
12684 {"f", PSR_f},
12685 {"c", PSR_c},
12686 {"x", PSR_x},
12687 {"s", PSR_s},
12688 /* Combinations of flags. */
12689 {"fs", PSR_f | PSR_s},
12690 {"fx", PSR_f | PSR_x},
12691 {"fc", PSR_f | PSR_c},
12692 {"sf", PSR_s | PSR_f},
12693 {"sx", PSR_s | PSR_x},
12694 {"sc", PSR_s | PSR_c},
12695 {"xf", PSR_x | PSR_f},
12696 {"xs", PSR_x | PSR_s},
12697 {"xc", PSR_x | PSR_c},
12698 {"cf", PSR_c | PSR_f},
12699 {"cs", PSR_c | PSR_s},
12700 {"cx", PSR_c | PSR_x},
12701 {"fsx", PSR_f | PSR_s | PSR_x},
12702 {"fsc", PSR_f | PSR_s | PSR_c},
12703 {"fxs", PSR_f | PSR_x | PSR_s},
12704 {"fxc", PSR_f | PSR_x | PSR_c},
12705 {"fcs", PSR_f | PSR_c | PSR_s},
12706 {"fcx", PSR_f | PSR_c | PSR_x},
12707 {"sfx", PSR_s | PSR_f | PSR_x},
12708 {"sfc", PSR_s | PSR_f | PSR_c},
12709 {"sxf", PSR_s | PSR_x | PSR_f},
12710 {"sxc", PSR_s | PSR_x | PSR_c},
12711 {"scf", PSR_s | PSR_c | PSR_f},
12712 {"scx", PSR_s | PSR_c | PSR_x},
12713 {"xfs", PSR_x | PSR_f | PSR_s},
12714 {"xfc", PSR_x | PSR_f | PSR_c},
12715 {"xsf", PSR_x | PSR_s | PSR_f},
12716 {"xsc", PSR_x | PSR_s | PSR_c},
12717 {"xcf", PSR_x | PSR_c | PSR_f},
12718 {"xcs", PSR_x | PSR_c | PSR_s},
12719 {"cfs", PSR_c | PSR_f | PSR_s},
12720 {"cfx", PSR_c | PSR_f | PSR_x},
12721 {"csf", PSR_c | PSR_s | PSR_f},
12722 {"csx", PSR_c | PSR_s | PSR_x},
12723 {"cxf", PSR_c | PSR_x | PSR_f},
12724 {"cxs", PSR_c | PSR_x | PSR_s},
12725 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
12726 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
12727 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
12728 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
12729 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
12730 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
12731 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
12732 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
12733 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
12734 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
12735 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
12736 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
12737 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
12738 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
12739 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
12740 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
12741 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
12742 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
12743 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
12744 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
12745 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
12746 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
12747 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
12748 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
12749};
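/* For example, a divided-syntax operand such as "cpsr_fc" selects the
 "fc" entry above (PSR_f | PSR_c); the legacy "cpsr_all" reaches the
 same pair through the "all" entry. */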
12750
12751/* Table of V7M psr names. */
12752static const struct asm_psr v7m_psrs[] =
12753{
12754 {"apsr", 0 },
12755 {"iapsr", 1 },
12756 {"eapsr", 2 },
12757 {"psr", 3 },
12758 {"ipsr", 5 },
12759 {"epsr", 6 },
12760 {"iepsr", 7 },
12761 {"msp", 8 },
12762 {"psp", 9 },
12763 {"primask", 16},
12764 {"basepri", 17},
12765 {"basepri_max", 18},
12766 {"faultmask", 19},
12767 {"control", 20}
12768};
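/* These are the special-register names accepted by the M-profile forms
 of MRS/MSR; e.g. "msr primask, r0" uses the value 16 above. */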
12769
12770/* Table of all shift-in-operand names. */
12771static const struct asm_shift_name shift_names [] =
12772{
12773 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
12774 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
12775 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
12776 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
12777 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
12778 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
12779};
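/* For instance, "mov r0, r1, asl #2" assembles identically to
 "mov r0, r1, lsl #2", since both names map to SHIFT_LSL above. */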
12780
12781/* Table of all explicit relocation names. */
12782#ifdef OBJ_ELF
12783static struct reloc_entry reloc_names[] =
12784{
12785 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
12786 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
12787 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
12788 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
12789 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
12790 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
12791 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
12792 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
12793 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
12794 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
12795 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
12796};
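/* These names appear in explicit relocation specifiers in the source,
 e.g. "bl foo(PLT)" or ".word bar(GOT)" (illustrative; both the lower-
 and uppercase spellings are entered above). */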
12797#endif
12798
12799/* Table of all conditional affixes. 0xF is not defined as a condition code. */
12800static const struct asm_cond conds[] =
12801{
12802 {"eq", 0x0},
12803 {"ne", 0x1},
12804 {"cs", 0x2}, {"hs", 0x2},
12805 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
12806 {"mi", 0x4},
12807 {"pl", 0x5},
12808 {"vs", 0x6},
12809 {"vc", 0x7},
12810 {"hi", 0x8},
12811 {"ls", 0x9},
12812 {"ge", 0xa},
12813 {"lt", 0xb},
12814 {"gt", 0xc},
12815 {"le", 0xd},
12816 {"al", 0xe}
12817};
12818
12819static struct asm_barrier_opt barrier_opt_names[] =
12820{
12821 { "sy", 0xf },
12822 { "un", 0x7 },
12823 { "st", 0xe },
12824 { "unst", 0x6 }
12825};
12826
12827/* Table of ARM-format instructions. */
12828
12829/* Macros for gluing together operand strings. N.B. In all cases
12830 other than OPS0, the trailing OP_stop comes from default
12831 zero-initialization of the unspecified elements of the array. */
12832#define OPS0() { OP_stop, }
12833#define OPS1(a) { OP_##a, }
12834#define OPS2(a,b) { OP_##a,OP_##b, }
12835#define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
12836#define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
12837#define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
12838#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
12839
12840/* These macros abstract out the exact format of the mnemonic table and
12841 save some repeated characters. */
12842
12843/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
12844#define TxCE(mnem, op, top, nops, ops, ae, te) \
12845 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
12846 THUMB_VARIANT, do_##ae, do_##te }
12847
12848/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
12849 a T_MNEM_xyz enumerator. */
12850#define TCE(mnem, aop, top, nops, ops, ae, te) \
12851 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
12852#define tCE(mnem, aop, top, nops, ops, ae, te) \
12853 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
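/* To make the expansion concrete: the entry
 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz)
 below becomes (roughly)
 { "clz", { OP_RRnpc, OP_RRnpc, }, OT_csuffix, 0x16f0f10, 0xfab0f080,
 ARM_VARIANT, THUMB_VARIANT, do_rd_rm, do_t_clz }
 with ARM_VARIANT/THUMB_VARIANT taking whatever values are #defined at
 that point in the table, and the trailing OP_stop supplied by
 zero-initialization as noted above. */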
12854
12855/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
12856 infix after the third character. */
12857#define TxC3(mnem, op, top, nops, ops, ae, te) \
12858 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
12859 THUMB_VARIANT, do_##ae, do_##te }
12860#define TC3(mnem, aop, top, nops, ops, ae, te) \
12861 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
12862#define tC3(mnem, aop, top, nops, ops, ae, te) \
12863 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
12864
12865/* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
12866 appear in the opcode table. */
12867#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
12868 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
12869 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
12870
12871#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
12872 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
12873 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
12874 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
12875 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
12876 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
12877 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
12878 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
12879 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
12880 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
12881 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
12882 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
12883 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
12884 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
12885 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
12886 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
12887 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
12888 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
12889 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
12890 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
12891
12892#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
12893 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
12894#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
12895 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
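/* For example, tCM(ld,sh, ...) in the table below generates the
 unconditional "ldsh" entry (OT_odd_infix_unc) plus "ldeqsh", "ldnesh",
 ..., "ldalsh", each tagged OT_odd_infix_0 + 2 so that opcode_lookup can
 recover the two-character condition at character index 2. */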
12896
12897/* Mnemonic that cannot be conditionalized. The ARM condition-code
12898 field is still 0xE. Many of the Thumb variants can be executed
12899 conditionally, so this is checked separately. */
12900#define TUE(mnem, op, top, nops, ops, ae, te) \
12901 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
12902 THUMB_VARIANT, do_##ae, do_##te }
12903
12904/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
12905 condition code field. */
12906#define TUF(mnem, op, top, nops, ops, ae, te) \
12907 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
12908 THUMB_VARIANT, do_##ae, do_##te }
12909
12910/* ARM-only variants of all the above. */
12911#define CE(mnem, op, nops, ops, ae) \
12912 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12913
12914#define C3(mnem, op, nops, ops, ae) \
12915 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12916
12917/* Legacy mnemonics that always have conditional infix after the third
12918 character. */
12919#define CL(mnem, op, nops, ops, ae) \
12920 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
12921 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12922
12923/* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
12924#define cCE(mnem, op, nops, ops, ae) \
12925 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
12926
12927/* Legacy coprocessor instructions where conditional infix and conditional
12928 suffix are ambiguous. For consistency this includes all FPA instructions,
12929 not just the potentially ambiguous ones. */
12930#define cCL(mnem, op, nops, ops, ae) \
12931 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
12932 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
12933
12934/* Coprocessor, takes either a suffix or a position-3 infix
12935 (for an FPA corner case). */
12936#define C3E(mnem, op, nops, ops, ae) \
12937 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
12938 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
12939
12940#define xCM_(m1, m2, m3, op, nops, ops, ae) \
12941 { #m1 #m2 #m3, OPS##nops ops, \
12942 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
12943 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12944
12945#define CM(m1, m2, op, nops, ops, ae) \
12946 xCM_(m1, , m2, op, nops, ops, ae), \
12947 xCM_(m1, eq, m2, op, nops, ops, ae), \
12948 xCM_(m1, ne, m2, op, nops, ops, ae), \
12949 xCM_(m1, cs, m2, op, nops, ops, ae), \
12950 xCM_(m1, hs, m2, op, nops, ops, ae), \
12951 xCM_(m1, cc, m2, op, nops, ops, ae), \
12952 xCM_(m1, ul, m2, op, nops, ops, ae), \
12953 xCM_(m1, lo, m2, op, nops, ops, ae), \
12954 xCM_(m1, mi, m2, op, nops, ops, ae), \
12955 xCM_(m1, pl, m2, op, nops, ops, ae), \
12956 xCM_(m1, vs, m2, op, nops, ops, ae), \
12957 xCM_(m1, vc, m2, op, nops, ops, ae), \
12958 xCM_(m1, hi, m2, op, nops, ops, ae), \
12959 xCM_(m1, ls, m2, op, nops, ops, ae), \
12960 xCM_(m1, ge, m2, op, nops, ops, ae), \
12961 xCM_(m1, lt, m2, op, nops, ops, ae), \
12962 xCM_(m1, gt, m2, op, nops, ops, ae), \
12963 xCM_(m1, le, m2, op, nops, ops, ae), \
12964 xCM_(m1, al, m2, op, nops, ops, ae)
12965
12966#define UE(mnem, op, nops, ops, ae) \
12967 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
12968
12969#define UF(mnem, op, nops, ops, ae) \
12970 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
12971
12972/* Neon data-processing. ARM versions are unconditional with cond=0xf.
12973 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
12974 use the same encoding function for each. */
12975#define NUF(mnem, op, nops, ops, enc) \
12976 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
12977 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
12978
12979/* Neon data processing, version which indirects through neon_enc_tab for
12980 the various overloaded versions of opcodes. */
12981#define nUF(mnem, op, nops, ops, enc) \
12982 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
12983 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
12984
12985/* Neon insn with conditional suffix for the ARM version, non-overloaded
12986 version. */
12987#define NCE(mnem, op, nops, ops, enc) \
12988 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x##op, ARM_VARIANT, \
12989 THUMB_VARIANT, do_##enc, do_##enc }
12990
12991/* Neon insn with conditional suffix for the ARM version, overloaded types. */
12992#define nCE(mnem, op, nops, ops, enc) \
12993 { #mnem, OPS##nops ops, OT_csuffix, N_MNEM_##op, N_MNEM_##op, \
12994 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
12995
12996#define do_0 0
12997
12998/* Thumb-only, unconditional. */
12999#define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
13000
13001static const struct asm_opcode insns[] =
13002{
13003#define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
13004#define THUMB_VARIANT &arm_ext_v4t
13005 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
13006 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
13007 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
13008 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
13009 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
13010 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
13011 tCE(add, 0800000, add, 3, (RR, oRR, SH), arit, t_add_sub),
13012 tC3(adds, 0900000, adds, 3, (RR, oRR, SH), arit, t_add_sub),
13013 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
13014 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
13015 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
13016 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
13017 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
13018 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
13019 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
13020 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
13021
13022 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
13023 for setting PSR flag bits. They are obsolete in V6 and do not
13024 have Thumb equivalents. */
13025 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
13026 tC3(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
13027 CL(tstp, 110f000, 2, (RR, SH), cmp),
13028 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
13029 tC3(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
13030 CL(cmpp, 150f000, 2, (RR, SH), cmp),
13031 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
13032 tC3(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
13033 CL(cmnp, 170f000, 2, (RR, SH), cmp),
13034
13035 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
13036 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
13037 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
13038 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
13039
13040 tCE(ldr, 4100000, ldr, 2, (RR, ADDR), ldst, t_ldst),
13041 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDR), ldst, t_ldst),
13042 tCE(str, 4000000, str, 2, (RR, ADDR), ldst, t_ldst),
13043 tC3(strb, 4400000, strb, 2, (RR, ADDR), ldst, t_ldst),
13044
13045 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13046 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13047 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13048 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13049 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13050 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13051
13052 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
13053 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
13054 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
13055 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
13056
13057 /* Pseudo ops. */
13058 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
13059 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
13060 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
13061
13062 /* Thumb-compatibility pseudo ops. */
13063 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
13064 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
13065 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
13066 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
13067 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
13068 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
13069 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
13070 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
13071 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
13072 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
13073 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
13074 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
13075
13076#undef THUMB_VARIANT
13077#define THUMB_VARIANT &arm_ext_v6
13078 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
13079
13080 /* V1 instructions with no Thumb analogue prior to V6T2. */
13081#undef THUMB_VARIANT
13082#define THUMB_VARIANT &arm_ext_v6t2
13083 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
13084 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
13085 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
13086 TC3(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
13087 CL(teqp, 130f000, 2, (RR, SH), cmp),
13088
13089 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
13090 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
13091 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
13092 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
13093
13094 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13095 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13096
13097 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13098 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13099
13100 /* V1 instructions with no Thumb analogue at all. */
13101 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
13102 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
13103
13104 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
13105 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
13106 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
13107 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
13108 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
13109 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
13110 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
13111 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
13112
13113#undef ARM_VARIANT
13114#define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
13115#undef THUMB_VARIANT
13116#define THUMB_VARIANT &arm_ext_v4t
13117 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
13118 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
13119
13120#undef THUMB_VARIANT
13121#define THUMB_VARIANT &arm_ext_v6t2
13122 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
13123 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
13124
13125 /* Generic coprocessor instructions. */
13126 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
13127 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDR), lstc, lstc),
13128 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDR), lstc, lstc),
13129 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDR), lstc, lstc),
13130 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDR), lstc, lstc),
13131 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13132 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13133
13134#undef ARM_VARIANT
13135#define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
13136 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
13137 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
13138
13139#undef ARM_VARIANT
13140#define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
13141 TCE(mrs, 10f0000, f3ef8000, 2, (RR, PSR), mrs, t_mrs),
13142 TCE(msr, 120f000, f3808000, 2, (PSR, RR_EXi), msr, t_msr),
13143
13144#undef ARM_VARIANT
13145#define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
13146 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13147 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13148 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13149 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13150 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13151 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13152 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13153 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13154
13155#undef ARM_VARIANT
13156#define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
13157#undef THUMB_VARIANT
13158#define THUMB_VARIANT &arm_ext_v4t
13159 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDR), ldstv4, t_ldst),
13160 tC3(strh, 00000b0, strh, 2, (RR, ADDR), ldstv4, t_ldst),
13161 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
13162 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
13163 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
13164 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
13165
13166#undef ARM_VARIANT
13167#define ARM_VARIANT &arm_ext_v4t_5
13168 /* ARM Architecture 4T. */
13169 /* Note: bx (and blx) are required on V5, even if the processor does
13170 not support Thumb. */
13171 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
13172
13173#undef ARM_VARIANT
13174#define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
13175#undef THUMB_VARIANT
13176#define THUMB_VARIANT &arm_ext_v5t
13177 /* Note: blx has 2 variants; the .value coded here is for
13178 BLX(2). Only this variant has conditional execution. */
13179 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
13180 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
13181
13182#undef THUMB_VARIANT
13183#define THUMB_VARIANT &arm_ext_v6t2
13184 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
13185 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDR), lstc, lstc),
13186 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDR), lstc, lstc),
13187 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDR), lstc, lstc),
13188 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDR), lstc, lstc),
13189 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
13190 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13191 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13192
13193#undef ARM_VARIANT
13194#define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
13195 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13196 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13197 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13198 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13199
13200 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13201 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13202
13203 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13204 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13205 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13206 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13207
13208 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13209 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13210 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13211 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13212
13213 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13214 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13215
13216 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13217 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13218 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13219 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13220
13221#undef ARM_VARIANT
13222#define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
13223 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
13224 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
13225 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
13226
13227 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13228 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13229
13230#undef ARM_VARIANT
13231#define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
13232 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
13233
13234#undef ARM_VARIANT
13235#define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
13236#undef THUMB_VARIANT
13237#define THUMB_VARIANT &arm_ext_v6
13238 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
13239 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
13240 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13241 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13242 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13243 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13244 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13245 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13246 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13247 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
13248
13249#undef THUMB_VARIANT
13250#define THUMB_VARIANT &arm_ext_v6t2
13251 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
13252 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13253 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13254
13255 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
13256 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
13257
13258/* ARM V6 not included in V7M (e.g. integer SIMD). */
13259#undef THUMB_VARIANT
13260#define THUMB_VARIANT &arm_ext_v6_notm
13261 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
13262 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
13263 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
13264 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13265 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13266 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13267 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13268 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13269 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13270 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13271 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13272 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13273 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13274 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13275 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13276 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13277 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13278 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13279 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13280 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13281 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13282 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13283 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13284 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13285 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13286 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13287 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13288 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13289 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13290 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13291 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13292 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13293 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13294 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13295 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13296 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13297 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13298 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13299 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13300 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
13301 UF(rfeib, 9900a00, 1, (RRw), rfe),
13302 UF(rfeda, 8100a00, 1, (RRw), rfe),
13303 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
13304 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
13305 UF(rfefa, 9900a00, 1, (RRw), rfe),
13306 UF(rfeea, 8100a00, 1, (RRw), rfe),
13307 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
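 /* As the identical opcodes above show, rfefd/rfefa/rfeea/rfeed are the
    stack-notation aliases of rfeia/rfeib/rfeda/rfedb, just as ldm/stm
    accept fd/ed/fa/ea suffixes.  */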
13308 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13309 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13310 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13311 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13312 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13313 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13314 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13315 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13316 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13317 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13318 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13319 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13320 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13321 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13322 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13323 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13324 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13325 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13326 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13327 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13328 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13329 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13330 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13331 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13332 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13333 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13334 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13335 TUF(srsia, 8cd0500, e980c000, 1, (I31w), srs, srs),
13336 UF(srsib, 9cd0500, 1, (I31w), srs),
13337 UF(srsda, 84d0500, 1, (I31w), srs),
13338 TUF(srsdb, 94d0500, e800c000, 1, (I31w), srs, srs),
13339 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
13340 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
13341 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
13342 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13343 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13344 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
13345
13346#undef ARM_VARIANT
13347#define ARM_VARIANT &arm_ext_v6k
13348#undef THUMB_VARIANT
13349#define THUMB_VARIANT &arm_ext_v6k
13350 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
13351 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
13352 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
13353 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
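 /* These ARMv6K hints take no operands; on cores without the
    corresponding event support they may simply behave as NOPs.  */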
13354
13355#undef THUMB_VARIANT
13356#define THUMB_VARIANT &arm_ext_v6_notm
13357 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
13358 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
13359
13360#undef THUMB_VARIANT
13361#define THUMB_VARIANT &arm_ext_v6t2
13362 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
13363 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
13364 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
13365 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
13366 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
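 /* Typical exclusive-access sequence (illustrative only, register
    choices arbitrary):
        1: ldrex   r1, [r0]
           add     r1, r1, #1
           strex   r2, r1, [r0]   @ r2 = 0 on success, 1 on failure
           cmp     r2, #0
           bne     1b
    clrex simply clears the local exclusive monitor.  */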
13367
13368#undef ARM_VARIANT
13369#define ARM_VARIANT &arm_ext_v6z
13370 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
13371
13372#undef ARM_VARIANT
13373#define ARM_VARIANT &arm_ext_v6t2
13374 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
13375 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
13376 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
13377 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
13378
13379 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
13380 TCE(movw, 3000000, f2400000, 2, (RRnpc, Iffff), mov16, t_mov16),
13381 TCE(movt, 3400000, f2c00000, 2, (RRnpc, Iffff), mov16, t_mov16),
13382 TCE(rbit, 3ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
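 /* movw/movt above are typically paired to build a 32-bit constant
    (illustrative values):
        movw  r0, #0x5678
        movt  r0, #0x1234    @ r0 now holds 0x12345678  */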
13383
13384 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13385 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13386 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13387 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13388
13389 UT(cbnz, b900, 2, (RR, EXP), t_czb),
13390 UT(cbz, b100, 2, (RR, EXP), t_czb),
13391 /* ARM does not really have an IT instruction; it exists only in Thumb-2. */
13392 TUE(it, 0, bf08, 1, (COND), it, t_it),
13393 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
13394 TUE(ite, 0, bf04, 1, (COND), it, t_it),
13395 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
13396 TUE(itet, 0, bf06, 1, (COND), it, t_it),
13397 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
13398 TUE(itee, 0, bf02, 1, (COND), it, t_it),
13399 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
13400 TUE(itett, 0, bf07, 1, (COND), it, t_it),
13401 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
13402 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
13403 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
13404 TUE(itete, 0, bf05, 1, (COND), it, t_it),
13405 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
13406 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
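 /* Illustrative IT block: each "t"/"e" after "it" gives the condition of
    one further instruction in the block, e.g.
        itte   eq
        moveq  r0, #1
        addeq  r0, r0, #1
        movne  r0, #0
    (register choices arbitrary).  */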
13407
13408 /* Thumb-2 only instructions. */
13409#undef ARM_VARIANT
13410#define ARM_VARIANT NULL
13411
13412 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
13413 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
13414 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
13415 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
13416
13417 /* Thumb-2 hardware division instructions (R and M profiles only). */
13418#undef THUMB_VARIANT
13419#define THUMB_VARIANT &arm_ext_div
13420 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
13421 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
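 /* e.g. "sdiv r0, r1, r2" computes r0 = r1 / r2; as the oRR operand
    shows, the middle (dividend) register may be omitted, in which case
    the destination is normally reused as the dividend.  */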
13422
13423 /* ARM V7 instructions. */
13424#undef ARM_VARIANT
13425#define ARM_VARIANT &arm_ext_v7
13426#undef THUMB_VARIANT
13427#define THUMB_VARIANT &arm_ext_v7
13428 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
13429 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
13430 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
13431 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
13432 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
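 /* The barrier option operand is optional (oBARRIER), so a bare
    "dmb", "dsb" or "isb" is accepted.  */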
13433
13434#undef ARM_VARIANT
13435#define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
13436 cCE(wfs, e200110, 1, (RR), rd),
13437 cCE(rfs, e300110, 1, (RR), rd),
13438 cCE(wfc, e400110, 1, (RR), rd),
13439 cCE(rfc, e500110, 1, (RR), rd),
13440
13441 cCL(ldfs, c100100, 2, (RF, ADDR), rd_cpaddr),
13442 cCL(ldfd, c108100, 2, (RF, ADDR), rd_cpaddr),
13443 cCL(ldfe, c500100, 2, (RF, ADDR), rd_cpaddr),
13444 cCL(ldfp, c508100, 2, (RF, ADDR), rd_cpaddr),
13445
13446 cCL(stfs, c000100, 2, (RF, ADDR), rd_cpaddr),
13447 cCL(stfd, c008100, 2, (RF, ADDR), rd_cpaddr),
13448 cCL(stfe, c400100, 2, (RF, ADDR), rd_cpaddr),
13449 cCL(stfp, c408100, 2, (RF, ADDR), rd_cpaddr),
13450
13451 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
13452 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
13453 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
13454 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
13455 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
13456 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
13457 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
13458 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
13459 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
13460 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
13461 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
13462 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
13463
13464 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
13465 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
13466 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
13467 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
13468 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
13469 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
13470 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
13471 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
13472 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
13473 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
13474 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
13475 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
13476
13477 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
13478 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
13479 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
13480 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
13481 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
13482 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
13483 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
13484 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
13485 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
13486 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
13487 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
13488 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
13489
13490 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
13491 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
13492 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
13493 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
13494 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
13495 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
13496 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
13497 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
13498 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
13499 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
13500 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
13501 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
13502
13503 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
13504 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
13505 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
13506 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
13507 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
13508 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
13509 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
13510 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
13511 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
13512 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
13513 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
13514 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
13515
13516 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
13517 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
13518 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
13519 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
13520 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
13521 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
13522 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
13523 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
13524 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
13525 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
13526 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
13527 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
13528
13529 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
13530 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
13531 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
13532 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
13533 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
13534 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
13535 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
13536 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
13537 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
13538 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
13539 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
13540 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
13541
13542 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
13543 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
13544 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
13545 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
13546 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
13547 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
13548 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
13549 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
13550 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
13551 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
13552 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
13553 cCL(expez, e788160, 2, (RF, RF_IF), rd_rm),
13554
13555 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
13556 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
13557 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
13558 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
13559 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
13560 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
13561 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
13562 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
13563 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
13564 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
13565 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
13566 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
13567
13568 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
13569 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
13570 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
13571 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
13572 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
13573 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
13574 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
13575 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
13576 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
13577 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
13578 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
13579 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
13580
13581 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
13582 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
13583 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
13584 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
13585 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
13586 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
13587 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
13588 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
13589 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
13590 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
13591 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
13592 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
13593
13594 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
13595 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
13596 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
13597 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
13598 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
13599 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
13600 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
13601 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
13602 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
13603 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
13604 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
13605 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
13606
13607 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
13608 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
13609 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
13610 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
13611 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
13612 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
13613 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
13614 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
13615 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
13616 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
13617 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
13618 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
13619
13620 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
13621 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
13622 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
13623 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
13624 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
13625 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
13626 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
13627 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
13628 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
13629 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
13630 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
13631 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
13632
13633 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
13634 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
13635 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
13636 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
13637 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
13638 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
13639 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
13640 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
13641 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
13642 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
13643 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
13644 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
13645
13646 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
13647 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
13648 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
13649 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
13650 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
13651 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
13652 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
13653 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
13654 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
13655 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
13656 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
13657 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
13658
13659 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
13660 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
13661 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
13662 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
13663 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
13664 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13665 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13666 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13667 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
13668 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
13669 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
13670 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
13671
13672 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
13673 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
13674 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
13675 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
13676 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
13677 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13678 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13679 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13680 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
13681 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
13682 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
13683 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
13684
13685 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
13686 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
13687 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
13688 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
13689 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
13690 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13691 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13692 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13693 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
13694 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
13695 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
13696 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
13697
13698 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
13699 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
13700 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
13701 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
13702 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
13703 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13704 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13705 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13706 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
13707 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
13708 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
13709 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
13710
13711 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
13712 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
13713 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
13714 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
13715 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
13716 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13717 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13718 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13719 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
13720 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
13721 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
13722 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
13723
13724 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
13725 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
13726 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
13727 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
13728 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
13729 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13730 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13731 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13732 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
13733 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
13734 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
13735 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
13736
13737 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
13738 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
13739 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
13740 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
13741 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
13742 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13743 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13744 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13745 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
13746 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
13747 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
13748 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
13749
13750 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
13751 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
13752 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
13753 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
13754 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
13755 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13756 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13757 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13758 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
13759 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
13760 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
13761 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
13762
13763 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
13764 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
13765 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
13766 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
13767 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
13768 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13769 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13770 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13771 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
13772 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
13773 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
13774 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
13775
13776 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
13777 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
13778 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
13779 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
13780 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
13781 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13782 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13783 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13784 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
13785 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
13786 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
13787 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
13788
13789 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13790 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13791 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13792 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13793 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13794 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13795 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13796 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13797 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13798 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13799 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13800 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13801
13802 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13803 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13804 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13805 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13806 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13807 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13808 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13809 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13810 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13811 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13812 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13813 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13814
13815 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13816 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13817 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13818 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13819 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13820 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13821 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13822 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13823 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13824 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13825 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13826 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13827
13828 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
13829 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
13830 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
13831 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
13832
13833 cCL(flts, e000110, 2, (RF, RR), rn_rd),
13834 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
13835 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
13836 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
13837 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
13838 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
13839 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
13840 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
13841 cCL(flte, e080110, 2, (RF, RR), rn_rd),
13842 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
13843 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
13844 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
13845
13846 /* The implementation of the FIX instruction is broken on some
13847 assemblers, in that it accepts a precision specifier as well as a
13848 rounding specifier, even though this is meaningless.  For
13849 compatibility we accept it as well, though of course it does not
13850 set any bits. */
13851 cCE(fix, e100110, 2, (RR, RF), rd_rm),
13852 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
13853 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
13854 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
13855 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
13856 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
13857 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
13858 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
13859 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
13860 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
13861 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
13862 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
13863 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
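 /* Hence, e.g., "fixsz r0, f1" and "fixz r0, f1" assemble identically
    (note the shared opcode e100170 above); the precision letter is
    parsed but ignored.  */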
13864
13865 /* Instructions that were new with the real FPA; call them V2. */
13866#undef ARM_VARIANT
13867#define ARM_VARIANT &fpu_fpa_ext_v2
13868 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13869 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13870 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13871 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13872 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13873 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13874
13875#undef ARM_VARIANT
13876#define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
13877 /* Moves and type conversions. */
13878 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
13879 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
13880 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
13881 cCE(fmstat, ef1fa10, 0, (), noargs),
13882 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
13883 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
13884 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
13885 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
13886 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
13887 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
13888 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
13889 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
13890
13891 /* Memory operations. */
13892 cCE(flds, d100a00, 2, (RVS, ADDR), vfp_sp_ldst),
13893 cCE(fsts, d000a00, 2, (RVS, ADDR), vfp_sp_ldst),
13894 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13895 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13896 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13897 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13898 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13899 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13900 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13901 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13902 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13903 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13904 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13905 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13906 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13907 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13908 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13909 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13910
13911 /* Monadic operations. */
13912 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
13913 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
13914 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
13915
13916 /* Dyadic operations. */
13917 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13918 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13919 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13920 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13921 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13922 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13923 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13924 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13925 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13926
13927 /* Comparisons. */
13928 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
13929 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
13930 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
13931 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
13932
13933#undef ARM_VARIANT
13934#define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (double precision). */
13935 /* Moves and type conversions. */
13936 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
13937 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
13938 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
13939 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
13940 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
13941 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
13942 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
13943 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
13944 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
13945 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
13946 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
13947 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
13948 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
13949
13950 /* Memory operations. */
13951 cCE(fldd, d100b00, 2, (RVD, ADDR), vfp_dp_ldst),
13952 cCE(fstd, d000b00, 2, (RVD, ADDR), vfp_dp_ldst),
13953 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
13954 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
13955 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
13956 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
13957 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
13958 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
13959 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
13960 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
13961
13962 /* Monadic operations. */
13963 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
13964 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
13965 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
13966
13967 /* Dyadic operations. */
13968 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13969 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13970 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13971 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13972 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13973 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13974 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13975 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13976 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13977
13978 /* Comparisons. */
13979 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
13980 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
13981 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
13982 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
13983
13984#undef ARM_VARIANT
13985#define ARM_VARIANT &fpu_vfp_ext_v2
13986 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
13987 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
13988 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
13989 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
13990
13991#undef THUMB_VARIANT
13992#define THUMB_VARIANT &fpu_neon_ext_v1
13993#undef ARM_VARIANT
13994#define ARM_VARIANT &fpu_neon_ext_v1
13995 /* Data processing with three registers of the same length. */
13996 /* Integer ops, valid types S8 S16 S32 U8 U16 U32. */
13997 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
13998 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
13999 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14000 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14001 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14002 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14003 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14004 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14005 /* Integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
14006 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14007 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14008 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14009 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14010 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14011 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14012 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14013 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14014 /* If not immediate, fall back to neon_dyadic_i64_su.
14015 shl_imm should accept I8 I16 I32 I64,
14016 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
14017 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
14018 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
14019 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
14020 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
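 /* Illustrative forms: register shift "vshl.s32 d0, d1, d2" versus
    immediate shift "vshl.i32 d0, d1, #3"; RNDQ_I63b accepts either a
    Neon register or an immediate of at most 63.  */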
14021 /* Logic ops, types optional & ignored. */
14022 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
14023 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
14024 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
14025 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
14026 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
14027 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
14028 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
14029 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
14030 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
14031 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
14032 /* Bitfield ops, untyped. */
14033 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14034 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14035 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14036 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14037 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14038 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14039 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
14040 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14041 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14042 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14043 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14044 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14045 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14046 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
14047 back to neon_dyadic_if_su. */
14048 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
14049 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
14050 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
14051 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
14052 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
14053 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
14054 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
14055 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
14056 /* Comparison. Type I8 I16 I32 F32. Non-immediate -> neon_dyadic_if_i. */
14057 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
14058 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
14059 /* As above, D registers only. */
14060 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
14061 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
14062 /* Int and float variants, signedness unimportant. */
14063 /* If not scalar, fall back to neon_dyadic_if_i. */
14064 nUF(vmla, vmla, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14065 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14066 nUF(vmls, vmls, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14067 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14068 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
14069 /* Add/sub take types I8 I16 I32 I64 F32. */
14070 nUF(vadd, vadd, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
14071 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
14072 nUF(vsub, vsub, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
14073 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
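 /* e.g. "vadd.i32 q0, q1, q2" or "vadd.f32 d0, d1, d2" (illustrative
    register choices).  */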
14074 /* vtst takes sizes 8, 16, 32. */
14075 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
14076 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
14077 /* VMUL takes I8 I16 I32 F32 P8. */
14078 nUF(vmul, vmul, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mul),
14079 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
14080 /* VQD{R}MULH takes S16 S32. */
14081 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
14082 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
14083 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
14084 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
14085 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
14086 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
14087 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
14088 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
14089 NUF(vaclt, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
14090 NUF(vacltq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
14091 NUF(vacle, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
14092 NUF(vacleq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
14093 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
14094 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
14095 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
14096 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
14097
14098 /* Two address, int/float. Types S8 S16 S32 F32. */
14099 NUF(vabs, 1b10300, 2, (RNDQ, RNDQ), neon_abs_neg),
14100 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
14101 NUF(vneg, 1b10380, 2, (RNDQ, RNDQ), neon_abs_neg),
14102 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
14103
14104 /* Data processing with two registers and a shift amount. */
14105 /* Right shifts, and variants with rounding.
14106 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
14107 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
14108 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
14109 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
14110 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
14111 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
14112 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
14113 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
14114 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
14115 /* Shift and insert. Sizes accepted 8 16 32 64. */
14116 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
14117 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
14118 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
14119 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
14120 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
14121 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
14122 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
14123 /* Right shift immediate, saturating & narrowing, with rounding variants.
14124 Types accepted S16 S32 S64 U16 U32 U64. */
14125 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
14126 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
14127 /* As above, but producing unsigned results from signed inputs. Types accepted S16 S32 S64. */
14128 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
14129 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
14130 /* Right shift narrowing. Types accepted I16 I32 I64. */
14131 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
14132 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
14133 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
14134 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
14135 /* CVT with optional immediate for fixed-point variant. */
14136 nUF(vcvt, vcvt, 3, (RNDQ, RNDQ, oI32b), neon_cvt),
14137 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
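 /* e.g. "vcvt.s32.f32 q0, q1" for the plain conversion, or
    "vcvt.s32.f32 d0, d1, #16" for the fixed-point variant; the optional
    oI32b immediate gives the number of fraction bits.  */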
14138
14139 /* One register and an immediate value. All encoding special-cased! */
14140 NCE(vmov, 0, 1, (VMOV), neon_mov),
14141 NCE(vmovq, 0, 1, (VMOV), neon_mov),
14142 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
14143 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
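 /* Illustrative: "vmov.i32 q0, #255" (immediate form) or "vmov d0, d1"
    (register form); vmvn writes the bitwise NOT of an encodable
    immediate, e.g. "vmvn.i32 d0, #255".  */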
14144
14145 /* Data processing, three registers of different lengths. */
14146 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
14147 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
14148 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
14149 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
14150 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
14151 /* If not scalar, fall back to neon_dyadic_long.
14152 Vector types as above, scalar types S16 S32 U16 U32. */
14153 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
14154 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
14155 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
14156 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
14157 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
14158 /* Dyadic, narrowing insns. Types I16 I32 I64. */
14159 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14160 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14161 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14162 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14163 /* Saturating doubling multiplies. Types S16 S32. */
14164 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14165 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14166 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14167 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
14168 S16 S32 U16 U32. */
14169 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
14170
14171 /* Extract. Size 8. */
14172 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I7), neon_ext),
14173 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I7), neon_ext),
14174
14175 /* Two registers, miscellaneous. */
14176 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
14177 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
14178 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
14179 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
14180 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
14181 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
14182 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
14183 /* Vector replicate. Sizes 8 16 32. */
14184 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
14185 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
14186 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
14187 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
14188 /* VMOVN. Types I16 I32 I64. */
14189 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
14190 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
14191 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
14192 /* VQMOVUN. Types S16 S32 S64. */
14193 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
14194 /* VZIP / VUZP. Sizes 8 16 32. */
14195 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
14196 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
14197 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
14198 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
14199 /* VQABS / VQNEG. Types S8 S16 S32. */
14200 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
14201 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
14202 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
14203 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
14204 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
14205 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
14206 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
14207 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
14208 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
14209 /* Reciprocal estimates. Types U32 F32. */
14210 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
14211 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
14212 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
14213 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
14214 /* VCLS. Types S8 S16 S32. */
14215 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
14216 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
14217 /* VCLZ. Types I8 I16 I32. */
14218 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
14219 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
14220 /* VCNT. Size 8. */
14221 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
14222 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
14223 /* Two address, untyped. */
14224 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
14225 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
14226 /* VTRN. Sizes 8 16 32. */
14227 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
14228 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
14229
14230 /* Table lookup. Size 8. */
14231 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
14232 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
14233
14234#undef THUMB_VARIANT
14235#define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
14236#undef ARM_VARIANT
14237#define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
14238
14239 /* Load/store instructions. Available in Neon or VFPv3. */
14240 NCE(vldm, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
14241 NCE(vldmia, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
14242 NCE(vldmdb, d100b00, 2, (RRw, NRDLST), neon_ldm_stm),
14243 NCE(vstm, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
14244 NCE(vstmia, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
14245 NCE(vstmdb, d000b00, 2, (RRw, NRDLST), neon_ldm_stm),
14246 NCE(vldr, d100b00, 2, (RND, ADDR), neon_ldr_str),
14247 NCE(vstr, d000b00, 2, (RND, ADDR), neon_ldr_str),
14248
14249 /* Neon element/structure load/store. */
14250 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
14251 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
14252 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
14253 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
14254 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
14255 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
14256 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
14257 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
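 /* Illustrative: "vld1.32 {d0, d1}, [r0]" loads two D registers, while
    "vst4.8 {d0-d3}, [r1]!" stores a 4-way interleaved structure with
    post-increment writeback.  */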
14258
14259#undef THUMB_VARIANT
14260#define THUMB_VARIANT &fpu_vfp_ext_v3
14261#undef ARM_VARIANT
14262#define ARM_VARIANT &fpu_vfp_ext_v3
14263
14264 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
14265 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
14266 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14267 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14268 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14269 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14270 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14271 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14272 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14273 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14274 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14275 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14276 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14277 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14278 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14279 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14280 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14281 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
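 /* fconsts/fconstd take the 8-bit encoded VFPv3 constant (I255), not an
    arbitrary floating-point value; the fixed-point conversions take the
    number of fraction bits as the immediate, e.g. "fsltos s0, #16".  */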
14282
14283#undef THUMB_VARIANT
14284#undef ARM_VARIANT
14285#define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
14286 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14287 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14288 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14289 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14290 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14291 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14292 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
14293 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
14294
14295#undef ARM_VARIANT
14296#define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
14297 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
14298 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
14299 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
14300 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
14301 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
14302 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
14303 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
14304 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
14305 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
14306 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14307 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14308 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14309 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14310 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14311 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14312 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14313 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14314 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14315 cCE(tmcr, e000110, 2, (RIWC, RR), rn_rd),
14316 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
14317 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14318 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14319 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14320 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14321 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14322 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14323 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
14324 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
14325 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
14326 cCE(tmrc, e100110, 2, (RR, RIWC), rd_rn),
14327 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
14328 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
14329 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
14330 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
14331 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
14332 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
14333 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
14334 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14335 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14336 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14337 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14338 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14339 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14340 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14341 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14342 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14343 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
14344 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14345 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14346 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14347 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14348 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14349 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14350 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14351 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14352 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14353 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14354 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14355 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14356 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14357 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14358 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14359 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14360 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14361 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14362 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14363 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14364 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14365 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
14366 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
14367 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14368 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14369 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14370 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14371 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14372 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14373 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14374 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14375 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14376 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14377 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14378 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14379 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14380 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14381 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14382 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14383 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14384 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14385 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
14386 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14387 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14388 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14389 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14390 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14391 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14392 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14393 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14394 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14395 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14396 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14397 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14398 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14399 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14400 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14401 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14402 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14403 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14404 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14405 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14406 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14407 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
14408 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14409 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14410 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14411 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14412 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14413 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14414 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14415 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14416 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14417 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14418 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14419 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14420 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14421 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14422 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14423 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14424 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14425 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14426 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14427 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14428 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
14429 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
14430 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14431 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14432 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14433 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14434 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14435 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14436 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14437 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14438 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14439 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
14440 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
14441 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
14442 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
14443 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
14444 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
14445 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14446 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14447 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14448 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
14449 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
14450 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
14451 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
14452 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
14453 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
14454 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14455 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14456 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14457 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14458 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
14459
14460#undef ARM_VARIANT
14461#define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
14462 cCE(cfldrs, c100400, 2, (RMF, ADDR), rd_cpaddr),
14463 cCE(cfldrd, c500400, 2, (RMD, ADDR), rd_cpaddr),
14464 cCE(cfldr32, c100500, 2, (RMFX, ADDR), rd_cpaddr),
14465 cCE(cfldr64, c500500, 2, (RMDX, ADDR), rd_cpaddr),
14466 cCE(cfstrs, c000400, 2, (RMF, ADDR), rd_cpaddr),
14467 cCE(cfstrd, c400400, 2, (RMD, ADDR), rd_cpaddr),
14468 cCE(cfstr32, c000500, 2, (RMFX, ADDR), rd_cpaddr),
14469 cCE(cfstr64, c400500, 2, (RMDX, ADDR), rd_cpaddr),
14470 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
14471 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
14472 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
14473 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
14474 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
14475 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
14476 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
14477 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
14478 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
14479 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
14480 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
14481 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
14482 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
14483 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
14484 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
14485 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
14486 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
14487 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
14488 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
14489 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
14490 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
14491 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
14492 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
14493 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
14494 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
14495 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
14496 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
14497 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
14498 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
14499 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
14500 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
14501 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
14502 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
14503 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
14504 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
14505 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
14506 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
14507 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
14508 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
14509 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
14510 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
14511 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
14512 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
14513 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
14514 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
14515 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
14516 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
14517 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
14518 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
14519 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
14520 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
14521 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
14522 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
14523 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
14524 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
14525 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
14526 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14527 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14528 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14529 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14530 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14531 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14532 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14533 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14534 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
14535 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
14536 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
14537 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
14538};
14539#undef ARM_VARIANT
14540#undef THUMB_VARIANT
14541#undef TCE
14542#undef TCM
14543#undef TUE
14544#undef TUF
14545#undef TCC
14546#undef cCE
14547#undef cCL
14548#undef C3E
14549#undef CE
14550#undef CM
14551#undef UE
14552#undef UF
14553#undef UT
14554#undef NUF
14555#undef nUF
14556#undef NCE
14557#undef nCE
14558#undef OPS0
14559#undef OPS1
14560#undef OPS2
14561#undef OPS3
14562#undef OPS4
14563#undef OPS5
14564#undef OPS6
14565#undef do_0
14566\f
14567/* MD interface: bits in the object file. */
14568
14569/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
14570 for use in the object file, and store them in the array pointed to by buf.
14571 This knows about the endian-ness of the target machine and does
14572 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
14573 2 (short) and 4 (long). Floating-point numbers are put out as a series of
14574 LITTLENUMS (shorts, here at least). */
14575
14576void
14577md_number_to_chars (char * buf, valueT val, int n)
14578{
14579 if (target_big_endian)
14580 number_to_chars_bigendian (buf, val, n);
14581 else
14582 number_to_chars_littleendian (buf, val, n);
14583}
14584
14585static valueT
14586md_chars_to_number (char * buf, int n)
14587{
14588 valueT result = 0;
14589 unsigned char * where = (unsigned char *) buf;
14590
14591 if (target_big_endian)
14592 {
14593 while (n--)
14594 {
14595 result <<= 8;
14596 result |= (*where++ & 255);
14597 }
14598 }
14599 else
14600 {
14601 while (n--)
14602 {
14603 result <<= 8;
14604 result |= (where[n] & 255);
14605 }
14606 }
14607
14608 return result;
14609}
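
/* Illustrative sketch, not part of the assembler: round-tripping one
   ARM instruction word through the two helpers above. The constant and
   the assertion are examples only. */
#if 0
static void
example_number_roundtrip (void)
{
  char buf[4];

  /* "mov r0, #1" encodes as 0xe3a00001. On a little-endian target
     md_number_to_chars stores the bytes 0x01 0x00 0xa0 0xe3; reading
     them back with md_chars_to_number recovers the same value on
     either endianness. */
  md_number_to_chars (buf, (valueT) 0xe3a00001, 4);
  assert (md_chars_to_number (buf, 4) == (valueT) 0xe3a00001);
}
#endif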
14610
14611/* MD interface: Sections. */
14612
14613/* Estimate the size of a frag before relaxing. Assume everything fits in
14614 2 bytes. */
14615
14616int
14617md_estimate_size_before_relax (fragS * fragp,
14618 segT segtype ATTRIBUTE_UNUSED)
14619{
14620 fragp->fr_var = 2;
14621 return 2;
14622}
14623
14624/* Convert a machine dependent frag. */
14625
14626void
14627md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
14628{
14629 unsigned long insn;
14630 unsigned long old_op;
14631 char *buf;
14632 expressionS exp;
14633 fixS *fixp;
14634 int reloc_type;
14635 int pc_rel;
14636 int opcode;
14637
14638 buf = fragp->fr_literal + fragp->fr_fix;
14639
14640 old_op = bfd_get_16(abfd, buf);
14641 if (fragp->fr_symbol) {
14642 exp.X_op = O_symbol;
14643 exp.X_add_symbol = fragp->fr_symbol;
14644 } else {
14645 exp.X_op = O_constant;
14646 }
14647 exp.X_add_number = fragp->fr_offset;
14648 opcode = fragp->fr_subtype;
14649 switch (opcode)
14650 {
14651 case T_MNEM_ldr_pc:
14652 case T_MNEM_ldr_pc2:
14653 case T_MNEM_ldr_sp:
14654 case T_MNEM_str_sp:
14655 case T_MNEM_ldr:
14656 case T_MNEM_ldrb:
14657 case T_MNEM_ldrh:
14658 case T_MNEM_str:
14659 case T_MNEM_strb:
14660 case T_MNEM_strh:
14661 if (fragp->fr_var == 4)
14662 {
14663 insn = THUMB_OP32(opcode);
14664 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
14665 {
14666 insn |= (old_op & 0x700) << 4;
14667 }
14668 else
14669 {
14670 insn |= (old_op & 7) << 12;
14671 insn |= (old_op & 0x38) << 13;
14672 }
14673 insn |= 0x00000c00;
14674 put_thumb32_insn (buf, insn);
14675 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
14676 }
14677 else
14678 {
14679 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
14680 }
14681 pc_rel = (opcode == T_MNEM_ldr_pc2);
14682 break;
14683 case T_MNEM_adr:
14684 if (fragp->fr_var == 4)
14685 {
14686 insn = THUMB_OP32 (opcode);
14687 insn |= (old_op & 0xf0) << 4;
14688 put_thumb32_insn (buf, insn);
14689 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
14690 }
14691 else
14692 {
14693 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14694 exp.X_add_number -= 4;
14695 }
14696 pc_rel = 1;
14697 break;
14698 case T_MNEM_mov:
14699 case T_MNEM_movs:
14700 case T_MNEM_cmp:
14701 case T_MNEM_cmn:
14702 if (fragp->fr_var == 4)
14703 {
14704 int r0off = (opcode == T_MNEM_mov
14705 || opcode == T_MNEM_movs) ? 0 : 8;
14706 insn = THUMB_OP32 (opcode);
14707 insn = (insn & 0xe1ffffff) | 0x10000000;
14708 insn |= (old_op & 0x700) << r0off;
14709 put_thumb32_insn (buf, insn);
14710 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14711 }
14712 else
14713 {
14714 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
14715 }
14716 pc_rel = 0;
14717 break;
14718 case T_MNEM_b:
14719 if (fragp->fr_var == 4)
14720 {
14721 insn = THUMB_OP32(opcode);
14722 put_thumb32_insn (buf, insn);
14723 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
14724 }
14725 else
14726 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
14727 pc_rel = 1;
14728 break;
14729 case T_MNEM_bcond:
14730 if (fragp->fr_var == 4)
14731 {
14732 insn = THUMB_OP32(opcode);
14733 insn |= (old_op & 0xf00) << 14;
14734 put_thumb32_insn (buf, insn);
14735 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
14736 }
14737 else
14738 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
14739 pc_rel = 1;
14740 break;
14741 case T_MNEM_add_sp:
14742 case T_MNEM_add_pc:
14743 case T_MNEM_inc_sp:
14744 case T_MNEM_dec_sp:
14745 if (fragp->fr_var == 4)
14746 {
14747 /* ??? Choose between add and addw. */
14748 insn = THUMB_OP32 (opcode);
14749 insn |= (old_op & 0xf0) << 4;
14750 put_thumb32_insn (buf, insn);
14751 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14752 }
14753 else
14754 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14755 pc_rel = 0;
14756 break;
14757
14758 case T_MNEM_addi:
14759 case T_MNEM_addis:
14760 case T_MNEM_subi:
14761 case T_MNEM_subis:
14762 if (fragp->fr_var == 4)
14763 {
14764 insn = THUMB_OP32 (opcode);
14765 insn |= (old_op & 0xf0) << 4;
14766 insn |= (old_op & 0xf) << 16;
14767 put_thumb32_insn (buf, insn);
14768 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14769 }
14770 else
14771 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14772 pc_rel = 0;
14773 break;
14774 default:
14775 abort();
14776 }
14777 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
14778 reloc_type);
14779 fixp->fx_file = fragp->fr_file;
14780 fixp->fx_line = fragp->fr_line;
14781 fragp->fr_fix += fragp->fr_var;
14782}
14783
14784/* Return the size of a relaxable immediate operand instruction.
14785 SHIFT and SIZE specify the form of the allowable immediate. */
14786static int
14787relax_immediate (fragS *fragp, int size, int shift)
14788{
14789 offsetT offset;
14790 offsetT mask;
14791 offsetT low;
14792
14793 /* ??? Should be able to do better than this. */
14794 if (fragp->fr_symbol)
14795 return 4;
14796
14797 low = (1 << shift) - 1;
14798 mask = (1 << (shift + size)) - (1 << shift);
14799 offset = fragp->fr_offset;
14800 /* Force misaligned offsets to 32-bit variant. */
14801 if (offset & low)
14802 return -4;
14803 if (offset & ~mask)
14804 return 4;
14805 return 2;
14806}
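
/* Worked example (illustrative only): a Thumb "ldr"/"str" with an
   immediate offset uses relax_immediate (fragp, 5, 2). Offsets that are
   a multiple of 4 and no larger than 124 keep the 2-byte encoding; a
   misaligned offset such as 6 returns -4 (forced to the 32-bit
   encoding), and an aligned but larger offset such as 256 returns 4.
   A symbolic offset always returns 4, as the value is not yet known. */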
14807
14808/* Return the size of a relaxable adr pseudo-instruction or PC-relative
14809 load. */
14810static int
14811relax_adr (fragS *fragp, asection *sec)
14812{
14813 addressT addr;
14814 offsetT val;
14815
14816 /* Assume worst case for symbols not known to be in the same section. */
14817 if (!S_IS_DEFINED(fragp->fr_symbol)
14818 || sec != S_GET_SEGMENT (fragp->fr_symbol))
14819 return 4;
14820
14821 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
14822 addr = fragp->fr_address + fragp->fr_fix;
14823 addr = (addr + 4) & ~3;
14824 /* Force the insn to the 4-byte version if the target address is not
14825 sufficiently aligned. This prevents an infinite loop when two
14826 instructions have contradictory range/alignment requirements. */
14827 if (val & 3)
14828 return -4;
14829 val -= addr;
14830 if (val < 0 || val > 1020)
14831 return 4;
14832 return 2;
14833}
14834
14835/* Return the size of a relaxable add/sub immediate instruction. */
14836static int
14837relax_addsub (fragS *fragp, asection *sec)
14838{
14839 char *buf;
14840 int op;
14841
14842 buf = fragp->fr_literal + fragp->fr_fix;
14843 op = bfd_get_16(sec->owner, buf);
14844 if ((op & 0xf) == ((op >> 4) & 0xf))
14845 return relax_immediate (fragp, 8, 0);
14846 else
14847 return relax_immediate (fragp, 3, 0);
14848}
14849
14850
14851/* Return the size of a relaxable branch instruction. BITS is the
14852 size of the offset field in the narrow instruction. */
14853
14854static int
14855relax_branch (fragS *fragp, asection *sec, int bits)
14856{
14857 addressT addr;
14858 offsetT val;
14859 offsetT limit;
14860
14861 /* Assume worst case for symbols not known to be in the same section. */
14862 if (!S_IS_DEFINED(fragp->fr_symbol)
14863 || sec != S_GET_SEGMENT (fragp->fr_symbol))
14864 return 4;
14865
14866 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
14867 addr = fragp->fr_address + fragp->fr_fix + 4;
14868 val -= addr;
14869
14870 /* The offset is a signed value, scaled by 2; the 2-byte form reaches about +/- (1 << bits) bytes. */
14871 limit = 1 << bits;
14872 if (val >= limit || val < -limit)
14873 return 4;
14874 return 2;
14875}
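
/* Illustrative example: an unconditional Thumb branch (T_MNEM_b) is
   relaxed with BITS == 11, so LIMIT is 2048 and the 16-bit form is kept
   only while the displacement from the branch address plus 4 stays
   within -2048 .. 2047 bytes; a larger displacement, or a target
   outside the current section, selects the 32-bit encoding. */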
14876
14877
14878/* Relax a machine dependent frag. This returns the amount by which
14879 the current size of the frag should change. */
14880
14881int
14882arm_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
14883{
14884 int oldsize;
14885 int newsize;
14886
14887 oldsize = fragp->fr_var;
14888 switch (fragp->fr_subtype)
14889 {
14890 case T_MNEM_ldr_pc2:
14891 newsize = relax_adr(fragp, sec);
14892 break;
14893 case T_MNEM_ldr_pc:
14894 case T_MNEM_ldr_sp:
14895 case T_MNEM_str_sp:
14896 newsize = relax_immediate(fragp, 8, 2);
14897 break;
14898 case T_MNEM_ldr:
14899 case T_MNEM_str:
14900 newsize = relax_immediate(fragp, 5, 2);
14901 break;
14902 case T_MNEM_ldrh:
14903 case T_MNEM_strh:
14904 newsize = relax_immediate(fragp, 5, 1);
14905 break;
14906 case T_MNEM_ldrb:
14907 case T_MNEM_strb:
14908 newsize = relax_immediate(fragp, 5, 0);
14909 break;
14910 case T_MNEM_adr:
14911 newsize = relax_adr(fragp, sec);
14912 break;
14913 case T_MNEM_mov:
14914 case T_MNEM_movs:
14915 case T_MNEM_cmp:
14916 case T_MNEM_cmn:
14917 newsize = relax_immediate(fragp, 8, 0);
14918 break;
14919 case T_MNEM_b:
14920 newsize = relax_branch(fragp, sec, 11);
14921 break;
14922 case T_MNEM_bcond:
14923 newsize = relax_branch(fragp, sec, 8);
14924 break;
14925 case T_MNEM_add_sp:
14926 case T_MNEM_add_pc:
14927 newsize = relax_immediate (fragp, 8, 2);
14928 break;
14929 case T_MNEM_inc_sp:
14930 case T_MNEM_dec_sp:
14931 newsize = relax_immediate (fragp, 7, 2);
14932 break;
14933 case T_MNEM_addi:
14934 case T_MNEM_addis:
14935 case T_MNEM_subi:
14936 case T_MNEM_subis:
14937 newsize = relax_addsub (fragp, sec);
14938 break;
14939 default:
14940 abort();
14941 }
14942 if (newsize < 0)
14943 {
14944 fragp->fr_var = -newsize;
14945 md_convert_frag (sec->owner, sec, fragp);
14946 frag_wane(fragp);
14947 return -(newsize + oldsize);
14948 }
14949 fragp->fr_var = newsize;
14950 return newsize - oldsize;
14951}
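
/* Example of the return value convention (illustrative): if a frag was
   previously 2 bytes wide and relax_branch now reports 4, the function
   returns 4 - 2 == 2 and the frag grows; if relax_immediate instead
   reports -4, the frag is converted and waned at once and the return
   value is -(-4 + 2) == 2, again the net growth in bytes. */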
14952
14953/* Round up a section size to the appropriate boundary. */
14954
14955valueT
14956md_section_align (segT segment ATTRIBUTE_UNUSED,
14957 valueT size)
14958{
14959#ifdef OBJ_ELF
14960 return size;
14961#else
14962 /* Round all sections to a multiple of 4. */
14963 return (size + 3) & ~3;
14964#endif
14965}
14966
14967/* This is called from HANDLE_ALIGN in write.c. Fill in the contents
14968 of an rs_align_code fragment. */
14969
14970void
14971arm_handle_align (fragS * fragP)
14972{
14973 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
14974 static char const thumb_noop[2] = { 0xc0, 0x46 };
14975 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
14976 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
14977
14978 int bytes, fix, noop_size;
14979 char * p;
14980 const char * noop;
14981
14982 if (fragP->fr_type != rs_align_code)
14983 return;
14984
14985 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
14986 p = fragP->fr_literal + fragP->fr_fix;
14987 fix = 0;
14988
14989 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
14990 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
14991
14992 if (fragP->tc_frag_data)
14993 {
14994 if (target_big_endian)
14995 noop = thumb_bigend_noop;
14996 else
14997 noop = thumb_noop;
14998 noop_size = sizeof (thumb_noop);
14999 }
15000 else
15001 {
15002 if (target_big_endian)
15003 noop = arm_bigend_noop;
15004 else
15005 noop = arm_noop;
15006 noop_size = sizeof (arm_noop);
15007 }
15008
15009 if (bytes & (noop_size - 1))
15010 {
15011 fix = bytes & (noop_size - 1);
15012 memset (p, 0, fix);
15013 p += fix;
15014 bytes -= fix;
15015 }
15016
15017 while (bytes >= noop_size)
15018 {
15019 memcpy (p, noop, noop_size);
15020 p += noop_size;
15021 bytes -= noop_size;
15022 fix += noop_size;
15023 }
15024
15025 fragP->fr_fix += fix;
15026 fragP->fr_var = noop_size;
15027}
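
/* Illustrative example: filling 6 bytes of an ARM (32-bit) code frag
   emits 6 & 3 == 2 zero bytes to reach instruction alignment and then
   one 4-byte "mov r0, r0" no-op; the same 6 bytes in a Thumb frag are
   filled with three 2-byte "mov r8, r8" no-ops. */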
15028
15029/* Called from md_do_align. Used to create an alignment
15030 frag in a code section. */
15031
15032void
15033arm_frag_align_code (int n, int max)
15034{
15035 char * p;
15036
15037 /* We assume that there will never be a requirement
15038 to support alignments greater than 32 bytes. */
15039 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
15040 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
15041
15042 p = frag_var (rs_align_code,
15043 MAX_MEM_FOR_RS_ALIGN_CODE,
15044 1,
15045 (relax_substateT) max,
15046 (symbolS *) NULL,
15047 (offsetT) n,
15048 (char *) NULL);
15049 *p = 0;
15050}
15051
15052/* Perform target specific initialisation of a frag. */
15053
15054void
15055arm_init_frag (fragS * fragP)
15056{
15057 /* Record whether this frag is in an ARM or a THUMB area. */
15058 fragP->tc_frag_data = thumb_mode;
15059}
15060
15061#ifdef OBJ_ELF
15062/* When we change sections we need to issue a new mapping symbol. */
15063
15064void
15065arm_elf_change_section (void)
15066{
15067 flagword flags;
15068 segment_info_type *seginfo;
15069
15070 /* Link an unlinked unwind index table section to the .text section. */
15071 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
15072 && elf_linked_to_section (now_seg) == NULL)
15073 elf_linked_to_section (now_seg) = text_section;
15074
15075 if (!SEG_NORMAL (now_seg))
15076 return;
15077
15078 flags = bfd_get_section_flags (stdoutput, now_seg);
15079
15080 /* We can ignore sections that only contain debug info. */
15081 if ((flags & SEC_ALLOC) == 0)
15082 return;
15083
15084 seginfo = seg_info (now_seg);
15085 mapstate = seginfo->tc_segment_info_data.mapstate;
15086 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
15087}
15088
15089int
15090arm_elf_section_type (const char * str, size_t len)
15091{
15092 if (len == 5 && strncmp (str, "exidx", 5) == 0)
15093 return SHT_ARM_EXIDX;
15094
15095 return -1;
15096}
15097\f
15098/* Code to deal with unwinding tables. */
15099
15100static void add_unwind_adjustsp (offsetT);
15101
15102/* Generate any deferred unwind frame offset. */
15103
15104static void
15105flush_pending_unwind (void)
15106{
15107 offsetT offset;
15108
15109 offset = unwind.pending_offset;
15110 unwind.pending_offset = 0;
15111 if (offset != 0)
15112 add_unwind_adjustsp (offset);
15113}
15114
15115/* Add an opcode to this list for this function. Two-byte opcodes should
15116 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
15117 order. */
15118
15119static void
15120add_unwind_opcode (valueT op, int length)
15121{
15122 /* Add any deferred stack adjustment. */
15123 if (unwind.pending_offset)
15124 flush_pending_unwind ();
15125
15126 unwind.sp_restored = 0;
15127
15128 if (unwind.opcode_count + length > unwind.opcode_alloc)
15129 {
15130 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
15131 if (unwind.opcodes)
15132 unwind.opcodes = xrealloc (unwind.opcodes,
15133 unwind.opcode_alloc);
15134 else
15135 unwind.opcodes = xmalloc (unwind.opcode_alloc);
15136 }
15137 while (length > 0)
15138 {
15139 length--;
15140 unwind.opcodes[unwind.opcode_count] = op & 0xff;
15141 op >>= 8;
15142 unwind.opcode_count++;
15143 }
15144}
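
/* Illustrative example: a hypothetical call add_unwind_opcode (0xb10f, 2)
   stores the bytes in reverse -- opcodes[n] = 0x0f, then 0xb1 -- so that
   create_unwind_entry, which walks the array backwards, emits the
   two-byte EHABI "pop under mask {r0-r3}" opcode as 0xb1 0x0f. */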
15145
15146/* Add unwind opcodes to adjust the stack pointer. */
15147
15148static void
15149add_unwind_adjustsp (offsetT offset)
15150{
15151 valueT op;
15152
15153 if (offset > 0x200)
15154 {
15155 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
15156 char bytes[5];
15157 int n;
15158 valueT o;
15159
15160 /* Long form: 0xb2, uleb128. */
15161 /* This might not fit in a word so add the individual bytes,
15162 remembering the list is built in reverse order. */
15163 o = (valueT) ((offset - 0x204) >> 2);
15164 if (o == 0)
15165 add_unwind_opcode (0, 1);
15166
15167 /* Calculate the uleb128 encoding of the offset. */
15168 n = 0;
15169 while (o)
15170 {
15171 bytes[n] = o & 0x7f;
15172 o >>= 7;
15173 if (o)
15174 bytes[n] |= 0x80;
15175 n++;
15176 }
15177 /* Add the insn. */
15178 for (; n; n--)
15179 add_unwind_opcode (bytes[n - 1], 1);
15180 add_unwind_opcode (0xb2, 1);
15181 }
15182 else if (offset > 0x100)
15183 {
15184 /* Two short opcodes. */
15185 add_unwind_opcode (0x3f, 1);
15186 op = (offset - 0x104) >> 2;
15187 add_unwind_opcode (op, 1);
15188 }
15189 else if (offset > 0)
15190 {
15191 /* Short opcode. */
15192 op = (offset - 4) >> 2;
15193 add_unwind_opcode (op, 1);
15194 }
15195 else if (offset < 0)
15196 {
15197 offset = -offset;
15198 while (offset > 0x100)
15199 {
15200 add_unwind_opcode (0x7f, 1);
15201 offset -= 0x100;
15202 }
15203 op = ((offset - 4) >> 2) | 0x40;
15204 add_unwind_opcode (op, 1);
15205 }
15206}
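
/* Worked example (illustrative): an adjustment of 0x300 bytes takes the
   long form. o = (0x300 - 0x204) >> 2 == 0x3f, whose uleb128 encoding
   is the single byte 0x3f, so 0x3f and then 0xb2 are added to the
   reversed list and the emitted opcodes read 0xb2 0x3f, i.e.
   "vsp += 0x204 + (0x3f << 2)" == "vsp += 0x300". */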
15207
15208/* Finish the list of unwind opcodes for this function. */
15209static void
15210finish_unwind_opcodes (void)
15211{
15212 valueT op;
15213
15214 if (unwind.fp_used)
15215 {
15216 /* Adjust sp as necessary. */
15217 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
15218 flush_pending_unwind ();
15219
15220 /* After restoring sp from the frame pointer. */
15221 op = 0x90 | unwind.fp_reg;
15222 add_unwind_opcode (op, 1);
15223 }
15224 else
15225 flush_pending_unwind ();
15226}
15227
15228
15229/* Switch to the unwind table section for TEXT_SEG. If IDX is nonzero
15230 this is the index table section, otherwise the unwind info section. */
15231
15232static void
15233start_unwind_section (const segT text_seg, int idx)
15234{
15235 const char * text_name;
15236 const char * prefix;
15237 const char * prefix_once;
15238 const char * group_name;
15239 size_t prefix_len;
15240 size_t text_len;
15241 char * sec_name;
15242 size_t sec_name_len;
15243 int type;
15244 int flags;
15245 int linkonce;
15246
15247 if (idx)
15248 {
15249 prefix = ELF_STRING_ARM_unwind;
15250 prefix_once = ELF_STRING_ARM_unwind_once;
15251 type = SHT_ARM_EXIDX;
15252 }
15253 else
15254 {
15255 prefix = ELF_STRING_ARM_unwind_info;
15256 prefix_once = ELF_STRING_ARM_unwind_info_once;
15257 type = SHT_PROGBITS;
15258 }
15259
15260 text_name = segment_name (text_seg);
15261 if (streq (text_name, ".text"))
15262 text_name = "";
15263
15264 if (strncmp (text_name, ".gnu.linkonce.t.",
15265 strlen (".gnu.linkonce.t.")) == 0)
15266 {
15267 prefix = prefix_once;
15268 text_name += strlen (".gnu.linkonce.t.");
15269 }
15270
15271 prefix_len = strlen (prefix);
15272 text_len = strlen (text_name);
15273 sec_name_len = prefix_len + text_len;
15274 sec_name = xmalloc (sec_name_len + 1);
15275 memcpy (sec_name, prefix, prefix_len);
15276 memcpy (sec_name + prefix_len, text_name, text_len);
15277 sec_name[prefix_len + text_len] = '\0';
15278
15279 flags = SHF_ALLOC;
15280 linkonce = 0;
15281 group_name = 0;
15282
15283 /* Handle COMDAT group. */
15284 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
15285 {
15286 group_name = elf_group_name (text_seg);
15287 if (group_name == NULL)
15288 {
15289 as_bad ("Group section `%s' has no group signature",
15290 segment_name (text_seg));
15291 ignore_rest_of_line ();
15292 return;
15293 }
15294 flags |= SHF_GROUP;
15295 linkonce = 1;
15296 }
15297
15298 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
15299
15300 /* Set the section link for index tables. */
15301 if (idx)
15302 elf_linked_to_section (now_seg) = text_seg;
15303}
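
/* Example (illustrative, assuming the usual definitions of
   ELF_STRING_ARM_unwind and ELF_STRING_ARM_unwind_info): code in a
   section named ".text.hot" gets its index table in
   ".ARM.exidx.text.hot" and its unwind descriptors in
   ".ARM.extab.text.hot", while plain ".text" maps to the empty suffix
   and hence to ".ARM.exidx" and ".ARM.extab". */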
15304
15305
15306/* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
15307 personality routine data. Returns zero, or the index table value for
15308 an inline entry. */
15309
15310static valueT
15311create_unwind_entry (int have_data)
15312{
15313 int size;
15314 addressT where;
15315 char *ptr;
15316 /* The current word of data. */
15317 valueT data;
15318 /* The number of bytes left in this word. */
15319 int n;
15320
15321 finish_unwind_opcodes ();
15322
15323 /* Remember the current text section. */
15324 unwind.saved_seg = now_seg;
15325 unwind.saved_subseg = now_subseg;
15326
15327 start_unwind_section (now_seg, 0);
15328
15329 if (unwind.personality_routine == NULL)
15330 {
15331 if (unwind.personality_index == -2)
15332 {
15333 if (have_data)
15334 as_bad (_("handlerdata in cantunwind frame"));
15335 return 1; /* EXIDX_CANTUNWIND. */
15336 }
15337
15338 /* Use a default personality routine if none is specified. */
15339 if (unwind.personality_index == -1)
15340 {
15341 if (unwind.opcode_count > 3)
15342 unwind.personality_index = 1;
15343 else
15344 unwind.personality_index = 0;
15345 }
15346
15347 /* Space for the personality routine entry. */
15348 if (unwind.personality_index == 0)
15349 {
15350 if (unwind.opcode_count > 3)
15351 as_bad (_("too many unwind opcodes for personality routine 0"));
15352
15353 if (!have_data)
15354 {
15355 /* All the data is inline in the index table. */
15356 data = 0x80;
15357 n = 3;
15358 while (unwind.opcode_count > 0)
15359 {
15360 unwind.opcode_count--;
15361 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
15362 n--;
15363 }
15364
15365 /* Pad with "finish" opcodes. */
15366 while (n--)
15367 data = (data << 8) | 0xb0;
15368
15369 return data;
15370 }
15371 size = 0;
15372 }
15373 else
15374 /* We get two opcodes "free" in the first word. */
15375 size = unwind.opcode_count - 2;
15376 }
15377 else
15378 /* An extra byte is required for the opcode count. */
15379 size = unwind.opcode_count + 1;
15380
15381 size = (size + 3) >> 2;
15382 if (size > 0xff)
15383 as_bad (_("too many unwind opcodes"));
15384
15385 frag_align (2, 0, 0);
15386 record_alignment (now_seg, 2);
15387 unwind.table_entry = expr_build_dot ();
15388
15389 /* Allocate the table entry. */
15390 ptr = frag_more ((size << 2) + 4);
15391 where = frag_now_fix () - ((size << 2) + 4);
15392
15393 switch (unwind.personality_index)
15394 {
15395 case -1:
15396 /* ??? Should this be a PLT generating relocation? */
15397 /* Custom personality routine. */
15398 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
15399 BFD_RELOC_ARM_PREL31);
15400
15401 where += 4;
15402 ptr += 4;
15403
15404 /* Set the first byte to the number of additional words. */
15405 data = size - 1;
15406 n = 3;
15407 break;
15408
15409 /* ABI defined personality routines. */
15410 case 0:
15411 /* Three opcode bytes are packed into the first word. */
15412 data = 0x80;
15413 n = 3;
15414 break;
15415
15416 case 1:
15417 case 2:
15418 /* The size and first two opcode bytes go in the first word. */
15419 data = ((0x80 + unwind.personality_index) << 8) | size;
15420 n = 2;
15421 break;
15422
15423 default:
15424 /* Should never happen. */
15425 abort ();
15426 }
15427
15428 /* Pack the opcodes into words (MSB first), reversing the list at the same
15429 time. */
15430 while (unwind.opcode_count > 0)
15431 {
15432 if (n == 0)
15433 {
15434 md_number_to_chars (ptr, data, 4);
15435 ptr += 4;
15436 n = 4;
15437 data = 0;
15438 }
15439 unwind.opcode_count--;
15440 n--;
15441 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
15442 }
15443
15444 /* Finish off the last word. */
15445 if (n < 4)
15446 {
15447 /* Pad with "finish" opcodes. */
15448 while (n--)
15449 data = (data << 8) | 0xb0;
15450
15451 md_number_to_chars (ptr, data, 4);
15452 }
15453
15454 if (!have_data)
15455 {
15456 /* Add an empty descriptor if there is no user-specified data. */
15457 ptr = frag_more (4);
15458 md_number_to_chars (ptr, 0, 4);
15459 }
15460
15461 return 0;
15462}
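
/* Worked example (illustrative): for a frame described by the single
   EHABI opcode 0xa8 ("pop {r4, r14}"), personality routine 0 is chosen
   and everything fits inline: starting from data == 0x80 the opcode is
   shifted in and the word is padded with two 0xb0 "finish" opcodes, so
   create_unwind_entry returns 0x80a8b0b0 for the caller to place
   directly in the index table entry. */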
15463
15464/* Convert REGNAME to a DWARF-2 register number. */
15465
15466int
15467tc_arm_regname_to_dw2regnum (const char *regname)
15468{
15469 int reg = arm_reg_parse ((char **) &regname, REG_TYPE_RN);
15470
15471 if (reg == FAIL)
15472 return -1;
15473
15474 return reg;
15475}
15476
15477/* Initialize the DWARF-2 unwind information for this procedure. */
15478
15479void
15480tc_arm_frame_initial_instructions (void)
15481{
15482 cfi_add_CFA_def_cfa (REG_SP, 0);
15483}
15484#endif /* OBJ_ELF */
15485
15486
15487/* MD interface: Symbol and relocation handling. */
15488
15489/* Return the address within the segment that a PC-relative fixup is
15490 relative to. For ARM, PC-relative fixups applied to instructions
15491 are generally relative to the location of the fixup plus 8 bytes.
15492 Thumb branches are offset by 4, and Thumb loads relative to PC
15493 require special handling. */
15494
15495long
15496md_pcrel_from_section (fixS * fixP, segT seg)
15497{
15498 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
15499
15500 /* If this is pc-relative and we are going to emit a relocation
15501 then we just want to put out any pipeline compensation that the linker
15502 will need. Otherwise we want to use the calculated base. */
15503 if (fixP->fx_pcrel
15504 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
15505 || arm_force_relocation (fixP)))
15506 base = 0;
15507
15508 switch (fixP->fx_r_type)
15509 {
15510 /* PC relative addressing on the Thumb is slightly odd as the
15511 bottom two bits of the PC are forced to zero for the
15512 calculation. This happens *after* application of the
15513 pipeline offset. However, Thumb adrl already adjusts for
15514 this, so we need not do it again. */
15515 case BFD_RELOC_ARM_THUMB_ADD:
15516 return base & ~3;
15517
15518 case BFD_RELOC_ARM_THUMB_OFFSET:
15519 case BFD_RELOC_ARM_T32_OFFSET_IMM:
15520 case BFD_RELOC_ARM_T32_ADD_PC12:
15521 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
15522 return (base + 4) & ~3;
15523
15524 /* Thumb branches are simply offset by +4. */
15525 case BFD_RELOC_THUMB_PCREL_BRANCH7:
15526 case BFD_RELOC_THUMB_PCREL_BRANCH9:
15527 case BFD_RELOC_THUMB_PCREL_BRANCH12:
15528 case BFD_RELOC_THUMB_PCREL_BRANCH20:
15529 case BFD_RELOC_THUMB_PCREL_BRANCH23:
15530 case BFD_RELOC_THUMB_PCREL_BRANCH25:
15531 case BFD_RELOC_THUMB_PCREL_BLX:
15532 return base + 4;
15533
15534 /* ARM mode branches are offset by +8. However, the Windows CE
15535 loader expects the relocation not to take this into account. */
15536 case BFD_RELOC_ARM_PCREL_BRANCH:
15537 case BFD_RELOC_ARM_PCREL_CALL:
15538 case BFD_RELOC_ARM_PCREL_JUMP:
15539 case BFD_RELOC_ARM_PCREL_BLX:
15540 case BFD_RELOC_ARM_PLT32:
15541#ifdef TE_WINCE
15542 return base;
15543#else
15544 return base + 8;
15545#endif
15546
15547 /* ARM mode loads relative to PC are also offset by +8. Unlike
15548 branches, the Windows CE loader *does* expect the relocation
15549 to take this into account. */
15550 case BFD_RELOC_ARM_OFFSET_IMM:
15551 case BFD_RELOC_ARM_OFFSET_IMM8:
15552 case BFD_RELOC_ARM_HWLITERAL:
15553 case BFD_RELOC_ARM_LITERAL:
15554 case BFD_RELOC_ARM_CP_OFF_IMM:
15555 return base + 8;
15556
15557
15558 /* Other PC-relative relocations are un-offset. */
15559 default:
15560 return base;
15561 }
15562}
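
/* Illustrative example: an ARM "b" at address 0x1000 with a resolved
   local target at 0x1010 gets the fixup value 0x1010 - (0x1000 + 8)
   == 8, and md_apply_fix stores 8 >> 2 == 2 in the 24-bit branch
   field; the equivalent Thumb branch would be computed against
   base + 4 instead. */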
15563
15564/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
15565 Otherwise we have no need to default values of symbols. */
15566
15567symbolS *
15568md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
15569{
15570#ifdef OBJ_ELF
15571 if (name[0] == '_' && name[1] == 'G'
15572 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
15573 {
15574 if (!GOT_symbol)
15575 {
15576 if (symbol_find (name))
15577 as_bad ("GOT already in the symbol table");
15578
15579 GOT_symbol = symbol_new (name, undefined_section,
15580 (valueT) 0, & zero_address_frag);
15581 }
15582
15583 return GOT_symbol;
15584 }
15585#endif
15586
15587 return 0;
15588}
15589
15590/* Subroutine of md_apply_fix. Check to see if an immediate can be
15591 computed as two separate immediate values, added together. We
15592 already know that this value cannot be computed by just one ARM
15593 instruction. */
15594
15595static unsigned int
15596validate_immediate_twopart (unsigned int val,
15597 unsigned int * highpart)
15598{
15599 unsigned int a;
15600 unsigned int i;
15601
15602 for (i = 0; i < 32; i += 2)
15603 if (((a = rotate_left (val, i)) & 0xff) != 0)
15604 {
15605 if (a & 0xff00)
15606 {
15607 if (a & ~ 0xffff)
15608 continue;
15609 * highpart = (a >> 8) | ((i + 24) << 7);
15610 }
15611 else if (a & 0xff0000)
15612 {
15613 if (a & 0xff000000)
15614 continue;
15615 * highpart = (a >> 16) | ((i + 16) << 7);
15616 }
15617 else
15618 {
15619 assert (a & 0xff000000);
15620 * highpart = (a >> 24) | ((i + 8) << 7);
15621 }
15622
15623 return (a & 0xff) | (i << 7);
15624 }
15625
15626 return FAIL;
15627}
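
/* Illustrative sketch, not part of the assembler: splitting 0x1234,
   which is not a valid single rotated immediate, into the two
   immediates used by the ADRL fixup below. The constants are examples
   only. */
#if 0
static void
example_twopart_split (void)
{
  unsigned int hi;
  unsigned int lo = validate_immediate_twopart (0x1234, &hi);

  /* lo == 0x034 (0x34, no rotation) and hi == 0xc12 (0x12 rotated
     right by 24, i.e. 0x1200); 0x34 + 0x1200 recovers 0x1234. */
  assert (lo == 0x034 && hi == 0xc12);
}
#endif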
15628
15629static int
15630validate_offset_imm (unsigned int val, int hwse)
15631{
15632 if ((hwse && val > 255) || val > 4095)
15633 return FAIL;
15634 return val;
15635}
15636
15637/* Subroutine of md_apply_fix. Do those data_ops which can take a
15638 negative immediate constant by altering the instruction. A bit of
15639 a hack really.
15640 MOV <-> MVN
15641 AND <-> BIC
15642 ADC <-> SBC
15643 by inverting the second operand, and
15644 ADD <-> SUB
15645 CMP <-> CMN
15646 by negating the second operand. */
15647
15648static int
15649negate_data_op (unsigned long * instruction,
15650 unsigned long value)
15651{
15652 int op, new_inst;
15653 unsigned long negated, inverted;
15654
15655 negated = encode_arm_immediate (-value);
15656 inverted = encode_arm_immediate (~value);
15657
15658 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
15659 switch (op)
15660 {
15661 /* First negates. */
15662 case OPCODE_SUB: /* ADD <-> SUB */
15663 new_inst = OPCODE_ADD;
15664 value = negated;
15665 break;
15666
15667 case OPCODE_ADD:
15668 new_inst = OPCODE_SUB;
15669 value = negated;
15670 break;
15671
15672 case OPCODE_CMP: /* CMP <-> CMN */
15673 new_inst = OPCODE_CMN;
15674 value = negated;
15675 break;
15676
15677 case OPCODE_CMN:
15678 new_inst = OPCODE_CMP;
15679 value = negated;
15680 break;
15681
15682 /* Now Inverted ops. */
15683 case OPCODE_MOV: /* MOV <-> MVN */
15684 new_inst = OPCODE_MVN;
15685 value = inverted;
15686 break;
15687
15688 case OPCODE_MVN:
15689 new_inst = OPCODE_MOV;
15690 value = inverted;
15691 break;
15692
15693 case OPCODE_AND: /* AND <-> BIC */
15694 new_inst = OPCODE_BIC;
15695 value = inverted;
15696 break;
15697
15698 case OPCODE_BIC:
15699 new_inst = OPCODE_AND;
15700 value = inverted;
15701 break;
15702
15703 case OPCODE_ADC: /* ADC <-> SBC */
15704 new_inst = OPCODE_SBC;
15705 value = inverted;
15706 break;
15707
15708 case OPCODE_SBC:
15709 new_inst = OPCODE_ADC;
15710 value = inverted;
15711 break;
15712
15713 /* We cannot do anything. */
15714 default:
15715 return FAIL;
15716 }
15717
15718 if (value == (unsigned) FAIL)
15719 return FAIL;
15720
15721 *instruction &= OPCODE_MASK;
15722 *instruction |= new_inst << DATA_OP_SHIFT;
15723 return value;
15724}
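
/* Illustrative sketch, not part of the assembler: -1 is not a valid
   rotated immediate, but its bitwise complement 0 is, so negate_data_op
   rewrites a MOV into the equivalent MVN. The instruction word used
   here is an example only. */
#if 0
static void
example_negate_mov (void)
{
  unsigned long insn = 0xe3a00000;	/* mov r0, #imm with a zero immediate field. */
  int imm = negate_data_op (&insn, (unsigned long) -1);

  /* The returned immediate is 0 and the opcode field now selects MVN,
     giving "mvn r0, #0", which writes -1 (0xffffffff) to r0. */
  assert (imm == 0 && ((insn >> DATA_OP_SHIFT) & 0xf) == OPCODE_MVN);
}
#endif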
15725
15726/* Like negate_data_op, but for Thumb-2. */
15727
15728static unsigned int
15729thumb32_negate_data_op (offsetT *instruction, offsetT value)
15730{
15731 int op, new_inst;
15732 int rd;
15733 offsetT negated, inverted;
15734
15735 negated = encode_thumb32_immediate (-value);
15736 inverted = encode_thumb32_immediate (~value);
15737
15738 rd = (*instruction >> 8) & 0xf;
15739 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
15740 switch (op)
15741 {
15742 /* ADD <-> SUB. Includes CMP <-> CMN. */
15743 case T2_OPCODE_SUB:
15744 new_inst = T2_OPCODE_ADD;
15745 value = negated;
15746 break;
15747
15748 case T2_OPCODE_ADD:
15749 new_inst = T2_OPCODE_SUB;
15750 value = negated;
15751 break;
15752
15753 /* ORR <-> ORN. Includes MOV <-> MVN. */
15754 case T2_OPCODE_ORR:
15755 new_inst = T2_OPCODE_ORN;
15756 value = inverted;
15757 break;
15758
15759 case T2_OPCODE_ORN:
15760 new_inst = T2_OPCODE_ORR;
15761 value = inverted;
15762 break;
15763
15764 /* AND <-> BIC. TST has no inverted equivalent. */
15765 case T2_OPCODE_AND:
15766 new_inst = T2_OPCODE_BIC;
15767 if (rd == 15)
15768 value = FAIL;
15769 else
15770 value = inverted;
15771 break;
15772
15773 case T2_OPCODE_BIC:
15774 new_inst = T2_OPCODE_AND;
15775 value = inverted;
15776 break;
15777
15778 /* ADC <-> SBC */
15779 case T2_OPCODE_ADC:
15780 new_inst = T2_OPCODE_SBC;
15781 value = inverted;
15782 break;
15783
15784 case T2_OPCODE_SBC:
15785 new_inst = T2_OPCODE_ADC;
15786 value = inverted;
15787 break;
15788
15789 /* We cannot do anything. */
15790 default:
15791 return FAIL;
15792 }
15793
15794 if (value == FAIL)
15795 return FAIL;
15796
15797 *instruction &= T2_OPCODE_MASK;
15798 *instruction |= new_inst << T2_DATA_OP_SHIFT;
15799 return value;
15800}
15801
15802/* Read a 32-bit thumb instruction from buf. */
15803static unsigned long
15804get_thumb32_insn (char * buf)
15805{
15806 unsigned long insn;
15807 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
15808 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
15809
15810 return insn;
15811}
15812
15813
15814/* We usually want to set the low bit on the address of thumb function
15815 symbols. In particular .word foo - . should have the low bit set.
15816 Generic code tries to fold the difference of two symbols to
15817 a constant. Prevent this and force a relocation when the first symbol
15818 is a thumb function. */
15819int
15820arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
15821{
15822 if (op == O_subtract
15823 && l->X_op == O_symbol
15824 && r->X_op == O_symbol
15825 && THUMB_IS_FUNC (l->X_add_symbol))
15826 {
15827 l->X_op = O_subtract;
15828 l->X_op_symbol = r->X_add_symbol;
15829 l->X_add_number -= r->X_add_number;
15830 return 1;
15831 }
15832 /* Process as normal. */
15833 return 0;
15834}
15835
15836void
15837md_apply_fix (fixS * fixP,
15838 valueT * valP,
15839 segT seg)
15840{
15841 offsetT value = * valP;
15842 offsetT newval;
15843 unsigned int newimm;
15844 unsigned long temp;
15845 int sign;
15846 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
15847
15848 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
15849
15850 /* Note whether this will delete the relocation. */
15851 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
15852 fixP->fx_done = 1;
15853
15854 /* On a 64-bit host, silently truncate 'value' to 32 bits for
15855 consistency with the behavior on 32-bit hosts. Remember value
15856 for emit_reloc. */
15857 value &= 0xffffffff;
15858 value ^= 0x80000000;
15859 value -= 0x80000000;
15860
15861 *valP = value;
15862 fixP->fx_addnumber = value;
15863
15864 /* Same treatment for fixP->fx_offset. */
15865 fixP->fx_offset &= 0xffffffff;
15866 fixP->fx_offset ^= 0x80000000;
15867 fixP->fx_offset -= 0x80000000;
15868
15869 switch (fixP->fx_r_type)
15870 {
15871 case BFD_RELOC_NONE:
15872 /* This will need to go in the object file. */
15873 fixP->fx_done = 0;
15874 break;
15875
15876 case BFD_RELOC_ARM_IMMEDIATE:
15877 /* We claim that this fixup has been processed here,
15878 even if in fact we generate an error because we do
15879 not have a reloc for it, so tc_gen_reloc will reject it. */
15880 fixP->fx_done = 1;
15881
15882 if (fixP->fx_addsy
15883 && ! S_IS_DEFINED (fixP->fx_addsy))
15884 {
15885 as_bad_where (fixP->fx_file, fixP->fx_line,
15886 _("undefined symbol %s used as an immediate value"),
15887 S_GET_NAME (fixP->fx_addsy));
15888 break;
15889 }
15890
15891 newimm = encode_arm_immediate (value);
15892 temp = md_chars_to_number (buf, INSN_SIZE);
15893
15894 /* If the instruction will fail, see if we can fix things up by
15895 changing the opcode. */
15896 if (newimm == (unsigned int) FAIL
15897 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
15898 {
15899 as_bad_where (fixP->fx_file, fixP->fx_line,
15900 _("invalid constant (%lx) after fixup"),
15901 (unsigned long) value);
15902 break;
15903 }
15904
15905 newimm |= (temp & 0xfffff000);
15906 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
15907 break;
15908
15909 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
15910 {
15911 unsigned int highpart = 0;
15912 unsigned int newinsn = 0xe1a00000; /* nop. */
15913
15914 newimm = encode_arm_immediate (value);
15915 temp = md_chars_to_number (buf, INSN_SIZE);
15916
15917 /* If the instruction will fail, see if we can fix things up by
15918 changing the opcode. */
15919 if (newimm == (unsigned int) FAIL
15920 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
15921 {
15922 /* No ? OK - try using two ADD instructions to generate
15923 the value. */
15924 newimm = validate_immediate_twopart (value, & highpart);
15925
15926 /* Yes - then make sure that the second instruction is
15927 also an add. */
15928 if (newimm != (unsigned int) FAIL)
15929 newinsn = temp;
15930 /* Still No ? Try using a negated value. */
15931 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
15932 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
15933 /* Otherwise - give up. */
15934 else
15935 {
15936 as_bad_where (fixP->fx_file, fixP->fx_line,
15937 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
15938 (long) value);
15939 break;
15940 }
15941
15942 /* Replace the first operand in the 2nd instruction (which
15943 is the PC) with the destination register. We have
15944 already added in the PC in the first instruction and we
15945 do not want to do it again. */
15946 newinsn &= ~ 0xf0000;
15947 newinsn |= ((newinsn & 0x0f000) << 4);
15948 }
15949
15950 newimm |= (temp & 0xfffff000);
15951 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
15952
15953 highpart |= (newinsn & 0xfffff000);
15954 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
15955 }
15956 break;
15957
15958 case BFD_RELOC_ARM_OFFSET_IMM:
15959 if (!fixP->fx_done && seg->use_rela_p)
15960 value = 0;
15961
15962 case BFD_RELOC_ARM_LITERAL:
15963 sign = value >= 0;
15964
15965 if (value < 0)
15966 value = - value;
15967
15968 if (validate_offset_imm (value, 0) == FAIL)
15969 {
15970 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
15971 as_bad_where (fixP->fx_file, fixP->fx_line,
15972 _("invalid literal constant: pool needs to be closer"));
15973 else
15974 as_bad_where (fixP->fx_file, fixP->fx_line,
15975 _("bad immediate value for offset (%ld)"),
15976 (long) value);
15977 break;
15978 }
15979
15980 newval = md_chars_to_number (buf, INSN_SIZE);
15981 newval &= 0xff7ff000;
15982 newval |= value | (sign ? INDEX_UP : 0);
15983 md_number_to_chars (buf, newval, INSN_SIZE);
15984 break;
15985
15986 case BFD_RELOC_ARM_OFFSET_IMM8:
15987 case BFD_RELOC_ARM_HWLITERAL:
15988 sign = value >= 0;
15989
15990 if (value < 0)
15991 value = - value;
15992
15993 if (validate_offset_imm (value, 1) == FAIL)
15994 {
15995 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
15996 as_bad_where (fixP->fx_file, fixP->fx_line,
15997 _("invalid literal constant: pool needs to be closer"));
15998 else
15999 as_bad (_("bad immediate value for half-word offset (%ld)"),
16000 (long) value);
16001 break;
16002 }
16003
16004 newval = md_chars_to_number (buf, INSN_SIZE);
16005 newval &= 0xff7ff0f0;
16006 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
16007 md_number_to_chars (buf, newval, INSN_SIZE);
16008 break;
16009
16010 case BFD_RELOC_ARM_T32_OFFSET_U8:
16011 if (value < 0 || value > 1020 || value % 4 != 0)
16012 as_bad_where (fixP->fx_file, fixP->fx_line,
16013 _("bad immediate value for offset (%ld)"), (long) value);
16014 value /= 4;
16015
16016 newval = md_chars_to_number (buf+2, THUMB_SIZE);
16017 newval |= value;
16018 md_number_to_chars (buf+2, newval, THUMB_SIZE);
16019 break;
16020
16021 case BFD_RELOC_ARM_T32_OFFSET_IMM:
16022 /* This is a complicated relocation used for all varieties of Thumb32
16023 load/store instruction with immediate offset:
16024
16025 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
16026 *4, optional writeback(W)
16027 (doubleword load/store)
16028
16029 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
16030 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
16031 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
16032 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
16033 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
16034
16035 Uppercase letters indicate bits that are already encoded at
16036 this point. Lowercase letters are our problem. For the
16037 second block of instructions, the secondary opcode nybble
16038 (bits 8..11) is present, and bit 23 is zero, even if this is
16039 a PC-relative operation. */
16040 newval = md_chars_to_number (buf, THUMB_SIZE);
16041 newval <<= 16;
16042 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
16043
16044 if ((newval & 0xf0000000) == 0xe0000000)
16045 {
16046 /* Doubleword load/store: 8-bit offset, scaled by 4. */
16047 if (value >= 0)
16048 newval |= (1 << 23);
16049 else
16050 value = -value;
16051 if (value % 4 != 0)
16052 {
16053 as_bad_where (fixP->fx_file, fixP->fx_line,
16054 _("offset not a multiple of 4"));
16055 break;
16056 }
16057 value /= 4;
16058 if (value > 0xff)
16059 {
16060 as_bad_where (fixP->fx_file, fixP->fx_line,
16061 _("offset out of range"));
16062 break;
16063 }
16064 newval &= ~0xff;
16065 }
16066 else if ((newval & 0x000f0000) == 0x000f0000)
16067 {
16068 /* PC-relative, 12-bit offset. */
16069 if (value >= 0)
16070 newval |= (1 << 23);
16071 else
16072 value = -value;
16073 if (value > 0xfff)
16074 {
16075 as_bad_where (fixP->fx_file, fixP->fx_line,
16076 _("offset out of range"));
16077 break;
16078 }
16079 newval &= ~0xfff;
16080 }
16081 else if ((newval & 0x00000100) == 0x00000100)
16082 {
16083 /* Writeback: 8-bit, +/- offset. */
16084 if (value >= 0)
16085 newval |= (1 << 9);
16086 else
16087 value = -value;
16088 if (value > 0xff)
16089 {
16090 as_bad_where (fixP->fx_file, fixP->fx_line,
16091 _("offset out of range"));
16092 break;
16093 }
16094 newval &= ~0xff;
16095 }
16096 else if ((newval & 0x00000f00) == 0x00000e00)
16097 {
16098 /* T-instruction: positive 8-bit offset. */
16099 if (value < 0 || value > 0xff)
16100 {
16101 as_bad_where (fixP->fx_file, fixP->fx_line,
16102 _("offset out of range"));
16103 break;
16104 }
16105 newval &= ~0xff;
16106 newval |= value;
16107 }
16108 else
16109 {
16110 /* Positive 12-bit or negative 8-bit offset. */
16111 int limit;
16112 if (value >= 0)
16113 {
16114 newval |= (1 << 23);
16115 limit = 0xfff;
16116 }
16117 else
16118 {
16119 value = -value;
16120 limit = 0xff;
16121 }
16122 if (value > limit)
16123 {
16124 as_bad_where (fixP->fx_file, fixP->fx_line,
16125 _("offset out of range"));
16126 break;
16127 }
16128 newval &= ~limit;
16129 }
16130
16131 newval |= value;
16132 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
16133 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
16134 break;
16135
16136 case BFD_RELOC_ARM_SHIFT_IMM:
16137 newval = md_chars_to_number (buf, INSN_SIZE);
16138 if (((unsigned long) value) > 32
16139 || (value == 32
16140 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
16141 {
16142 as_bad_where (fixP->fx_file, fixP->fx_line,
16143 _("shift expression is too large"));
16144 break;
16145 }
16146
16147 if (value == 0)
16148 /* Shifts of zero must be done as lsl. */
16149 newval &= ~0x60;
16150 else if (value == 32)
16151 value = 0;
16152 newval &= 0xfffff07f;
16153 newval |= (value & 0x1f) << 7;
16154 md_number_to_chars (buf, newval, INSN_SIZE);
16155 break;
16156
16157 case BFD_RELOC_ARM_T32_IMMEDIATE:
16158 case BFD_RELOC_ARM_T32_IMM12:
16159 case BFD_RELOC_ARM_T32_ADD_PC12:
16160 /* We claim that this fixup has been processed here,
16161 even if in fact we generate an error because we do
16162 not have a reloc for it, so tc_gen_reloc will reject it. */
16163 fixP->fx_done = 1;
16164
16165 if (fixP->fx_addsy
16166 && ! S_IS_DEFINED (fixP->fx_addsy))
16167 {
16168 as_bad_where (fixP->fx_file, fixP->fx_line,
16169 _("undefined symbol %s used as an immediate value"),
16170 S_GET_NAME (fixP->fx_addsy));
16171 break;
16172 }
16173
16174 newval = md_chars_to_number (buf, THUMB_SIZE);
16175 newval <<= 16;
16176 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
16177
16178 /* thumb32_negate_data_op below is the T32 analogue of negate_data_op. */
16179 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE)
16180 {
16181 newimm = encode_thumb32_immediate (value);
16182 if (newimm == (unsigned int) FAIL)
16183 newimm = thumb32_negate_data_op (&newval, value);
16184 }
16185 else
16186 {
16187 /* 12 bit immediate for addw/subw. */
16188 if (value < 0)
16189 {
16190 value = -value;
16191 newval ^= 0x00a00000;
16192 }
16193 if (value > 0xfff)
16194 newimm = (unsigned int) FAIL;
16195 else
16196 newimm = value;
16197 }
16198
16199 if (newimm == (unsigned int)FAIL)
16200 {
16201 as_bad_where (fixP->fx_file, fixP->fx_line,
16202 _("invalid constant (%lx) after fixup"),
16203 (unsigned long) value);
16204 break;
16205 }
16206
16207 newval |= (newimm & 0x800) << 15;
16208 newval |= (newimm & 0x700) << 4;
16209 newval |= (newimm & 0x0ff);
16210
16211 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
16212 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
16213 break;
16214
16215 case BFD_RELOC_ARM_SMC:
16216 if (((unsigned long) value) > 0xffff)
16217 as_bad_where (fixP->fx_file, fixP->fx_line,
16218 _("invalid smc expression"));
16219 newval = md_chars_to_number (buf, INSN_SIZE);
16220 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
16221 md_number_to_chars (buf, newval, INSN_SIZE);
16222 break;
16223
16224 case BFD_RELOC_ARM_SWI:
16225 if (fixP->tc_fix_data != 0)
16226 {
16227 if (((unsigned long) value) > 0xff)
16228 as_bad_where (fixP->fx_file, fixP->fx_line,
16229 _("invalid swi expression"));
16230 newval = md_chars_to_number (buf, THUMB_SIZE);
16231 newval |= value;
16232 md_number_to_chars (buf, newval, THUMB_SIZE);
16233 }
16234 else
16235 {
16236 if (((unsigned long) value) > 0x00ffffff)
16237 as_bad_where (fixP->fx_file, fixP->fx_line,
16238 _("invalid swi expression"));
16239 newval = md_chars_to_number (buf, INSN_SIZE);
16240 newval |= value;
16241 md_number_to_chars (buf, newval, INSN_SIZE);
16242 }
16243 break;
16244
16245 case BFD_RELOC_ARM_MULTI:
16246 if (((unsigned long) value) > 0xffff)
16247 as_bad_where (fixP->fx_file, fixP->fx_line,
16248 _("invalid expression in load/store multiple"));
16249 newval = value | md_chars_to_number (buf, INSN_SIZE);
16250 md_number_to_chars (buf, newval, INSN_SIZE);
16251 break;
16252
16253#ifdef OBJ_ELF
16254 case BFD_RELOC_ARM_PCREL_CALL:
16255 newval = md_chars_to_number (buf, INSN_SIZE);
16256 if ((newval & 0xf0000000) == 0xf0000000)
16257 temp = 1;
16258 else
16259 temp = 3;
16260 goto arm_branch_common;
16261
16262 case BFD_RELOC_ARM_PCREL_JUMP:
16263 case BFD_RELOC_ARM_PLT32:
16264#endif
16265 case BFD_RELOC_ARM_PCREL_BRANCH:
16266 temp = 3;
16267 goto arm_branch_common;
16268
16269 case BFD_RELOC_ARM_PCREL_BLX:
16270 temp = 1;
16271 arm_branch_common:
16272 /* We are going to store value (shifted right by two) in the
16273 instruction, in a 24 bit, signed field. Bits 25 through 31 must be
16274 either all clear or all set, and bit 0 must be clear. For B/BL bit 1
16275 must also be clear. */
16276 if (value & temp)
16277 as_bad_where (fixP->fx_file, fixP->fx_line,
16278 _("misaligned branch destination"));
16279 if ((value & (offsetT)0xfe000000) != (offsetT)0
16280 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
16281 as_bad_where (fixP->fx_file, fixP->fx_line,
16282 _("branch out of range"));
16283
16284 if (fixP->fx_done || !seg->use_rela_p)
16285 {
16286 newval = md_chars_to_number (buf, INSN_SIZE);
16287 newval |= (value >> 2) & 0x00ffffff;
16288 /* Set the H bit on BLX instructions. */
16289 if (temp == 1)
16290 {
16291 if (value & 2)
16292 newval |= 0x01000000;
16293 else
16294 newval &= ~0x01000000;
16295 }
16296 md_number_to_chars (buf, newval, INSN_SIZE);
16297 }
16298 break;
16299
16300 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
16301 /* CBZ can only branch forward. */
16302 if (value & ~0x7e)
16303 as_bad_where (fixP->fx_file, fixP->fx_line,
16304 _("branch out of range"));
16305
16306 if (fixP->fx_done || !seg->use_rela_p)
16307 {
16308 newval = md_chars_to_number (buf, THUMB_SIZE);
16309 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
16310 md_number_to_chars (buf, newval, THUMB_SIZE);
16311 }
16312 break;
16313
16314 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
16315 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
16316 as_bad_where (fixP->fx_file, fixP->fx_line,
16317 _("branch out of range"));
16318
16319 if (fixP->fx_done || !seg->use_rela_p)
16320 {
16321 newval = md_chars_to_number (buf, THUMB_SIZE);
16322 newval |= (value & 0x1ff) >> 1;
16323 md_number_to_chars (buf, newval, THUMB_SIZE);
16324 }
16325 break;
16326
16327 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
16328 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
16329 as_bad_where (fixP->fx_file, fixP->fx_line,
16330 _("branch out of range"));
16331
16332 if (fixP->fx_done || !seg->use_rela_p)
16333 {
16334 newval = md_chars_to_number (buf, THUMB_SIZE);
16335 newval |= (value & 0xfff) >> 1;
16336 md_number_to_chars (buf, newval, THUMB_SIZE);
16337 }
16338 break;
16339
16340 case BFD_RELOC_THUMB_PCREL_BRANCH20:
16341 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
16342 as_bad_where (fixP->fx_file, fixP->fx_line,
16343 _("conditional branch out of range"));
16344
16345 if (fixP->fx_done || !seg->use_rela_p)
16346 {
16347 offsetT newval2;
16348 addressT S, J1, J2, lo, hi;
16349
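	  /* T32 conditional branch: the offset is split into
	     S:J2:J1:imm6:imm11:'0', with S and imm6 stored in the first
	     halfword and J1, J2 and imm11 in the second.  */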
16350 S = (value & 0x00100000) >> 20;
16351 J2 = (value & 0x00080000) >> 19;
16352 J1 = (value & 0x00040000) >> 18;
16353 hi = (value & 0x0003f000) >> 12;
16354 lo = (value & 0x00000ffe) >> 1;
16355
16356 newval = md_chars_to_number (buf, THUMB_SIZE);
16357 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16358 newval |= (S << 10) | hi;
16359 newval2 |= (J1 << 13) | (J2 << 11) | lo;
16360 md_number_to_chars (buf, newval, THUMB_SIZE);
16361 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16362 }
16363 break;
16364
16365 case BFD_RELOC_THUMB_PCREL_BLX:
16366 case BFD_RELOC_THUMB_PCREL_BRANCH23:
16367 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
16368 as_bad_where (fixP->fx_file, fixP->fx_line,
16369 _("branch out of range"));
16370
16371 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
16372 /* For a BLX instruction, make sure that the relocation is rounded up
16373 to a word boundary. This follows the semantics of the instruction
16374 which specifies that bit 1 of the target address will come from bit
16375 1 of the base address. */
16376 value = (value + 1) & ~ 1;
16377
16378 if (fixP->fx_done || !seg->use_rela_p)
16379 {
16380 offsetT newval2;
16381
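	      /* The offset is split into an 11-bit high part (bits 12-22),
		 stored in the first halfword, and an 11-bit low part
		 (bits 1-11), stored in the second.  */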
16382 newval = md_chars_to_number (buf, THUMB_SIZE);
16383 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16384 newval |= (value & 0x7fffff) >> 12;
16385 newval2 |= (value & 0xfff) >> 1;
16386 md_number_to_chars (buf, newval, THUMB_SIZE);
16387 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16388 }
16389 break;
16390
16391 case BFD_RELOC_THUMB_PCREL_BRANCH25:
16392 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
16393 as_bad_where (fixP->fx_file, fixP->fx_line,
16394 _("branch out of range"));
16395
16396 if (fixP->fx_done || !seg->use_rela_p)
16397 {
16398 offsetT newval2;
16399 addressT S, I1, I2, lo, hi;
16400
16401 S = (value & 0x01000000) >> 24;
16402 I1 = (value & 0x00800000) >> 23;
16403 I2 = (value & 0x00400000) >> 22;
16404 hi = (value & 0x003ff000) >> 12;
16405 lo = (value & 0x00000ffe) >> 1;
16406
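	      /* T32 BL/B.W encode J1 = NOT(I1 EOR S) and J2 = NOT(I2 EOR S)
		 rather than the raw offset bits, so invert them here.  */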
16407 I1 = !(I1 ^ S);
16408 I2 = !(I2 ^ S);
16409
16410 newval = md_chars_to_number (buf, THUMB_SIZE);
16411 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16412 newval |= (S << 10) | hi;
16413 newval2 |= (I1 << 13) | (I2 << 11) | lo;
16414 md_number_to_chars (buf, newval, THUMB_SIZE);
16415 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16416 }
16417 break;
16418
16419 case BFD_RELOC_8:
16420 if (fixP->fx_done || !seg->use_rela_p)
16421 md_number_to_chars (buf, value, 1);
16422 break;
16423
16424 case BFD_RELOC_16:
16425 if (fixP->fx_done || !seg->use_rela_p)
16426 md_number_to_chars (buf, value, 2);
16427 break;
16428
16429#ifdef OBJ_ELF
16430 case BFD_RELOC_ARM_TLS_GD32:
16431 case BFD_RELOC_ARM_TLS_LE32:
16432 case BFD_RELOC_ARM_TLS_IE32:
16433 case BFD_RELOC_ARM_TLS_LDM32:
16434 case BFD_RELOC_ARM_TLS_LDO32:
16435 S_SET_THREAD_LOCAL (fixP->fx_addsy);
16436 /* fall through */
16437
16438 case BFD_RELOC_ARM_GOT32:
16439 case BFD_RELOC_ARM_GOTOFF:
16440 case BFD_RELOC_ARM_TARGET2:
16441 if (fixP->fx_done || !seg->use_rela_p)
16442 md_number_to_chars (buf, 0, 4);
16443 break;
16444#endif
16445
16446 case BFD_RELOC_RVA:
16447 case BFD_RELOC_32:
16448 case BFD_RELOC_ARM_TARGET1:
16449 case BFD_RELOC_ARM_ROSEGREL32:
16450 case BFD_RELOC_ARM_SBREL32:
16451 case BFD_RELOC_32_PCREL:
16452 if (fixP->fx_done || !seg->use_rela_p)
16453 md_number_to_chars (buf, value, 4);
16454 break;
16455
16456#ifdef OBJ_ELF
16457 case BFD_RELOC_ARM_PREL31:
16458 if (fixP->fx_done || !seg->use_rela_p)
16459 {
16460 newval = md_chars_to_number (buf, 4) & 0x80000000;
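	  /* Bits 30 and 31 of the addend must agree, i.e. the value must
	     fit in a signed 31-bit field.  */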
16461 if ((value ^ (value >> 1)) & 0x40000000)
16462 {
16463 as_bad_where (fixP->fx_file, fixP->fx_line,
16464 _("rel31 relocation overflow"));
16465 }
16466 newval |= value & 0x7fffffff;
16467 md_number_to_chars (buf, newval, 4);
16468 }
16469 break;
16470#endif
16471
16472 case BFD_RELOC_ARM_CP_OFF_IMM:
16473 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
16474 if (value < -1023 || value > 1023 || (value & 3))
16475 as_bad_where (fixP->fx_file, fixP->fx_line,
16476 _("co-processor offset out of range"));
16477 cp_off_common:
16478 sign = value >= 0;
16479 if (value < 0)
16480 value = -value;
16481 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
16482 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
16483 newval = md_chars_to_number (buf, INSN_SIZE);
16484 else
16485 newval = get_thumb32_insn (buf);
16486 newval &= 0xff7fff00;
16487 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
16488 if (value == 0)
16489 newval &= ~WRITE_BACK;
16490 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
16491 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
16492 md_number_to_chars (buf, newval, INSN_SIZE);
16493 else
16494 put_thumb32_insn (buf, newval);
16495 break;
16496
16497 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
16498 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
16499 if (value < -255 || value > 255)
16500 as_bad_where (fixP->fx_file, fixP->fx_line,
16501 _("co-processor offset out of range"));
16502 value *= 4;
16503 goto cp_off_common;
16504
16505 case BFD_RELOC_ARM_THUMB_OFFSET:
16506 newval = md_chars_to_number (buf, THUMB_SIZE);
16507 /* Exactly what the ranges are, and where the offset is inserted,
16508 depends on the type of instruction; we can establish this from
16509 the top 4 bits. */
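      /* For example, a PC-relative load (LDR Rd, [PC, #imm]) lies in the
	 range 0x4800-0x4fff, giving a top nibble of 4; SP-relative
	 loads/stores are 0x9xxx, giving 9; and so on.  */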
16510 switch (newval >> 12)
16511 {
16512 case 4: /* PC load. */
16513 /* Thumb PC loads are somewhat odd: bit 1 of the PC is
16514 forced to zero for these loads; md_pcrel_from has already
16515 compensated for this. */
16516 if (value & 3)
16517 as_bad_where (fixP->fx_file, fixP->fx_line,
16518 _("invalid offset, target not word aligned (0x%08lX)"),
16519 (((unsigned long) fixP->fx_frag->fr_address
16520 + (unsigned long) fixP->fx_where) & ~3)
16521 + (unsigned long) value);
16522
16523 if (value & ~0x3fc)
16524 as_bad_where (fixP->fx_file, fixP->fx_line,
16525 _("invalid offset, value too big (0x%08lX)"),
16526 (long) value);
16527
16528 newval |= value >> 2;
16529 break;
16530
16531 case 9: /* SP load/store. */
16532 if (value & ~0x3fc)
16533 as_bad_where (fixP->fx_file, fixP->fx_line,
16534 _("invalid offset, value too big (0x%08lX)"),
16535 (long) value);
16536 newval |= value >> 2;
16537 break;
16538
16539 case 6: /* Word load/store. */
16540 if (value & ~0x7c)
16541 as_bad_where (fixP->fx_file, fixP->fx_line,
16542 _("invalid offset, value too big (0x%08lX)"),
16543 (long) value);
16544 newval |= value << 4; /* 6 - 2. */
16545 break;
16546
16547 case 7: /* Byte load/store. */
16548 if (value & ~0x1f)
16549 as_bad_where (fixP->fx_file, fixP->fx_line,
16550 _("invalid offset, value too big (0x%08lX)"),
16551 (long) value);
16552 newval |= value << 6;
16553 break;
16554
16555 case 8: /* Halfword load/store. */
16556 if (value & ~0x3e)
16557 as_bad_where (fixP->fx_file, fixP->fx_line,
16558 _("invalid offset, value too big (0x%08lX)"),
16559 (long) value);
16560 newval |= value << 5; /* 6 - 1. */
16561 break;
16562
16563 default:
16564 as_bad_where (fixP->fx_file, fixP->fx_line,
16565 "Unable to process relocation for thumb opcode: %lx",
16566 (unsigned long) newval);
16567 break;
16568 }
16569 md_number_to_chars (buf, newval, THUMB_SIZE);
16570 break;
16571
16572 case BFD_RELOC_ARM_THUMB_ADD:
16573 /* This is a complicated relocation, since we use it for all of
16574 the following immediate relocations:
16575
16576 3bit ADD/SUB
16577 8bit ADD/SUB
16578 9bit ADD/SUB SP word-aligned
16579 10bit ADD PC/SP word-aligned
16580
16581 The type of instruction being processed is encoded in the
16582 instruction field:
16583
16584 0x8000 SUB
16585 0x00F0 Rd
16586 0x000F Rs
16587 */
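      /* For example, an "add rd, pc, #imm" arrives here with rs == REG_PC
	 and is re-encoded below as T_OPCODE_ADD_PC with the word-aligned
	 offset in the low eight bits.  */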
16588 newval = md_chars_to_number (buf, THUMB_SIZE);
16589 {
16590 int rd = (newval >> 4) & 0xf;
16591 int rs = newval & 0xf;
16592 int subtract = !!(newval & 0x8000);
16593
16594 /* Check for HI regs; only very restricted cases are allowed:
16595 adjusting SP, and using PC or SP to get an address. */
16596 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
16597 || (rs > 7 && rs != REG_SP && rs != REG_PC))
16598 as_bad_where (fixP->fx_file, fixP->fx_line,
16599 _("invalid Hi register with immediate"));
16600
16601 /* If value is negative, choose the opposite instruction. */
16602 if (value < 0)
16603 {
16604 value = -value;
16605 subtract = !subtract;
16606 if (value < 0)
16607 as_bad_where (fixP->fx_file, fixP->fx_line,
16608 _("immediate value out of range"));
16609 }
16610
16611 if (rd == REG_SP)
16612 {
16613 if (value & ~0x1fc)
16614 as_bad_where (fixP->fx_file, fixP->fx_line,
16615 _("invalid immediate for stack address calculation"));
16616 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
16617 newval |= value >> 2;
16618 }
16619 else if (rs == REG_PC || rs == REG_SP)
16620 {
16621 if (subtract || value & ~0x3fc)
16622 as_bad_where (fixP->fx_file, fixP->fx_line,
16623 _("invalid immediate for address calculation (value = 0x%08lX)"),
16624 (unsigned long) value);
16625 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
16626 newval |= rd << 8;
16627 newval |= value >> 2;
16628 }
16629 else if (rs == rd)
16630 {
16631 if (value & ~0xff)
16632 as_bad_where (fixP->fx_file, fixP->fx_line,
16633 _("immediate value out of range"));
16634 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
16635 newval |= (rd << 8) | value;
16636 }
16637 else
16638 {
16639 if (value & ~0x7)
16640 as_bad_where (fixP->fx_file, fixP->fx_line,
16641 _("immediate value out of range"));
16642 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
16643 newval |= rd | (rs << 3) | (value << 6);
16644 }
16645 }
16646 md_number_to_chars (buf, newval, THUMB_SIZE);
16647 break;
16648
16649 case BFD_RELOC_ARM_THUMB_IMM:
16650 newval = md_chars_to_number (buf, THUMB_SIZE);
16651 if (value < 0 || value > 255)
16652 as_bad_where (fixP->fx_file, fixP->fx_line,
16653 _("invalid immediate: %ld is too large"),
16654 (long) value);
16655 newval |= value;
16656 md_number_to_chars (buf, newval, THUMB_SIZE);
16657 break;
16658
16659 case BFD_RELOC_ARM_THUMB_SHIFT:
16660 /* 5bit shift value (0..32). LSL cannot take 32. */
16661 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
16662 temp = newval & 0xf800;
16663 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
16664 as_bad_where (fixP->fx_file, fixP->fx_line,
16665 _("invalid shift value: %ld"), (long) value);
16666 /* Shifts of zero must be encoded as LSL. */
16667 if (value == 0)
16668 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
16669 /* Shifts of 32 are encoded as zero. */
16670 else if (value == 32)
16671 value = 0;
16672 newval |= value << 6;
16673 md_number_to_chars (buf, newval, THUMB_SIZE);
16674 break;
16675
16676 case BFD_RELOC_VTABLE_INHERIT:
16677 case BFD_RELOC_VTABLE_ENTRY:
16678 fixP->fx_done = 0;
16679 return;
16680
16681 case BFD_RELOC_UNUSED:
16682 default:
16683 as_bad_where (fixP->fx_file, fixP->fx_line,
16684 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
16685 }
16686}
16687
16688/* Translate internal representation of relocation info to BFD target
16689 format. */
16690
16691arelent *
16692tc_gen_reloc (asection *section, fixS *fixp)
16693{
16694 arelent * reloc;
16695 bfd_reloc_code_real_type code;
16696
16697 reloc = xmalloc (sizeof (arelent));
16698
16699 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
16700 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
16701 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
16702
16703 if (fixp->fx_pcrel)
16704 {
16705 if (section->use_rela_p)
16706 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
16707 else
16708 fixp->fx_offset = reloc->address;
16709 }
16710 reloc->addend = fixp->fx_offset;
16711
16712 switch (fixp->fx_r_type)
16713 {
16714 case BFD_RELOC_8:
16715 if (fixp->fx_pcrel)
16716 {
16717 code = BFD_RELOC_8_PCREL;
16718 break;
16719 }
16720
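      /* fall through */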
16721 case BFD_RELOC_16:
16722 if (fixp->fx_pcrel)
16723 {
16724 code = BFD_RELOC_16_PCREL;
16725 break;
16726 }
16727
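      /* fall through */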
16728 case BFD_RELOC_32:
16729 if (fixp->fx_pcrel)
16730 {
16731 code = BFD_RELOC_32_PCREL;
16732 break;
16733 }
16734
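      /* fall through */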
16735 case BFD_RELOC_NONE:
16736 case BFD_RELOC_ARM_PCREL_BRANCH:
16737 case BFD_RELOC_ARM_PCREL_BLX:
16738 case BFD_RELOC_RVA:
16739 case BFD_RELOC_THUMB_PCREL_BRANCH7:
16740 case BFD_RELOC_THUMB_PCREL_BRANCH9:
16741 case BFD_RELOC_THUMB_PCREL_BRANCH12:
16742 case BFD_RELOC_THUMB_PCREL_BRANCH20:
16743 case BFD_RELOC_THUMB_PCREL_BRANCH23:
16744 case BFD_RELOC_THUMB_PCREL_BRANCH25:
16745 case BFD_RELOC_THUMB_PCREL_BLX:
16746 case BFD_RELOC_VTABLE_ENTRY:
16747 case BFD_RELOC_VTABLE_INHERIT:
16748 code = fixp->fx_r_type;
16749 break;
16750
16751 case BFD_RELOC_ARM_LITERAL:
16752 case BFD_RELOC_ARM_HWLITERAL:
16753 /* If this is called then a literal has
16754 been referenced across a section boundary. */
16755 as_bad_where (fixp->fx_file, fixp->fx_line,
16756 _("literal referenced across section boundary"));
16757 return NULL;
16758
16759#ifdef OBJ_ELF
16760 case BFD_RELOC_ARM_GOT32:
16761 case BFD_RELOC_ARM_GOTOFF:
16762 case BFD_RELOC_ARM_PLT32:
16763 case BFD_RELOC_ARM_TARGET1:
16764 case BFD_RELOC_ARM_ROSEGREL32:
16765 case BFD_RELOC_ARM_SBREL32:
16766 case BFD_RELOC_ARM_PREL31:
16767 case BFD_RELOC_ARM_TARGET2:
16768 case BFD_RELOC_ARM_TLS_LE32:
16769 case BFD_RELOC_ARM_TLS_LDO32:
16770 case BFD_RELOC_ARM_PCREL_CALL:
16771 case BFD_RELOC_ARM_PCREL_JUMP:
16772 code = fixp->fx_r_type;
16773 break;
16774
16775 case BFD_RELOC_ARM_TLS_GD32:
16776 case BFD_RELOC_ARM_TLS_IE32:
16777 case BFD_RELOC_ARM_TLS_LDM32:
16778 /* BFD will include the symbol's address in the addend.
16779 But we don't want that, so subtract it out again here. */
16780 if (!S_IS_COMMON (fixp->fx_addsy))
16781 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
16782 code = fixp->fx_r_type;
16783 break;
16784#endif
16785
16786 case BFD_RELOC_ARM_IMMEDIATE:
16787 as_bad_where (fixp->fx_file, fixp->fx_line,
16788 _("internal relocation (type: IMMEDIATE) not fixed up"));
16789 return NULL;
16790
16791 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
16792 as_bad_where (fixp->fx_file, fixp->fx_line,
16793 _("ADRL used for a symbol not defined in the same file"));
16794 return NULL;
16795
16796 case BFD_RELOC_ARM_OFFSET_IMM:
16797 if (section->use_rela_p)
16798 {
16799 code = fixp->fx_r_type;
16800 break;
16801 }
16802
16803 if (fixp->fx_addsy != NULL
16804 && !S_IS_DEFINED (fixp->fx_addsy)
16805 && S_IS_LOCAL (fixp->fx_addsy))
16806 {
16807 as_bad_where (fixp->fx_file, fixp->fx_line,
16808 _("undefined local label `%s'"),
16809 S_GET_NAME (fixp->fx_addsy));
16810 return NULL;
16811 }
16812
16813 as_bad_where (fixp->fx_file, fixp->fx_line,
16814 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
16815 return NULL;
16816
16817 default:
16818 {
16819 char * type;
16820
16821 switch (fixp->fx_r_type)
16822 {
16823 case BFD_RELOC_NONE: type = "NONE"; break;
16824 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
16825 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
16826 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
16827 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
16828 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
16829 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
16830 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
16831 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
16832 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
16833 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
16834 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
16835 default: type = _("<unknown>"); break;
16836 }
16837 as_bad_where (fixp->fx_file, fixp->fx_line,
16838 _("cannot represent %s relocation in this object file format"),
16839 type);
16840 return NULL;
16841 }
16842 }
16843
16844#ifdef OBJ_ELF
16845 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
16846 && GOT_symbol
16847 && fixp->fx_addsy == GOT_symbol)
16848 {
16849 code = BFD_RELOC_ARM_GOTPC;
16850 reloc->addend = fixp->fx_offset = reloc->address;
16851 }
16852#endif
16853
16854 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
16855
16856 if (reloc->howto == NULL)
16857 {
16858 as_bad_where (fixp->fx_file, fixp->fx_line,
16859 _("cannot represent %s relocation in this object file format"),
16860 bfd_get_reloc_code_name (code));
16861 return NULL;
16862 }
16863
16864 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
16865 vtable entry to be used in the relocation's section offset. */
16866 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
16867 reloc->address = fixp->fx_offset;
16868
16869 return reloc;
16870}
16871
16872/* This fix_new is called by cons via TC_CONS_FIX_NEW. */
16873
16874void
16875cons_fix_new_arm (fragS * frag,
16876 int where,
16877 int size,
16878 expressionS * exp)
16879{
16880 bfd_reloc_code_real_type type;
16881 int pcrel = 0;
16882
16883 /* Pick a reloc.
16884 FIXME: @@ Should look at CPU word size. */
16885 switch (size)
16886 {
16887 case 1:
16888 type = BFD_RELOC_8;
16889 break;
16890 case 2:
16891 type = BFD_RELOC_16;
16892 break;
16893 case 4:
16894 default:
16895 type = BFD_RELOC_32;
16896 break;
16897 case 8:
16898 type = BFD_RELOC_64;
16899 break;
16900 }
16901
16902 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
16903}
16904
16905#if defined OBJ_COFF || defined OBJ_ELF
16906void
16907arm_validate_fix (fixS * fixP)
16908{
16909 /* If the destination of the branch is a defined symbol which does not have
16910 the THUMB_FUNC attribute, then we must be calling a function which has
16911 the (interfacearm) attribute. We look for the Thumb entry point to that
16912 function and change the branch to refer to that function instead. */
16913 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
16914 && fixP->fx_addsy != NULL
16915 && S_IS_DEFINED (fixP->fx_addsy)
16916 && ! THUMB_IS_FUNC (fixP->fx_addsy))
16917 {
16918 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
16919 }
16920}
16921#endif
16922
16923int
16924arm_force_relocation (struct fix * fixp)
16925{
16926#if defined (OBJ_COFF) && defined (TE_PE)
16927 if (fixp->fx_r_type == BFD_RELOC_RVA)
16928 return 1;
16929#endif
16930
16931 /* Resolve these relocations even if the symbol is extern or weak. */
16932 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
16933 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
16934 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
16935 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
16936 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
16937 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
16938 return 0;
16939
16940 return generic_force_reloc (fixp);
16941}
16942
16943#ifdef OBJ_COFF
16944/* This is a little hack to help the gas/arm/adrl.s test. It prevents
16945 local labels from being added to the output symbol table when they
16946 are used with the ADRL pseudo op. The ADRL relocation should always
16947 be resolved before the binary is emitted, so it is safe to say that
16948 it is adjustable. */
16949
16950bfd_boolean
16951arm_fix_adjustable (fixS * fixP)
16952{
16953 if (fixP->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE)
16954 return 1;
16955 return 0;
16956}
16957#endif
16958
16959#ifdef OBJ_ELF
16960/* Relocations against Thumb function names must be left unadjusted,
16961 so that the linker can use this information to correctly set the
16962 bottom bit of their addresses. The MIPS version of this function
16963 also prevents relocations that are mips-16 specific, but I do not
16964 know why it does this.
16965
16966 FIXME:
16967 There is one other problem that ought to be addressed here, but
16968 which currently is not: Taking the address of a label (rather
16969 than a function) and then later jumping to that address. Such
16970 addresses also ought to have their bottom bit set (assuming that
16971 they reside in Thumb code), but at the moment they will not. */
16972
16973bfd_boolean
16974arm_fix_adjustable (fixS * fixP)
16975{
16976 if (fixP->fx_addsy == NULL)
16977 return 1;
16978
16979 if (THUMB_IS_FUNC (fixP->fx_addsy)
16980 && fixP->fx_subsy == NULL)
16981 return 0;
16982
16983 /* We need the symbol name for the VTABLE entries. */
16984 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
16985 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
16986 return 0;
16987
16988 /* Don't allow symbols to be discarded on GOT related relocs. */
16989 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
16990 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
16991 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
16992 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
16993 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
16994 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
16995 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
16996 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
16997 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
16998 return 0;
16999
17000 return 1;
17001}
17002
17003const char *
17004elf32_arm_target_format (void)
17005{
17006#ifdef TE_SYMBIAN
17007 return (target_big_endian
17008 ? "elf32-bigarm-symbian"
17009 : "elf32-littlearm-symbian");
17010#elif defined (TE_VXWORKS)
17011 return (target_big_endian
17012 ? "elf32-bigarm-vxworks"
17013 : "elf32-littlearm-vxworks");
17014#else
17015 if (target_big_endian)
17016 return "elf32-bigarm";
17017 else
17018 return "elf32-littlearm";
17019#endif
17020}
17021
17022void
17023armelf_frob_symbol (symbolS * symp,
17024 int * puntp)
17025{
17026 elf_frob_symbol (symp, puntp);
17027}
17028#endif
17029
17030/* MD interface: Finalization. */
17031
17032/* A good place to do this, although this was probably not intended
17033 for this kind of use. We need to dump the literal pool before
17034 references are made to a null symbol pointer. */
17035
17036void
17037arm_cleanup (void)
17038{
17039 literal_pool * pool;
17040
17041 for (pool = list_of_pools; pool; pool = pool->next)
17042 {
17043 /* Put it at the end of the relevant section. */
17044 subseg_set (pool->section, pool->sub_section);
17045#ifdef OBJ_ELF
17046 arm_elf_change_section ();
17047#endif
17048 s_ltorg (0);
17049 }
17050}
17051
17052/* Adjust the symbol table. This marks Thumb symbols as distinct from
17053 ARM ones. */
17054
17055void
17056arm_adjust_symtab (void)
17057{
17058#ifdef OBJ_COFF
17059 symbolS * sym;
17060
17061 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
17062 {
17063 if (ARM_IS_THUMB (sym))
17064 {
17065 if (THUMB_IS_FUNC (sym))
17066 {
17067 /* Mark the symbol as a Thumb function. */
17068 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
17069 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
17070 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
17071
17072 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
17073 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
17074 else
17075 as_bad (_("%s: unexpected function type: %d"),
17076 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
17077 }
17078 else switch (S_GET_STORAGE_CLASS (sym))
17079 {
17080 case C_EXT:
17081 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
17082 break;
17083 case C_STAT:
17084 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
17085 break;
17086 case C_LABEL:
17087 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
17088 break;
17089 default:
17090 /* Do nothing. */
17091 break;
17092 }
17093 }
17094
17095 if (ARM_IS_INTERWORK (sym))
17096 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
17097 }
17098#endif
17099#ifdef OBJ_ELF
17100 symbolS * sym;
17101 char bind;
17102
17103 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
17104 {
17105 if (ARM_IS_THUMB (sym))
17106 {
17107 elf_symbol_type * elf_sym;
17108
17109 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
17110 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
17111
17112 if (! bfd_is_arm_mapping_symbol_name (elf_sym->symbol.name))
17113 {
17114 /* If it's a .thumb_func, declare it as such;
17115 otherwise tag the label as .code 16. */
17116 if (THUMB_IS_FUNC (sym))
17117 elf_sym->internal_elf_sym.st_info =
17118 ELF_ST_INFO (bind, STT_ARM_TFUNC);
17119 else
17120 elf_sym->internal_elf_sym.st_info =
17121 ELF_ST_INFO (bind, STT_ARM_16BIT);
17122 }
17123 }
17124 }
17125#endif
17126}
17127
17128/* MD interface: Initialization. */
17129
17130static void
17131set_constant_flonums (void)
17132{
17133 int i;
17134
17135 for (i = 0; i < NUM_FLOAT_VALS; i++)
17136 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
17137 abort ();
17138}
17139
17140void
17141md_begin (void)
17142{
17143 unsigned mach;
17144 unsigned int i;
17145
17146 if ( (arm_ops_hsh = hash_new ()) == NULL
17147 || (arm_cond_hsh = hash_new ()) == NULL
17148 || (arm_shift_hsh = hash_new ()) == NULL
17149 || (arm_psr_hsh = hash_new ()) == NULL
17150 || (arm_v7m_psr_hsh = hash_new ()) == NULL
17151 || (arm_reg_hsh = hash_new ()) == NULL
17152 || (arm_reloc_hsh = hash_new ()) == NULL
17153 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
17154 as_fatal (_("virtual memory exhausted"));
17155
17156 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
17157 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
17158 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
17159 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
17160 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
17161 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
17162 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
17163 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
17164 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
17165 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
17166 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
17167 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
17168 for (i = 0;
17169 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
17170 i++)
17171 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
17172 (PTR) (barrier_opt_names + i));
17173#ifdef OBJ_ELF
17174 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
17175 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
17176#endif
17177
17178 set_constant_flonums ();
17179
17180 /* Set the cpu variant based on the command-line options. We prefer
17181 -mcpu= over -march= if both are set (as for GCC); and we prefer
17182 -mfpu= over any other way of setting the floating point unit.
17183 Using legacy options together with the new options is faulted. */
17184 if (legacy_cpu)
17185 {
17186 if (mcpu_cpu_opt || march_cpu_opt)
17187 as_bad (_("use of old and new-style options to set CPU type"));
17188
17189 mcpu_cpu_opt = legacy_cpu;
17190 }
17191 else if (!mcpu_cpu_opt)
17192 mcpu_cpu_opt = march_cpu_opt;
17193
17194 if (legacy_fpu)
17195 {
17196 if (mfpu_opt)
17197 as_bad (_("use of old and new-style options to set FPU type"));
17198
17199 mfpu_opt = legacy_fpu;
17200 }
17201 else if (!mfpu_opt)
17202 {
17203#if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
17204 /* Some environments specify a default FPU. If they don't, infer it
17205 from the processor. */
17206 if (mcpu_fpu_opt)
17207 mfpu_opt = mcpu_fpu_opt;
17208 else
17209 mfpu_opt = march_fpu_opt;
17210#else
17211 mfpu_opt = &fpu_default;
17212#endif
17213 }
17214
17215 if (!mfpu_opt)
17216 {
17217 if (!mcpu_cpu_opt)
17218 mfpu_opt = &fpu_default;
17219 else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
17220 mfpu_opt = &fpu_arch_vfp_v2;
17221 else
17222 mfpu_opt = &fpu_arch_fpa;
17223 }
17224
17225#ifdef CPU_DEFAULT
17226 if (!mcpu_cpu_opt)
17227 {
17228 mcpu_cpu_opt = &cpu_default;
17229 selected_cpu = cpu_default;
17230 }
17231#else
17232 if (mcpu_cpu_opt)
17233 selected_cpu = *mcpu_cpu_opt;
17234 else
17235 mcpu_cpu_opt = &arm_arch_any;
17236#endif
17237
17238 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
17239
17240 arm_arch_used = thumb_arch_used = arm_arch_none;
17241
17242#if defined OBJ_COFF || defined OBJ_ELF
17243 {
17244 unsigned int flags = 0;
17245
17246#if defined OBJ_ELF
17247 flags = meabi_flags;
17248
17249 switch (meabi_flags)
17250 {
17251 case EF_ARM_EABI_UNKNOWN:
17252#endif
17253 /* Set the flags in the private structure. */
17254 if (uses_apcs_26) flags |= F_APCS26;
17255 if (support_interwork) flags |= F_INTERWORK;
17256 if (uses_apcs_float) flags |= F_APCS_FLOAT;
17257 if (pic_code) flags |= F_PIC;
17258 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
17259 flags |= F_SOFT_FLOAT;
17260
17261 switch (mfloat_abi_opt)
17262 {
17263 case ARM_FLOAT_ABI_SOFT:
17264 case ARM_FLOAT_ABI_SOFTFP:
17265 flags |= F_SOFT_FLOAT;
17266 break;
17267
17268 case ARM_FLOAT_ABI_HARD:
17269 if (flags & F_SOFT_FLOAT)
17270 as_bad (_("hard-float conflicts with specified fpu"));
17271 break;
17272 }
17273
17274 /* Using pure-endian doubles (even if soft-float). */
17275 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
17276 flags |= F_VFP_FLOAT;
17277
17278#if defined OBJ_ELF
17279 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
17280 flags |= EF_ARM_MAVERICK_FLOAT;
17281 break;
17282
17283 case EF_ARM_EABI_VER4:
17284 case EF_ARM_EABI_VER5:
17285 /* No additional flags to set. */
17286 break;
17287
17288 default:
17289 abort ();
17290 }
17291#endif
17292 bfd_set_private_flags (stdoutput, flags);
17293
17294 /* We have run out of flags in the COFF header to encode the
17295 status of ATPCS support, so instead we create a dummy,
17296 empty, debug section called .arm.atpcs. */
17297 if (atpcs)
17298 {
17299 asection * sec;
17300
17301 sec = bfd_make_section (stdoutput, ".arm.atpcs");
17302
17303 if (sec != NULL)
17304 {
17305 bfd_set_section_flags
17306 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
17307 bfd_set_section_size (stdoutput, sec, 0);
17308 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
17309 }
17310 }
17311 }
17312#endif
17313
17314 /* Record the CPU type as well. */
17315 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
17316 mach = bfd_mach_arm_iWMMXt;
17317 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
17318 mach = bfd_mach_arm_XScale;
17319 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
17320 mach = bfd_mach_arm_ep9312;
17321 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
17322 mach = bfd_mach_arm_5TE;
17323 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
17324 {
17325 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
17326 mach = bfd_mach_arm_5T;
17327 else
17328 mach = bfd_mach_arm_5;
17329 }
17330 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
17331 {
17332 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
17333 mach = bfd_mach_arm_4T;
17334 else
17335 mach = bfd_mach_arm_4;
17336 }
17337 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
17338 mach = bfd_mach_arm_3M;
17339 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
17340 mach = bfd_mach_arm_3;
17341 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
17342 mach = bfd_mach_arm_2a;
17343 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
17344 mach = bfd_mach_arm_2;
17345 else
17346 mach = bfd_mach_arm_unknown;
17347
17348 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
17349}
17350
17351/* Command line processing. */
17352
17353/* md_parse_option
17354 Invocation line includes a switch not recognized by the base assembler.
17355 See if it's a processor-specific option.
17356
17357 This routine is somewhat complicated by the need for backwards
17358 compatibility (since older releases of gcc can't be changed).
17359 The new options try to make the interface as compatible as
17360 possible with GCC.
17361
17362 New options (supported) are:
17363
17364 -mcpu=<cpu name> Assemble for selected processor
17365 -march=<architecture name> Assemble for selected architecture
17366 -mfpu=<fpu architecture> Assemble for selected FPU.
17367 -EB/-mbig-endian Big-endian
17368 -EL/-mlittle-endian Little-endian
17369 -k Generate PIC code
17370 -mthumb Start in Thumb mode
17371 -mthumb-interwork Code supports ARM/Thumb interworking
17372
17373 For now we will also provide support for:
17374
17375 -mapcs-32 32-bit Program counter
17376 -mapcs-26 26-bit Program counter
17377 -mapcs-float Floats passed in FP registers
17378 -mapcs-reentrant Reentrant code
17379 -matpcs
17380 (at some point these will probably be replaced with -mapcs=<list of options>
17381 and -matpcs=<list of options>)
17382
17383 The remaining options are only supported for backwards compatibility.
17384 Cpu variants, the arm part is optional:
17385 -m[arm]1 Currently not supported.
17386 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
17387 -m[arm]3 Arm 3 processor
17388 -m[arm]6[xx], Arm 6 processors
17389 -m[arm]7[xx][t][[d]m] Arm 7 processors
17390 -m[arm]8[10] Arm 8 processors
17391 -m[arm]9[20][tdmi] Arm 9 processors
17392 -mstrongarm[110[0]] StrongARM processors
17393 -mxscale XScale processors
17394 -m[arm]v[2345[t[e]]] Arm architectures
17395 -mall All (except the ARM1)
17396 FP variants:
17397 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
17398 -mfpe-old (No float load/store multiples)
17399 -mvfpxd VFP Single precision
17400 -mvfp All VFP
17401 -mno-fpu Disable all floating point instructions
17402
17403 The following CPU names are recognized:
17404 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
17405 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
17406 arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
17407 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
17408 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
17409 arm10t, arm10e, arm1020t, arm1020e, arm10200e,
17410 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
17411
17412 */
17413
17414const char * md_shortopts = "m:k";
17415
17416#ifdef ARM_BI_ENDIAN
17417#define OPTION_EB (OPTION_MD_BASE + 0)
17418#define OPTION_EL (OPTION_MD_BASE + 1)
17419#else
17420#if TARGET_BYTES_BIG_ENDIAN
17421#define OPTION_EB (OPTION_MD_BASE + 0)
17422#else
17423#define OPTION_EL (OPTION_MD_BASE + 1)
17424#endif
17425#endif
17426
17427struct option md_longopts[] =
17428{
17429#ifdef OPTION_EB
17430 {"EB", no_argument, NULL, OPTION_EB},
17431#endif
17432#ifdef OPTION_EL
17433 {"EL", no_argument, NULL, OPTION_EL},
17434#endif
17435 {NULL, no_argument, NULL, 0}
17436};
17437
17438size_t md_longopts_size = sizeof (md_longopts);
17439
17440struct arm_option_table
17441{
17442 char *option; /* Option name to match. */
17443 char *help; /* Help information. */
17444 int *var; /* Variable to change. */
17445 int value; /* What to change it to. */
17446 char *deprecated; /* If non-null, print this message. */
17447};
17448
17449struct arm_option_table arm_opts[] =
17450{
17451 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
17452 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
17453 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
17454 &support_interwork, 1, NULL},
17455 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
17456 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
17457 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
17458 1, NULL},
17459 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
17460 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
17461 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
17462 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
17463 NULL},
17464
17465 /* These are recognized by the assembler, but have no effect on code. */
17466 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
17467 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
17468 {NULL, NULL, NULL, 0, NULL}
17469};
17470
17471struct arm_legacy_option_table
17472{
17473 char *option; /* Option name to match. */
17474 const arm_feature_set **var; /* Variable to change. */
17475 const arm_feature_set value; /* What to change it to. */
17476 char *deprecated; /* If non-null, print this message. */
17477};
17478
17479const struct arm_legacy_option_table arm_legacy_opts[] =
17480{
17481 /* DON'T add any new processors to this list -- we want the whole list
17482 to go away... Add them to the processors table instead. */
17483 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
17484 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
17485 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
17486 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
17487 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
17488 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
17489 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
17490 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
17491 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
17492 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
17493 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
17494 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
17495 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
17496 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
17497 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
17498 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
17499 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
17500 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
17501 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
17502 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
17503 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
17504 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
17505 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
17506 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
17507 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
17508 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
17509 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
17510 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
17511 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
17512 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
17513 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
17514 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
17515 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
17516 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
17517 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
17518 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
17519 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
17520 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
17521 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
17522 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
17523 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
17524 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
17525 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
17526 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
17527 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
17528 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
17529 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17530 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17531 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17532 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17533 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
17534 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
17535 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
17536 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
17537 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
17538 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
17539 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
17540 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
17541 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
17542 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
17543 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
17544 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
17545 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
17546 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
17547 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
17548 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
17549 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
17550 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
17551 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
17552 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
17553 N_("use -mcpu=strongarm110")},
17554 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
17555 N_("use -mcpu=strongarm1100")},
17556 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
17557 N_("use -mcpu=strongarm1110")},
17558 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
17559 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
17560 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
17561
17562 /* Architecture variants -- don't add any more to this list either. */
17563 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
17564 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
17565 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
17566 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
17567 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
17568 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
17569 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
17570 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
17571 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
17572 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
17573 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
17574 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
17575 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
17576 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
17577 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
17578 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
17579 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
17580 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
17581
17582 /* Floating point variants -- don't add any more to this list either. */
17583 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
17584 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
17585 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
17586 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
17587 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
17588
17589 {NULL, NULL, ARM_ARCH_NONE, NULL}
17590};
17591
17592struct arm_cpu_option_table
17593{
17594 char *name;
17595 const arm_feature_set value;
17596 /* For some CPUs we assume an FPU unless the user explicitly sets
17597 -mfpu=... */
17598 const arm_feature_set default_fpu;
17599 /* The canonical name of the CPU, or NULL to use NAME converted to upper
17600 case. */
17601 const char *canonical_name;
17602};
17603
17604/* This list should, at a minimum, contain all the cpu names
17605 recognized by GCC. */
17606static const struct arm_cpu_option_table arm_cpus[] =
17607{
17608 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
17609 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
17610 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
17611 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
17612 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
17613 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17614 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17615 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17616 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17617 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17618 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17619 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17620 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17621 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17622 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17623 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17624 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17625 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17626 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17627 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17628 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17629 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17630 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17631 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17632 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17633 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17634 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17635 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17636 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17637 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17638 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17639 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17640 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17641 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17642 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17643 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17644 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17645 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17646 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17647 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
17648 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17649 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17650 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17651 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17652 /* For V5 or later processors we default to using VFP; but the user
17653 should really set the FPU type explicitly. */
17654 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17655 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17656 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
17657 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
17658 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
17659 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17660 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
17661 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17662 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17663 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
17664 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17665 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17666 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17667 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17668 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17669 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
17670 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17671 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17672 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17673 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
17674 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
17675 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
17676 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
17677 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
17678 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
17679 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
17680 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
17681 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
17682 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
17683 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
17684 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
17685 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
17686 | FPU_NEON_EXT_V1),
17687 NULL},
17688 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
17689 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
17690 /* ??? XSCALE is really an architecture. */
17691 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
17692 /* ??? iwmmxt is not a processor. */
17693 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
17694 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
17695 /* Maverick */
17696 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
17697 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
17698};
17699
17700struct arm_arch_option_table
17701{
17702 char *name;
17703 const arm_feature_set value;
17704 const arm_feature_set default_fpu;
17705};
17706
17707/* This list should, at a minimum, contain all the architecture names
17708 recognized by GCC. */
17709static const struct arm_arch_option_table arm_archs[] =
17710{
17711 {"all", ARM_ANY, FPU_ARCH_FPA},
17712 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
17713 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
17714 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
17715 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
17716 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
17717 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
17718 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
17719 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
17720 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
17721 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
17722 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
17723 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
17724 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
17725 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
17726 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
17727 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
17728 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
17729 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
17730 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
17731 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
17732 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
17733 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
17734 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
17735 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
17736 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
17737 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
17738 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
17739 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
17740 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
17741 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
17742 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
17743 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
17744};
17745
17746/* ISA extensions in the co-processor space. */
17747struct arm_option_cpu_value_table
17748{
17749 char *name;
17750 const arm_feature_set value;
17751};
17752
17753static const struct arm_option_cpu_value_table arm_extensions[] =
17754{
17755 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
17756 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
17757 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
17758 {NULL, ARM_ARCH_NONE}
17759};
17760
17761/* This list should, at a minimum, contain all the fpu names
17762 recognized by GCC. */
17763static const struct arm_option_cpu_value_table arm_fpus[] =
17764{
17765 {"softfpa", FPU_NONE},
17766 {"fpe", FPU_ARCH_FPE},
17767 {"fpe2", FPU_ARCH_FPE},
17768 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
17769 {"fpa", FPU_ARCH_FPA},
17770 {"fpa10", FPU_ARCH_FPA},
17771 {"fpa11", FPU_ARCH_FPA},
17772 {"arm7500fe", FPU_ARCH_FPA},
17773 {"softvfp", FPU_ARCH_VFP},
17774 {"softvfp+vfp", FPU_ARCH_VFP_V2},
17775 {"vfp", FPU_ARCH_VFP_V2},
17776 {"vfp9", FPU_ARCH_VFP_V2},
17777 {"vfp3", FPU_ARCH_VFP_V3},
17778 {"vfp10", FPU_ARCH_VFP_V2},
17779 {"vfp10-r0", FPU_ARCH_VFP_V1},
17780 {"vfpxd", FPU_ARCH_VFP_V1xD},
17781 {"arm1020t", FPU_ARCH_VFP_V1},
17782 {"arm1020e", FPU_ARCH_VFP_V2},
17783 {"arm1136jfs", FPU_ARCH_VFP_V2},
17784 {"arm1136jf-s", FPU_ARCH_VFP_V2},
17785 {"maverick", FPU_ARCH_MAVERICK},
17786 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
17787 {NULL, ARM_ARCH_NONE}
17788};
17789
17790struct arm_option_value_table
17791{
17792 char *name;
17793 long value;
17794};
17795
17796static const struct arm_option_value_table arm_float_abis[] =
17797{
17798 {"hard", ARM_FLOAT_ABI_HARD},
17799 {"softfp", ARM_FLOAT_ABI_SOFTFP},
17800 {"soft", ARM_FLOAT_ABI_SOFT},
17801 {NULL, 0}
17802};
17803
17804#ifdef OBJ_ELF
17805/* We only know how to output GNU and ver 4/5 (AAELF) formats. */
17806static const struct arm_option_value_table arm_eabis[] =
17807{
17808 {"gnu", EF_ARM_EABI_UNKNOWN},
17809 {"4", EF_ARM_EABI_VER4},
17810 {"5", EF_ARM_EABI_VER5},
17811 {NULL, 0}
17812};
17813#endif
17814
17815struct arm_long_option_table
17816{
17817 char * option; /* Substring to match. */
17818 char * help; /* Help information. */
17819 int (* func) (char * subopt); /* Function to decode sub-option. */
17820 char * deprecated; /* If non-null, print this message. */
17821};
17822
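/* Parse a "+extension" suffix appended to -mcpu= or -march=, for example
   -mcpu=xscale+iwmmxt.  Several extensions may be chained together.  */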
17823static int
17824arm_parse_extension (char * str, const arm_feature_set **opt_p)
17825{
17826 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
17827
17828 /* Copy the feature set, so that we can modify it. */
17829 *ext_set = **opt_p;
17830 *opt_p = ext_set;
17831
17832 while (str != NULL && *str != 0)
17833 {
17834 const struct arm_option_cpu_value_table * opt;
17835 char * ext;
17836 int optlen;
17837
17838 if (*str != '+')
17839 {
17840 as_bad (_("invalid architectural extension"));
17841 return 0;
17842 }
17843
17844 str++;
17845 ext = strchr (str, '+');
17846
17847 if (ext != NULL)
17848 optlen = ext - str;
17849 else
17850 optlen = strlen (str);
17851
17852 if (optlen == 0)
17853 {
17854 as_bad (_("missing architectural extension"));
17855 return 0;
17856 }
17857
17858 for (opt = arm_extensions; opt->name != NULL; opt++)
17859 if (strncmp (opt->name, str, optlen) == 0)
17860 {
17861 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
17862 break;
17863 }
17864
17865 if (opt->name == NULL)
17866 {
17867 as_bad (_("unknown architectural extension `%s'"), str);
17868 return 0;
17869 }
17870
17871 str = ext;
17872 }
17873
17874 return 1;
17875}
17876
17877static int
17878arm_parse_cpu (char * str)
17879{
17880 const struct arm_cpu_option_table * opt;
17881 char * ext = strchr (str, '+');
17882 int optlen;
17883
17884 if (ext != NULL)
17885 optlen = ext - str;
17886 else
17887 optlen = strlen (str);
17888
17889 if (optlen == 0)
17890 {
17891 as_bad (_("missing cpu name `%s'"), str);
17892 return 0;
17893 }
17894
17895 for (opt = arm_cpus; opt->name != NULL; opt++)
17896 if (strncmp (opt->name, str, optlen) == 0)
17897 {
17898 mcpu_cpu_opt = &opt->value;
17899 mcpu_fpu_opt = &opt->default_fpu;
17900 if (opt->canonical_name)
17901 strcpy (selected_cpu_name, opt->canonical_name);
17902 else
17903 {
17904 int i;
17905 for (i = 0; i < optlen; i++)
17906 selected_cpu_name[i] = TOUPPER (opt->name[i]);
17907 selected_cpu_name[i] = 0;
17908 }
17909
17910 if (ext != NULL)
17911 return arm_parse_extension (ext, &mcpu_cpu_opt);
17912
17913 return 1;
17914 }
17915
17916 as_bad (_("unknown cpu `%s'"), str);
17917 return 0;
17918}
17919
17920static int
17921arm_parse_arch (char * str)
17922{
17923 const struct arm_arch_option_table *opt;
17924 char *ext = strchr (str, '+');
17925 int optlen;
17926
17927 if (ext != NULL)
17928 optlen = ext - str;
17929 else
17930 optlen = strlen (str);
17931
17932 if (optlen == 0)
17933 {
17934 as_bad (_("missing architecture name `%s'"), str);
17935 return 0;
17936 }
17937
17938 for (opt = arm_archs; opt->name != NULL; opt++)
17939 if (streq (opt->name, str))
17940 {
17941 march_cpu_opt = &opt->value;
17942 march_fpu_opt = &opt->default_fpu;
17943 strcpy(selected_cpu_name, opt->name);
17944
17945 if (ext != NULL)
17946 return arm_parse_extension (ext, &march_cpu_opt);
17947
17948 return 1;
17949 }
17950
17951 as_bad (_("unknown architecture `%s'\n"), str);
17952 return 0;
17953}
17954
17955static int
17956arm_parse_fpu (char * str)
17957{
17958 const struct arm_option_cpu_value_table * opt;
17959
17960 for (opt = arm_fpus; opt->name != NULL; opt++)
17961 if (streq (opt->name, str))
17962 {
17963 mfpu_opt = &opt->value;
17964 return 1;
17965 }
17966
17967 as_bad (_("unknown floating point format `%s'\n"), str);
17968 return 0;
17969}
17970
17971static int
17972arm_parse_float_abi (char * str)
17973{
17974 const struct arm_option_value_table * opt;
17975
17976 for (opt = arm_float_abis; opt->name != NULL; opt++)
17977 if (streq (opt->name, str))
17978 {
17979 mfloat_abi_opt = opt->value;
17980 return 1;
17981 }
17982
17983 as_bad (_("unknown floating point abi `%s'\n"), str);
17984 return 0;
17985}
17986
17987#ifdef OBJ_ELF
17988static int
17989arm_parse_eabi (char * str)
17990{
17991 const struct arm_option_value_table *opt;
17992
17993 for (opt = arm_eabis; opt->name != NULL; opt++)
17994 if (streq (opt->name, str))
17995 {
17996 meabi_flags = opt->value;
17997 return 1;
17998 }
17999 as_bad (_("unknown EABI `%s'\n"), str);
18000 return 0;
18001}
18002#endif
18003
18004struct arm_long_option_table arm_long_opts[] =
18005{
18006 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
18007 arm_parse_cpu, NULL},
18008 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
18009 arm_parse_arch, NULL},
18010 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
18011 arm_parse_fpu, NULL},
18012 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
18013 arm_parse_float_abi, NULL},
18014#ifdef OBJ_ELF
18015 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
18016 arm_parse_eabi, NULL},
18017#endif
18018 {NULL, NULL, 0, NULL}
18019};
18020
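/* Example of the dispatch performed below: "-mfloat-abi=softfp" arrives here
   as c == 'm' with arg == "float-abi=softfp" (the leading "-m" having been
   consumed by the generic option parser); the arm_long_opts scan matches the
   "mfloat-abi=" entry by prefix and calls arm_parse_float_abi ("softfp").
   Likewise "-mfpu=vfp" ends up in arm_parse_fpu ("vfp") and, for ELF,
   "-meabi=5" in arm_parse_eabi ("5").  The option values shown are
   illustrative.  */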
18021int
18022md_parse_option (int c, char * arg)
18023{
18024 struct arm_option_table *opt;
18025 const struct arm_legacy_option_table *fopt;
18026 struct arm_long_option_table *lopt;
18027
18028 switch (c)
18029 {
18030#ifdef OPTION_EB
18031 case OPTION_EB:
18032 target_big_endian = 1;
18033 break;
18034#endif
18035
18036#ifdef OPTION_EL
18037 case OPTION_EL:
18038 target_big_endian = 0;
18039 break;
18040#endif
18041
18042 case 'a':
18043 /* Listing option. Just ignore these; we don't support any additional
18044 ones. */
18045 return 0;
18046
18047 default:
18048 for (opt = arm_opts; opt->option != NULL; opt++)
18049 {
18050 if (c == opt->option[0]
18051 && ((arg == NULL && opt->option[1] == 0)
18052 || streq (arg, opt->option + 1)))
18053 {
18054#if WARN_DEPRECATED
18055 /* If the option is deprecated, tell the user. */
18056 if (opt->deprecated != NULL)
18057 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
18058 arg ? arg : "", _(opt->deprecated));
18059#endif
18060
18061 if (opt->var != NULL)
18062 *opt->var = opt->value;
18063
18064 return 1;
18065 }
18066 }
18067
18068 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
18069 {
18070 if (c == fopt->option[0]
18071 && ((arg == NULL && fopt->option[1] == 0)
18072 || streq (arg, fopt->option + 1)))
18073 {
18074#if WARN_DEPRECATED
18075 /* If the option is deprecated, tell the user. */
18076 if (fopt->deprecated != NULL)
18077 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
18078 arg ? arg : "", _(fopt->deprecated));
18079#endif
18080
18081 if (fopt->var != NULL)
18082 *fopt->var = &fopt->value;
18083
18084 return 1;
18085 }
18086 }
18087
18088 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
18089 {
18090 /* These options are expected to have an argument. */
18091 if (c == lopt->option[0]
18092 && arg != NULL
18093 && strncmp (arg, lopt->option + 1,
18094 strlen (lopt->option + 1)) == 0)
18095 {
18096#if WARN_DEPRECATED
18097 /* If the option is deprecated, tell the user. */
18098 if (lopt->deprecated != NULL)
18099 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
18100 _(lopt->deprecated));
18101#endif
18102
18103 /* Call the sub-option parser. */
18104 return lopt->func (arg + strlen (lopt->option) - 1);
18105 }
18106 }
18107
18108 return 0;
18109 }
18110
18111 return 1;
18112}
18113
18114void
18115md_show_usage (FILE * fp)
18116{
18117 struct arm_option_table *opt;
18118 struct arm_long_option_table *lopt;
18119
18120 fprintf (fp, _(" ARM-specific assembler options:\n"));
18121
18122 for (opt = arm_opts; opt->option != NULL; opt++)
18123 if (opt->help != NULL)
18124 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
18125
18126 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
18127 if (lopt->help != NULL)
18128 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
18129
18130#ifdef OPTION_EB
18131 fprintf (fp, _("\
18132 -EB assemble code for a big-endian cpu\n"));
18133#endif
18134
18135#ifdef OPTION_EL
18136 fprintf (fp, _("\
18137 -EL assemble code for a little-endian cpu\n"));
18138#endif
18139}
18140
18141
18142#ifdef OBJ_ELF
18143typedef struct
18144{
18145 int val;
18146 arm_feature_set flags;
18147} cpu_arch_ver_table;
18148
18149/* Mapping from CPU features to EABI CPU arch values. The table must be
18150 sorted with the least-featureful entries first. */
18151static const cpu_arch_ver_table cpu_arch_ver[] =
18152{
18153 {1, ARM_ARCH_V4},
18154 {2, ARM_ARCH_V4T},
18155 {3, ARM_ARCH_V5},
18156 {4, ARM_ARCH_V5TE},
18157 {5, ARM_ARCH_V5TEJ},
18158 {6, ARM_ARCH_V6},
18159 {7, ARM_ARCH_V6Z},
18160 {8, ARM_ARCH_V6K},
18161 {9, ARM_ARCH_V6T2},
18162 {10, ARM_ARCH_V7A},
18163 {10, ARM_ARCH_V7R},
18164 {10, ARM_ARCH_V7M},
18165 {0, ARM_ARCH_NONE}
18166};
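/* Because the table is ordered least-featureful first, the scan in
   aeabi_set_public_attributes keeps the last entry whose feature bits
   overlap what was selected or used, so the highest matching architecture
   wins.  As an illustration, if only ARMv6K and VFPv2 features are involved,
   the scan settles on Tag_CPU_arch = 8 and the VFP tests further down emit
   Tag_VFP_arch = 2.  */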
18167
18168/* Set the public EABI object attributes. */
18169static void
18170aeabi_set_public_attributes (void)
18171{
18172 int arch;
18173 arm_feature_set flags;
18174 arm_feature_set tmp;
18175 const cpu_arch_ver_table *p;
18176
18177 /* Choose the architecture based on the capabilities of the requested cpu
18178 (if any) and/or the instructions actually used. */
18179 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
18180 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
18181 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
18182
18183 tmp = flags;
18184 arch = 0;
18185 for (p = cpu_arch_ver; p->val; p++)
18186 {
18187 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
18188 {
18189 arch = p->val;
18190 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
18191 }
18192 }
18193
18194 /* Tag_CPU_name. */
18195 if (selected_cpu_name[0])
18196 {
18197 char *p;
18198
18199 p = selected_cpu_name;
18200 if (strncmp(p, "armv", 4) == 0)
18201 {
18202 int i;
18203
18204 p += 4;
18205 for (i = 0; p[i]; i++)
18206 p[i] = TOUPPER (p[i]);
18207 }
18208 elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
18209 }
18210 /* Tag_CPU_arch. */
18211 elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
18212 /* Tag_CPU_arch_profile. */
18213 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
18214 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
18215 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
18216 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
18217 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
18218 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
18219 /* Tag_ARM_ISA_use. */
18220 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
18221 elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
18222 /* Tag_THUMB_ISA_use. */
18223 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
18224 elf32_arm_add_eabi_attr_int (stdoutput, 9,
18225 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
18226 /* Tag_VFP_arch. */
18227 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
18228 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
18229 elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
18230 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
18231 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
18232 elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
18233 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
18234 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
18235 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
18236 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
18237 elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
18238 /* Tag_WMMX_arch. */
18239 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
18240 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
18241 elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
18242 /* Tag_NEON_arch. */
18243 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
18244 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
18245 elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
18246}
18247
18248/* Add the .ARM.attributes section. */
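/* Called when assembly is complete.  Nothing is emitted for -meabi=gnu
   (EF_ARM_EABI_UNKNOWN); for EABI version 4 or 5 the attributes collected by
   aeabi_set_public_attributes are written into a read-only .ARM.attributes
   section.  */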
18249void
18250arm_md_end (void)
18251{
18252 segT s;
18253 char *p;
18254 addressT addr;
18255 offsetT size;
18256
18257 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
18258 return;
18259
18260 aeabi_set_public_attributes ();
18261 size = elf32_arm_eabi_attr_size (stdoutput);
18262 s = subseg_new (".ARM.attributes", 0);
18263 bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
18264 addr = frag_now_fix ();
18265 p = frag_more (size);
18266 elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *)p, size);
18267}
18268#endif /* OBJ_ELF */
18269
18270
18271/* Parse a .cpu directive. */
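/* For example:

       .cpu arm7tdmi

   The name is matched case-sensitively against arm_cpus (skipping the
   leading "all" entry); "arm7tdmi" is only an illustration and assumes that
   entry is present in the table.  */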
18272
18273static void
18274s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
18275{
18276 const struct arm_cpu_option_table *opt;
18277 char *name;
18278 char saved_char;
18279
18280 name = input_line_pointer;
18281 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18282 input_line_pointer++;
18283 saved_char = *input_line_pointer;
18284 *input_line_pointer = 0;
18285
18286 /* Skip the first "all" entry. */
18287 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
18288 if (streq (opt->name, name))
18289 {
18290 mcpu_cpu_opt = &opt->value;
18291 selected_cpu = opt->value;
18292 if (opt->canonical_name)
18293 strcpy(selected_cpu_name, opt->canonical_name);
18294 else
18295 {
18296 int i;
18297 for (i = 0; opt->name[i]; i++)
18298 selected_cpu_name[i] = TOUPPER (opt->name[i]);
18299 selected_cpu_name[i] = 0;
18300 }
18301 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18302 *input_line_pointer = saved_char;
18303 demand_empty_rest_of_line ();
18304 return;
18305 }
18306 as_bad (_("unknown cpu `%s'"), name);
18307 *input_line_pointer = saved_char;
18308 ignore_rest_of_line ();
18309}
18310
18311
18312/* Parse a .arch directive. */
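/* For example:

       .arch armv5te

   The name must match an arm_archs entry exactly; "armv5te" is illustrative
   and assumes such an entry exists in the table.  */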
18313
18314static void
18315s_arm_arch (int ignored ATTRIBUTE_UNUSED)
18316{
18317 const struct arm_arch_option_table *opt;
18318 char saved_char;
18319 char *name;
18320
18321 name = input_line_pointer;
18322 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18323 input_line_pointer++;
18324 saved_char = *input_line_pointer;
18325 *input_line_pointer = 0;
18326
18327 /* Skip the first "all" entry. */
18328 for (opt = arm_archs + 1; opt->name != NULL; opt++)
18329 if (streq (opt->name, name))
18330 {
18331 mcpu_cpu_opt = &opt->value;
18332 selected_cpu = opt->value;
18333 strcpy(selected_cpu_name, opt->name);
18334 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18335 *input_line_pointer = saved_char;
18336 demand_empty_rest_of_line ();
18337 return;
18338 }
18339
18340 as_bad (_("unknown architecture `%s'\n"), name);
18341 *input_line_pointer = saved_char;
18342 ignore_rest_of_line ();
18343}
18344
18345
18346/* Parse a .fpu directive. */
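/* For example:

       .fpu vfp

   selects FPU_ARCH_VFP_V2 via the arm_fpus table above and folds it into
   cpu_variant.  */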
18347
18348static void
18349s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
18350{
18351 const struct arm_option_cpu_value_table *opt;
18352 char saved_char;
18353 char *name;
18354
18355 name = input_line_pointer;
18356 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18357 input_line_pointer++;
18358 saved_char = *input_line_pointer;
18359 *input_line_pointer = 0;
18360
18361 for (opt = arm_fpus; opt->name != NULL; opt++)
18362 if (streq (opt->name, name))
18363 {
18364 mfpu_opt = &opt->value;
18365 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18366 *input_line_pointer = saved_char;
18367 demand_empty_rest_of_line ();
18368 return;
18369 }
18370
18371 as_bad (_("unknown floating point format `%s'\n"), name);
18372 *input_line_pointer = saved_char;
18373 ignore_rest_of_line ();
18374}
18375