1 /* Definitions of target machine for GNU compiler, for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
6 and Martin Simmons (@harleqn.co.uk).
7 More major hacks by Richard Earnshaw (rearnsha@arm.com)
8 Minor hacks by Nick Clifton (nickc@cygnus.com)
9
10 This file is part of GCC.
11
12 GCC is free software; you can redistribute it and/or modify it
13 under the terms of the GNU General Public License as published
14 by the Free Software Foundation; either version 3, or (at your
15 option) any later version.
16
17 GCC is distributed in the hope that it will be useful, but WITHOUT
18 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
19 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
20 License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
25
26 #ifndef GCC_ARM_H
27 #define GCC_ARM_H
28
29 /* We can't use enum machine_mode inside a generator file because it
30 hasn't been created yet; we shouldn't be using any code that
31 needs the real definition though, so this ought to be safe. */
32 #ifdef GENERATOR_FILE
33 #define MACHMODE int
34 #else
35 #include "insn-modes.h"
36 #define MACHMODE enum machine_mode
37 #endif
38
39 #include "config/vxworks-dummy.h"
40
41 /* The architecture define. */
42 extern char arm_arch_name[];
43
44 /* Target CPU builtins. */
45 #define TARGET_CPU_CPP_BUILTINS() \
46 do \
47 { \
48 /* Define __arm__ even when in thumb mode, for \
49 consistency with armcc. */ \
50 builtin_define ("__arm__"); \
51 builtin_define ("__APCS_32__"); \
52 if (TARGET_THUMB) \
53 builtin_define ("__thumb__"); \
54 if (TARGET_THUMB2) \
55 builtin_define ("__thumb2__"); \
56 \
57 if (TARGET_BIG_END) \
58 { \
59 builtin_define ("__ARMEB__"); \
60 if (TARGET_THUMB) \
61 builtin_define ("__THUMBEB__"); \
62 if (TARGET_LITTLE_WORDS) \
63 builtin_define ("__ARMWEL__"); \
64 } \
65 else \
66 { \
67 builtin_define ("__ARMEL__"); \
68 if (TARGET_THUMB) \
69 builtin_define ("__THUMBEL__"); \
70 } \
71 \
72 if (TARGET_SOFT_FLOAT) \
73 builtin_define ("__SOFTFP__"); \
74 \
75 if (TARGET_VFP) \
76 builtin_define ("__VFP_FP__"); \
77 \
78 if (TARGET_NEON) \
79 builtin_define ("__ARM_NEON__"); \
80 \
81 /* Add a define for interworking. \
82 Needed when building libgcc.a. */ \
83 if (arm_cpp_interwork) \
84 builtin_define ("__THUMB_INTERWORK__"); \
85 \
86 builtin_assert ("cpu=arm"); \
87 builtin_assert ("machine=arm"); \
88 \
89 builtin_define (arm_arch_name); \
90 if (arm_arch_cirrus) \
91 builtin_define ("__MAVERICK__"); \
92 if (arm_arch_xscale) \
93 builtin_define ("__XSCALE__"); \
94 if (arm_arch_iwmmxt) \
95 builtin_define ("__IWMMXT__"); \
96 if (TARGET_AAPCS_BASED) \
97 { \
98 if (arm_pcs_default == ARM_PCS_AAPCS_VFP) \
99 builtin_define ("__ARM_PCS_VFP"); \
100 else if (arm_pcs_default == ARM_PCS_AAPCS) \
101 builtin_define ("__ARM_PCS"); \
102 builtin_define ("__ARM_EABI__"); \
103 } \
104 } while (0)
105
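/* Illustrative sketch (not part of the original header): code compiled for
   this target can key off the macros defined above, e.g.

     #if defined (__ARM_NEON__)
     # include <arm_neon.h>
     #elif defined (__ARM_EABI__)
       ... fall back to plain AAPCS code ...
     #endif

   assuming the toolchain was configured with the corresponding FPU/ABI.  */
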
106 /* The various ARM cores. */
107 enum processor_type
108 {
109 #define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
110 IDENT,
111 #include "arm-cores.def"
112 #undef ARM_CORE
113 /* Used to indicate that no processor has been specified. */
114 arm_none
115 };
116
117 enum target_cpus
118 {
119 #define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
120 TARGET_CPU_##IDENT,
121 #include "arm-cores.def"
122 #undef ARM_CORE
123 TARGET_CPU_generic
124 };
125
126 /* The processor for which instructions should be scheduled. */
127 extern enum processor_type arm_tune;
128
129 enum arm_sync_generator_tag
130 {
131 arm_sync_generator_omn,
132 arm_sync_generator_omrn
133 };
134
135 /* Wrapper to pass around a polymorphic pointer to a sync instruction
136 generator together with a tag identifying its calling signature. */
137 struct arm_sync_generator
138 {
139 enum arm_sync_generator_tag op;
140 union
141 {
142 rtx (* omn) (rtx, rtx, rtx);
143 rtx (* omrn) (rtx, rtx, rtx, rtx);
144 } u;
145 };
146
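/* A minimal usage sketch (illustrative only; gen_my_sync_op is a
   hypothetical generator, not a real pattern):

     struct arm_sync_generator generator;
     generator.op = arm_sync_generator_omn;
     generator.u.omn = gen_my_sync_op;
     ...
     insn = generator.u.omn (op0, op1, op2);

   The omrn variant carries one extra rtx operand.  */
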
147 typedef enum arm_cond_code
148 {
149 ARM_EQ = 0, ARM_NE, ARM_CS, ARM_CC, ARM_MI, ARM_PL, ARM_VS, ARM_VC,
150 ARM_HI, ARM_LS, ARM_GE, ARM_LT, ARM_GT, ARM_LE, ARM_AL, ARM_NV
151 }
152 arm_cc;
153
154 extern arm_cc arm_current_cc;
155
156 #define ARM_INVERSE_CONDITION_CODE(X) ((arm_cc) (((int)X) ^ 1))
157
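/* The condition codes above are laid out in complementary pairs, so the
   XOR with 1 flips a condition to its inverse; e.g. (illustrative)
     ARM_INVERSE_CONDITION_CODE (ARM_EQ) == ARM_NE
     ARM_INVERSE_CONDITION_CODE (ARM_GE) == ARM_LT  */
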
158 extern int arm_target_label;
159 extern int arm_ccfsm_state;
160 extern GTY(()) rtx arm_target_insn;
161 /* The label of the current constant pool. */
162 extern rtx pool_vector_label;
163 /* Set to 1 when a return insn is output; this means that the epilogue
164 is not needed. */
165 extern int return_used_this_function;
166 /* Callback to output language specific object attributes. */
167 extern void (*arm_lang_output_object_attributes_hook)(void);
168 \f
169 /* Just in case configure has failed to define anything. */
170 #ifndef TARGET_CPU_DEFAULT
171 #define TARGET_CPU_DEFAULT TARGET_CPU_generic
172 #endif
173
174
175 #undef CPP_SPEC
176 #define CPP_SPEC "%(subtarget_cpp_spec) \
177 %{msoft-float:%{mhard-float: \
178 %e-msoft-float and -mhard_float may not be used together}} \
179 %{mbig-endian:%{mlittle-endian: \
180 %e-mbig-endian and -mlittle-endian may not be used together}}"
181
182 #ifndef CC1_SPEC
183 #define CC1_SPEC ""
184 #endif
185
186 /* This macro defines names of additional specifications to put in the specs
187 that can be used in various specifications like CC1_SPEC. Its definition
188 is an initializer with a subgrouping for each command option.
189
190 Each subgrouping contains a string constant that defines the
191 specification name, and a string constant that is used by the GCC driver
192 program.
193
194 Do not define this macro if it does not need to do anything. */
195 #define EXTRA_SPECS \
196 { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \
197 SUBTARGET_EXTRA_SPECS
198
199 #ifndef SUBTARGET_EXTRA_SPECS
200 #define SUBTARGET_EXTRA_SPECS
201 #endif
202
203 #ifndef SUBTARGET_CPP_SPEC
204 #define SUBTARGET_CPP_SPEC ""
205 #endif
206 \f
207 /* Run-time Target Specification. */
208 #ifndef TARGET_VERSION
209 #define TARGET_VERSION fputs (" (ARM/generic)", stderr);
210 #endif
211
212 #define TARGET_SOFT_FLOAT (arm_float_abi == ARM_FLOAT_ABI_SOFT)
213 /* Use hardware floating point instructions. */
214 #define TARGET_HARD_FLOAT (arm_float_abi != ARM_FLOAT_ABI_SOFT)
215 /* Use hardware floating point calling convention. */
216 #define TARGET_HARD_FLOAT_ABI (arm_float_abi == ARM_FLOAT_ABI_HARD)
217 #define TARGET_FPA (arm_fpu_desc->model == ARM_FP_MODEL_FPA)
218 #define TARGET_MAVERICK (arm_fpu_desc->model == ARM_FP_MODEL_MAVERICK)
219 #define TARGET_VFP (arm_fpu_desc->model == ARM_FP_MODEL_VFP)
220 #define TARGET_IWMMXT (arm_arch_iwmmxt)
221 #define TARGET_REALLY_IWMMXT (TARGET_IWMMXT && TARGET_32BIT)
222 #define TARGET_IWMMXT_ABI (TARGET_32BIT && arm_abi == ARM_ABI_IWMMXT)
223 #define TARGET_ARM (! TARGET_THUMB)
224 #define TARGET_EITHER 1 /* (TARGET_ARM | TARGET_THUMB) */
225 #define TARGET_BACKTRACE (leaf_function_p () \
226 ? TARGET_TPCS_LEAF_FRAME \
227 : TARGET_TPCS_FRAME)
228 #define TARGET_LDRD (arm_arch5e && ARM_DOUBLEWORD_ALIGN)
229 #define TARGET_AAPCS_BASED \
230 (arm_abi != ARM_ABI_APCS && arm_abi != ARM_ABI_ATPCS)
231
232 #define TARGET_HARD_TP (target_thread_pointer == TP_CP15)
233 #define TARGET_SOFT_TP (target_thread_pointer == TP_SOFT)
234
235 /* Only 16-bit thumb code. */
236 #define TARGET_THUMB1 (TARGET_THUMB && !arm_arch_thumb2)
237 /* Arm or Thumb-2 32-bit code. */
238 #define TARGET_32BIT (TARGET_ARM || arm_arch_thumb2)
239 /* 32-bit Thumb-2 code. */
240 #define TARGET_THUMB2 (TARGET_THUMB && arm_arch_thumb2)
241 /* Thumb-1 only. */
242 #define TARGET_THUMB1_ONLY (TARGET_THUMB1 && !arm_arch_notm)
243 /* FPA emulator without LFM. */
244 #define TARGET_FPA_EMU2 (TARGET_FPA && arm_fpu_desc->rev == 2)
245
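/* Illustrative combinations of the mode macros above (assuming typical
   configurations):
     -marm on ARMv5TE:        TARGET_ARM, TARGET_32BIT
     -mthumb on ARMv4T:       TARGET_THUMB, TARGET_THUMB1
     -mthumb on ARMv7-A/R/M:  TARGET_THUMB, TARGET_THUMB2, TARGET_32BIT
     -mthumb on ARMv6-M:      TARGET_THUMB1 and TARGET_THUMB1_ONLY, since
                              the M profile lacks the ARM instruction set.  */
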
246 /* The following two macros concern the ability to execute coprocessor
247 instructions for VFPv3 or NEON. TARGET_VFP3/TARGET_VFPD32 are currently
248 only ever tested when we know we are generating for VFP hardware; we need
249 to be more careful with TARGET_NEON as noted below. */
250
251 /* FPU has the full VFPv3/NEON register file of 32 D registers. */
252 #define TARGET_VFPD32 (TARGET_VFP && arm_fpu_desc->regs == VFP_REG_D32)
253
254 /* FPU supports VFPv3 instructions. */
255 #define TARGET_VFP3 (TARGET_VFP && arm_fpu_desc->rev >= 3)
256
257 /* FPU only supports VFP single-precision instructions. */
258 #define TARGET_VFP_SINGLE (TARGET_VFP && arm_fpu_desc->regs == VFP_REG_SINGLE)
259
260 /* FPU supports VFP double-precision instructions. */
261 #define TARGET_VFP_DOUBLE (TARGET_VFP && arm_fpu_desc->regs != VFP_REG_SINGLE)
262
263 /* FPU supports half-precision floating-point with NEON element load/store. */
264 #define TARGET_NEON_FP16 \
265 (TARGET_VFP && arm_fpu_desc->neon && arm_fpu_desc->fp16)
266
267 /* FPU supports VFP half-precision floating-point. */
268 #define TARGET_FP16 (TARGET_VFP && arm_fpu_desc->fp16)
269
270 /* FPU supports Neon instructions. The setting of this macro gets
271 revealed via __ARM_NEON__ so we add extra guards upon TARGET_32BIT
272 and TARGET_HARD_FLOAT to ensure that NEON instructions are
273 available. */
274 #define TARGET_NEON (TARGET_32BIT && TARGET_HARD_FLOAT \
275 && TARGET_VFP && arm_fpu_desc->neon)
276
277 /* "DSP" multiply instructions, eg. SMULxy. */
278 #define TARGET_DSP_MULTIPLY \
279 (TARGET_32BIT && arm_arch5e && (arm_arch_notm || arm_arch7em))
280 /* Integer SIMD instructions, and extend-accumulate instructions. */
281 #define TARGET_INT_SIMD \
282 (TARGET_32BIT && arm_arch6 && (arm_arch_notm || arm_arch7em))
283
284 /* Should MOVW/MOVT be used in preference to a constant pool. */
285 #define TARGET_USE_MOVT (arm_arch_thumb2 && !optimize_size)
286
287 /* We could use unified syntax for arm mode, but for now we just use it
288 for Thumb-2. */
289 #define TARGET_UNIFIED_ASM TARGET_THUMB2
290
291 /* Nonzero if this chip provides the DMB instruction. */
292 #define TARGET_HAVE_DMB (arm_arch7)
293
294 /* Nonzero if this chip implements a memory barrier via CP15. */
295 #define TARGET_HAVE_DMB_MCR (arm_arch6k && ! TARGET_HAVE_DMB)
296
297 /* Nonzero if this chip implements a memory barrier instruction. */
298 #define TARGET_HAVE_MEMORY_BARRIER (TARGET_HAVE_DMB || TARGET_HAVE_DMB_MCR)
299
300 /* Nonzero if this chip supports ldrex and strex */
301 #define TARGET_HAVE_LDREX ((arm_arch6 && TARGET_ARM) || arm_arch7)
302
303 /* Nonzero if this chip supports ldrex{bhd} and strex{bhd}. */
304 #define TARGET_HAVE_LDREXBHD ((arm_arch6k && TARGET_ARM) || arm_arch7)
305
306 /* True iff the full BPABI is being used. If TARGET_BPABI is true,
307 then TARGET_AAPCS_BASED must be true -- but the converse does not
308 hold. TARGET_BPABI implies the use of the BPABI runtime library,
309 etc., in addition to just the AAPCS calling conventions. */
310 #ifndef TARGET_BPABI
311 #define TARGET_BPABI false
312 #endif
313
314 /* Support for a compile-time default CPU, et cetera. The rules are:
315 --with-arch is ignored if -march or -mcpu are specified.
316 --with-cpu is ignored if -march or -mcpu are specified, and is overridden
317 by --with-arch.
318 --with-tune is ignored if -mtune or -mcpu are specified (but not affected
319 by -march).
320 --with-float is ignored if -mhard-float, -msoft-float or -mfloat-abi are
321 specified.
322 --with-fpu is ignored if -mfpu is specified.
323 --with-abi is ignored if -mabi is specified. */
324 #define OPTION_DEFAULT_SPECS \
325 {"arch", "%{!march=*:%{!mcpu=*:-march=%(VALUE)}}" }, \
326 {"cpu", "%{!march=*:%{!mcpu=*:-mcpu=%(VALUE)}}" }, \
327 {"tune", "%{!mcpu=*:%{!mtune=*:-mtune=%(VALUE)}}" }, \
328 {"float", \
329 "%{!msoft-float:%{!mhard-float:%{!mfloat-abi=*:-mfloat-abi=%(VALUE)}}}" }, \
330 {"fpu", "%{!mfpu=*:-mfpu=%(VALUE)}"}, \
331 {"abi", "%{!mabi=*:-mabi=%(VALUE)}"}, \
332 {"mode", "%{!marm:%{!mthumb:-m%(VALUE)}}"},
333
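/* Worked example (illustrative): for a compiler configured with
   --with-cpu=cortex-a8 and --with-float=softfp,

     arm-none-eabi-gcc foo.c

   behaves like "gcc -mcpu=cortex-a8 -mfloat-abi=softfp foo.c", whereas

     arm-none-eabi-gcc -march=armv7-a foo.c

   drops the configured CPU default because of the %{!march=*:%{!mcpu=*:...}}
   guards above.  */
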
334 /* Which floating point model to use. */
335 enum arm_fp_model
336 {
337 ARM_FP_MODEL_UNKNOWN,
338 /* FPA model (Hardware or software). */
339 ARM_FP_MODEL_FPA,
340 /* Cirrus Maverick floating point model. */
341 ARM_FP_MODEL_MAVERICK,
342 /* VFP floating point model. */
343 ARM_FP_MODEL_VFP
344 };
345
346 enum vfp_reg_type
347 {
348 VFP_NONE = 0,
349 VFP_REG_D16,
350 VFP_REG_D32,
351 VFP_REG_SINGLE
352 };
353
354 extern const struct arm_fpu_desc
355 {
356 const char *name;
357 enum arm_fp_model model;
358 int rev;
359 enum vfp_reg_type regs;
360 int neon;
361 int fp16;
362 } *arm_fpu_desc;
363
364 /* Which floating point hardware to schedule for. */
365 extern int arm_fpu_attr;
366
367 enum float_abi_type
368 {
369 ARM_FLOAT_ABI_SOFT,
370 ARM_FLOAT_ABI_SOFTFP,
371 ARM_FLOAT_ABI_HARD
372 };
373
374 extern enum float_abi_type arm_float_abi;
375
376 #ifndef TARGET_DEFAULT_FLOAT_ABI
377 #define TARGET_DEFAULT_FLOAT_ABI ARM_FLOAT_ABI_SOFT
378 #endif
379
380 /* Which __fp16 format to use.
381 The enumeration values correspond to the numbering for the
382 Tag_ABI_FP_16bit_format attribute.
383 */
384 enum arm_fp16_format_type
385 {
386 ARM_FP16_FORMAT_NONE = 0,
387 ARM_FP16_FORMAT_IEEE = 1,
388 ARM_FP16_FORMAT_ALTERNATIVE = 2
389 };
390
391 extern enum arm_fp16_format_type arm_fp16_format;
392 #define LARGEST_EXPONENT_IS_NORMAL(bits) \
393 ((bits) == 16 && arm_fp16_format == ARM_FP16_FORMAT_ALTERNATIVE)
394
395 /* Which ABI to use. */
396 enum arm_abi_type
397 {
398 ARM_ABI_APCS,
399 ARM_ABI_ATPCS,
400 ARM_ABI_AAPCS,
401 ARM_ABI_IWMMXT,
402 ARM_ABI_AAPCS_LINUX
403 };
404
405 extern enum arm_abi_type arm_abi;
406
407 #ifndef ARM_DEFAULT_ABI
408 #define ARM_DEFAULT_ABI ARM_ABI_APCS
409 #endif
410
411 /* Which thread pointer access sequence to use. */
412 enum arm_tp_type {
413 TP_AUTO,
414 TP_SOFT,
415 TP_CP15
416 };
417
418 extern enum arm_tp_type target_thread_pointer;
419
420 /* Nonzero if this chip supports the ARM Architecture 3M extensions. */
421 extern int arm_arch3m;
422
423 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
424 extern int arm_arch4;
425
426 /* Nonzero if this chip supports the ARM Architecture 4T extensions. */
427 extern int arm_arch4t;
428
429 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
430 extern int arm_arch5;
431
432 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
433 extern int arm_arch5e;
434
435 /* Nonzero if this chip supports the ARM Architecture 6 extensions. */
436 extern int arm_arch6;
437
438 /* Nonzero if this chip supports the ARM Architecture 6k extensions. */
439 extern int arm_arch6k;
440
441 /* Nonzero if this chip supports the ARM Architecture 7 extensions. */
442 extern int arm_arch7;
443
444 /* Nonzero if instructions not present in the 'M' profile can be used. */
445 extern int arm_arch_notm;
446
447 /* Nonzero if instructions present in ARMv7E-M can be used. */
448 extern int arm_arch7em;
449
450 /* Nonzero if this chip can benefit from load scheduling. */
451 extern int arm_ld_sched;
452
453 /* Nonzero if generating Thumb code, either Thumb-1 or Thumb-2. */
454 extern int thumb_code;
455
456 /* Nonzero if generating Thumb-1 code. */
457 extern int thumb1_code;
458
459 /* Nonzero if this chip is a StrongARM. */
460 extern int arm_tune_strongarm;
461
462 /* Nonzero if this chip is a Cirrus variant. */
463 extern int arm_arch_cirrus;
464
465 /* Nonzero if this chip supports Intel XScale with Wireless MMX technology. */
466 extern int arm_arch_iwmmxt;
467
468 /* Nonzero if this chip is an XScale. */
469 extern int arm_arch_xscale;
470
471 /* Nonzero if tuning for XScale. */
472 extern int arm_tune_xscale;
473
474 /* Nonzero if tuning for stores via the write buffer. */
475 extern int arm_tune_wbuf;
476
477 /* Nonzero if tuning for Cortex-A9. */
478 extern int arm_tune_cortex_a9;
479
480 /* Nonzero if we should define __THUMB_INTERWORK__ in the
481 preprocessor.
482 XXX This is a bit of a hack, it's intended to help work around
483 problems in GLD which doesn't understand that armv5t code is
484 interworking clean. */
485 extern int arm_cpp_interwork;
486
487 /* Nonzero if chip supports Thumb 2. */
488 extern int arm_arch_thumb2;
489
490 /* Nonzero if chip supports integer division instruction. */
491 extern int arm_arch_hwdiv;
492
493 #ifndef TARGET_DEFAULT
494 #define TARGET_DEFAULT (MASK_APCS_FRAME)
495 #endif
496
497 /* Nonzero if PIC code requires explicit qualifiers to generate
498 PLT and GOT relocs rather than the assembler doing so implicitly.
499 Subtargets can override these if required. */
500 #ifndef NEED_GOT_RELOC
501 #define NEED_GOT_RELOC 0
502 #endif
503 #ifndef NEED_PLT_RELOC
504 #define NEED_PLT_RELOC 0
505 #endif
506
507 /* Nonzero if we need to refer to the GOT with a PC-relative
508 offset. In other words, generate
509
510 .word _GLOBAL_OFFSET_TABLE_ - [. - (.Lxx + 8)]
511
512 rather than
513
514 .word _GLOBAL_OFFSET_TABLE_ - (.Lxx + 8)
515
516 The default is true, which matches NetBSD. Subtargets can
517 override this if required. */
518 #ifndef GOT_PCREL
519 #define GOT_PCREL 1
520 #endif
521 \f
522 /* Target machine storage Layout. */
523
524
525 /* Define this macro if it is advisable to hold scalars in registers
526 in a wider mode than that declared by the program. In such cases,
527 the value is constrained to be within the bounds of the declared
528 type, but kept valid in the wider mode. The signedness of the
529 extension may differ from that of the type. */
530
531 /* It is far faster to zero extend chars than to sign extend them */
532
533 #define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
534 if (GET_MODE_CLASS (MODE) == MODE_INT \
535 && GET_MODE_SIZE (MODE) < 4) \
536 { \
537 if (MODE == QImode) \
538 UNSIGNEDP = 1; \
539 else if (MODE == HImode) \
540 UNSIGNEDP = 1; \
541 (MODE) = SImode; \
542 }
543
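/* Illustrative effect of PROMOTE_MODE (not part of the original header):
   a QImode or HImode scalar held in a register is widened to SImode and
   marked unsigned, so a 'char' with value 0xFF is kept as the SImode value
   0x000000FF rather than being sign-extended to 0xFFFFFFFF.  */
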
544 /* Define this if most significant bit is lowest numbered
545 in instructions that operate on numbered bit-fields. */
546 #define BITS_BIG_ENDIAN 0
547
548 /* Define this if most significant byte of a word is the lowest numbered.
549 Most ARM processors are run in little endian mode, so that is the default.
550 If you want to have it run-time selectable, change the definition in a
551 cover file to be TARGET_BIG_ENDIAN. */
552 #define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
553
554 /* Define this if most significant word of a multiword number is the lowest
555 numbered.
556 This is true only in big-endian mode when TARGET_LITTLE_WORDS is not set. */
557 #define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN && ! TARGET_LITTLE_WORDS)
558
559 /* Define this if most significant word of doubles is the lowest numbered.
560 The rules are different based on whether or not we use FPA-format,
561 VFP-format or some other floating point co-processor's format doubles. */
562 #define FLOAT_WORDS_BIG_ENDIAN (arm_float_words_big_endian ())
563
564 #define UNITS_PER_WORD 4
565
566 /* True if natural alignment is used for doubleword types. */
567 #define ARM_DOUBLEWORD_ALIGN TARGET_AAPCS_BASED
568
569 #define DOUBLEWORD_ALIGNMENT 64
570
571 #define PARM_BOUNDARY 32
572
573 #define STACK_BOUNDARY (ARM_DOUBLEWORD_ALIGN ? DOUBLEWORD_ALIGNMENT : 32)
574
575 #define PREFERRED_STACK_BOUNDARY \
576 (arm_abi == ARM_ABI_ATPCS ? 64 : STACK_BOUNDARY)
577
578 #define FUNCTION_BOUNDARY ((TARGET_THUMB && optimize_size) ? 16 : 32)
579
580 /* The lowest bit is used to indicate Thumb-mode functions, so the
581 vbit must go into the delta field of pointers to member
582 functions. */
583 #define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta
584
585 #define EMPTY_FIELD_BOUNDARY 32
586
587 #define BIGGEST_ALIGNMENT (ARM_DOUBLEWORD_ALIGN ? DOUBLEWORD_ALIGNMENT : 32)
588
589 /* XXX Blah -- this macro is used directly by libobjc. Since it
590 supports no vector modes, cut out the complexity and fall back
591 on BIGGEST_FIELD_ALIGNMENT. */
592 #ifdef IN_TARGET_LIBS
593 #define BIGGEST_FIELD_ALIGNMENT 64
594 #endif
595
596 /* Make strings word-aligned so strcpy from constants will be faster. */
597 #define CONSTANT_ALIGNMENT_FACTOR (TARGET_THUMB || ! arm_tune_xscale ? 1 : 2)
598
599 #define CONSTANT_ALIGNMENT(EXP, ALIGN) \
600 ((TREE_CODE (EXP) == STRING_CST \
601 && !optimize_size \
602 && (ALIGN) < BITS_PER_WORD * CONSTANT_ALIGNMENT_FACTOR) \
603 ? BITS_PER_WORD * CONSTANT_ALIGNMENT_FACTOR : (ALIGN))
604
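/* Example (illustrative): with the above, a string literal such as
   "hello" is given BITS_PER_WORD (32-bit) alignment instead of byte
   alignment when not optimizing for size; when tuning for XScale in ARM
   state the factor of 2 raises this to 64 bits.  */
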
605 /* Align definitions of arrays, unions and structures so that
606 initializations and copies can be made more efficient. This is not
607 ABI-changing, so it only affects places where we can see the
608 definition. Increasing the alignment tends to introduce padding,
609 so don't do this when optimizing for size/conserving stack space. */
610 #define ARM_EXPAND_ALIGNMENT(COND, EXP, ALIGN) \
611 (((COND) && ((ALIGN) < BITS_PER_WORD) \
612 && (TREE_CODE (EXP) == ARRAY_TYPE \
613 || TREE_CODE (EXP) == UNION_TYPE \
614 || TREE_CODE (EXP) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
615
616 /* Align global data. */
617 #define DATA_ALIGNMENT(EXP, ALIGN) \
618 ARM_EXPAND_ALIGNMENT(!optimize_size, EXP, ALIGN)
619
620 /* Similarly, make sure that objects on the stack are sensibly aligned. */
621 #define LOCAL_ALIGNMENT(EXP, ALIGN) \
622 ARM_EXPAND_ALIGNMENT(!flag_conserve_stack, EXP, ALIGN)
623
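/* Example (illustrative): a global such as "char buf[6];" has a type
   alignment of only 8 bits, but DATA_ALIGNMENT bumps the definition to
   BITS_PER_WORD (32) when not optimizing for size, so block copies and
   initializations can use word accesses; the same applies to stack
   objects via LOCAL_ALIGNMENT unless -fconserve-stack is given.  */
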
624 /* Setting STRUCTURE_SIZE_BOUNDARY to 32 produces more efficient code, but the
625 value set in previous versions of this toolchain was 8, which produces more
626 compact structures. The command line option -mstructure_size_boundary=<n>
627 can be used to change this value. For compatibility with the ARM SDK
628 however the value should be left at 32. ARM SDT Reference Manual (ARM DUI
629 0020D) page 2-20 says "Structures are aligned on word boundaries".
630 The AAPCS specifies a value of 8. */
631 #define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
632 extern int arm_structure_size_boundary;
633
634 /* This is the value used to initialize arm_structure_size_boundary. If a
635 particular arm target wants to change the default value it should change
636 the definition of this macro, not STRUCTURE_SIZE_BOUNDARY. See netbsd.h
637 for an example of this. */
638 #ifndef DEFAULT_STRUCTURE_SIZE_BOUNDARY
639 #define DEFAULT_STRUCTURE_SIZE_BOUNDARY 32
640 #endif
641
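/* Example (illustrative): with the default boundary of 32,
   "struct { char c; }" occupies 4 bytes; building with
   -mstructure_size_boundary=8 (the AAPCS value) shrinks it to 1 byte,
   at the cost of ABI compatibility with code built using the default.  */
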
642 /* Nonzero if move instructions will actually fail to work
643 when given unaligned data. */
644 #define STRICT_ALIGNMENT 1
645
646 /* wchar_t is unsigned under the AAPCS. */
647 #ifndef WCHAR_TYPE
648 #define WCHAR_TYPE (TARGET_AAPCS_BASED ? "unsigned int" : "int")
649
650 #define WCHAR_TYPE_SIZE BITS_PER_WORD
651 #endif
652
653 #ifndef SIZE_TYPE
654 #define SIZE_TYPE (TARGET_AAPCS_BASED ? "unsigned int" : "long unsigned int")
655 #endif
656
657 #ifndef PTRDIFF_TYPE
658 #define PTRDIFF_TYPE (TARGET_AAPCS_BASED ? "int" : "long int")
659 #endif
660
661 /* AAPCS requires that structure alignment is affected by bitfields. */
662 #ifndef PCC_BITFIELD_TYPE_MATTERS
663 #define PCC_BITFIELD_TYPE_MATTERS TARGET_AAPCS_BASED
664 #endif
665
666 \f
667 /* Standard register usage. */
668
669 /* Register allocation in ARM Procedure Call Standard (as used on RISCiX):
670 (S - saved over call).
671
672 r0 * argument word/integer result
673 r1-r3 argument word
674
675 r4-r8 S register variable
676 r9 S (rfp) register variable (real frame pointer)
677
678 r10 F S (sl) stack limit (used by -mapcs-stack-check)
679 r11 F S (fp) argument pointer
680 r12 (ip) temp workspace
681 r13 F S (sp) lower end of current stack frame
682 r14 (lr) link address/workspace
683 r15 F (pc) program counter
684
685 f0 floating point result
686 f1-f3 floating point scratch
687
688 f4-f7 S floating point variable
689
690 cc This is NOT a real register, but is used internally
691 to represent things that use or set the condition
692 codes.
693 sfp This isn't either. It is used during rtl generation
694 since the offset between the frame pointer and the
695 auto's isn't known until after register allocation.
696 afp Nor this, we only need this because of non-local
697 goto. Without it fp appears to be used and the
698 elimination code won't get rid of sfp. It tracks
699 fp exactly at all times.
700
701 *: See CONDITIONAL_REGISTER_USAGE */
702
703 /*
704 mvf0 Cirrus floating point result
705 mvf1-mvf3 Cirrus floating point scratch
706 mvf4-mvf15 S Cirrus floating point variable. */
707
708 /* s0-s15 VFP scratch (aka d0-d7).
709 s16-s31 S VFP variable (aka d8-d15).
710 vfpcc Not a real register. Represents the VFP condition
711 code flags. */
712
713 /* The stack backtrace structure is as follows:
714 fp points to here: | save code pointer | [fp]
715 | return link value | [fp, #-4]
716 | return sp value | [fp, #-8]
717 | return fp value | [fp, #-12]
718 [| saved r10 value |]
719 [| saved r9 value |]
720 [| saved r8 value |]
721 [| saved r7 value |]
722 [| saved r6 value |]
723 [| saved r5 value |]
724 [| saved r4 value |]
725 [| saved r3 value |]
726 [| saved r2 value |]
727 [| saved r1 value |]
728 [| saved r0 value |]
729 [| saved f7 value |] three words
730 [| saved f6 value |] three words
731 [| saved f5 value |] three words
732 [| saved f4 value |] three words
733 r0-r3 are not normally saved in a C function. */
734
735 /* 1 for registers that have pervasive standard uses
736 and are not available for the register allocator. */
737 #define FIXED_REGISTERS \
738 { \
739 0,0,0,0,0,0,0,0, \
740 0,0,0,0,0,1,0,1, \
741 0,0,0,0,0,0,0,0, \
742 1,1,1, \
743 1,1,1,1,1,1,1,1, \
744 1,1,1,1,1,1,1,1, \
745 1,1,1,1,1,1,1,1, \
746 1,1,1,1,1,1,1,1, \
747 1,1,1,1, \
748 1,1,1,1,1,1,1,1, \
749 1,1,1,1,1,1,1,1, \
750 1,1,1,1,1,1,1,1, \
751 1,1,1,1,1,1,1,1, \
752 1,1,1,1,1,1,1,1, \
753 1,1,1,1,1,1,1,1, \
754 1,1,1,1,1,1,1,1, \
755 1,1,1,1,1,1,1,1, \
756 1 \
757 }
758
759 /* 1 for registers not available across function calls.
760 These must include the FIXED_REGISTERS and also any
761 registers that can be used without being saved.
762 The latter must include the registers where values are returned
763 and the register where structure-value addresses are passed.
764 Aside from that, you can include as many other registers as you like.
765 The CC is not preserved over function calls on the ARM 6, so it is
766 easier to assume this for all. SFP is preserved, since FP is. */
767 #define CALL_USED_REGISTERS \
768 { \
769 1,1,1,1,0,0,0,0, \
770 0,0,0,0,1,1,1,1, \
771 1,1,1,1,0,0,0,0, \
772 1,1,1, \
773 1,1,1,1,1,1,1,1, \
774 1,1,1,1,1,1,1,1, \
775 1,1,1,1,1,1,1,1, \
776 1,1,1,1,1,1,1,1, \
777 1,1,1,1, \
778 1,1,1,1,1,1,1,1, \
779 1,1,1,1,1,1,1,1, \
780 1,1,1,1,1,1,1,1, \
781 1,1,1,1,1,1,1,1, \
782 1,1,1,1,1,1,1,1, \
783 1,1,1,1,1,1,1,1, \
784 1,1,1,1,1,1,1,1, \
785 1,1,1,1,1,1,1,1, \
786 1 \
787 }
788
789 #ifndef SUBTARGET_CONDITIONAL_REGISTER_USAGE
790 #define SUBTARGET_CONDITIONAL_REGISTER_USAGE
791 #endif
792
793 #define CONDITIONAL_REGISTER_USAGE \
794 { \
795 int regno; \
796 \
797 if (TARGET_SOFT_FLOAT || TARGET_THUMB1 || !TARGET_FPA) \
798 { \
799 for (regno = FIRST_FPA_REGNUM; \
800 regno <= LAST_FPA_REGNUM; ++regno) \
801 fixed_regs[regno] = call_used_regs[regno] = 1; \
802 } \
803 \
804 if (TARGET_THUMB1 && optimize_size) \
805 { \
806 /* When optimizing for size on Thumb-1, it's better not \
807 to use the HI regs, because of the overhead of \
808 stacking them. */ \
809 for (regno = FIRST_HI_REGNUM; \
810 regno <= LAST_HI_REGNUM; ++regno) \
811 fixed_regs[regno] = call_used_regs[regno] = 1; \
812 } \
813 \
814 /* The link register can be clobbered by any branch insn, \
815 but we have no way to track that at present, so mark \
816 it as unavailable. */ \
817 if (TARGET_THUMB1) \
818 fixed_regs[LR_REGNUM] = call_used_regs[LR_REGNUM] = 1; \
819 \
820 if (TARGET_32BIT && TARGET_HARD_FLOAT) \
821 { \
822 if (TARGET_MAVERICK) \
823 { \
824 for (regno = FIRST_FPA_REGNUM; \
825 regno <= LAST_FPA_REGNUM; ++ regno) \
826 fixed_regs[regno] = call_used_regs[regno] = 1; \
827 for (regno = FIRST_CIRRUS_FP_REGNUM; \
828 regno <= LAST_CIRRUS_FP_REGNUM; ++ regno) \
829 { \
830 fixed_regs[regno] = 0; \
831 call_used_regs[regno] = regno < FIRST_CIRRUS_FP_REGNUM + 4; \
832 } \
833 } \
834 if (TARGET_VFP) \
835 { \
836 /* VFPv3 registers are disabled when earlier VFP \
837 versions are selected due to the definition of \
838 LAST_VFP_REGNUM. */ \
839 for (regno = FIRST_VFP_REGNUM; \
840 regno <= LAST_VFP_REGNUM; ++ regno) \
841 { \
842 fixed_regs[regno] = 0; \
843 call_used_regs[regno] = regno < FIRST_VFP_REGNUM + 16 \
844 || regno >= FIRST_VFP_REGNUM + 32; \
845 } \
846 } \
847 } \
848 \
849 if (TARGET_REALLY_IWMMXT) \
850 { \
851 regno = FIRST_IWMMXT_GR_REGNUM; \
852 /* The 2002/10/09 revision of the XScale ABI has wCG0 \
853 and wCG1 as call-preserved registers. The 2002/11/21 \
854 revision changed this so that all wCG registers are \
855 scratch registers. */ \
856 for (regno = FIRST_IWMMXT_GR_REGNUM; \
857 regno <= LAST_IWMMXT_GR_REGNUM; ++ regno) \
858 fixed_regs[regno] = 0; \
859 /* The XScale ABI has wR0 - wR9 as scratch registers, \
860 the rest as call-preserved registers. */ \
861 for (regno = FIRST_IWMMXT_REGNUM; \
862 regno <= LAST_IWMMXT_REGNUM; ++ regno) \
863 { \
864 fixed_regs[regno] = 0; \
865 call_used_regs[regno] = regno < FIRST_IWMMXT_REGNUM + 10; \
866 } \
867 } \
868 \
869 if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM) \
870 { \
871 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
872 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
873 } \
874 else if (TARGET_APCS_STACK) \
875 { \
876 fixed_regs[10] = 1; \
877 call_used_regs[10] = 1; \
878 } \
879 /* -mcaller-super-interworking reserves r11 for calls to \
880 _interwork_r11_call_via_rN(). Making the register global \
881 is an easy way of ensuring that it remains valid for all \
882 calls. */ \
883 if (TARGET_APCS_FRAME || TARGET_CALLER_INTERWORKING \
884 || TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) \
885 { \
886 fixed_regs[ARM_HARD_FRAME_POINTER_REGNUM] = 1; \
887 call_used_regs[ARM_HARD_FRAME_POINTER_REGNUM] = 1; \
888 if (TARGET_CALLER_INTERWORKING) \
889 global_regs[ARM_HARD_FRAME_POINTER_REGNUM] = 1; \
890 } \
891 SUBTARGET_CONDITIONAL_REGISTER_USAGE \
892 }
893
894 /* These are a couple of extensions to the formats accepted
895 by asm_fprintf:
896 %@ prints out ASM_COMMENT_START
897 %r prints out REGISTER_PREFIX reg_names[arg] */
898 #define ASM_FPRINTF_EXTENSIONS(FILE, ARGS, P) \
899 case '@': \
900 fputs (ASM_COMMENT_START, FILE); \
901 break; \
902 \
903 case 'r': \
904 fputs (REGISTER_PREFIX, FILE); \
905 fputs (reg_names [va_arg (ARGS, int)], FILE); \
906 break;
907
908 /* Round X up to the nearest word. */
909 #define ROUND_UP_WORD(X) (((X) + 3) & ~3)
910
911 /* Convert from bytes to ints. */
912 #define ARM_NUM_INTS(X) (((X) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
913
914 /* The number of (integer) registers required to hold a quantity of type MODE.
915 Also used for VFP registers. */
916 #define ARM_NUM_REGS(MODE) \
917 ARM_NUM_INTS (GET_MODE_SIZE (MODE))
918
919 /* The number of (integer) registers required to hold a quantity of mode MODE and type TYPE. */
920 #define ARM_NUM_REGS2(MODE, TYPE) \
921 ARM_NUM_INTS ((MODE) == BLKmode ? \
922 int_size_in_bytes (TYPE) : GET_MODE_SIZE (MODE))
923
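/* Worked examples (illustrative):
     ROUND_UP_WORD (13)          == 16
     ARM_NUM_INTS (13)           == 4
     ARM_NUM_REGS (DImode)       == 2   since GET_MODE_SIZE (DImode) == 8
     ARM_NUM_REGS2 (BLKmode, T)  uses int_size_in_bytes (T) instead.  */
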
924 /* The number of (integer) argument registers available. */
925 #define NUM_ARG_REGS 4
926
927 /* And similarly for the VFP. */
928 #define NUM_VFP_ARG_REGS 16
929
930 /* Return the register number of the N'th (integer) argument. */
931 #define ARG_REGISTER(N) (N - 1)
932
933 /* Specify the registers used for certain standard purposes.
934 The values of these macros are register numbers. */
935
936 /* The number of the last argument register. */
937 #define LAST_ARG_REGNUM ARG_REGISTER (NUM_ARG_REGS)
938
939 /* The numbers of the Thumb register ranges. */
940 #define FIRST_LO_REGNUM 0
941 #define LAST_LO_REGNUM 7
942 #define FIRST_HI_REGNUM 8
943 #define LAST_HI_REGNUM 11
944
945 /* Overridden by config/arm/bpabi.h. */
946 #ifndef ARM_UNWIND_INFO
947 #define ARM_UNWIND_INFO 0
948 #endif
949
950 /* Use r0 and r1 to pass exception handling information. */
951 #define EH_RETURN_DATA_REGNO(N) (((N) < 2) ? N : INVALID_REGNUM)
952
953 /* The register that holds the return address in exception handlers. */
954 #define ARM_EH_STACKADJ_REGNUM 2
955 #define EH_RETURN_STACKADJ_RTX gen_rtx_REG (SImode, ARM_EH_STACKADJ_REGNUM)
956
957 /* The native (Norcroft) Pascal compiler for the ARM passes the static chain
958 as an invisible last argument (possible since varargs don't exist in
959 Pascal), so the following is not true. */
960 #define STATIC_CHAIN_REGNUM 12
961
962 /* Define this to be where the real frame pointer is if it is not possible to
963 work out the offset between the frame pointer and the automatic variables
964 until after register allocation has taken place. FRAME_POINTER_REGNUM
965 should point to a special register that we will make sure is eliminated.
966
967 For the Thumb we have another problem. The TPCS defines the frame pointer
968 as r11, and GCC believes that it is always possible to use the frame pointer
969 as base register for addressing purposes. (See comments in
970 find_reloads_address()). But - the Thumb does not allow high registers,
971 including r11, to be used as base address registers. Hence our problem.
972
973 The solution used here, and in the old thumb port is to use r7 instead of
974 r11 as the hard frame pointer and to have special code to generate
975 backtrace structures on the stack (if required to do so via a command line
976 option) using r11. This is the only 'user visible' use of r11 as a frame
977 pointer. */
978 #define ARM_HARD_FRAME_POINTER_REGNUM 11
979 #define THUMB_HARD_FRAME_POINTER_REGNUM 7
980
981 #define HARD_FRAME_POINTER_REGNUM \
982 (TARGET_ARM \
983 ? ARM_HARD_FRAME_POINTER_REGNUM \
984 : THUMB_HARD_FRAME_POINTER_REGNUM)
985
986 #define HARD_FRAME_POINTER_IS_FRAME_POINTER 0
987 #define HARD_FRAME_POINTER_IS_ARG_POINTER 0
988
989 #define FP_REGNUM HARD_FRAME_POINTER_REGNUM
990
991 /* Register to use for pushing function arguments. */
992 #define STACK_POINTER_REGNUM SP_REGNUM
993
994 /* ARM floating point registers. */
995 #define FIRST_FPA_REGNUM 16
996 #define LAST_FPA_REGNUM 23
997 #define IS_FPA_REGNUM(REGNUM) \
998 (((REGNUM) >= FIRST_FPA_REGNUM) && ((REGNUM) <= LAST_FPA_REGNUM))
999
1000 #define FIRST_IWMMXT_GR_REGNUM 43
1001 #define LAST_IWMMXT_GR_REGNUM 46
1002 #define FIRST_IWMMXT_REGNUM 47
1003 #define LAST_IWMMXT_REGNUM 62
1004 #define IS_IWMMXT_REGNUM(REGNUM) \
1005 (((REGNUM) >= FIRST_IWMMXT_REGNUM) && ((REGNUM) <= LAST_IWMMXT_REGNUM))
1006 #define IS_IWMMXT_GR_REGNUM(REGNUM) \
1007 (((REGNUM) >= FIRST_IWMMXT_GR_REGNUM) && ((REGNUM) <= LAST_IWMMXT_GR_REGNUM))
1008
1009 /* Base register for access to local variables of the function. */
1010 #define FRAME_POINTER_REGNUM 25
1011
1012 /* Base register for access to arguments of the function. */
1013 #define ARG_POINTER_REGNUM 26
1014
1015 #define FIRST_CIRRUS_FP_REGNUM 27
1016 #define LAST_CIRRUS_FP_REGNUM 42
1017 #define IS_CIRRUS_REGNUM(REGNUM) \
1018 (((REGNUM) >= FIRST_CIRRUS_FP_REGNUM) && ((REGNUM) <= LAST_CIRRUS_FP_REGNUM))
1019
1020 #define FIRST_VFP_REGNUM 63
1021 #define D7_VFP_REGNUM 78 /* Registers 77 and 78 == VFP reg D7. */
1022 #define LAST_VFP_REGNUM \
1023 (TARGET_VFPD32 ? LAST_HI_VFP_REGNUM : LAST_LO_VFP_REGNUM)
1024
1025 #define IS_VFP_REGNUM(REGNUM) \
1026 (((REGNUM) >= FIRST_VFP_REGNUM) && ((REGNUM) <= LAST_VFP_REGNUM))
1027
1028 /* VFP registers are split into two types: those defined by VFP versions < 3
1029 have D registers overlaid on consecutive pairs of S registers. VFP version 3
1030 defines 16 new D registers (d16-d31) which, for simplicity and correctness
1031 in various parts of the backend, we implement as "fake" single-precision
1032 registers (which would be S32-S63, but cannot be used in that way). The
1033 following macros define these ranges of registers. */
1034 #define LAST_LO_VFP_REGNUM 94
1035 #define FIRST_HI_VFP_REGNUM 95
1036 #define LAST_HI_VFP_REGNUM 126
1037
1038 #define VFP_REGNO_OK_FOR_SINGLE(REGNUM) \
1039 ((REGNUM) <= LAST_LO_VFP_REGNUM)
1040
1041 /* DFmode values are only valid in even register pairs. */
1042 #define VFP_REGNO_OK_FOR_DOUBLE(REGNUM) \
1043 ((((REGNUM) - FIRST_VFP_REGNUM) & 1) == 0)
1044
1045 /* Neon Quad values must start at a multiple of four registers. */
1046 #define NEON_REGNO_OK_FOR_QUAD(REGNUM) \
1047 ((((REGNUM) - FIRST_VFP_REGNUM) & 3) == 0)
1048
1049 /* Neon structures of vectors must be in even register pairs and there
1050 must be enough registers available. Because of various patterns
1051 requiring quad registers, we require them to start at a multiple of
1052 four. */
1053 #define NEON_REGNO_OK_FOR_NREGS(REGNUM, N) \
1054 ((((REGNUM) - FIRST_VFP_REGNUM) & 3) == 0 \
1055 && (LAST_VFP_REGNUM - (REGNUM) >= 2 * (N) - 1))
1056
1057 /* The number of hard registers is 16 ARM + 8 FPA + 1 CC + 1 SFP + 1 AFP. */
1058 /* + 16 Cirrus registers take us up to 43. */
1059 /* Intel Wireless MMX Technology registers add 16 + 4 more. */
1060 /* VFP (VFP3) adds 32 (64) + 1 more. */
1061 #define FIRST_PSEUDO_REGISTER 128
1062
1063 #define DBX_REGISTER_NUMBER(REGNO) arm_dbx_register_number (REGNO)
1064
1065 /* Value should be nonzero if functions must have frame pointers.
1066 Zero means the frame pointer need not be set up (and parms may be accessed
1067 via the stack pointer) in functions that seem suitable.
1068 If we have to have a frame pointer we might as well make use of it.
1069 APCS says that the frame pointer does not need to be pushed in leaf
1070 functions, or simple tail call functions. */
1071
1072 #ifndef SUBTARGET_FRAME_POINTER_REQUIRED
1073 #define SUBTARGET_FRAME_POINTER_REQUIRED 0
1074 #endif
1075
1076 /* Return number of consecutive hard regs needed starting at reg REGNO
1077 to hold something of mode MODE.
1078 This is ordinarily the length in words of a value of mode MODE
1079 but can be less for certain modes in special long registers.
1080
1081 On the ARM regs are UNITS_PER_WORD bits wide; FPA regs can hold any FP
1082 mode. */
1083 #define HARD_REGNO_NREGS(REGNO, MODE) \
1084 ((TARGET_32BIT \
1085 && REGNO >= FIRST_FPA_REGNUM \
1086 && REGNO != FRAME_POINTER_REGNUM \
1087 && REGNO != ARG_POINTER_REGNUM) \
1088 && !IS_VFP_REGNUM (REGNO) \
1089 ? 1 : ARM_NUM_REGS (MODE))
1090
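/* Examples (illustrative): on a 32-bit target, DImode starting at r0
   occupies two core registers (r0 and r1), while DFmode held in an FPA
   register occupies a single register, since each FPA register can hold
   any FP mode on its own.  */
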
1091 /* Return true if REGNO is suitable for holding a quantity of type MODE. */
1092 #define HARD_REGNO_MODE_OK(REGNO, MODE) \
1093 arm_hard_regno_mode_ok ((REGNO), (MODE))
1094
1095 /* Value is 1 if it is a good idea to tie two pseudo registers
1096 when one has mode MODE1 and one has mode MODE2.
1097 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
1098 for any hard reg, then this must be 0 for correct output. */
1099 #define MODES_TIEABLE_P(MODE1, MODE2) \
1100 (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
1101
1102 #define VALID_IWMMXT_REG_MODE(MODE) \
1103 (arm_vector_mode_supported_p (MODE) || (MODE) == DImode)
1104
1105 /* Modes valid for Neon D registers. */
1106 #define VALID_NEON_DREG_MODE(MODE) \
1107 ((MODE) == V2SImode || (MODE) == V4HImode || (MODE) == V8QImode \
1108 || (MODE) == V2SFmode || (MODE) == DImode)
1109
1110 /* Modes valid for Neon Q registers. */
1111 #define VALID_NEON_QREG_MODE(MODE) \
1112 ((MODE) == V4SImode || (MODE) == V8HImode || (MODE) == V16QImode \
1113 || (MODE) == V4SFmode || (MODE) == V2DImode)
1114
1115 /* Structure modes valid for Neon registers. */
1116 #define VALID_NEON_STRUCT_MODE(MODE) \
1117 ((MODE) == TImode || (MODE) == EImode || (MODE) == OImode \
1118 || (MODE) == CImode || (MODE) == XImode)
1119
1120 /* The register numbers in sequence, for passing to arm_gen_load_multiple. */
1121 extern int arm_regs_in_sequence[];
1122
1123 /* The order in which registers should be allocated. It is good to use ip
1124 since no saving is required (though calls clobber it) and it never contains
1125 function parameters. It is quite good to use lr since other calls may
1126 clobber it anyway. Allocate r0 through r3 in reverse order since r3 is
1127 least likely to contain a function parameter; in addition results are
1128 returned in r0.
1129 For VFP/VFPv3, allocate D16-D31 first, then caller-saved registers (D0-D7),
1130 then D8-D15. The reason for doing this is to attempt to reduce register
1131 pressure when both single- and double-precision registers are used in a
1132 function. */
1133
1134 #define REG_ALLOC_ORDER \
1135 { \
1136 3, 2, 1, 0, 12, 14, 4, 5, \
1137 6, 7, 8, 10, 9, 11, 13, 15, \
1138 16, 17, 18, 19, 20, 21, 22, 23, \
1139 27, 28, 29, 30, 31, 32, 33, 34, \
1140 35, 36, 37, 38, 39, 40, 41, 42, \
1141 43, 44, 45, 46, 47, 48, 49, 50, \
1142 51, 52, 53, 54, 55, 56, 57, 58, \
1143 59, 60, 61, 62, \
1144 24, 25, 26, \
1145 95, 96, 97, 98, 99, 100, 101, 102, \
1146 103, 104, 105, 106, 107, 108, 109, 110, \
1147 111, 112, 113, 114, 115, 116, 117, 118, \
1148 119, 120, 121, 122, 123, 124, 125, 126, \
1149 78, 77, 76, 75, 74, 73, 72, 71, \
1150 70, 69, 68, 67, 66, 65, 64, 63, \
1151 79, 80, 81, 82, 83, 84, 85, 86, \
1152 87, 88, 89, 90, 91, 92, 93, 94, \
1153 127 \
1154 }
1155
1156 /* Use different register alloc ordering for Thumb. */
1157 #define ADJUST_REG_ALLOC_ORDER arm_order_regs_for_local_alloc ()
1158
1159 /* Tell IRA to use the order we define rather than messing it up with its
1160 own cost calculations. */
1161 #define HONOR_REG_ALLOC_ORDER
1162
1163 /* Interrupt functions can only use registers that have already been
1164 saved by the prologue, even if they would normally be
1165 call-clobbered. */
1166 #define HARD_REGNO_RENAME_OK(SRC, DST) \
1167 (! IS_INTERRUPT (cfun->machine->func_type) || \
1168 df_regs_ever_live_p (DST))
1169 \f
1170 /* Register and constant classes. */
1171
1172 /* Register classes: used to be simple, just all ARM regs or all FPA regs.
1173 Now that the Thumb is involved it has become more complicated. */
1174 enum reg_class
1175 {
1176 NO_REGS,
1177 FPA_REGS,
1178 CIRRUS_REGS,
1179 VFP_D0_D7_REGS,
1180 VFP_LO_REGS,
1181 VFP_HI_REGS,
1182 VFP_REGS,
1183 IWMMXT_GR_REGS,
1184 IWMMXT_REGS,
1185 LO_REGS,
1186 STACK_REG,
1187 BASE_REGS,
1188 HI_REGS,
1189 CC_REG,
1190 VFPCC_REG,
1191 GENERAL_REGS,
1192 CORE_REGS,
1193 ALL_REGS,
1194 LIM_REG_CLASSES
1195 };
1196
1197 #define N_REG_CLASSES (int) LIM_REG_CLASSES
1198
1199 /* Give names of register classes as strings for dump file. */
1200 #define REG_CLASS_NAMES \
1201 { \
1202 "NO_REGS", \
1203 "FPA_REGS", \
1204 "CIRRUS_REGS", \
1205 "VFP_D0_D7_REGS", \
1206 "VFP_LO_REGS", \
1207 "VFP_HI_REGS", \
1208 "VFP_REGS", \
1209 "IWMMXT_GR_REGS", \
1210 "IWMMXT_REGS", \
1211 "LO_REGS", \
1212 "STACK_REG", \
1213 "BASE_REGS", \
1214 "HI_REGS", \
1215 "CC_REG", \
1216 "VFPCC_REG", \
1217 "GENERAL_REGS", \
1218 "CORE_REGS", \
1219 "ALL_REGS", \
1220 }
1221
1222 /* Define which registers fit in which classes.
1223 This is an initializer for a vector of HARD_REG_SET
1224 of length N_REG_CLASSES. */
1225 #define REG_CLASS_CONTENTS \
1226 { \
1227 { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
1228 { 0x00FF0000, 0x00000000, 0x00000000, 0x00000000 }, /* FPA_REGS */ \
1229 { 0xF8000000, 0x000007FF, 0x00000000, 0x00000000 }, /* CIRRUS_REGS */ \
1230 { 0x00000000, 0x80000000, 0x00007FFF, 0x00000000 }, /* VFP_D0_D7_REGS */ \
1231 { 0x00000000, 0x80000000, 0x7FFFFFFF, 0x00000000 }, /* VFP_LO_REGS */ \
1232 { 0x00000000, 0x00000000, 0x80000000, 0x7FFFFFFF }, /* VFP_HI_REGS */ \
1233 { 0x00000000, 0x80000000, 0xFFFFFFFF, 0x7FFFFFFF }, /* VFP_REGS */ \
1234 { 0x00000000, 0x00007800, 0x00000000, 0x00000000 }, /* IWMMXT_GR_REGS */ \
1235 { 0x00000000, 0x7FFF8000, 0x00000000, 0x00000000 }, /* IWMMXT_REGS */ \
1236 { 0x000000FF, 0x00000000, 0x00000000, 0x00000000 }, /* LO_REGS */ \
1237 { 0x00002000, 0x00000000, 0x00000000, 0x00000000 }, /* STACK_REG */ \
1238 { 0x000020FF, 0x00000000, 0x00000000, 0x00000000 }, /* BASE_REGS */ \
1239 { 0x0000DF00, 0x00000000, 0x00000000, 0x00000000 }, /* HI_REGS */ \
1240 { 0x01000000, 0x00000000, 0x00000000, 0x00000000 }, /* CC_REG */ \
1241 { 0x00000000, 0x00000000, 0x00000000, 0x80000000 }, /* VFPCC_REG */ \
1242 { 0x0000DFFF, 0x00000000, 0x00000000, 0x00000000 }, /* GENERAL_REGS */ \
1243 { 0x0000FFFF, 0x00000000, 0x00000000, 0x00000000 }, /* CORE_REGS */ \
1244 { 0xFAFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF } /* ALL_REGS */ \
1245 }
1246
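/* Reading the initializer above (illustrative): bit N of the HARD_REG_SET
   corresponds to hard register N, so LO_REGS = 0x000000FF is r0-r7,
   STACK_REG = 0x00002000 is just r13 (sp), and CC_REG = 0x01000000 is the
   condition-code pseudo register, number 24.  */
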
1247 /* Any of the VFP register classes. */
1248 #define IS_VFP_CLASS(X) \
1249 ((X) == VFP_D0_D7_REGS || (X) == VFP_LO_REGS \
1250 || (X) == VFP_HI_REGS || (X) == VFP_REGS)
1251
1252 /* The same information, inverted:
1253 Return the class number of the smallest class containing
1254 reg number REGNO. This could be a conditional expression
1255 or could index an array. */
1256 #define REGNO_REG_CLASS(REGNO) arm_regno_class (REGNO)
1257
1258 /* The following macro defines cover classes for Integrated Register
1259 Allocator. Cover classes are a set of non-intersecting register
1260 classes covering all hard registers used for register allocation
1261 purposes. Any move between two registers of a cover class should be
1262 cheaper than load or store of the registers. The macro value is
1263 array of register classes with LIM_REG_CLASSES used as the end
1264 marker. */
1265
1266 #define IRA_COVER_CLASSES \
1267 { \
1268 GENERAL_REGS, FPA_REGS, CIRRUS_REGS, VFP_REGS, IWMMXT_GR_REGS, IWMMXT_REGS,\
1269 LIM_REG_CLASSES \
1270 }
1271
1272 /* FPA registers can't do subreg as all values are reformatted to internal
1273 precision. VFP registers may only be accessed in the mode they
1274 were set. */
1275 #define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
1276 (GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO) \
1277 ? reg_classes_intersect_p (FPA_REGS, (CLASS)) \
1278 || reg_classes_intersect_p (VFP_REGS, (CLASS)) \
1279 : 0)
1280
1281 /* The class value for index registers, and the one for base regs. */
1282 #define INDEX_REG_CLASS (TARGET_THUMB1 ? LO_REGS : GENERAL_REGS)
1283 #define BASE_REG_CLASS (TARGET_THUMB1 ? LO_REGS : CORE_REGS)
1284
1285 /* For the Thumb the high registers cannot be used as base registers
1286 when addressing quantities in QI or HI mode; if we don't know the
1287 mode, then we must be conservative. */
1288 #define MODE_BASE_REG_CLASS(MODE) \
1289 (TARGET_32BIT ? CORE_REGS : \
1290 (((MODE) == SImode) ? BASE_REGS : LO_REGS))
1291
1292 /* For Thumb we cannot support SP+reg addressing, so we return LO_REGS
1293 instead of BASE_REGS. */
1294 #define MODE_BASE_REG_REG_CLASS(MODE) BASE_REG_CLASS
1295
1296 /* When this hook returns true for MODE, the compiler allows
1297 registers explicitly used in the rtl to be used as spill registers
1298 but prevents the compiler from extending the lifetime of these
1299 registers. */
1300 #define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \
1301 arm_small_register_classes_for_mode_p
1302
1303 /* Given an rtx X being reloaded into a reg required to be
1304 in class CLASS, return the class of reg to actually use.
1305 In general this is just CLASS, but for the Thumb core registers and
1306 immediate constants we prefer a LO_REGS class or a subset. */
1307 #define PREFERRED_RELOAD_CLASS(X, CLASS) \
1308 (TARGET_32BIT ? (CLASS) : \
1309 ((CLASS) == GENERAL_REGS || (CLASS) == HI_REGS \
1310 || (CLASS) == NO_REGS || (CLASS) == STACK_REG \
1311 ? LO_REGS : (CLASS)))
1312
1313 /* Must leave BASE_REGS reloads alone */
1314 #define THUMB_SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
1315 ((CLASS) != LO_REGS && (CLASS) != BASE_REGS \
1316 ? ((true_regnum (X) == -1 ? LO_REGS \
1317 : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
1318 : NO_REGS)) \
1319 : NO_REGS)
1320
1321 #define THUMB_SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
1322 ((CLASS) != LO_REGS && (CLASS) != BASE_REGS \
1323 ? ((true_regnum (X) == -1 ? LO_REGS \
1324 : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
1325 : NO_REGS)) \
1326 : NO_REGS)
1327
1328 /* Return the register class of a scratch register needed to copy IN into
1329 or out of a register in CLASS in MODE. If it can be done directly,
1330 NO_REGS is returned. */
1331 #define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
1332 /* Restrict which direct reloads are allowed for VFP/iWMMXt regs. */ \
1333 ((TARGET_VFP && TARGET_HARD_FLOAT \
1334 && IS_VFP_CLASS (CLASS)) \
1335 ? coproc_secondary_reload_class (MODE, X, FALSE) \
1336 : (TARGET_IWMMXT && (CLASS) == IWMMXT_REGS) \
1337 ? coproc_secondary_reload_class (MODE, X, TRUE) \
1338 : TARGET_32BIT \
1339 ? (((MODE) == HImode && ! arm_arch4 && true_regnum (X) == -1) \
1340 ? GENERAL_REGS : NO_REGS) \
1341 : THUMB_SECONDARY_OUTPUT_RELOAD_CLASS (CLASS, MODE, X))
1342
1343 /* If we need to load shorts byte-at-a-time, then we need a scratch. */
1344 #define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
1345 /* Restrict which direct reloads are allowed for VFP/iWMMXt regs. */ \
1346 ((TARGET_VFP && TARGET_HARD_FLOAT \
1347 && IS_VFP_CLASS (CLASS)) \
1348 ? coproc_secondary_reload_class (MODE, X, FALSE) : \
1349 (TARGET_IWMMXT && (CLASS) == IWMMXT_REGS) ? \
1350 coproc_secondary_reload_class (MODE, X, TRUE) : \
1351 /* Cannot load constants into Cirrus registers. */ \
1352 (TARGET_MAVERICK && TARGET_HARD_FLOAT \
1353 && (CLASS) == CIRRUS_REGS \
1354 && (CONSTANT_P (X) || GET_CODE (X) == SYMBOL_REF)) \
1355 ? GENERAL_REGS : \
1356 (TARGET_32BIT ? \
1357 (((CLASS) == IWMMXT_REGS || (CLASS) == IWMMXT_GR_REGS) \
1358 && CONSTANT_P (X)) \
1359 ? GENERAL_REGS : \
1360 (((MODE) == HImode && ! arm_arch4 \
1361 && (GET_CODE (X) == MEM \
1362 || ((GET_CODE (X) == REG || GET_CODE (X) == SUBREG) \
1363 && true_regnum (X) == -1))) \
1364 ? GENERAL_REGS : NO_REGS) \
1365 : THUMB_SECONDARY_INPUT_RELOAD_CLASS (CLASS, MODE, X)))
1366
1367 /* Try a machine-dependent way of reloading an illegitimate address
1368 operand. If we find one, push the reload and jump to WIN. This
1369 macro is used in only one place: `find_reloads_address' in reload.c.
1370
1371 For the ARM, we wish to handle large displacements off a base
1372 register by splitting the addend across a MOV and the mem insn.
1373 This can cut the number of reloads needed. */
1374 #define ARM_LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, IND, WIN) \
1375 do \
1376 { \
1377 if (GET_CODE (X) == PLUS \
1378 && GET_CODE (XEXP (X, 0)) == REG \
1379 && REGNO (XEXP (X, 0)) < FIRST_PSEUDO_REGISTER \
1380 && REG_MODE_OK_FOR_BASE_P (XEXP (X, 0), MODE) \
1381 && GET_CODE (XEXP (X, 1)) == CONST_INT) \
1382 { \
1383 HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
1384 HOST_WIDE_INT low, high; \
1385 \
1386 if (MODE == DImode || (MODE == DFmode && TARGET_SOFT_FLOAT)) \
1387 low = ((val & 0xf) ^ 0x8) - 0x8; \
1388 else if (TARGET_MAVERICK && TARGET_HARD_FLOAT) \
1389 /* Need to be careful, -256 is not a valid offset. */ \
1390 low = val >= 0 ? (val & 0xff) : -((-val) & 0xff); \
1391 else if (MODE == SImode \
1392 || (MODE == SFmode && TARGET_SOFT_FLOAT) \
1393 || ((MODE == HImode || MODE == QImode) && ! arm_arch4)) \
1394 /* Need to be careful, -4096 is not a valid offset. */ \
1395 low = val >= 0 ? (val & 0xfff) : -((-val) & 0xfff); \
1396 else if ((MODE == HImode || MODE == QImode) && arm_arch4) \
1397 /* Need to be careful, -256 is not a valid offset. */ \
1398 low = val >= 0 ? (val & 0xff) : -((-val) & 0xff); \
1399 else if (GET_MODE_CLASS (MODE) == MODE_FLOAT \
1400 && TARGET_HARD_FLOAT && TARGET_FPA) \
1401 /* Need to be careful, -1024 is not a valid offset. */ \
1402 low = val >= 0 ? (val & 0x3ff) : -((-val) & 0x3ff); \
1403 else \
1404 break; \
1405 \
1406 high = ((((val - low) & (unsigned HOST_WIDE_INT) 0xffffffff) \
1407 ^ (unsigned HOST_WIDE_INT) 0x80000000) \
1408 - (unsigned HOST_WIDE_INT) 0x80000000); \
1409 /* Check for overflow or zero */ \
1410 if (low == 0 || high == 0 || (high + low != val)) \
1411 break; \
1412 \
1413 /* Reload the high part into a base reg; leave the low part \
1414 in the mem. */ \
1415 X = gen_rtx_PLUS (GET_MODE (X), \
1416 gen_rtx_PLUS (GET_MODE (X), XEXP (X, 0), \
1417 GEN_INT (high)), \
1418 GEN_INT (low)); \
1419 push_reload (XEXP (X, 0), NULL_RTX, &XEXP (X, 0), NULL, \
1420 MODE_BASE_REG_CLASS (MODE), GET_MODE (X), \
1421 VOIDmode, 0, 0, OPNUM, TYPE); \
1422 goto WIN; \
1423 } \
1424 } \
1425 while (0)
1426
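/* Worked example (illustrative): for an SImode access at offset 0x1234 from
   r4, the offset is too large to encode directly, so the code above splits
   it into high = 0x1000 and low = 0x234 and reloads

       r4 + 0x1000

   into a base register, leaving something like

       add ip, r4, #4096
       ldr r0, [ip, #564]

   after reload (register choices are arbitrary here).  */
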
1427 /* XXX If an HImode FP+large_offset address is converted to an HImode
1428 SP+large_offset address, then reload won't know how to fix it. It sees
1429 only that SP isn't valid for HImode, and so reloads the SP into an index
1430 register, but the resulting address is still invalid because the offset
1431 is too big. We fix it here instead by reloading the entire address. */
1432 /* We could probably achieve better results by defining PROMOTE_MODE to help
1433 cope with the variances between the Thumb's signed and unsigned byte and
1434 halfword load instructions. */
1435 /* ??? This should be safe for thumb2, but we may be able to do better. */
1436 #define THUMB_LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, IND_L, WIN) \
1437 do { \
1438 rtx new_x = thumb_legitimize_reload_address (&X, MODE, OPNUM, TYPE, IND_L); \
1439 if (new_x) \
1440 { \
1441 X = new_x; \
1442 goto WIN; \
1443 } \
1444 } while (0)
1445
1446 #define LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, IND_LEVELS, WIN) \
1447 if (TARGET_ARM) \
1448 ARM_LEGITIMIZE_RELOAD_ADDRESS (X, MODE, OPNUM, TYPE, IND_LEVELS, WIN); \
1449 else \
1450 THUMB_LEGITIMIZE_RELOAD_ADDRESS (X, MODE, OPNUM, TYPE, IND_LEVELS, WIN)
1451
1452 /* Return the maximum number of consecutive registers
1453 needed to represent mode MODE in a register of class CLASS.
1454 ARM regs are UNITS_PER_WORD bits while FPA regs can hold any FP mode */
1455 #define CLASS_MAX_NREGS(CLASS, MODE) \
1456 (((CLASS) == FPA_REGS || (CLASS) == CIRRUS_REGS) ? 1 : ARM_NUM_REGS (MODE))
1457
1458 /* If defined, gives a class of registers that cannot be used as the
1459 operand of a SUBREG that changes the mode of the object illegally. */
1460
1461 /* Moves between FPA_REGS and GENERAL_REGS are two memory insns.
1462 Moves between VFP_REGS and GENERAL_REGS are a single insn, but
1463 it is typically more expensive than a single memory access. We set
1464 the cost to less than two memory accesses so that floating
1465 point to integer conversion does not go through memory. */
1466 #define REGISTER_MOVE_COST(MODE, FROM, TO) \
1467 (TARGET_32BIT ? \
1468 ((FROM) == FPA_REGS && (TO) != FPA_REGS ? 20 : \
1469 (FROM) != FPA_REGS && (TO) == FPA_REGS ? 20 : \
1470 IS_VFP_CLASS (FROM) && !IS_VFP_CLASS (TO) ? 15 : \
1471 !IS_VFP_CLASS (FROM) && IS_VFP_CLASS (TO) ? 15 : \
1472 (FROM) == IWMMXT_REGS && (TO) != IWMMXT_REGS ? 4 : \
1473 (FROM) != IWMMXT_REGS && (TO) == IWMMXT_REGS ? 4 : \
1474 (FROM) == IWMMXT_GR_REGS || (TO) == IWMMXT_GR_REGS ? 20 : \
1475 (FROM) == CIRRUS_REGS && (TO) != CIRRUS_REGS ? 20 : \
1476 (FROM) != CIRRUS_REGS && (TO) == CIRRUS_REGS ? 20 : \
1477 2) \
1478 : \
1479 ((FROM) == HI_REGS || (TO) == HI_REGS) ? 4 : 2)
1480 \f
1481 /* Stack layout; function entry, exit and calling. */
1482
1483 /* Define this if pushing a word on the stack
1484 makes the stack pointer a smaller address. */
1485 #define STACK_GROWS_DOWNWARD 1
1486
1487 /* Define this to nonzero if the nominal address of the stack frame
1488 is at the high-address end of the local variables;
1489 that is, each additional local variable allocated
1490 goes at a more negative offset in the frame. */
1491 #define FRAME_GROWS_DOWNWARD 1
1492
1493 /* The amount of scratch space needed by _interwork_{r7,r11}_call_via_rN().
1494 When present, it is one word in size, and sits at the top of the frame,
1495 between the soft frame pointer and either r7 or r11.
1496
1497 We only need _interwork_rM_call_via_rN() for -mcaller-super-interworking,
1498 and only then if some outgoing arguments are passed on the stack. It would
1499 be tempting to also check whether the stack arguments are passed by indirect
1500 calls, but there seems to be no reason in principle why a post-reload pass
1501 couldn't convert a direct call into an indirect one. */
1502 #define CALLER_INTERWORKING_SLOT_SIZE \
1503 (TARGET_CALLER_INTERWORKING \
1504 && crtl->outgoing_args_size != 0 \
1505 ? UNITS_PER_WORD : 0)
1506
1507 /* Offset within stack frame to start allocating local variables at.
1508 If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
1509 first local allocated. Otherwise, it is the offset to the BEGINNING
1510 of the first local allocated. */
1511 #define STARTING_FRAME_OFFSET 0
1512
1513 /* If we generate an insn to push BYTES bytes,
1514 this says how many the stack pointer really advances by. */
1515 /* The push insns do not do this rounding implicitly.
1516 So don't define this. */
1517 /* #define PUSH_ROUNDING(NPUSHED) ROUND_UP_WORD (NPUSHED) */
1518
1519 /* Define this if the maximum size of all the outgoing args is to be
1520 accumulated and pushed during the prologue. The amount can be
1521 found in the variable crtl->outgoing_args_size. */
1522 #define ACCUMULATE_OUTGOING_ARGS 1
1523
1524 /* Offset of first parameter from the argument pointer register value. */
1525 #define FIRST_PARM_OFFSET(FNDECL) (TARGET_ARM ? 4 : 0)
1526
1527 /* Define how to find the value returned by a library function
1528 assuming the value has mode MODE. */
1529 #define LIBCALL_VALUE(MODE) \
1530 (TARGET_AAPCS_BASED ? aapcs_libcall_value (MODE) \
1531 : (TARGET_32BIT && TARGET_HARD_FLOAT_ABI && TARGET_FPA \
1532 && GET_MODE_CLASS (MODE) == MODE_FLOAT) \
1533 ? gen_rtx_REG (MODE, FIRST_FPA_REGNUM) \
1534 : TARGET_32BIT && TARGET_HARD_FLOAT_ABI && TARGET_MAVERICK \
1535 && GET_MODE_CLASS (MODE) == MODE_FLOAT \
1536 ? gen_rtx_REG (MODE, FIRST_CIRRUS_FP_REGNUM) \
1537 : TARGET_IWMMXT_ABI && arm_vector_mode_supported_p (MODE) \
1538 ? gen_rtx_REG (MODE, FIRST_IWMMXT_REGNUM) \
1539 : gen_rtx_REG (MODE, ARG_REGISTER (1)))
1540
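/* Roughly: an SImode libcall result comes back in r0 (ARG_REGISTER (1));
   on a 32-bit FPA hard-float configuration a DFmode result comes back in
   the first FPA register instead; and for AAPCS-based targets the whole
   decision is delegated to aapcs_libcall_value ().  */
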
1541 /* 1 if REGNO is a possible register number for a function value. */
1542 #define FUNCTION_VALUE_REGNO_P(REGNO) \
1543 ((REGNO) == ARG_REGISTER (1) \
1544 || (TARGET_AAPCS_BASED && TARGET_32BIT \
1545 && TARGET_VFP && TARGET_HARD_FLOAT \
1546 && (REGNO) == FIRST_VFP_REGNUM) \
1547 || (TARGET_32BIT && ((REGNO) == FIRST_CIRRUS_FP_REGNUM) \
1548 && TARGET_HARD_FLOAT_ABI && TARGET_MAVERICK) \
1549 || ((REGNO) == FIRST_IWMMXT_REGNUM && TARGET_IWMMXT_ABI) \
1550 || (TARGET_32BIT && ((REGNO) == FIRST_FPA_REGNUM) \
1551 && TARGET_HARD_FLOAT_ABI && TARGET_FPA))
1552
1553 /* Amount of memory needed for an untyped call to save all possible return
1554 registers. */
1555 #define APPLY_RESULT_SIZE arm_apply_result_size()
1556
1557 /* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
1558 values must be in memory. On the ARM, they need only do so if larger
1559 than a word, or if they contain elements offset from zero in the struct. */
1560 #define DEFAULT_PCC_STRUCT_RETURN 0
1561
1562 /* These bits describe the different types of function supported
1563 by the ARM backend. They are mutually exclusive, i.e. a function cannot be both a
1564 normal function and an interworked function, for example. Knowing the
1565 type of a function is important for determining its prologue and
1566 epilogue sequences.
1567 Note value 7 is currently unassigned. Also note that the interrupt
1568 function types all have bit 2 set, so that they can be tested for easily.
1569 Note that 0 is deliberately chosen for ARM_FT_UNKNOWN so that when the
1570 machine_function structure is initialized (to zero) func_type will
1571 default to unknown. This will force the first use of arm_current_func_type
1572 to call arm_compute_func_type. */
1573 #define ARM_FT_UNKNOWN 0 /* Type has not yet been determined. */
1574 #define ARM_FT_NORMAL 1 /* Your normal, straightforward function. */
1575 #define ARM_FT_INTERWORKED 2 /* A function that supports interworking. */
1576 #define ARM_FT_ISR 4 /* An interrupt service routine. */
1577 #define ARM_FT_FIQ 5 /* A fast interrupt service routine. */
1578 #define ARM_FT_EXCEPTION 6 /* An ARM exception handler (subcase of ISR). */
1579
1580 #define ARM_FT_TYPE_MASK ((1 << 3) - 1)
1581
1582 /* In addition functions can have several type modifiers,
1583 outlined by these bit masks: */
1584 #define ARM_FT_INTERRUPT (1 << 2) /* Note overlap with FT_ISR and above. */
1585 #define ARM_FT_NAKED (1 << 3) /* No prologue or epilogue. */
1586 #define ARM_FT_VOLATILE (1 << 4) /* Does not return. */
1587 #define ARM_FT_NESTED (1 << 5) /* Embedded inside another func. */
1588 #define ARM_FT_STACKALIGN (1 << 6) /* Called with misaligned stack. */
1589
1590 /* Some macros to test these flags. */
1591 #define ARM_FUNC_TYPE(t) (t & ARM_FT_TYPE_MASK)
1592 #define IS_INTERRUPT(t) (t & ARM_FT_INTERRUPT)
1593 #define IS_VOLATILE(t) (t & ARM_FT_VOLATILE)
1594 #define IS_NAKED(t) (t & ARM_FT_NAKED)
1595 #define IS_NESTED(t) (t & ARM_FT_NESTED)
1596 #define IS_STACKALIGN(t) (t & ARM_FT_STACKALIGN)
1597
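/* For example, a naked interrupt handler carries the type
   (ARM_FT_ISR | ARM_FT_NAKED); ARM_FUNC_TYPE of that value is ARM_FT_ISR,
   IS_NAKED is true, and IS_INTERRUPT is also true, because all of the
   ISR/FIQ/exception type codes have bit 2 set.  */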
1598
1599 /* Structure used to hold the function stack frame layout. Offsets are
1600 relative to the stack pointer on function entry. Positive offsets are
1601 in the direction of stack growth.
1602 Only soft_frame is used in thumb mode. */
1603
1604 typedef struct GTY(()) arm_stack_offsets
1605 {
1606 int saved_args; /* ARG_POINTER_REGNUM. */
1607 int frame; /* ARM_HARD_FRAME_POINTER_REGNUM. */
1608 int saved_regs;
1609 int soft_frame; /* FRAME_POINTER_REGNUM. */
1610 int locals_base; /* THUMB_HARD_FRAME_POINTER_REGNUM. */
1611 int outgoing_args; /* STACK_POINTER_REGNUM. */
1612 unsigned int saved_regs_mask;
1613 }
1614 arm_stack_offsets;
1615
1616 #ifndef GENERATOR_FILE
1617 /* A C structure for machine-specific, per-function data.
1618 This is added to the cfun structure. */
1619 typedef struct GTY(()) machine_function
1620 {
1621 /* Additional stack adjustment in __builtin_eh_throw. */
1622 rtx eh_epilogue_sp_ofs;
1623 /* Records if LR has to be saved for far jumps. */
1624 int far_jump_used;
1625 /* Records if ARG_POINTER was ever live. */
1626 int arg_pointer_live;
1627 /* Records if the save of LR has been eliminated. */
1628 int lr_save_eliminated;
1629 /* The size of the stack frame. Only valid after reload. */
1630 arm_stack_offsets stack_offsets;
1631 /* Records the type of the current function. */
1632 unsigned long func_type;
1633 /* Record if the function has a variable argument list. */
1634 int uses_anonymous_args;
1635 /* Records if sibcalls are blocked because an argument
1636 register is needed to preserve stack alignment. */
1637 int sibcall_blocked;
1638 /* The PIC register for this function. This might be a pseudo. */
1639 rtx pic_reg;
1640 /* Labels for per-function Thumb call-via stubs. One per potential calling
1641 register. We can never call via LR or PC. We can call via SP if a
1642 trampoline happens to be on the top of the stack. */
1643 rtx call_via[14];
1644 /* Set to 1 when a return insn is output, this means that the epilogue
1645 is not needed. */
1646 int return_used_this_function;
1647 /* When outputting Thumb-1 code, record the last insn that provides
1648 information about condition codes, and the comparison operands. */
1649 rtx thumb1_cc_insn;
1650 rtx thumb1_cc_op0;
1651 rtx thumb1_cc_op1;
1652 /* Also record the CC mode that is supported. */
1653 enum machine_mode thumb1_cc_mode;
1654 }
1655 machine_function;
1656 #endif
1657
1658 /* As in the machine_function, a global set of call-via labels, for code
1659 that is in text_section. */
1660 extern GTY(()) rtx thumb_call_via_label[14];
1661
1662 /* The number of potential ways of assigning to a co-processor. */
1663 #define ARM_NUM_COPROC_SLOTS 1
1664
1665 /* Enumeration of procedure calling standard variants. We don't really
1666 support all of these yet. */
1667 enum arm_pcs
1668 {
1669 ARM_PCS_AAPCS, /* Base standard AAPCS. */
1670 ARM_PCS_AAPCS_VFP, /* Use VFP registers for floating point values. */
1671 ARM_PCS_AAPCS_IWMMXT, /* Use iWMMXT registers for vectors. */
1672 /* This must be the last AAPCS variant. */
1673 ARM_PCS_AAPCS_LOCAL, /* Private call within this compilation unit. */
1674 ARM_PCS_ATPCS, /* ATPCS. */
1675 ARM_PCS_APCS, /* APCS (legacy Linux etc). */
1676 ARM_PCS_UNKNOWN
1677 };
1678
1679 /* Default procedure calling standard of current compilation unit. */
1680 extern enum arm_pcs arm_pcs_default;
1681
1682 /* A C type for declaring a variable that is used as the first argument of
1683 `FUNCTION_ARG' and other related values. */
1684 typedef struct
1685 {
1686 /* This is the number of registers of arguments scanned so far. */
1687 int nregs;
1688 /* This is the number of iWMMXt register arguments scanned so far. */
1689 int iwmmxt_nregs;
1690 int named_count;
1691 int nargs;
1692 /* Which procedure call variant to use for this call. */
1693 enum arm_pcs pcs_variant;
1694
1695 /* AAPCS related state tracking. */
1696 int aapcs_arg_processed; /* No need to lay out this argument again. */
1697 int aapcs_cprc_slot; /* Index of co-processor rules to handle
1698 this argument, or -1 if using core
1699 registers. */
1700 int aapcs_ncrn;
1701 int aapcs_next_ncrn;
1702 rtx aapcs_reg; /* Register assigned to this argument. */
1703 int aapcs_partial; /* How many bytes are passed in regs (if
1704 split between core regs and the stack).
1705 Zero otherwise. */
1706 int aapcs_cprc_failed[ARM_NUM_COPROC_SLOTS];
1707 int can_split; /* Argument can be split between core regs
1708 and the stack. */
1709 /* Private data for tracking VFP register allocation */
1710 unsigned aapcs_vfp_regs_free;
1711 unsigned aapcs_vfp_reg_alloc;
1712 int aapcs_vfp_rcount;
1713 MACHMODE aapcs_vfp_rmode;
1714 } CUMULATIVE_ARGS;
1715
1716 #define FUNCTION_ARG_PADDING(MODE, TYPE) \
1717 (arm_pad_arg_upward (MODE, TYPE) ? upward : downward)
1718
1719 #define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
1720 (arm_pad_reg_upward (MODE, TYPE, FIRST) ? upward : downward)
1721
1722 /* For AAPCS, padding should never be below the argument. For other ABIs,
1723 * mimic the default. */
1724 #define PAD_VARARGS_DOWN \
1725 ((TARGET_AAPCS_BASED) ? 0 : BYTES_BIG_ENDIAN)
1726
1727 /* Initialize a variable CUM of type CUMULATIVE_ARGS
1728 for a call to a function whose data type is FNTYPE.
1729 For a library call, FNTYPE is 0.
1730 On the ARM, the offset starts at 0. */
1731 #define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
1732 arm_init_cumulative_args (&(CUM), (FNTYPE), (LIBNAME), (FNDECL))
1733
1734 /* If defined, a C expression that gives the alignment boundary, in bits, of an
1735 argument with the specified mode and type. If it is not defined,
1736 `PARM_BOUNDARY' is used for all arguments. */
1737 #define FUNCTION_ARG_BOUNDARY(MODE,TYPE) \
1738 ((ARM_DOUBLEWORD_ALIGN && arm_needs_doubleword_align (MODE, TYPE)) \
1739 ? DOUBLEWORD_ALIGNMENT \
1740 : PARM_BOUNDARY )
1741
1742 /* 1 if N is a possible register number for function argument passing.
1743 On the ARM, r0-r3 are used to pass args. */
1744 #define FUNCTION_ARG_REGNO_P(REGNO) \
1745 (IN_RANGE ((REGNO), 0, 3) \
1746 || (TARGET_AAPCS_BASED && TARGET_VFP && TARGET_HARD_FLOAT \
1747 && IN_RANGE ((REGNO), FIRST_VFP_REGNUM, FIRST_VFP_REGNUM + 15)) \
1748 || (TARGET_IWMMXT_ABI \
1749 && IN_RANGE ((REGNO), FIRST_IWMMXT_REGNUM, FIRST_IWMMXT_REGNUM + 9)))
1750
1751 \f
1752 /* If your target environment doesn't prefix user functions with an
1753 underscore, you may wish to re-define this to prevent any conflicts. */
1754 #ifndef ARM_MCOUNT_NAME
1755 #define ARM_MCOUNT_NAME "*mcount"
1756 #endif
1757
1758 /* Call the function profiler with a given profile label. The Acorn
1759 compiler puts this BEFORE the prologue but gcc puts it afterwards.
1760 On the ARM the full profile code will look like:
1761 .data
1762 LP1
1763 .word 0
1764 .text
1765 mov ip, lr
1766 bl mcount
1767 .word LP1
1768
1769 profile_function() in final.c outputs the .data section, FUNCTION_PROFILER
1770 will output the .text section.
1771
1772 The ``mov ip, lr'' seems like a good idea, to stay consistent with the cc convention.
1773 ``prof'' doesn't seem to mind!
1774
1775 Note - this version of the code is designed to work in both ARM and
1776 Thumb modes. */
1777 #ifndef ARM_FUNCTION_PROFILER
1778 #define ARM_FUNCTION_PROFILER(STREAM, LABELNO) \
1779 { \
1780 char temp[20]; \
1781 rtx sym; \
1782 \
1783 asm_fprintf (STREAM, "\tmov\t%r, %r\n\tbl\t", \
1784 IP_REGNUM, LR_REGNUM); \
1785 assemble_name (STREAM, ARM_MCOUNT_NAME); \
1786 fputc ('\n', STREAM); \
1787 ASM_GENERATE_INTERNAL_LABEL (temp, "LP", LABELNO); \
1788 sym = gen_rtx_SYMBOL_REF (Pmode, temp); \
1789 assemble_aligned_integer (UNITS_PER_WORD, sym); \
1790 }
1791 #endif
1792
1793 #ifdef THUMB_FUNCTION_PROFILER
1794 #define FUNCTION_PROFILER(STREAM, LABELNO) \
1795 if (TARGET_ARM) \
1796 ARM_FUNCTION_PROFILER (STREAM, LABELNO) \
1797 else \
1798 THUMB_FUNCTION_PROFILER (STREAM, LABELNO)
1799 #else
1800 #define FUNCTION_PROFILER(STREAM, LABELNO) \
1801 ARM_FUNCTION_PROFILER (STREAM, LABELNO)
1802 #endif
1803
1804 /* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
1805 the stack pointer does not matter. The value is tested only in
1806 functions that have frame pointers.
1807 No definition is equivalent to always zero.
1808
1809 On the ARM, the function epilogue recovers the stack pointer from the
1810 frame. */
1811 #define EXIT_IGNORE_STACK 1
1812
1813 #define EPILOGUE_USES(REGNO) ((REGNO) == LR_REGNUM)
1814
1815 /* Determine if the epilogue should be output as RTL.
1816 You should override this if you define FUNCTION_EXTRA_EPILOGUE. */
1817 #define USE_RETURN_INSN(ISCOND) \
1818 (TARGET_32BIT ? use_return_insn (ISCOND, NULL) : 0)
1819
1820 /* Definitions for register eliminations.
1821
1822 This is an array of structures. Each structure initializes one pair
1823 of eliminable registers. The "from" register number is given first,
1824 followed by "to". Eliminations of the same "from" register are listed
1825 in order of preference.
1826
1827 We have two registers that can be eliminated on the ARM. First, the
1828 arg pointer register can often be eliminated in favor of the stack
1829 pointer register. Secondly, the pseudo frame pointer register can always
1830 be eliminated; it is replaced with either the stack or the real frame
1831 pointer. Note we have to use {ARM|THUMB}_HARD_FRAME_POINTER_REGNUM
1832 because the definition of HARD_FRAME_POINTER_REGNUM is not a constant. */
1833
1834 #define ELIMINABLE_REGS \
1835 {{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM },\
1836 { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM },\
1837 { ARG_POINTER_REGNUM, ARM_HARD_FRAME_POINTER_REGNUM },\
1838 { ARG_POINTER_REGNUM, THUMB_HARD_FRAME_POINTER_REGNUM },\
1839 { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM },\
1840 { FRAME_POINTER_REGNUM, ARM_HARD_FRAME_POINTER_REGNUM },\
1841 { FRAME_POINTER_REGNUM, THUMB_HARD_FRAME_POINTER_REGNUM }}
1842
1843 /* Define the offset between two registers, one to be eliminated, and the
1844 other its replacement, at the start of a routine. */
1845 #define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
1846 if (TARGET_ARM) \
1847 (OFFSET) = arm_compute_initial_elimination_offset (FROM, TO); \
1848 else \
1849 (OFFSET) = thumb_compute_initial_elimination_offset (FROM, TO)
1850
1851 /* Special case handling of the location of arguments passed on the stack. */
1852 #define DEBUGGER_ARG_OFFSET(value, addr) ((value) ? (value) : arm_debugger_arg_offset (value, addr))
1853
1854 /* Initialize data used by insn expanders. This is called from insn_emit,
1855 once for every function before code is generated. */
1856 #define INIT_EXPANDERS arm_init_expanders ()
1857
1858 /* Length in units of the trampoline for entering a nested function. */
1859 #define TRAMPOLINE_SIZE (TARGET_32BIT ? 16 : 20)
1860
1861 /* Alignment required for a trampoline in bits. */
1862 #define TRAMPOLINE_ALIGNMENT 32
1863 \f
1864 /* Addressing modes, and classification of registers for them. */
1865 #define HAVE_POST_INCREMENT 1
1866 #define HAVE_PRE_INCREMENT TARGET_32BIT
1867 #define HAVE_POST_DECREMENT TARGET_32BIT
1868 #define HAVE_PRE_DECREMENT TARGET_32BIT
1869 #define HAVE_PRE_MODIFY_DISP TARGET_32BIT
1870 #define HAVE_POST_MODIFY_DISP TARGET_32BIT
1871 #define HAVE_PRE_MODIFY_REG TARGET_32BIT
1872 #define HAVE_POST_MODIFY_REG TARGET_32BIT
1873
1874 /* Macros to check register numbers against specific register classes. */
1875
1876 /* These assume that REGNO is a hard or pseudo reg number.
1877 They give nonzero only if REGNO is a hard reg of the suitable class
1878 or a pseudo reg currently allocated to a suitable hard reg.
1879 Since they use reg_renumber, they are safe only once reg_renumber
1880 has been allocated, which happens in local-alloc.c. */
1881 #define TEST_REGNO(R, TEST, VALUE) \
1882 ((R TEST VALUE) || ((unsigned) reg_renumber[R] TEST VALUE))
1883
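/* For instance, TEST_REGNO (REGNO, <, PC_REGNUM) expands to

     ((REGNO < PC_REGNUM) || ((unsigned) reg_renumber[REGNO] < PC_REGNUM))

   so a pseudo register passes once reg_renumber has assigned it a
   suitable hard register.  */
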
1884 /* Don't allow the pc to be used. */
1885 #define ARM_REGNO_OK_FOR_BASE_P(REGNO) \
1886 (TEST_REGNO (REGNO, <, PC_REGNUM) \
1887 || TEST_REGNO (REGNO, ==, FRAME_POINTER_REGNUM) \
1888 || TEST_REGNO (REGNO, ==, ARG_POINTER_REGNUM))
1889
1890 #define THUMB1_REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
1891 (TEST_REGNO (REGNO, <=, LAST_LO_REGNUM) \
1892 || (GET_MODE_SIZE (MODE) >= 4 \
1893 && TEST_REGNO (REGNO, ==, STACK_POINTER_REGNUM)))
1894
1895 #define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
1896 (TARGET_THUMB1 \
1897 ? THUMB1_REGNO_MODE_OK_FOR_BASE_P (REGNO, MODE) \
1898 : ARM_REGNO_OK_FOR_BASE_P (REGNO))
1899
1900 /* Nonzero if X can be the base register in a reg+reg addressing mode.
1901 For Thumb, we cannot use SP + reg, so reject SP.
1902 #define REGNO_MODE_OK_FOR_REG_BASE_P(X, MODE) \
1903 REGNO_MODE_OK_FOR_BASE_P (X, QImode)
1904
1905 /* For ARM code, we don't care about the mode, but for Thumb, the index
1906 must be suitable for use in a QImode load. */
1907 #define REGNO_OK_FOR_INDEX_P(REGNO) \
1908 (REGNO_MODE_OK_FOR_BASE_P (REGNO, QImode) \
1909 && !TEST_REGNO (REGNO, ==, STACK_POINTER_REGNUM))
1910
1911 /* Maximum number of registers that can appear in a valid memory address.
1912 Shifts in addresses can't be by a register. */
1913 #define MAX_REGS_PER_ADDRESS 2
1914
1915 /* Recognize any constant value that is a valid address. */
1916 /* XXX We can address any constant, eventually... */
1917 /* ??? Should the TARGET_ARM here also apply to thumb2? */
1918 #define CONSTANT_ADDRESS_P(X) \
1919 (GET_CODE (X) == SYMBOL_REF \
1920 && (CONSTANT_POOL_ADDRESS_P (X) \
1921 || (TARGET_ARM && optimize > 0 && SYMBOL_REF_FLAG (X))))
1922
1923 /* True if SYMBOL + OFFSET constants must refer to something within
1924 SYMBOL's section. */
1925 #define ARM_OFFSETS_MUST_BE_WITHIN_SECTIONS_P 0
1926
1927 /* Nonzero if the target requires all absolute relocations to be R_ARM_ABS32. */
1928 #ifndef TARGET_DEFAULT_WORD_RELOCATIONS
1929 #define TARGET_DEFAULT_WORD_RELOCATIONS 0
1930 #endif
1931
1932 /* Nonzero if the constant value X is a legitimate general operand.
1933 It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
1934
1935 On the ARM, allow any integer (invalid ones are removed later by insn
1936 patterns), nice doubles and symbol_refs which refer to the function's
1937 constant pool XXX.
1938
1939 When generating pic allow anything. */
1940 #define ARM_LEGITIMATE_CONSTANT_P(X) (flag_pic || ! label_mentioned_p (X))
1941
1942 #define THUMB_LEGITIMATE_CONSTANT_P(X) \
1943 ( GET_CODE (X) == CONST_INT \
1944 || GET_CODE (X) == CONST_DOUBLE \
1945 || CONSTANT_ADDRESS_P (X) \
1946 || flag_pic)
1947
1948 #define LEGITIMATE_CONSTANT_P(X) \
1949 (!arm_cannot_force_const_mem (X) \
1950 && (TARGET_32BIT ? ARM_LEGITIMATE_CONSTANT_P (X) \
1951 : THUMB_LEGITIMATE_CONSTANT_P (X)))
1952
1953 #ifndef SUBTARGET_NAME_ENCODING_LENGTHS
1954 #define SUBTARGET_NAME_ENCODING_LENGTHS
1955 #endif
1956
1957 /* This is a C fragment for the inside of a switch statement.
1958 Each case label should return the number of characters to
1959 be stripped from the start of a function's name, if that
1960 name starts with the indicated character. */
1961 #define ARM_NAME_ENCODING_LENGTHS \
1962 case '*': return 1; \
1963 SUBTARGET_NAME_ENCODING_LENGTHS
1964
1965 /* This is how to output a reference to a user-level label named NAME.
1966 `assemble_name' uses this. */
1967 #undef ASM_OUTPUT_LABELREF
1968 #define ASM_OUTPUT_LABELREF(FILE, NAME) \
1969 arm_asm_output_labelref (FILE, NAME)
1970
1971 /* Output IT instructions for conditionally executed Thumb-2 instructions. */
1972 #define ASM_OUTPUT_OPCODE(STREAM, PTR) \
1973 if (TARGET_THUMB2) \
1974 thumb2_asm_output_opcode (STREAM);
1975
1976 /* The EABI specifies that constructors should go in .init_array.
1977 Other targets use .ctors for compatibility. */
1978 #ifndef ARM_EABI_CTORS_SECTION_OP
1979 #define ARM_EABI_CTORS_SECTION_OP \
1980 "\t.section\t.init_array,\"aw\",%init_array"
1981 #endif
1982 #ifndef ARM_EABI_DTORS_SECTION_OP
1983 #define ARM_EABI_DTORS_SECTION_OP \
1984 "\t.section\t.fini_array,\"aw\",%fini_array"
1985 #endif
1986 #define ARM_CTORS_SECTION_OP \
1987 "\t.section\t.ctors,\"aw\",%progbits"
1988 #define ARM_DTORS_SECTION_OP \
1989 "\t.section\t.dtors,\"aw\",%progbits"
1990
1991 /* Define CTORS_SECTION_ASM_OP. */
1992 #undef CTORS_SECTION_ASM_OP
1993 #undef DTORS_SECTION_ASM_OP
1994 #ifndef IN_LIBGCC2
1995 # define CTORS_SECTION_ASM_OP \
1996 (TARGET_AAPCS_BASED ? ARM_EABI_CTORS_SECTION_OP : ARM_CTORS_SECTION_OP)
1997 # define DTORS_SECTION_ASM_OP \
1998 (TARGET_AAPCS_BASED ? ARM_EABI_DTORS_SECTION_OP : ARM_DTORS_SECTION_OP)
1999 #else /* !defined (IN_LIBGCC2) */
2000 /* In libgcc, CTORS_SECTION_ASM_OP must be a compile-time constant,
2001 so we cannot use the definition above. */
2002 # ifdef __ARM_EABI__
2003 /* The .ctors section is not part of the EABI, so we do not define
2004 CTORS_SECTION_ASM_OP when in libgcc; that prevents crtstuff
2005 from trying to use it. We do define it when doing normal
2006 compilation, as .init_array can be used instead of .ctors. */
2007 /* There is no need to emit begin or end markers when using
2008 init_array; the dynamic linker will compute the size of the
2009 array itself based on special symbols created by the static
2010 linker. However, we do need to arrange to set up
2011 exception-handling here. */
2012 # define CTOR_LIST_BEGIN asm (ARM_EABI_CTORS_SECTION_OP)
2013 # define CTOR_LIST_END /* empty */
2014 # define DTOR_LIST_BEGIN asm (ARM_EABI_DTORS_SECTION_OP)
2015 # define DTOR_LIST_END /* empty */
2016 # else /* !defined (__ARM_EABI__) */
2017 # define CTORS_SECTION_ASM_OP ARM_CTORS_SECTION_OP
2018 # define DTORS_SECTION_ASM_OP ARM_DTORS_SECTION_OP
2019 # endif /* !defined (__ARM_EABI__) */
2020 #endif /* !defined (IN_LIBGCC2) */
2021
2022 /* True if the operating system can merge entities with vague linkage
2023 (e.g., symbols in COMDAT group) during dynamic linking. */
2024 #ifndef TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P
2025 #define TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P true
2026 #endif
2027
2028 #define ARM_OUTPUT_FN_UNWIND(F, PROLOGUE) arm_output_fn_unwind (F, PROLOGUE)
2029
2030 /* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
2031 and check its validity for a certain class.
2032 We have two alternate definitions for each of them.
2033 The usual definition accepts all pseudo regs; the other rejects
2034 them unless they have been allocated suitable hard regs.
2035 The symbol REG_OK_STRICT causes the latter definition to be used.
2036 Thumb-2 has the same restrictions as arm. */
2037 #ifndef REG_OK_STRICT
2038
2039 #define ARM_REG_OK_FOR_BASE_P(X) \
2040 (REGNO (X) <= LAST_ARM_REGNUM \
2041 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
2042 || REGNO (X) == FRAME_POINTER_REGNUM \
2043 || REGNO (X) == ARG_POINTER_REGNUM)
2044
2045 #define ARM_REG_OK_FOR_INDEX_P(X) \
2046 ((REGNO (X) <= LAST_ARM_REGNUM \
2047 && REGNO (X) != STACK_POINTER_REGNUM) \
2048 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
2049 || REGNO (X) == FRAME_POINTER_REGNUM \
2050 || REGNO (X) == ARG_POINTER_REGNUM)
2051
2052 #define THUMB1_REG_MODE_OK_FOR_BASE_P(X, MODE) \
2053 (REGNO (X) <= LAST_LO_REGNUM \
2054 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
2055 || (GET_MODE_SIZE (MODE) >= 4 \
2056 && (REGNO (X) == STACK_POINTER_REGNUM \
2057 || (X) == hard_frame_pointer_rtx \
2058 || (X) == arg_pointer_rtx)))
2059
2060 #define REG_STRICT_P 0
2061
2062 #else /* REG_OK_STRICT */
2063
2064 #define ARM_REG_OK_FOR_BASE_P(X) \
2065 ARM_REGNO_OK_FOR_BASE_P (REGNO (X))
2066
2067 #define ARM_REG_OK_FOR_INDEX_P(X) \
2068 ARM_REGNO_OK_FOR_INDEX_P (REGNO (X))
2069
2070 #define THUMB1_REG_MODE_OK_FOR_BASE_P(X, MODE) \
2071 THUMB1_REGNO_MODE_OK_FOR_BASE_P (REGNO (X), MODE)
2072
2073 #define REG_STRICT_P 1
2074
2075 #endif /* REG_OK_STRICT */
2076
2077 /* Now define some helpers in terms of the above. */
2078
2079 #define REG_MODE_OK_FOR_BASE_P(X, MODE) \
2080 (TARGET_THUMB1 \
2081 ? THUMB1_REG_MODE_OK_FOR_BASE_P (X, MODE) \
2082 : ARM_REG_OK_FOR_BASE_P (X))
2083
2084 /* For 16-bit Thumb, a valid index register is anything that can be used in
2085 a byte load instruction. */
2086 #define THUMB1_REG_OK_FOR_INDEX_P(X) \
2087 THUMB1_REG_MODE_OK_FOR_BASE_P (X, QImode)
2088
2089 /* Nonzero if X is a hard reg that can be used as an index
2090 or if it is a pseudo reg. On the Thumb, the stack pointer
2091 is not suitable. */
2092 #define REG_OK_FOR_INDEX_P(X) \
2093 (TARGET_THUMB1 \
2094 ? THUMB1_REG_OK_FOR_INDEX_P (X) \
2095 : ARM_REG_OK_FOR_INDEX_P (X))
2096
2097 /* Nonzero if X can be the base register in a reg+reg addressing mode.
2098 For Thumb, we cannot use SP + reg, so reject SP.
2099 #define REG_MODE_OK_FOR_REG_BASE_P(X, MODE) \
2100 REG_OK_FOR_INDEX_P (X)
2101 \f
2102 #define ARM_BASE_REGISTER_RTX_P(X) \
2103 (GET_CODE (X) == REG && ARM_REG_OK_FOR_BASE_P (X))
2104
2105 #define ARM_INDEX_REGISTER_RTX_P(X) \
2106 (GET_CODE (X) == REG && ARM_REG_OK_FOR_INDEX_P (X))
2107 \f
2108 /* Define this for compatibility reasons. */
2109 #define HANDLE_PRAGMA_PACK_PUSH_POP
2110
2111 /* Specify the machine mode that this machine uses
2112 for the index in the tablejump instruction. */
2113 #define CASE_VECTOR_MODE Pmode
2114
2115 #define CASE_VECTOR_PC_RELATIVE (TARGET_THUMB2 \
2116 || (TARGET_THUMB1 \
2117 && (optimize_size || flag_pic)))
2118
2119 #define CASE_VECTOR_SHORTEN_MODE(min, max, body) \
2120 (TARGET_THUMB1 \
2121 ? (min >= 0 && max < 512 \
2122 ? (ADDR_DIFF_VEC_FLAGS (body).offset_unsigned = 1, QImode) \
2123 : min >= -256 && max < 256 \
2124 ? (ADDR_DIFF_VEC_FLAGS (body).offset_unsigned = 0, QImode) \
2125 : min >= 0 && max < 8192 \
2126 ? (ADDR_DIFF_VEC_FLAGS (body).offset_unsigned = 1, HImode) \
2127 : min >= -4096 && max < 4096 \
2128 ? (ADDR_DIFF_VEC_FLAGS (body).offset_unsigned = 0, HImode) \
2129 : SImode) \
2130 : ((min < 0 || max >= 0x2000 || !TARGET_THUMB2) ? SImode \
2131 : (max >= 0x200) ? HImode \
2132 : QImode))
2133
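/* For illustration: on Thumb-1 a dispatch table whose offsets span 0..400
   shortens to unsigned QImode entries, one spanning -1000..1000 needs
   signed HImode, and anything wider falls back to SImode; Thumb-2 makes
   the equivalent choice against the 0x200 and 0x2000 limits above.  */
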
2134 /* Signed 'char' is most compatible, but RISC OS wants it unsigned.
2135 Unsigned is probably best, but may break some code. */
2136 #ifndef DEFAULT_SIGNED_CHAR
2137 #define DEFAULT_SIGNED_CHAR 0
2138 #endif
2139
2140 /* Max number of bytes we can move from memory to memory
2141 in one reasonably fast instruction. */
2142 #define MOVE_MAX 4
2143
2144 #undef MOVE_RATIO
2145 #define MOVE_RATIO(speed) (arm_tune_xscale ? 4 : 2)
2146
2147 /* Define if operations between registers always perform the operation
2148 on the full register even if a narrower mode is specified. */
2149 #define WORD_REGISTER_OPERATIONS
2150
2151 /* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
2152 will either zero-extend or sign-extend. The value of this macro should
2153 be the code that says which one of the two operations is implicitly
2154 done, UNKNOWN if none. */
2155 #define LOAD_EXTEND_OP(MODE) \
2156 (TARGET_THUMB ? ZERO_EXTEND : \
2157 ((arm_arch4 || (MODE) == QImode) ? ZERO_EXTEND \
2158 : ((BYTES_BIG_ENDIAN && (MODE) == HImode) ? SIGN_EXTEND : UNKNOWN)))
2159
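/* So a QImode load is always treated as zero-extending; an HImode load
   zero-extends on ARMv4 and later, is modelled as sign-extending on older
   big-endian targets, and is UNKNOWN otherwise.  In Thumb mode every
   narrow load zero-extends.  */
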
2160 /* Nonzero if access to memory by bytes is slow and undesirable. */
2161 #define SLOW_BYTE_ACCESS 0
2162
2163 #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) 1
2164
2165 /* Immediate shift counts are truncated by the output routines (or was it
2166 the assembler?). Shift counts in a register are truncated by ARM. Note
2167 that the native compiler puts too large (> 32) immediate shift counts
2168 into a register and shifts by the register, letting the ARM decide what
2169 to do instead of doing that itself. */
2170 /* This is all wrong. Defining SHIFT_COUNT_TRUNCATED tells combine that
2171 code like (X << (Y % 32)) for register X, Y is equivalent to (X << Y).
2172 On the ARM, Y in a register is used modulo 256 for the shift. Only for
2173 rotates is modulo 32 used. */
2174 /* #define SHIFT_COUNT_TRUNCATED 1 */
2175
2176 /* All integers have the same format so truncation is easy. */
2177 #define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
2178
2179 /* Calling from registers is a massive pain. */
2180 #define NO_FUNCTION_CSE 1
2181
2182 /* The machine modes of pointers and functions */
2183 #define Pmode SImode
2184 #define FUNCTION_MODE Pmode
2185
2186 #define ARM_FRAME_RTX(X) \
2187 ( (X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
2188 || (X) == arg_pointer_rtx)
2189
2190 /* Moves to and from memory are quite expensive */
2191 #define MEMORY_MOVE_COST(M, CLASS, IN) \
2192 (TARGET_32BIT ? 10 : \
2193 ((GET_MODE_SIZE (M) < 4 ? 8 : 2 * GET_MODE_SIZE (M)) \
2194 * (CLASS == LO_REGS ? 1 : 2)))
2195
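/* Worked out: any 32-bit (ARM/Thumb-2) memory move costs 10.  For Thumb-1,
   an SImode move through a low register costs (2 * 4) * 1 = 8, while a
   QImode move through a high register costs 8 * 2 = 16, reflecting the
   extra work needed to get data in and out of the high registers.  */
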
2196 /* Try to generate sequences that don't involve branches; we can then use
2197 conditional instructions. */
2198 #define BRANCH_COST(speed_p, predictable_p) \
2199 (TARGET_32BIT ? 4 : (optimize > 0 ? 2 : 0))
2200 \f
2201 /* Position Independent Code. */
2202 /* We decide which register to use based on the compilation options and
2203 the assembler in use; this is more general than the APCS restriction of
2204 using sb (r9) all the time. */
2205 extern unsigned arm_pic_register;
2206
2207 /* The register number of the register used to address a table of static
2208 data addresses in memory. */
2209 #define PIC_OFFSET_TABLE_REGNUM arm_pic_register
2210
2211 /* We can't directly access anything that contains a symbol,
2212 nor can we indirect via the constant pool. One exception is
2213 UNSPEC_TLS, which is always PIC. */
2214 #define LEGITIMATE_PIC_OPERAND_P(X) \
2215 (!(symbol_mentioned_p (X) \
2216 || label_mentioned_p (X) \
2217 || (GET_CODE (X) == SYMBOL_REF \
2218 && CONSTANT_POOL_ADDRESS_P (X) \
2219 && (symbol_mentioned_p (get_pool_constant (X)) \
2220 || label_mentioned_p (get_pool_constant (X))))) \
2221 || tls_mentioned_p (X))
2222
2223 /* We need to know when we are making a constant pool; this determines
2224 whether data needs to be in the GOT or can be referenced via a GOT
2225 offset. */
2226 extern int making_const_table;
2227 \f
2228 /* Handle pragmas for compatibility with Intel's compilers. */
2229 /* Also abuse this to register additional C specific EABI attributes. */
2230 #define REGISTER_TARGET_PRAGMAS() do { \
2231 c_register_pragma (0, "long_calls", arm_pr_long_calls); \
2232 c_register_pragma (0, "no_long_calls", arm_pr_no_long_calls); \
2233 c_register_pragma (0, "long_calls_off", arm_pr_long_calls_off); \
2234 arm_lang_object_attributes_init(); \
2235 } while (0)
2236
2237 /* Condition code information. */
2238 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2239 return the mode to be used for the comparison. */
2240
2241 #define SELECT_CC_MODE(OP, X, Y) arm_select_cc_mode (OP, X, Y)
2242
2243 #define REVERSIBLE_CC_MODE(MODE) 1
2244
2245 #define REVERSE_CONDITION(CODE,MODE) \
2246 (((MODE) == CCFPmode || (MODE) == CCFPEmode) \
2247 ? reverse_condition_maybe_unordered (CODE) \
2248 : reverse_condition (CODE))
2249
2250 #define CANONICALIZE_COMPARISON(CODE, OP0, OP1) \
2251 (CODE) = arm_canonicalize_comparison (CODE, &(OP0), &(OP1))
2252
2253 /* The arm5 clz instruction returns 32. */
2254 #define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = 32, 1)
2255 #define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = 32, 1)
2256 \f
2257 #define CC_STATUS_INIT \
2258 do { cfun->machine->thumb1_cc_insn = NULL_RTX; } while (0)
2259
2260 #undef ASM_APP_OFF
2261 #define ASM_APP_OFF (TARGET_THUMB1 ? "\t.code\t16\n" : \
2262 TARGET_THUMB2 ? "\t.thumb\n" : "")
2263
2264 /* Output a push or a pop instruction (only used when profiling).
2265 We can't push STATIC_CHAIN_REGNUM (r12) directly with Thumb-1. We know
2266 that ASM_OUTPUT_REG_PUSH will be matched with ASM_OUTPUT_REG_POP, and
2267 that r7 isn't used by the function profiler, so we can use it as a
2268 scratch reg. WARNING: This isn't safe in the general case! It may be
2269 sensitive to future changes in final.c:profile_function. */
2270 #define ASM_OUTPUT_REG_PUSH(STREAM, REGNO) \
2271 do \
2272 { \
2273 if (TARGET_ARM) \
2274 asm_fprintf (STREAM,"\tstmfd\t%r!,{%r}\n", \
2275 STACK_POINTER_REGNUM, REGNO); \
2276 else if (TARGET_THUMB1 \
2277 && (REGNO) == STATIC_CHAIN_REGNUM) \
2278 { \
2279 asm_fprintf (STREAM, "\tpush\t{r7}\n"); \
2280 asm_fprintf (STREAM, "\tmov\tr7, %r\n", REGNO);\
2281 asm_fprintf (STREAM, "\tpush\t{r7}\n"); \
2282 } \
2283 else \
2284 asm_fprintf (STREAM, "\tpush {%r}\n", REGNO); \
2285 } while (0)
2286
2287
2288 /* See comment for ASM_OUTPUT_REG_PUSH concerning Thumb-1 issue. */
2289 #define ASM_OUTPUT_REG_POP(STREAM, REGNO) \
2290 do \
2291 { \
2292 if (TARGET_ARM) \
2293 asm_fprintf (STREAM, "\tldmfd\t%r!,{%r}\n", \
2294 STACK_POINTER_REGNUM, REGNO); \
2295 else if (TARGET_THUMB1 \
2296 && (REGNO) == STATIC_CHAIN_REGNUM) \
2297 { \
2298 asm_fprintf (STREAM, "\tpop\t{r7}\n"); \
2299 asm_fprintf (STREAM, "\tmov\t%r, r7\n", REGNO);\
2300 asm_fprintf (STREAM, "\tpop\t{r7}\n"); \
2301 } \
2302 else \
2303 asm_fprintf (STREAM, "\tpop {%r}\n", REGNO); \
2304 } while (0)
2305
2306 /* Jump table alignment is explicit in ASM_OUTPUT_CASE_LABEL. */
2307 #define ADDR_VEC_ALIGN(JUMPTABLE) 0
2308
2309 /* This is how to output a label which precedes a jumptable. Since
2310 Thumb instructions are 2 bytes, we may need explicit alignment here. */
2311 #undef ASM_OUTPUT_CASE_LABEL
2312 #define ASM_OUTPUT_CASE_LABEL(FILE, PREFIX, NUM, JUMPTABLE) \
2313 do \
2314 { \
2315 if (TARGET_THUMB && GET_MODE (PATTERN (JUMPTABLE)) == SImode) \
2316 ASM_OUTPUT_ALIGN (FILE, 2); \
2317 (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM); \
2318 } \
2319 while (0)
2320
2321 /* Make sure subsequent insns are aligned after a TBB. */
2322 #define ASM_OUTPUT_CASE_END(FILE, NUM, JUMPTABLE) \
2323 do \
2324 { \
2325 if (GET_MODE (PATTERN (JUMPTABLE)) == QImode) \
2326 ASM_OUTPUT_ALIGN (FILE, 1); \
2327 } \
2328 while (0)
2329
2330 #define ARM_DECLARE_FUNCTION_NAME(STREAM, NAME, DECL) \
2331 do \
2332 { \
2333 if (TARGET_THUMB) \
2334 { \
2335 if (is_called_in_ARM_mode (DECL) \
2336 || (TARGET_THUMB1 && !TARGET_THUMB1_ONLY \
2337 && cfun->is_thunk)) \
2338 fprintf (STREAM, "\t.code 32\n") ; \
2339 else if (TARGET_THUMB1) \
2340 fprintf (STREAM, "\t.code\t16\n\t.thumb_func\n") ; \
2341 else \
2342 fprintf (STREAM, "\t.thumb\n\t.thumb_func\n") ; \
2343 } \
2344 if (TARGET_POKE_FUNCTION_NAME) \
2345 arm_poke_function_name (STREAM, (const char *) NAME); \
2346 } \
2347 while (0)
2348
2349 /* For aliases of functions we use .thumb_set instead. */
2350 #define ASM_OUTPUT_DEF_FROM_DECLS(FILE, DECL1, DECL2) \
2351 do \
2352 { \
2353 const char *const LABEL1 = XSTR (XEXP (DECL_RTL (DECL1), 0), 0); \
2354 const char *const LABEL2 = IDENTIFIER_POINTER (DECL2); \
2355 \
2356 if (TARGET_THUMB && TREE_CODE (DECL1) == FUNCTION_DECL) \
2357 { \
2358 fprintf (FILE, "\t.thumb_set "); \
2359 assemble_name (FILE, LABEL1); \
2360 fprintf (FILE, ","); \
2361 assemble_name (FILE, LABEL2); \
2362 fprintf (FILE, "\n"); \
2363 } \
2364 else \
2365 ASM_OUTPUT_DEF (FILE, LABEL1, LABEL2); \
2366 } \
2367 while (0)
2368
2369 #ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
2370 /* To support -falign-* switches we need to use .p2align so
2371 that alignment directives in code sections will be padded
2372 with no-op instructions, rather than zeroes. */
2373 #define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE, LOG, MAX_SKIP) \
2374 if ((LOG) != 0) \
2375 { \
2376 if ((MAX_SKIP) == 0) \
2377 fprintf ((FILE), "\t.p2align %d\n", (int) (LOG)); \
2378 else \
2379 fprintf ((FILE), "\t.p2align %d,,%d\n", \
2380 (int) (LOG), (int) (MAX_SKIP)); \
2381 }
2382 #endif
2383 \f
2384 /* Add two bytes to the length of conditionally executed Thumb-2
2385 instructions for the IT instruction. */
2386 #define ADJUST_INSN_LENGTH(insn, length) \
2387 if (TARGET_THUMB2 && GET_CODE (PATTERN (insn)) == COND_EXEC) \
2388 length += 2;
2389
2390 /* Only perform branch elimination (by making instructions conditional) if
2391 we're optimizing. For Thumb-2 check if any IT instructions need
2392 outputting. */
2393 #define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
2394 if (TARGET_ARM && optimize) \
2395 arm_final_prescan_insn (INSN); \
2396 else if (TARGET_THUMB2) \
2397 thumb2_final_prescan_insn (INSN); \
2398 else if (TARGET_THUMB1) \
2399 thumb1_final_prescan_insn (INSN)
2400
2401 #define ARM_SIGN_EXTEND(x) ((HOST_WIDE_INT) \
2402 (HOST_BITS_PER_WIDE_INT <= 32 ? (unsigned HOST_WIDE_INT) (x) \
2403 : ((((unsigned HOST_WIDE_INT)(x)) & (unsigned HOST_WIDE_INT) 0xffffffff) |\
2404 ((((unsigned HOST_WIDE_INT)(x)) & (unsigned HOST_WIDE_INT) 0x80000000) \
2405 ? ((~ (unsigned HOST_WIDE_INT) 0) \
2406 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) \
2407 : 0))))
2408
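/* Worked example on a 64-bit host (HOST_BITS_PER_WIDE_INT == 64):
   ARM_SIGN_EXTEND (0x80000000) yields (HOST_WIDE_INT) 0xffffffff80000000,
   while ARM_SIGN_EXTEND (0x7fffffff) yields 0x7fffffff; a 32-bit target
   word is widened to the host-wide integer with its sign bit propagated.
   On a 32-bit host the value is passed through unchanged.  */
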
2409 /* A C expression whose value is RTL representing the value of the return
2410 address for the frame COUNT steps up from the current frame. */
2411
2412 #define RETURN_ADDR_RTX(COUNT, FRAME) \
2413 arm_return_addr (COUNT, FRAME)
2414
2415 /* Mask of the bits in the PC that contain the real return address
2416 when running in 26-bit mode. */
2417 #define RETURN_ADDR_MASK26 (0x03fffffc)
2418
2419 /* Pick up the return address upon entry to a procedure. Used for
2420 dwarf2 unwind information. This also enables the table driven
2421 mechanism. */
2422 #define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)
2423 #define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (LR_REGNUM)
2424
2425 /* Used to mask out junk bits from the return address, such as
2426 processor state, interrupt status, condition codes and the like. */
2427 #define MASK_RETURN_ADDR \
2428 /* If we are generating code for an ARM2/ARM3 machine or for an ARM6 \
2429 in 26 bit mode, the condition codes must be masked out of the \
2430 return address. This does not apply to ARM6 and later processors \
2431 when running in 32 bit mode. */ \
2432 ((arm_arch4 || TARGET_THUMB) \
2433 ? (gen_int_mode ((unsigned long)0xffffffff, Pmode)) \
2434 : arm_gen_return_addr_mask ())
2435
2436 \f
2437 /* Neon defines builtins from ARM_BUILTIN_MAX upwards, though they don't have
2438 symbolic names defined here (which would require too much duplication).
2439 FIXME? */
2440 enum arm_builtins
2441 {
2442 ARM_BUILTIN_GETWCX,
2443 ARM_BUILTIN_SETWCX,
2444
2445 ARM_BUILTIN_WZERO,
2446
2447 ARM_BUILTIN_WAVG2BR,
2448 ARM_BUILTIN_WAVG2HR,
2449 ARM_BUILTIN_WAVG2B,
2450 ARM_BUILTIN_WAVG2H,
2451
2452 ARM_BUILTIN_WACCB,
2453 ARM_BUILTIN_WACCH,
2454 ARM_BUILTIN_WACCW,
2455
2456 ARM_BUILTIN_WMACS,
2457 ARM_BUILTIN_WMACSZ,
2458 ARM_BUILTIN_WMACU,
2459 ARM_BUILTIN_WMACUZ,
2460
2461 ARM_BUILTIN_WSADB,
2462 ARM_BUILTIN_WSADBZ,
2463 ARM_BUILTIN_WSADH,
2464 ARM_BUILTIN_WSADHZ,
2465
2466 ARM_BUILTIN_WALIGN,
2467
2468 ARM_BUILTIN_TMIA,
2469 ARM_BUILTIN_TMIAPH,
2470 ARM_BUILTIN_TMIABB,
2471 ARM_BUILTIN_TMIABT,
2472 ARM_BUILTIN_TMIATB,
2473 ARM_BUILTIN_TMIATT,
2474
2475 ARM_BUILTIN_TMOVMSKB,
2476 ARM_BUILTIN_TMOVMSKH,
2477 ARM_BUILTIN_TMOVMSKW,
2478
2479 ARM_BUILTIN_TBCSTB,
2480 ARM_BUILTIN_TBCSTH,
2481 ARM_BUILTIN_TBCSTW,
2482
2483 ARM_BUILTIN_WMADDS,
2484 ARM_BUILTIN_WMADDU,
2485
2486 ARM_BUILTIN_WPACKHSS,
2487 ARM_BUILTIN_WPACKWSS,
2488 ARM_BUILTIN_WPACKDSS,
2489 ARM_BUILTIN_WPACKHUS,
2490 ARM_BUILTIN_WPACKWUS,
2491 ARM_BUILTIN_WPACKDUS,
2492
2493 ARM_BUILTIN_WADDB,
2494 ARM_BUILTIN_WADDH,
2495 ARM_BUILTIN_WADDW,
2496 ARM_BUILTIN_WADDSSB,
2497 ARM_BUILTIN_WADDSSH,
2498 ARM_BUILTIN_WADDSSW,
2499 ARM_BUILTIN_WADDUSB,
2500 ARM_BUILTIN_WADDUSH,
2501 ARM_BUILTIN_WADDUSW,
2502 ARM_BUILTIN_WSUBB,
2503 ARM_BUILTIN_WSUBH,
2504 ARM_BUILTIN_WSUBW,
2505 ARM_BUILTIN_WSUBSSB,
2506 ARM_BUILTIN_WSUBSSH,
2507 ARM_BUILTIN_WSUBSSW,
2508 ARM_BUILTIN_WSUBUSB,
2509 ARM_BUILTIN_WSUBUSH,
2510 ARM_BUILTIN_WSUBUSW,
2511
2512 ARM_BUILTIN_WAND,
2513 ARM_BUILTIN_WANDN,
2514 ARM_BUILTIN_WOR,
2515 ARM_BUILTIN_WXOR,
2516
2517 ARM_BUILTIN_WCMPEQB,
2518 ARM_BUILTIN_WCMPEQH,
2519 ARM_BUILTIN_WCMPEQW,
2520 ARM_BUILTIN_WCMPGTUB,
2521 ARM_BUILTIN_WCMPGTUH,
2522 ARM_BUILTIN_WCMPGTUW,
2523 ARM_BUILTIN_WCMPGTSB,
2524 ARM_BUILTIN_WCMPGTSH,
2525 ARM_BUILTIN_WCMPGTSW,
2526
2527 ARM_BUILTIN_TEXTRMSB,
2528 ARM_BUILTIN_TEXTRMSH,
2529 ARM_BUILTIN_TEXTRMSW,
2530 ARM_BUILTIN_TEXTRMUB,
2531 ARM_BUILTIN_TEXTRMUH,
2532 ARM_BUILTIN_TEXTRMUW,
2533 ARM_BUILTIN_TINSRB,
2534 ARM_BUILTIN_TINSRH,
2535 ARM_BUILTIN_TINSRW,
2536
2537 ARM_BUILTIN_WMAXSW,
2538 ARM_BUILTIN_WMAXSH,
2539 ARM_BUILTIN_WMAXSB,
2540 ARM_BUILTIN_WMAXUW,
2541 ARM_BUILTIN_WMAXUH,
2542 ARM_BUILTIN_WMAXUB,
2543 ARM_BUILTIN_WMINSW,
2544 ARM_BUILTIN_WMINSH,
2545 ARM_BUILTIN_WMINSB,
2546 ARM_BUILTIN_WMINUW,
2547 ARM_BUILTIN_WMINUH,
2548 ARM_BUILTIN_WMINUB,
2549
2550 ARM_BUILTIN_WMULUM,
2551 ARM_BUILTIN_WMULSM,
2552 ARM_BUILTIN_WMULUL,
2553
2554 ARM_BUILTIN_PSADBH,
2555 ARM_BUILTIN_WSHUFH,
2556
2557 ARM_BUILTIN_WSLLH,
2558 ARM_BUILTIN_WSLLW,
2559 ARM_BUILTIN_WSLLD,
2560 ARM_BUILTIN_WSRAH,
2561 ARM_BUILTIN_WSRAW,
2562 ARM_BUILTIN_WSRAD,
2563 ARM_BUILTIN_WSRLH,
2564 ARM_BUILTIN_WSRLW,
2565 ARM_BUILTIN_WSRLD,
2566 ARM_BUILTIN_WRORH,
2567 ARM_BUILTIN_WRORW,
2568 ARM_BUILTIN_WRORD,
2569 ARM_BUILTIN_WSLLHI,
2570 ARM_BUILTIN_WSLLWI,
2571 ARM_BUILTIN_WSLLDI,
2572 ARM_BUILTIN_WSRAHI,
2573 ARM_BUILTIN_WSRAWI,
2574 ARM_BUILTIN_WSRADI,
2575 ARM_BUILTIN_WSRLHI,
2576 ARM_BUILTIN_WSRLWI,
2577 ARM_BUILTIN_WSRLDI,
2578 ARM_BUILTIN_WRORHI,
2579 ARM_BUILTIN_WRORWI,
2580 ARM_BUILTIN_WRORDI,
2581
2582 ARM_BUILTIN_WUNPCKIHB,
2583 ARM_BUILTIN_WUNPCKIHH,
2584 ARM_BUILTIN_WUNPCKIHW,
2585 ARM_BUILTIN_WUNPCKILB,
2586 ARM_BUILTIN_WUNPCKILH,
2587 ARM_BUILTIN_WUNPCKILW,
2588
2589 ARM_BUILTIN_WUNPCKEHSB,
2590 ARM_BUILTIN_WUNPCKEHSH,
2591 ARM_BUILTIN_WUNPCKEHSW,
2592 ARM_BUILTIN_WUNPCKEHUB,
2593 ARM_BUILTIN_WUNPCKEHUH,
2594 ARM_BUILTIN_WUNPCKEHUW,
2595 ARM_BUILTIN_WUNPCKELSB,
2596 ARM_BUILTIN_WUNPCKELSH,
2597 ARM_BUILTIN_WUNPCKELSW,
2598 ARM_BUILTIN_WUNPCKELUB,
2599 ARM_BUILTIN_WUNPCKELUH,
2600 ARM_BUILTIN_WUNPCKELUW,
2601
2602 ARM_BUILTIN_THREAD_POINTER,
2603
2604 ARM_BUILTIN_NEON_BASE,
2605
2606 ARM_BUILTIN_MAX = ARM_BUILTIN_NEON_BASE /* FIXME: Wrong! */
2607 };
2608
2609 /* Do not emit .note.GNU-stack by default. */
2610 #ifndef NEED_INDICATE_EXEC_STACK
2611 #define NEED_INDICATE_EXEC_STACK 0
2612 #endif
2613
2614 /* The maximum number of parallel loads or stores we support in an ldm/stm
2615 instruction. */
2616 #define MAX_LDM_STM_OPS 4
2617
2618 #endif /* ! GCC_ARM_H */