]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/aarch64/aarch64.h
[PR82096] Fix ICE in int_mode_for_mode with arm-linux-gnueabi
[thirdparty/gcc.git] / gcc / config / aarch64 / aarch64.h
CommitLineData
43e9d192 1/* Machine description for AArch64 architecture.
85ec4feb 2 Copyright (C) 2009-2018 Free Software Foundation, Inc.
43e9d192
IB
3 Contributed by ARM Ltd.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21
22#ifndef GCC_AARCH64_H
23#define GCC_AARCH64_H
24
25/* Target CPU builtins. */
e4ea20c8
KT
26#define TARGET_CPU_CPP_BUILTINS() \
27 aarch64_cpu_cpp_builtins (pfile)
43e9d192
IB
28
29\f
30
e4ea20c8
KT
31#define REGISTER_TARGET_PRAGMAS() aarch64_register_pragmas ()
32
43e9d192
IB
33/* Target machine storage layout. */
34
35#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
36 if (GET_MODE_CLASS (MODE) == MODE_INT \
37 && GET_MODE_SIZE (MODE) < 4) \
38 { \
39 if (MODE == QImode || MODE == HImode) \
40 { \
41 MODE = SImode; \
42 } \
43 }
44
45/* Bits are always numbered from the LSBit. */
46#define BITS_BIG_ENDIAN 0
47
48/* Big/little-endian flavour. */
49#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
50#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
51
52/* AdvSIMD is supported in the default configuration, unless disabled by
683e3333
KT
53 -mgeneral-regs-only or by the +nosimd extension. */
54#define TARGET_SIMD (!TARGET_GENERAL_REGS_ONLY && AARCH64_ISA_SIMD)
55#define TARGET_FLOAT (!TARGET_GENERAL_REGS_ONLY && AARCH64_ISA_FP)
43e9d192
IB
56
57#define UNITS_PER_WORD 8
58
59#define UNITS_PER_VREG 16
60
61#define PARM_BOUNDARY 64
62
63#define STACK_BOUNDARY 128
64
65#define FUNCTION_BOUNDARY 32
66
67#define EMPTY_FIELD_BOUNDARY 32
68
69#define BIGGEST_ALIGNMENT 128
70
71#define SHORT_TYPE_SIZE 16
72
73#define INT_TYPE_SIZE 32
74
17a819cb
YZ
75#define LONG_TYPE_SIZE (TARGET_ILP32 ? 32 : 64)
76
77#define POINTER_SIZE (TARGET_ILP32 ? 32 : 64)
43e9d192
IB
78
79#define LONG_LONG_TYPE_SIZE 64
80
81#define FLOAT_TYPE_SIZE 32
82
83#define DOUBLE_TYPE_SIZE 64
84
85#define LONG_DOUBLE_TYPE_SIZE 128
86
87/* The architecture reserves all bits of the address for hardware use,
88 so the vbit must go into the delta field of pointers to member
89 functions. This is the same config as that in the AArch32
90 port. */
91#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta
92
98503487
RR
93/* Align definitions of arrays, unions and structures so that
94 initializations and copies can be made more efficient. This is not
95 ABI-changing, so it only affects places where we can see the
96 definition. Increasing the alignment tends to introduce padding,
97 so don't do this when optimizing for size/conserving stack space. */
98#define AARCH64_EXPAND_ALIGNMENT(COND, EXP, ALIGN) \
99 (((COND) && ((ALIGN) < BITS_PER_WORD) \
100 && (TREE_CODE (EXP) == ARRAY_TYPE \
101 || TREE_CODE (EXP) == UNION_TYPE \
102 || TREE_CODE (EXP) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
103
104/* Align global data. */
105#define DATA_ALIGNMENT(EXP, ALIGN) \
106 AARCH64_EXPAND_ALIGNMENT (!optimize_size, EXP, ALIGN)
107
108/* Similarly, make sure that objects on the stack are sensibly aligned. */
109#define LOCAL_ALIGNMENT(EXP, ALIGN) \
110 AARCH64_EXPAND_ALIGNMENT (!flag_conserve_stack, EXP, ALIGN)
43e9d192
IB
111
112#define STRUCTURE_SIZE_BOUNDARY 8
113
e10dbae3
WD
114/* Heap alignment (same as BIGGEST_ALIGNMENT and STACK_BOUNDARY). */
115#define MALLOC_ABI_ALIGNMENT 128
116
43e9d192
IB
117/* Defined by the ABI */
118#define WCHAR_TYPE "unsigned int"
119#define WCHAR_TYPE_SIZE 32
120
121/* Using long long breaks -ansi and -std=c90, so these will need to be
122 made conditional for an LLP64 ABI. */
123
124#define SIZE_TYPE "long unsigned int"
125
126#define PTRDIFF_TYPE "long int"
127
128#define PCC_BITFIELD_TYPE_MATTERS 1
129
0c6caaf8
RL
130/* Major revision number of the ARM Architecture implemented by the target. */
131extern unsigned aarch64_architecture_version;
43e9d192
IB
132
133/* Instruction tuning/selection flags. */
134
135/* Bit values used to identify processor capabilities. */
136#define AARCH64_FL_SIMD (1 << 0) /* Has SIMD instructions. */
137#define AARCH64_FL_FP (1 << 1) /* Has FP. */
138#define AARCH64_FL_CRYPTO (1 << 2) /* Has crypto. */
95f99170 139#define AARCH64_FL_CRC (1 << 3) /* Has CRC. */
74bb9de4 140/* ARMv8.1-A architecture extensions. */
dfba575f 141#define AARCH64_FL_LSE (1 << 4) /* Has Large System Extensions. */
1ddc47c0
TC
142#define AARCH64_FL_RDMA (1 << 5) /* Has Round Double Multiply Add. */
143#define AARCH64_FL_V8_1 (1 << 6) /* Has ARMv8.1-A extensions. */
c61465bd 144/* ARMv8.2-A architecture extensions. */
1ddc47c0 145#define AARCH64_FL_V8_2 (1 << 8) /* Has ARMv8.2-A features. */
c61465bd 146#define AARCH64_FL_F16 (1 << 9) /* Has ARMv8.2-A FP16 extensions. */
d766c52b 147/* ARMv8.3-A architecture extensions. */
1ddc47c0
TC
148#define AARCH64_FL_V8_3 (1 << 10) /* Has ARMv8.3-A features. */
149#define AARCH64_FL_RCPC (1 << 11) /* Has support for RCpc model. */
150#define AARCH64_FL_DOTPROD (1 << 12) /* Has ARMv8.2-A Dot Product ins. */
27086ea3
MC
151/* New flags to split crypto into aes and sha2. */
152#define AARCH64_FL_AES (1 << 13) /* Has Crypto AES. */
153#define AARCH64_FL_SHA2 (1 << 14) /* Has Crypto SHA2. */
154/* ARMv8.4-A architecture extensions. */
155#define AARCH64_FL_V8_4 (1 << 15) /* Has ARMv8.4-A features. */
156#define AARCH64_FL_SM4 (1 << 16) /* Has ARMv8.4-A SM3 and SM4. */
157#define AARCH64_FL_SHA3 (1 << 17) /* Has ARMv8.4-a SHA3 and SHA512. */
158#define AARCH64_FL_F16FML (1 << 18) /* Has ARMv8.4-a FP16 extensions. */
43e9d192
IB
159
160/* Has FP and SIMD. */
161#define AARCH64_FL_FPSIMD (AARCH64_FL_FP | AARCH64_FL_SIMD)
162
163/* Has FP without SIMD. */
164#define AARCH64_FL_FPQ16 (AARCH64_FL_FP & ~AARCH64_FL_SIMD)
165
166/* Architecture flags that effect instruction selection. */
167#define AARCH64_FL_FOR_ARCH8 (AARCH64_FL_FPSIMD)
ff09c88d 168#define AARCH64_FL_FOR_ARCH8_1 \
43f84f6c
JW
169 (AARCH64_FL_FOR_ARCH8 | AARCH64_FL_LSE | AARCH64_FL_CRC \
170 | AARCH64_FL_RDMA | AARCH64_FL_V8_1)
c61465bd
MW
171#define AARCH64_FL_FOR_ARCH8_2 \
172 (AARCH64_FL_FOR_ARCH8_1 | AARCH64_FL_V8_2)
d766c52b
JW
173#define AARCH64_FL_FOR_ARCH8_3 \
174 (AARCH64_FL_FOR_ARCH8_2 | AARCH64_FL_V8_3)
27086ea3
MC
175#define AARCH64_FL_FOR_ARCH8_4 \
176 (AARCH64_FL_FOR_ARCH8_3 | AARCH64_FL_V8_4 | AARCH64_FL_F16FML)
43e9d192
IB
177
178/* Macros to test ISA flags. */
361fb3ee 179
5922847b 180#define AARCH64_ISA_CRC (aarch64_isa_flags & AARCH64_FL_CRC)
43e9d192
IB
181#define AARCH64_ISA_CRYPTO (aarch64_isa_flags & AARCH64_FL_CRYPTO)
182#define AARCH64_ISA_FP (aarch64_isa_flags & AARCH64_FL_FP)
183#define AARCH64_ISA_SIMD (aarch64_isa_flags & AARCH64_FL_SIMD)
045c2d32 184#define AARCH64_ISA_LSE (aarch64_isa_flags & AARCH64_FL_LSE)
43f84f6c 185#define AARCH64_ISA_RDMA (aarch64_isa_flags & AARCH64_FL_RDMA)
c61465bd
MW
186#define AARCH64_ISA_V8_2 (aarch64_isa_flags & AARCH64_FL_V8_2)
187#define AARCH64_ISA_F16 (aarch64_isa_flags & AARCH64_FL_F16)
d766c52b 188#define AARCH64_ISA_V8_3 (aarch64_isa_flags & AARCH64_FL_V8_3)
1ddc47c0 189#define AARCH64_ISA_DOTPROD (aarch64_isa_flags & AARCH64_FL_DOTPROD)
27086ea3
MC
190#define AARCH64_ISA_AES (aarch64_isa_flags & AARCH64_FL_AES)
191#define AARCH64_ISA_SHA2 (aarch64_isa_flags & AARCH64_FL_SHA2)
192#define AARCH64_ISA_V8_4 (aarch64_isa_flags & AARCH64_FL_V8_4)
193#define AARCH64_ISA_SM4 (aarch64_isa_flags & AARCH64_FL_SM4)
194#define AARCH64_ISA_SHA3 (aarch64_isa_flags & AARCH64_FL_SHA3)
195#define AARCH64_ISA_F16FML (aarch64_isa_flags & AARCH64_FL_F16FML)
43e9d192 196
683e3333
KT
197/* Crypto is an optional extension to AdvSIMD. */
198#define TARGET_CRYPTO (TARGET_SIMD && AARCH64_ISA_CRYPTO)
43e9d192 199
27086ea3
MC
200/* SHA2 is an optional extension to AdvSIMD. */
201#define TARGET_SHA2 ((TARGET_SIMD && AARCH64_ISA_SHA2) || TARGET_CRYPTO)
202
203/* SHA3 is an optional extension to AdvSIMD. */
204#define TARGET_SHA3 (TARGET_SIMD && AARCH64_ISA_SHA3)
205
206/* AES is an optional extension to AdvSIMD. */
207#define TARGET_AES ((TARGET_SIMD && AARCH64_ISA_AES) || TARGET_CRYPTO)
208
209/* SM is an optional extension to AdvSIMD. */
210#define TARGET_SM4 (TARGET_SIMD && AARCH64_ISA_SM4)
211
212/* FP16FML is an optional extension to AdvSIMD. */
213#define TARGET_F16FML (TARGET_SIMD && AARCH64_ISA_F16FML && TARGET_FP_F16INST)
214
5d357f26
KT
215/* CRC instructions that can be enabled through +crc arch extension. */
216#define TARGET_CRC32 (AARCH64_ISA_CRC)
217
045c2d32
MW
218/* Atomic instructions that can be enabled through the +lse extension. */
219#define TARGET_LSE (AARCH64_ISA_LSE)
220
c61465bd
MW
221/* ARMv8.2-A FP16 support that can be enabled through the +fp16 extension. */
222#define TARGET_FP_F16INST (TARGET_FLOAT && AARCH64_ISA_F16)
223#define TARGET_SIMD_F16INST (TARGET_SIMD && AARCH64_ISA_F16)
224
1ddc47c0
TC
225/* Dot Product is an optional extension to AdvSIMD enabled through +dotprod. */
226#define TARGET_DOTPROD (TARGET_SIMD && AARCH64_ISA_DOTPROD)
227
d766c52b
JW
228/* ARMv8.3-A features. */
229#define TARGET_ARMV8_3 (AARCH64_ISA_V8_3)
230
b32c1043
KT
231/* Make sure this is always defined so we don't have to check for ifdefs
232 but rather use normal ifs. */
233#ifndef TARGET_FIX_ERR_A53_835769_DEFAULT
234#define TARGET_FIX_ERR_A53_835769_DEFAULT 0
235#else
236#undef TARGET_FIX_ERR_A53_835769_DEFAULT
237#define TARGET_FIX_ERR_A53_835769_DEFAULT 1
238#endif
239
240/* Apply the workaround for Cortex-A53 erratum 835769. */
241#define TARGET_FIX_ERR_A53_835769 \
242 ((aarch64_fix_a53_err835769 == 2) \
243 ? TARGET_FIX_ERR_A53_835769_DEFAULT : aarch64_fix_a53_err835769)
244
48bb1a55
CL
245/* Make sure this is always defined so we don't have to check for ifdefs
246 but rather use normal ifs. */
247#ifndef TARGET_FIX_ERR_A53_843419_DEFAULT
248#define TARGET_FIX_ERR_A53_843419_DEFAULT 0
249#else
250#undef TARGET_FIX_ERR_A53_843419_DEFAULT
251#define TARGET_FIX_ERR_A53_843419_DEFAULT 1
252#endif
253
254/* Apply the workaround for Cortex-A53 erratum 843419. */
255#define TARGET_FIX_ERR_A53_843419 \
256 ((aarch64_fix_a53_err843419 == 2) \
257 ? TARGET_FIX_ERR_A53_843419_DEFAULT : aarch64_fix_a53_err843419)
258
74bb9de4 259/* ARMv8.1-A Adv.SIMD support. */
a3735e01
MW
260#define TARGET_SIMD_RDMA (TARGET_SIMD && AARCH64_ISA_RDMA)
261
43e9d192
IB
262/* Standard register usage. */
263
264/* 31 64-bit general purpose registers R0-R30:
265 R30 LR (link register)
266 R29 FP (frame pointer)
267 R19-R28 Callee-saved registers
268 R18 The platform register; use as temporary register.
269 R17 IP1 The second intra-procedure-call temporary register
270 (can be used by call veneers and PLT code); otherwise use
271 as a temporary register
272 R16 IP0 The first intra-procedure-call temporary register (can
273 be used by call veneers and PLT code); otherwise use as a
274 temporary register
275 R9-R15 Temporary registers
276 R8 Structure value parameter / temporary register
277 R0-R7 Parameter/result registers
278
279 SP stack pointer, encoded as X/R31 where permitted.
280 ZR zero register, encoded as X/R31 elsewhere
281
282 32 x 128-bit floating-point/vector registers
283 V16-V31 Caller-saved (temporary) registers
284 V8-V15 Callee-saved registers
285 V0-V7 Parameter/result registers
286
287 The vector register V0 holds scalar B0, H0, S0 and D0 in its least
288 significant bits. Unlike AArch32 S1 is not packed into D0,
289 etc. */
290
291/* Note that we don't mark X30 as a call-clobbered register. The idea is
292 that it's really the call instructions themselves which clobber X30.
293 We don't care what the called function does with it afterwards.
294
295 This approach makes it easier to implement sibcalls. Unlike normal
296 calls, sibcalls don't clobber X30, so the register reaches the
297 called function intact. EPILOGUE_USES says that X30 is useful
298 to the called function. */
299
300#define FIXED_REGISTERS \
301 { \
302 0, 0, 0, 0, 0, 0, 0, 0, /* R0 - R7 */ \
303 0, 0, 0, 0, 0, 0, 0, 0, /* R8 - R15 */ \
304 0, 0, 0, 0, 0, 0, 0, 0, /* R16 - R23 */ \
305 0, 0, 0, 0, 0, 1, 0, 1, /* R24 - R30, SP */ \
306 0, 0, 0, 0, 0, 0, 0, 0, /* V0 - V7 */ \
307 0, 0, 0, 0, 0, 0, 0, 0, /* V8 - V15 */ \
308 0, 0, 0, 0, 0, 0, 0, 0, /* V16 - V23 */ \
309 0, 0, 0, 0, 0, 0, 0, 0, /* V24 - V31 */ \
310 1, 1, 1, /* SFP, AP, CC */ \
311 }
312
313#define CALL_USED_REGISTERS \
314 { \
315 1, 1, 1, 1, 1, 1, 1, 1, /* R0 - R7 */ \
316 1, 1, 1, 1, 1, 1, 1, 1, /* R8 - R15 */ \
317 1, 1, 1, 0, 0, 0, 0, 0, /* R16 - R23 */ \
1c923b60 318 0, 0, 0, 0, 0, 1, 1, 1, /* R24 - R30, SP */ \
43e9d192
IB
319 1, 1, 1, 1, 1, 1, 1, 1, /* V0 - V7 */ \
320 0, 0, 0, 0, 0, 0, 0, 0, /* V8 - V15 */ \
321 1, 1, 1, 1, 1, 1, 1, 1, /* V16 - V23 */ \
322 1, 1, 1, 1, 1, 1, 1, 1, /* V24 - V31 */ \
323 1, 1, 1, /* SFP, AP, CC */ \
324 }
325
326#define REGISTER_NAMES \
327 { \
328 "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", \
329 "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", \
330 "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", \
331 "x24", "x25", "x26", "x27", "x28", "x29", "x30", "sp", \
332 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", \
333 "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", \
334 "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \
335 "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", \
336 "sfp", "ap", "cc", \
337 }
338
339/* Generate the register aliases for core register N */
340#define R_ALIASES(N) {"r" # N, R0_REGNUM + (N)}, \
341 {"w" # N, R0_REGNUM + (N)}
342
343#define V_ALIASES(N) {"q" # N, V0_REGNUM + (N)}, \
344 {"d" # N, V0_REGNUM + (N)}, \
345 {"s" # N, V0_REGNUM + (N)}, \
346 {"h" # N, V0_REGNUM + (N)}, \
347 {"b" # N, V0_REGNUM + (N)}
348
349/* Provide aliases for all of the ISA defined register name forms.
350 These aliases are convenient for use in the clobber lists of inline
351 asm statements. */
352
353#define ADDITIONAL_REGISTER_NAMES \
354 { R_ALIASES(0), R_ALIASES(1), R_ALIASES(2), R_ALIASES(3), \
355 R_ALIASES(4), R_ALIASES(5), R_ALIASES(6), R_ALIASES(7), \
356 R_ALIASES(8), R_ALIASES(9), R_ALIASES(10), R_ALIASES(11), \
357 R_ALIASES(12), R_ALIASES(13), R_ALIASES(14), R_ALIASES(15), \
358 R_ALIASES(16), R_ALIASES(17), R_ALIASES(18), R_ALIASES(19), \
359 R_ALIASES(20), R_ALIASES(21), R_ALIASES(22), R_ALIASES(23), \
360 R_ALIASES(24), R_ALIASES(25), R_ALIASES(26), R_ALIASES(27), \
9259db42 361 R_ALIASES(28), R_ALIASES(29), R_ALIASES(30), {"wsp", R0_REGNUM + 31}, \
43e9d192
IB
362 V_ALIASES(0), V_ALIASES(1), V_ALIASES(2), V_ALIASES(3), \
363 V_ALIASES(4), V_ALIASES(5), V_ALIASES(6), V_ALIASES(7), \
364 V_ALIASES(8), V_ALIASES(9), V_ALIASES(10), V_ALIASES(11), \
365 V_ALIASES(12), V_ALIASES(13), V_ALIASES(14), V_ALIASES(15), \
366 V_ALIASES(16), V_ALIASES(17), V_ALIASES(18), V_ALIASES(19), \
367 V_ALIASES(20), V_ALIASES(21), V_ALIASES(22), V_ALIASES(23), \
368 V_ALIASES(24), V_ALIASES(25), V_ALIASES(26), V_ALIASES(27), \
369 V_ALIASES(28), V_ALIASES(29), V_ALIASES(30), V_ALIASES(31) \
370 }
371
372/* Say that the epilogue uses the return address register. Note that
373 in the case of sibcalls, the values "used by the epilogue" are
374 considered live at the start of the called function. */
375
376#define EPILOGUE_USES(REGNO) \
1c923b60 377 (epilogue_completed && (REGNO) == LR_REGNUM)
43e9d192
IB
378
379/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
204d2c03
WD
380 the stack pointer does not matter. This is only true if the function
381 uses alloca. */
382#define EXIT_IGNORE_STACK (cfun->calls_alloca)
43e9d192
IB
383
384#define STATIC_CHAIN_REGNUM R18_REGNUM
385#define HARD_FRAME_POINTER_REGNUM R29_REGNUM
386#define FRAME_POINTER_REGNUM SFP_REGNUM
387#define STACK_POINTER_REGNUM SP_REGNUM
388#define ARG_POINTER_REGNUM AP_REGNUM
389#define FIRST_PSEUDO_REGISTER 67
390
391/* The number of (integer) argument register available. */
392#define NUM_ARG_REGS 8
393#define NUM_FP_ARG_REGS 8
394
395/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
396 four members. */
397#define HA_MAX_NUM_FLDS 4
398
399/* External dwarf register number scheme. These number are used to
400 identify registers in dwarf debug information, the values are
401 defined by the AArch64 ABI. The numbering scheme is independent of
402 GCC's internal register numbering scheme. */
403
404#define AARCH64_DWARF_R0 0
405
406/* The number of R registers, note 31! not 32. */
407#define AARCH64_DWARF_NUMBER_R 31
408
409#define AARCH64_DWARF_SP 31
410#define AARCH64_DWARF_V0 64
411
412/* The number of V registers. */
413#define AARCH64_DWARF_NUMBER_V 32
414
415/* For signal frames we need to use an alternative return column. This
416 value must not correspond to a hard register and must be out of the
417 range of DWARF_FRAME_REGNUM(). */
418#define DWARF_ALT_FRAME_RETURN_COLUMN \
419 (AARCH64_DWARF_V0 + AARCH64_DWARF_NUMBER_V)
420
421/* We add 1 extra frame register for use as the
422 DWARF_ALT_FRAME_RETURN_COLUMN. */
423#define DWARF_FRAME_REGISTERS (DWARF_ALT_FRAME_RETURN_COLUMN + 1)
424
425
426#define DBX_REGISTER_NUMBER(REGNO) aarch64_dbx_register_number (REGNO)
427/* Provide a definition of DWARF_FRAME_REGNUM here so that fallback unwinders
428 can use DWARF_ALT_FRAME_RETURN_COLUMN defined below. This is just the same
429 as the default definition in dwarf2out.c. */
430#undef DWARF_FRAME_REGNUM
431#define DWARF_FRAME_REGNUM(REGNO) DBX_REGISTER_NUMBER (REGNO)
432
433#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (LR_REGNUM)
434
43e9d192
IB
435#define DWARF2_UNWIND_INFO 1
436
437/* Use R0 through R3 to pass exception handling information. */
438#define EH_RETURN_DATA_REGNO(N) \
439 ((N) < 4 ? ((unsigned int) R0_REGNUM + (N)) : INVALID_REGNUM)
440
441/* Select a format to encode pointers in exception handling data. */
442#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
443 aarch64_asm_preferred_eh_data_format ((CODE), (GLOBAL))
444
361fb3ee
KT
445/* Output the assembly strings we want to add to a function definition. */
446#define ASM_DECLARE_FUNCTION_NAME(STR, NAME, DECL) \
447 aarch64_declare_function_name (STR, NAME, DECL)
448
8144a493
WD
449/* For EH returns X4 contains the stack adjustment. */
450#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, R4_REGNUM)
451#define EH_RETURN_HANDLER_RTX aarch64_eh_return_handler_rtx ()
43e9d192
IB
452
453/* Don't use __builtin_setjmp until we've defined it. */
454#undef DONT_USE_BUILTIN_SETJMP
455#define DONT_USE_BUILTIN_SETJMP 1
456
457/* Register in which the structure value is to be returned. */
458#define AARCH64_STRUCT_VALUE_REGNUM R8_REGNUM
459
460/* Non-zero if REGNO is part of the Core register set.
461
462 The rather unusual way of expressing this check is to avoid
463 warnings when building the compiler when R0_REGNUM is 0 and REGNO
464 is unsigned. */
465#define GP_REGNUM_P(REGNO) \
466 (((unsigned) (REGNO - R0_REGNUM)) <= (R30_REGNUM - R0_REGNUM))
467
468#define FP_REGNUM_P(REGNO) \
469 (((unsigned) (REGNO - V0_REGNUM)) <= (V31_REGNUM - V0_REGNUM))
470
471#define FP_LO_REGNUM_P(REGNO) \
472 (((unsigned) (REGNO - V0_REGNUM)) <= (V15_REGNUM - V0_REGNUM))
473
474\f
475/* Register and constant classes. */
476
477enum reg_class
478{
479 NO_REGS,
fee9ba42 480 CALLER_SAVE_REGS,
43e9d192
IB
481 GENERAL_REGS,
482 STACK_REG,
483 POINTER_REGS,
484 FP_LO_REGS,
485 FP_REGS,
f25a140b 486 POINTER_AND_FP_REGS,
43e9d192
IB
487 ALL_REGS,
488 LIM_REG_CLASSES /* Last */
489};
490
491#define N_REG_CLASSES ((int) LIM_REG_CLASSES)
492
493#define REG_CLASS_NAMES \
494{ \
495 "NO_REGS", \
fee9ba42 496 "CALLER_SAVE_REGS", \
43e9d192
IB
497 "GENERAL_REGS", \
498 "STACK_REG", \
499 "POINTER_REGS", \
500 "FP_LO_REGS", \
501 "FP_REGS", \
f25a140b 502 "POINTER_AND_FP_REGS", \
43e9d192
IB
503 "ALL_REGS" \
504}
505
506#define REG_CLASS_CONTENTS \
507{ \
508 { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
fee9ba42 509 { 0x0007ffff, 0x00000000, 0x00000000 }, /* CALLER_SAVE_REGS */ \
43e9d192
IB
510 { 0x7fffffff, 0x00000000, 0x00000003 }, /* GENERAL_REGS */ \
511 { 0x80000000, 0x00000000, 0x00000000 }, /* STACK_REG */ \
512 { 0xffffffff, 0x00000000, 0x00000003 }, /* POINTER_REGS */ \
513 { 0x00000000, 0x0000ffff, 0x00000000 }, /* FP_LO_REGS */ \
514 { 0x00000000, 0xffffffff, 0x00000000 }, /* FP_REGS */ \
f25a140b 515 { 0xffffffff, 0xffffffff, 0x00000003 }, /* POINTER_AND_FP_REGS */\
43e9d192
IB
516 { 0xffffffff, 0xffffffff, 0x00000007 } /* ALL_REGS */ \
517}
518
519#define REGNO_REG_CLASS(REGNO) aarch64_regno_regclass (REGNO)
520
a4a182c6 521#define INDEX_REG_CLASS GENERAL_REGS
43e9d192
IB
522#define BASE_REG_CLASS POINTER_REGS
523
6991c977 524/* Register pairs used to eliminate unneeded registers that point into
43e9d192
IB
525 the stack frame. */
526#define ELIMINABLE_REGS \
527{ \
528 { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
529 { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM }, \
530 { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
531 { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM }, \
532}
533
534#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
535 (OFFSET) = aarch64_initial_elimination_offset (FROM, TO)
536
537/* CPU/ARCH option handling. */
538#include "config/aarch64/aarch64-opts.h"
539
540enum target_cpus
541{
e8fcc9fa 542#define AARCH64_CORE(NAME, INTERNAL_IDENT, SCHED, ARCH, FLAGS, COSTS, IMP, PART, VARIANT) \
192ed1dd 543 TARGET_CPU_##INTERNAL_IDENT,
43e9d192 544#include "aarch64-cores.def"
43e9d192
IB
545 TARGET_CPU_generic
546};
547
a3cd0246 548/* If there is no CPU defined at configure, use generic as default. */
43e9d192
IB
549#ifndef TARGET_CPU_DEFAULT
550#define TARGET_CPU_DEFAULT \
a3cd0246 551 (TARGET_CPU_generic | (AARCH64_CPU_DEFAULT_FLAGS << 6))
43e9d192
IB
552#endif
553
75cf1494
KT
554/* If inserting NOP before a mult-accumulate insn remember to adjust the
555 length so that conditional branching code is updated appropriately. */
556#define ADJUST_INSN_LENGTH(insn, length) \
8baff86e
KT
557 do \
558 { \
559 if (aarch64_madd_needs_nop (insn)) \
560 length += 4; \
561 } while (0)
75cf1494
KT
562
563#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
564 aarch64_final_prescan_insn (INSN); \
565
43e9d192
IB
566/* The processor for which instructions should be scheduled. */
567extern enum aarch64_processor aarch64_tune;
568
569/* RTL generation support. */
570#define INIT_EXPANDERS aarch64_init_expanders ()
571\f
572
573/* Stack layout; function entry, exit and calling. */
574#define STACK_GROWS_DOWNWARD 1
575
6991c977 576#define FRAME_GROWS_DOWNWARD 1
43e9d192 577
43e9d192
IB
578#define ACCUMULATE_OUTGOING_ARGS 1
579
580#define FIRST_PARM_OFFSET(FNDECL) 0
581
582/* Fix for VFP */
583#define LIBCALL_VALUE(MODE) \
584 gen_rtx_REG (MODE, FLOAT_MODE_P (MODE) ? V0_REGNUM : R0_REGNUM)
585
586#define DEFAULT_PCC_STRUCT_RETURN 0
587
43e9d192
IB
588#ifdef HOST_WIDE_INT
589struct GTY (()) aarch64_frame
590{
591 HOST_WIDE_INT reg_offset[FIRST_PSEUDO_REGISTER];
8799637a
MS
592
593 /* The number of extra stack bytes taken up by register varargs.
594 This area is allocated by the callee at the very top of the
595 frame. This value is rounded up to a multiple of
596 STACK_BOUNDARY. */
597 HOST_WIDE_INT saved_varargs_size;
598
71bfb77a
WD
599 /* The size of the saved callee-save int/FP registers. */
600
43e9d192 601 HOST_WIDE_INT saved_regs_size;
71bfb77a
WD
602
603 /* Offset from the base of the frame (incomming SP) to the
604 top of the locals area. This value is always a multiple of
605 STACK_BOUNDARY. */
606 HOST_WIDE_INT locals_offset;
43e9d192 607
1c960e02
MS
608 /* Offset from the base of the frame (incomming SP) to the
609 hard_frame_pointer. This value is always a multiple of
610 STACK_BOUNDARY. */
611 HOST_WIDE_INT hard_fp_offset;
612
613 /* The size of the frame. This value is the offset from base of the
614 * frame (incomming SP) to the stack_pointer. This value is always
615 * a multiple of STACK_BOUNDARY. */
71bfb77a
WD
616 HOST_WIDE_INT frame_size;
617
618 /* The size of the initial stack adjustment before saving callee-saves. */
619 HOST_WIDE_INT initial_adjust;
620
621 /* The writeback value when pushing callee-save registers.
622 It is zero when no push is used. */
623 HOST_WIDE_INT callee_adjust;
624
625 /* The offset from SP to the callee-save registers after initial_adjust.
626 It may be non-zero if no push is used (ie. callee_adjust == 0). */
627 HOST_WIDE_INT callee_offset;
628
629 /* The size of the stack adjustment after saving callee-saves. */
630 HOST_WIDE_INT final_adjust;
1c960e02 631
204d2c03
WD
632 /* Store FP,LR and setup a frame pointer. */
633 bool emit_frame_chain;
634
363ffa50
JW
635 unsigned wb_candidate1;
636 unsigned wb_candidate2;
637
43e9d192
IB
638 bool laid_out;
639};
640
641typedef struct GTY (()) machine_function
642{
643 struct aarch64_frame frame;
827ab47a
KT
644 /* One entry for each hard register. */
645 bool reg_is_wrapped_separately[LAST_SAVED_REGNUM];
43e9d192
IB
646} machine_function;
647#endif
648
17a819cb
YZ
649/* Which ABI to use. */
650enum aarch64_abi_type
651{
652 AARCH64_ABI_LP64 = 0,
653 AARCH64_ABI_ILP32 = 1
654};
655
656#ifndef AARCH64_ABI_DEFAULT
657#define AARCH64_ABI_DEFAULT AARCH64_ABI_LP64
658#endif
659
660#define TARGET_ILP32 (aarch64_abi & AARCH64_ABI_ILP32)
43e9d192 661
43e9d192
IB
662enum arm_pcs
663{
664 ARM_PCS_AAPCS64, /* Base standard AAPCS for 64 bit. */
665 ARM_PCS_UNKNOWN
666};
667
668
43e9d192 669
43e9d192 670
ef4bddc2 671/* We can't use machine_mode inside a generator file because it
43e9d192
IB
672 hasn't been created yet; we shouldn't be using any code that
673 needs the real definition though, so this ought to be safe. */
674#ifdef GENERATOR_FILE
675#define MACHMODE int
676#else
677#include "insn-modes.h"
febd3244 678#define MACHMODE machine_mode
43e9d192
IB
679#endif
680
febd3244 681#ifndef USED_FOR_TARGET
43e9d192
IB
682/* AAPCS related state tracking. */
683typedef struct
684{
685 enum arm_pcs pcs_variant;
686 int aapcs_arg_processed; /* No need to lay out this argument again. */
687 int aapcs_ncrn; /* Next Core register number. */
688 int aapcs_nextncrn; /* Next next core register number. */
689 int aapcs_nvrn; /* Next Vector register number. */
690 int aapcs_nextnvrn; /* Next Next Vector register number. */
691 rtx aapcs_reg; /* Register assigned to this argument. This
692 is NULL_RTX if this parameter goes on
693 the stack. */
694 MACHMODE aapcs_vfp_rmode;
695 int aapcs_stack_words; /* If the argument is passed on the stack, this
696 is the number of words needed, after rounding
697 up. Only meaningful when
698 aapcs_reg == NULL_RTX. */
699 int aapcs_stack_size; /* The total size (in words, per 8 byte) of the
700 stack arg area so far. */
701} CUMULATIVE_ARGS;
febd3244 702#endif
43e9d192 703
43e9d192 704#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
76b0cbf8 705 (aarch64_pad_reg_upward (MODE, TYPE, FIRST) ? PAD_UPWARD : PAD_DOWNWARD)
43e9d192
IB
706
707#define PAD_VARARGS_DOWN 0
708
709#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
710 aarch64_init_cumulative_args (&(CUM), FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS)
711
712#define FUNCTION_ARG_REGNO_P(REGNO) \
713 aarch64_function_arg_regno_p(REGNO)
714\f
715
716/* ISA Features. */
717
718/* Addressing modes, etc. */
719#define HAVE_POST_INCREMENT 1
720#define HAVE_PRE_INCREMENT 1
721#define HAVE_POST_DECREMENT 1
722#define HAVE_PRE_DECREMENT 1
723#define HAVE_POST_MODIFY_DISP 1
724#define HAVE_PRE_MODIFY_DISP 1
725
726#define MAX_REGS_PER_ADDRESS 2
727
728#define CONSTANT_ADDRESS_P(X) aarch64_constant_address_p(X)
729
43e9d192
IB
730#define REGNO_OK_FOR_BASE_P(REGNO) \
731 aarch64_regno_ok_for_base_p (REGNO, true)
732
733#define REGNO_OK_FOR_INDEX_P(REGNO) \
734 aarch64_regno_ok_for_index_p (REGNO, true)
735
736#define LEGITIMATE_PIC_OPERAND_P(X) \
737 aarch64_legitimate_pic_operand_p (X)
738
739#define CASE_VECTOR_MODE Pmode
740
741#define DEFAULT_SIGNED_CHAR 0
742
743/* An integer expression for the size in bits of the largest integer machine
744 mode that should actually be used. We allow pairs of registers. */
745#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TImode)
746
747/* Maximum bytes moved by a single instruction (load/store pair). */
748#define MOVE_MAX (UNITS_PER_WORD * 2)
749
750/* The base cost overhead of a memcpy call, for MOVE_RATIO and friends. */
751#define AARCH64_CALL_RATIO 8
752
e2c75eea
JG
753/* MOVE_RATIO dictates when we will use the move_by_pieces infrastructure.
754 move_by_pieces will continually copy the largest safe chunks. So a
755 7-byte copy is a 4-byte + 2-byte + byte copy. This proves inefficient
756 for both size and speed of copy, so we will instead use the "movmem"
757 standard name to implement the copy. This logic does not apply when
758 targeting -mstrict-align, so keep a sensible default in that case. */
43e9d192 759#define MOVE_RATIO(speed) \
e2c75eea 760 (!STRICT_ALIGNMENT ? 2 : (((speed) ? 15 : AARCH64_CALL_RATIO) / 2))
43e9d192
IB
761
762/* For CLEAR_RATIO, when optimizing for size, give a better estimate
763 of the length of a memset call, but use the default otherwise. */
764#define CLEAR_RATIO(speed) \
765 ((speed) ? 15 : AARCH64_CALL_RATIO)
766
767/* SET_RATIO is similar to CLEAR_RATIO, but for a non-zero constant, so when
768 optimizing for size adjust the ratio to account for the overhead of loading
769 the constant. */
770#define SET_RATIO(speed) \
771 ((speed) ? 15 : AARCH64_CALL_RATIO - 2)
772
43e9d192
IB
773/* Disable auto-increment in move_by_pieces et al. Use of auto-increment is
774 rarely a good idea in straight-line code since it adds an extra address
775 dependency between each instruction. Better to use incrementing offsets. */
776#define USE_LOAD_POST_INCREMENT(MODE) 0
777#define USE_LOAD_POST_DECREMENT(MODE) 0
778#define USE_LOAD_PRE_INCREMENT(MODE) 0
779#define USE_LOAD_PRE_DECREMENT(MODE) 0
780#define USE_STORE_POST_INCREMENT(MODE) 0
781#define USE_STORE_POST_DECREMENT(MODE) 0
782#define USE_STORE_PRE_INCREMENT(MODE) 0
783#define USE_STORE_PRE_DECREMENT(MODE) 0
784
56c9ef5f
KT
785/* WORD_REGISTER_OPERATIONS does not hold for AArch64.
786 The assigned word_mode is DImode but operations narrower than SImode
787 behave as 32-bit operations if using the W-form of the registers rather
788 than as word_mode (64-bit) operations as WORD_REGISTER_OPERATIONS
789 expects. */
790#define WORD_REGISTER_OPERATIONS 0
43e9d192
IB
791
792/* Define if loading from memory in MODE, an integral mode narrower than
793 BITS_PER_WORD will either zero-extend or sign-extend. The value of this
794 macro should be the code that says which one of the two operations is
795 implicitly done, or UNKNOWN if none. */
796#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
797
798/* Define this macro to be non-zero if instructions will fail to work
799 if given data not on the nominal alignment. */
800#define STRICT_ALIGNMENT TARGET_STRICT_ALIGN
801
802/* Define this macro to be non-zero if accessing less than a word of
803 memory is no faster than accessing a word of memory, i.e., if such
804 accesses require more than one instruction or if there is no
805 difference in cost.
806 Although there's no difference in instruction count or cycles,
807 in AArch64 we don't want to expand to a sub-word to a 64-bit access
808 if we don't have to, for power-saving reasons. */
809#define SLOW_BYTE_ACCESS 0
810
43e9d192
IB
811#define NO_FUNCTION_CSE 1
812
17a819cb
YZ
813/* Specify the machine mode that the hardware addresses have.
814 After generation of rtl, the compiler makes no further distinction
815 between pointers and any other objects of this machine mode. */
43e9d192 816#define Pmode DImode
17a819cb
YZ
817
818/* A C expression whose value is zero if pointers that need to be extended
819 from being `POINTER_SIZE' bits wide to `Pmode' are sign-extended and
 820   greater than zero if they are zero-extended and less than zero if the
821 ptr_extend instruction should be used. */
822#define POINTERS_EXTEND_UNSIGNED 1
823
824/* Mode of a function address in a call instruction (for indexing purposes). */
43e9d192
IB
825#define FUNCTION_MODE Pmode
826
827#define SELECT_CC_MODE(OP, X, Y) aarch64_select_cc_mode (OP, X, Y)
828
f8bf91ab
N
829#define REVERSIBLE_CC_MODE(MODE) 1
830
43e9d192
IB
831#define REVERSE_CONDITION(CODE, MODE) \
832 (((MODE) == CCFPmode || (MODE) == CCFPEmode) \
833 ? reverse_condition_maybe_unordered (CODE) \
834 : reverse_condition (CODE))
835
836#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
952e7819 837 ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
43e9d192 838#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
952e7819 839 ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
43e9d192
IB
840
841#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)
842
843#define RETURN_ADDR_RTX aarch64_return_addr
844
28514dda
YZ
845/* 3 insns + padding + 2 pointer-sized entries. */
846#define TRAMPOLINE_SIZE (TARGET_ILP32 ? 24 : 32)
43e9d192
IB
847
848/* Trampolines contain dwords, so must be dword aligned. */
849#define TRAMPOLINE_ALIGNMENT 64
850
851/* Put trampolines in the text section so that mapping symbols work
852 correctly. */
853#define TRAMPOLINE_SECTION text_section
43e9d192
IB
854
855/* To start with. */
b9066f5a
MW
856#define BRANCH_COST(SPEED_P, PREDICTABLE_P) \
857 (aarch64_branch_cost (SPEED_P, PREDICTABLE_P))
43e9d192
IB
858\f
859
860/* Assembly output. */
861
862/* For now we'll make all jump tables pc-relative. */
863#define CASE_VECTOR_PC_RELATIVE 1
864
865#define CASE_VECTOR_SHORTEN_MODE(min, max, body) \
866 ((min < -0x1fff0 || max > 0x1fff0) ? SImode \
867 : (min < -0x1f0 || max > 0x1f0) ? HImode \
868 : QImode)
869
870/* Jump table alignment is explicit in ASM_OUTPUT_CASE_LABEL. */
871#define ADDR_VEC_ALIGN(JUMPTABLE) 0
872
92d649c4
VK
873#define MCOUNT_NAME "_mcount"
874
875#define NO_PROFILE_COUNTERS 1
876
877/* Emit rtl for profiling. Output assembler code to FILE
878 to call "_mcount" for profiling a function entry. */
3294102b
MS
879#define PROFILE_HOOK(LABEL) \
880 { \
881 rtx fun, lr; \
882 lr = get_hard_reg_initial_val (Pmode, LR_REGNUM); \
883 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME); \
db69559b 884 emit_library_call (fun, LCT_NORMAL, VOIDmode, lr, Pmode); \
3294102b 885 }
92d649c4
VK
886
887/* All the work done in PROFILE_HOOK, but still required. */
888#define FUNCTION_PROFILER(STREAM, LABELNO) do { } while (0)
43e9d192
IB
889
890/* For some reason, the Linux headers think they know how to define
891 these macros. They don't!!! */
892#undef ASM_APP_ON
893#undef ASM_APP_OFF
894#define ASM_APP_ON "\t" ASM_COMMENT_START " Start of user assembly\n"
895#define ASM_APP_OFF "\t" ASM_COMMENT_START " End of user assembly\n"
896
43e9d192
IB
897#define CONSTANT_POOL_BEFORE_FUNCTION 0
898
899/* This definition should be relocated to aarch64-elf-raw.h. This macro
900 should be undefined in aarch64-linux.h and a clear_cache pattern
 901   implemented to emit either the call to __aarch64_sync_cache_range()
 902   directly or preferably the appropriate syscall or cache clear
903 instructions inline. */
904#define CLEAR_INSN_CACHE(beg, end) \
905 extern void __aarch64_sync_cache_range (void *, void *); \
906 __aarch64_sync_cache_range (beg, end)
907
e6bd9fb9 908#define SHIFT_COUNT_TRUNCATED (!TARGET_SIMD)
43e9d192 909
73d9ac6a
IB
910/* Choose appropriate mode for caller saves, so we do the minimum
911 required size of load/store. */
912#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \
913 aarch64_hard_regno_caller_save_mode ((REGNO), (NREGS), (MODE))
914
d78006d9
KT
915#undef SWITCHABLE_TARGET
916#define SWITCHABLE_TARGET 1
917
43e9d192
IB
918/* Check TLS Descriptors mechanism is selected. */
919#define TARGET_TLS_DESC (aarch64_tls_dialect == TLS_DESCRIPTORS)
920
921extern enum aarch64_code_model aarch64_cmodel;
922
923/* When using the tiny addressing model conditional and unconditional branches
924 can span the whole of the available address space (1MB). */
925#define HAS_LONG_COND_BRANCH \
926 (aarch64_cmodel == AARCH64_CMODEL_TINY \
927 || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)
928
929#define HAS_LONG_UNCOND_BRANCH \
930 (aarch64_cmodel == AARCH64_CMODEL_TINY \
931 || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)
932
2ca5b430
KT
933#define TARGET_SUPPORTS_WIDE_INT 1
934
635e66fe
AL
935/* Modes valid for AdvSIMD D registers, i.e. that fit in half a Q register. */
936#define AARCH64_VALID_SIMD_DREG_MODE(MODE) \
937 ((MODE) == V2SImode || (MODE) == V4HImode || (MODE) == V8QImode \
938 || (MODE) == V2SFmode || (MODE) == V4HFmode || (MODE) == DImode \
939 || (MODE) == DFmode)
940
43e9d192
IB
941/* Modes valid for AdvSIMD Q registers. */
942#define AARCH64_VALID_SIMD_QREG_MODE(MODE) \
943 ((MODE) == V4SImode || (MODE) == V8HImode || (MODE) == V16QImode \
71a11456
AL
944 || (MODE) == V4SFmode || (MODE) == V8HFmode || (MODE) == V2DImode \
945 || (MODE) == V2DFmode)
43e9d192 946
7ac29c0f
RS
947#define ENDIAN_LANE_N(NUNITS, N) \
948 (BYTES_BIG_ENDIAN ? NUNITS - 1 - N : N)
e58bf20a 949
9815fafa
RE
950/* Support for a configure-time default CPU, etc. We currently support
951 --with-arch and --with-cpu. Both are ignored if either is specified
952 explicitly on the command line at run time. */
953#define OPTION_DEFAULT_SPECS \
954 {"arch", "%{!march=*:%{!mcpu=*:-march=%(VALUE)}}" }, \
955 {"cpu", "%{!march=*:%{!mcpu=*:-mcpu=%(VALUE)}}" },
956
054b4005
JG
957#define MCPU_TO_MARCH_SPEC \
958 " %{mcpu=*:-march=%:rewrite_mcpu(%{mcpu=*:%*})}"
682287fb
JG
959
960extern const char *aarch64_rewrite_mcpu (int argc, const char **argv);
054b4005 961#define MCPU_TO_MARCH_SPEC_FUNCTIONS \
682287fb
JG
962 { "rewrite_mcpu", aarch64_rewrite_mcpu },
963
7e1bcce3
KT
964#if defined(__aarch64__)
965extern const char *host_detect_local_cpu (int argc, const char **argv);
966# define EXTRA_SPEC_FUNCTIONS \
967 { "local_cpu_detect", host_detect_local_cpu }, \
054b4005 968 MCPU_TO_MARCH_SPEC_FUNCTIONS
7e1bcce3
KT
969
970# define MCPU_MTUNE_NATIVE_SPECS \
971 " %{march=native:%<march=native %:local_cpu_detect(arch)}" \
972 " %{mcpu=native:%<mcpu=native %:local_cpu_detect(cpu)}" \
973 " %{mtune=native:%<mtune=native %:local_cpu_detect(tune)}"
974#else
975# define MCPU_MTUNE_NATIVE_SPECS ""
054b4005 976# define EXTRA_SPEC_FUNCTIONS MCPU_TO_MARCH_SPEC_FUNCTIONS
7e1bcce3
KT
977#endif
978
682287fb 979#define ASM_CPU_SPEC \
054b4005 980 MCPU_TO_MARCH_SPEC
682287fb 981
682287fb
JG
982#define EXTRA_SPECS \
983 { "asm_cpu_spec", ASM_CPU_SPEC }
984
5fca7b66
RH
985#define ASM_OUTPUT_POOL_EPILOGUE aarch64_asm_output_pool_epilogue
986
1b62ed4f
JG
987/* This type is the user-visible __fp16, and a pointer to that type. We
988 need it in many places in the backend. Defined in aarch64-builtins.c. */
989extern tree aarch64_fp16_type_node;
990extern tree aarch64_fp16_ptr_type_node;
991
817221cc
WD
992/* The generic unwind code in libgcc does not initialize the frame pointer.
993 So in order to unwind a function using a frame pointer, the very first
994 function that is unwound must save the frame pointer. That way the frame
995 pointer is restored and its value is now valid - otherwise _Unwind_GetGR
996 crashes. Libgcc can now be safely built with -fomit-frame-pointer. */
997#define LIBGCC2_UNWIND_ATTRIBUTE \
998 __attribute__((optimize ("no-omit-frame-pointer")))
999
43e9d192 1000#endif /* GCC_AARCH64_H */