/* Machine description for AArch64 architecture.
   Copyright (C) 2009-2014 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

22 | #ifndef GCC_AARCH64_H | |
23 | #define GCC_AARCH64_H | |
24 | ||
25 | /* Target CPU builtins. */ | |
26 | #define TARGET_CPU_CPP_BUILTINS() \ | |
27 | do \ | |
28 | { \ | |
29 | builtin_define ("__aarch64__"); \ | |
30 | if (TARGET_BIG_END) \ | |
31 | builtin_define ("__AARCH64EB__"); \ | |
32 | else \ | |
33 | builtin_define ("__AARCH64EL__"); \ | |
34 | \ | |
5750e120 IB |
35 | if (!TARGET_GENERAL_REGS_ONLY) \ |
36 | builtin_define ("__ARM_NEON"); \ | |
37 | \ | |
43e9d192 IB |
38 | switch (aarch64_cmodel) \ |
39 | { \ | |
40 | case AARCH64_CMODEL_TINY: \ | |
41 | case AARCH64_CMODEL_TINY_PIC: \ | |
42 | builtin_define ("__AARCH64_CMODEL_TINY__"); \ | |
43 | break; \ | |
44 | case AARCH64_CMODEL_SMALL: \ | |
45 | case AARCH64_CMODEL_SMALL_PIC: \ | |
46 | builtin_define ("__AARCH64_CMODEL_SMALL__");\ | |
47 | break; \ | |
48 | case AARCH64_CMODEL_LARGE: \ | |
49 | builtin_define ("__AARCH64_CMODEL_LARGE__"); \ | |
50 | break; \ | |
51 | default: \ | |
52 | break; \ | |
53 | } \ | |
54 | \ | |
43be9a95 YZ |
55 | if (TARGET_ILP32) \ |
56 | { \ | |
57 | cpp_define (parse_in, "_ILP32"); \ | |
58 | cpp_define (parse_in, "__ILP32__"); \ | |
59 | } \ | |
afb582f1 TB |
60 | if (TARGET_CRYPTO) \ |
61 | builtin_define ("__ARM_FEATURE_CRYPTO"); \ | |
43e9d192 IB |
62 | } while (0) |
63 | ||
64 | \f | |
65 | ||
66 | /* Target machine storage layout. */ | |
67 | ||
/* Promote QImode and HImode integer values to SImode when used in
   arithmetic; AArch64 has no sub-word arithmetic instructions.  The
   signedness flag (UNSIGNEDP) is left unchanged.  */
#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE)     \
  if (GET_MODE_CLASS (MODE) == MODE_INT         \
      && GET_MODE_SIZE (MODE) < 4)              \
    {                                           \
      if (MODE == QImode || MODE == HImode)     \
        {                                       \
          MODE = SImode;                        \
        }                                       \
    }
77 | ||
/* Bits are always numbered from the LSBit.  */
#define BITS_BIG_ENDIAN 0

/* Big/little-endian flavour.  */
#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)

/* AdvSIMD is supported in the default configuration, unless disabled by
   -mgeneral-regs-only.  */
#define TARGET_SIMD !TARGET_GENERAL_REGS_ONLY
#define TARGET_FLOAT !TARGET_GENERAL_REGS_ONLY

#define UNITS_PER_WORD 8

#define UNITS_PER_VREG 16

#define PARM_BOUNDARY 64

#define STACK_BOUNDARY 128

#define FUNCTION_BOUNDARY 32

#define EMPTY_FIELD_BOUNDARY 32

#define BIGGEST_ALIGNMENT 128

#define SHORT_TYPE_SIZE 16

#define INT_TYPE_SIZE 32

/* LP64 by default; the ILP32 ABI narrows long and pointers to 32 bits.  */
#define LONG_TYPE_SIZE (TARGET_ILP32 ? 32 : 64)

#define POINTER_SIZE (TARGET_ILP32 ? 32 : 64)

#define LONG_LONG_TYPE_SIZE 64

#define FLOAT_TYPE_SIZE 32

#define DOUBLE_TYPE_SIZE 64

#define LONG_DOUBLE_TYPE_SIZE 128

/* The architecture reserves all bits of the address for hardware use,
   so the vbit must go into the delta field of pointers to member
   functions.  This is the same config as that in the AArch32
   port.  */
#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta

/* Make strings word-aligned so that strcpy from constants will be
   faster.  */
#define CONSTANT_ALIGNMENT(EXP, ALIGN)          \
  ((TREE_CODE (EXP) == STRING_CST               \
    && !optimize_size                           \
    && (ALIGN) < BITS_PER_WORD)                 \
   ? BITS_PER_WORD : ALIGN)

/* Align aggregates (arrays, unions, records) to at least a word so
   they can be copied with word loads/stores.  */
#define DATA_ALIGNMENT(EXP, ALIGN)              \
  ((((ALIGN) < BITS_PER_WORD)                   \
    && (TREE_CODE (EXP) == ARRAY_TYPE           \
        || TREE_CODE (EXP) == UNION_TYPE        \
        || TREE_CODE (EXP) == RECORD_TYPE))     \
   ? BITS_PER_WORD : (ALIGN))

#define LOCAL_ALIGNMENT(EXP, ALIGN) DATA_ALIGNMENT(EXP, ALIGN)

#define STRUCTURE_SIZE_BOUNDARY 8

/* Defined by the ABI */
#define WCHAR_TYPE "unsigned int"
#define WCHAR_TYPE_SIZE 32

/* Using long long breaks -ansi and -std=c90, so these will need to be
   made conditional for an LLP64 ABI.  */

#define SIZE_TYPE "long unsigned int"

#define PTRDIFF_TYPE "long int"

#define PCC_BITFIELD_TYPE_MATTERS 1
157 | ||
158 | ||
/* Instruction tuning/selection flags.  */

/* Bit values used to identify processor capabilities.  */
#define AARCH64_FL_SIMD       (1 << 0)  /* Has SIMD instructions.  */
#define AARCH64_FL_FP         (1 << 1)  /* Has FP.  */
#define AARCH64_FL_CRYPTO     (1 << 2)  /* Has crypto.  */
#define AARCH64_FL_SLOWMUL    (1 << 3)  /* A slow multiply core.  */
#define AARCH64_FL_CRC        (1 << 4)  /* Has CRC.  */

/* Has FP and SIMD.  */
#define AARCH64_FL_FPSIMD     (AARCH64_FL_FP | AARCH64_FL_SIMD)

/* Has FP without SIMD.  */
#define AARCH64_FL_FPQ16      (AARCH64_FL_FP & ~AARCH64_FL_SIMD)

/* Architecture flags that effect instruction selection.  */
#define AARCH64_FL_FOR_ARCH8       (AARCH64_FL_FPSIMD)

/* Macros to test ISA flags.  */
extern unsigned long aarch64_isa_flags;
#define AARCH64_ISA_CRC            (aarch64_isa_flags & AARCH64_FL_CRC)
#define AARCH64_ISA_CRYPTO         (aarch64_isa_flags & AARCH64_FL_CRYPTO)
#define AARCH64_ISA_FP             (aarch64_isa_flags & AARCH64_FL_FP)
#define AARCH64_ISA_SIMD           (aarch64_isa_flags & AARCH64_FL_SIMD)

/* Macros to test tuning flags.  */
extern unsigned long aarch64_tune_flags;
#define AARCH64_TUNE_SLOWMUL       (aarch64_tune_flags & AARCH64_FL_SLOWMUL)

/* Crypto is an optional feature.  */
#define TARGET_CRYPTO AARCH64_ISA_CRYPTO
43e9d192 IB |
190 | |
/* Standard register usage.  */

/* 31 64-bit general purpose registers R0-R30:
   R30		LR (link register)
   R29		FP (frame pointer)
   R19-R28	Callee-saved registers
   R18		The platform register; use as temporary register.
   R17		IP1 The second intra-procedure-call temporary register
		(can be used by call veneers and PLT code); otherwise use
		as a temporary register
   R16		IP0 The first intra-procedure-call temporary register (can
		be used by call veneers and PLT code); otherwise use as a
		temporary register
   R9-R15	Temporary registers
   R8		Structure value parameter / temporary register
   R0-R7	Parameter/result registers

   SP		stack pointer, encoded as X/R31 where permitted.
   ZR		zero register, encoded as X/R31 elsewhere

   32 x 128-bit floating-point/vector registers
   V16-V31	Caller-saved (temporary) registers
   V8-V15	Callee-saved registers
   V0-V7	Parameter/result registers

   The vector register V0 holds scalar B0, H0, S0 and D0 in its least
   significant bits.  Unlike AArch32 S1 is not packed into D0,
   etc.  */

/* Note that we don't mark X30 as a call-clobbered register.  The idea is
   that it's really the call instructions themselves which clobber X30.
   We don't care what the called function does with it afterwards.

   This approach makes it easier to implement sibcalls.  Unlike normal
   calls, sibcalls don't clobber X30, so the register reaches the
   called function intact.  EPILOGUE_USES says that X30 is useful
   to the called function.  */

#define FIXED_REGISTERS					\
  {							\
    0, 0, 0, 0,   0, 0, 0, 0,	/* R0 - R7 */		\
    0, 0, 0, 0,   0, 0, 0, 0,	/* R8 - R15 */		\
    0, 0, 0, 0,   0, 0, 0, 0,	/* R16 - R23 */		\
    0, 0, 0, 0,   0, 1, 0, 1,	/* R24 - R30, SP */	\
    0, 0, 0, 0,   0, 0, 0, 0,   /* V0 - V7 */		\
    0, 0, 0, 0,   0, 0, 0, 0,   /* V8 - V15 */		\
    0, 0, 0, 0,   0, 0, 0, 0,   /* V16 - V23 */		\
    0, 0, 0, 0,   0, 0, 0, 0,   /* V24 - V31 */		\
    1, 1, 1,			/* SFP, AP, CC */	\
  }

#define CALL_USED_REGISTERS				\
  {							\
    1, 1, 1, 1,   1, 1, 1, 1,	/* R0 - R7 */		\
    1, 1, 1, 1,   1, 1, 1, 1,	/* R8 - R15 */		\
    1, 1, 1, 0,   0, 0, 0, 0,	/* R16 - R23 */		\
    0, 0, 0, 0,   0, 1, 0, 1,	/* R24 - R30, SP */	\
    1, 1, 1, 1,   1, 1, 1, 1,	/* V0 - V7 */		\
    0, 0, 0, 0,   0, 0, 0, 0,	/* V8 - V15 */		\
    1, 1, 1, 1,   1, 1, 1, 1,   /* V16 - V23 */		\
    1, 1, 1, 1,   1, 1, 1, 1,   /* V24 - V31 */		\
    1, 1, 1,			/* SFP, AP, CC */	\
  }

#define REGISTER_NAMES						\
  {								\
    "x0",  "x1",  "x2",  "x3",  "x4",  "x5",  "x6",  "x7",	\
    "x8",  "x9",  "x10", "x11", "x12", "x13", "x14", "x15",	\
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",	\
    "x24", "x25", "x26", "x27", "x28", "x29", "x30", "sp",	\
    "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",	\
    "v8",  "v9",  "v10", "v11", "v12", "v13", "v14", "v15",	\
    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",	\
    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",	\
    "sfp", "ap",  "cc",						\
  }

/* Generate the register aliases for core register N */
#define R_ALIASES(N) {"r" # N, R0_REGNUM + (N)}, \
                     {"w" # N, R0_REGNUM + (N)}

#define V_ALIASES(N) {"q" # N, V0_REGNUM + (N)}, \
                     {"d" # N, V0_REGNUM + (N)}, \
                     {"s" # N, V0_REGNUM + (N)}, \
                     {"h" # N, V0_REGNUM + (N)}, \
                     {"b" # N, V0_REGNUM + (N)}

/* Provide aliases for all of the ISA defined register name forms.
   These aliases are convenient for use in the clobber lists of inline
   asm statements.  */

#define ADDITIONAL_REGISTER_NAMES \
  { R_ALIASES(0),  R_ALIASES(1),  R_ALIASES(2),  R_ALIASES(3),  \
    R_ALIASES(4),  R_ALIASES(5),  R_ALIASES(6),  R_ALIASES(7),  \
    R_ALIASES(8),  R_ALIASES(9),  R_ALIASES(10), R_ALIASES(11), \
    R_ALIASES(12), R_ALIASES(13), R_ALIASES(14), R_ALIASES(15), \
    R_ALIASES(16), R_ALIASES(17), R_ALIASES(18), R_ALIASES(19), \
    R_ALIASES(20), R_ALIASES(21), R_ALIASES(22), R_ALIASES(23), \
    R_ALIASES(24), R_ALIASES(25), R_ALIASES(26), R_ALIASES(27), \
    R_ALIASES(28), R_ALIASES(29), R_ALIASES(30), {"wsp", R0_REGNUM + 31}, \
    V_ALIASES(0),  V_ALIASES(1),  V_ALIASES(2),  V_ALIASES(3),  \
    V_ALIASES(4),  V_ALIASES(5),  V_ALIASES(6),  V_ALIASES(7),  \
    V_ALIASES(8),  V_ALIASES(9),  V_ALIASES(10), V_ALIASES(11), \
    V_ALIASES(12), V_ALIASES(13), V_ALIASES(14), V_ALIASES(15), \
    V_ALIASES(16), V_ALIASES(17), V_ALIASES(18), V_ALIASES(19), \
    V_ALIASES(20), V_ALIASES(21), V_ALIASES(22), V_ALIASES(23), \
    V_ALIASES(24), V_ALIASES(25), V_ALIASES(26), V_ALIASES(27), \
    V_ALIASES(28), V_ALIASES(29), V_ALIASES(30), V_ALIASES(31)  \
  }
300 | ||
/* Say that the epilogue uses the return address register.  Note that
   in the case of sibcalls, the values "used by the epilogue" are
   considered live at the start of the called function.  */

#define EPILOGUE_USES(REGNO) \
  ((REGNO) == LR_REGNUM)

/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
   the stack pointer does not matter.  The value is tested only in
   functions that have frame pointers.  */
#define EXIT_IGNORE_STACK 1

#define STATIC_CHAIN_REGNUM		R18_REGNUM
#define HARD_FRAME_POINTER_REGNUM	R29_REGNUM
#define FRAME_POINTER_REGNUM		SFP_REGNUM
#define STACK_POINTER_REGNUM		SP_REGNUM
#define ARG_POINTER_REGNUM		AP_REGNUM
#define FIRST_PSEUDO_REGISTER		67

/* The number of (integer) argument register available.  */
#define NUM_ARG_REGS			8
#define NUM_FP_ARG_REGS			8

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS			4
327 | ||
/* External dwarf register number scheme.  These number are used to
   identify registers in dwarf debug information, the values are
   defined by the AArch64 ABI.  The numbering scheme is independent of
   GCC's internal register numbering scheme.  */

#define AARCH64_DWARF_R0        0

/* The number of R registers, note 31! not 32.  */
#define AARCH64_DWARF_NUMBER_R 31

#define AARCH64_DWARF_SP       31
#define AARCH64_DWARF_V0       64

/* The number of V registers.  */
#define AARCH64_DWARF_NUMBER_V 32

/* For signal frames we need to use an alternative return column.  This
   value must not correspond to a hard register and must be out of the
   range of DWARF_FRAME_REGNUM().  */
#define DWARF_ALT_FRAME_RETURN_COLUMN   \
  (AARCH64_DWARF_V0 + AARCH64_DWARF_NUMBER_V)

/* We add 1 extra frame register for use as the
   DWARF_ALT_FRAME_RETURN_COLUMN.  */
#define DWARF_FRAME_REGISTERS           (DWARF_ALT_FRAME_RETURN_COLUMN + 1)


#define DBX_REGISTER_NUMBER(REGNO)	aarch64_dbx_register_number (REGNO)
/* Provide a definition of DWARF_FRAME_REGNUM here so that fallback unwinders
   can use DWARF_ALT_FRAME_RETURN_COLUMN defined below.  This is just the same
   as the default definition in dwarf2out.c.  */
#undef DWARF_FRAME_REGNUM
#define DWARF_FRAME_REGNUM(REGNO)	DBX_REGISTER_NUMBER (REGNO)

#define DWARF_FRAME_RETURN_COLUMN	DWARF_FRAME_REGNUM (LR_REGNUM)

#define HARD_REGNO_NREGS(REGNO, MODE)	aarch64_hard_regno_nregs (REGNO, MODE)

#define HARD_REGNO_MODE_OK(REGNO, MODE)	aarch64_hard_regno_mode_ok (REGNO, MODE)

#define MODES_TIEABLE_P(MODE1, MODE2)			\
  (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))

#define DWARF2_UNWIND_INFO 1

/* Use R0 through R3 to pass exception handling information.  */
#define EH_RETURN_DATA_REGNO(N) \
  ((N) < 4 ? ((unsigned int) R0_REGNUM + (N)) : INVALID_REGNUM)

/* Select a format to encode pointers in exception handling data.  */
#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
  aarch64_asm_preferred_eh_data_format ((CODE), (GLOBAL))

/* The register that holds the return address in exception handlers.  */
#define AARCH64_EH_STACKADJ_REGNUM	(R0_REGNUM + 4)
#define EH_RETURN_STACKADJ_RTX	gen_rtx_REG (Pmode, AARCH64_EH_STACKADJ_REGNUM)

/* Don't use __builtin_setjmp until we've defined it.  */
#undef DONT_USE_BUILTIN_SETJMP
#define DONT_USE_BUILTIN_SETJMP 1

/* Register in which the structure value is to be returned.  */
#define AARCH64_STRUCT_VALUE_REGNUM R8_REGNUM

/* Non-zero if REGNO is part of the Core register set.

   The rather unusual way of expressing this check is to avoid
   warnings when building the compiler when R0_REGNUM is 0 and REGNO
   is unsigned.  */
#define GP_REGNUM_P(REGNO)						\
  (((unsigned) (REGNO - R0_REGNUM)) <= (R30_REGNUM - R0_REGNUM))

#define FP_REGNUM_P(REGNO)			\
  (((unsigned) (REGNO - V0_REGNUM)) <= (V31_REGNUM - V0_REGNUM))

#define FP_LO_REGNUM_P(REGNO)            \
  (((unsigned) (REGNO - V0_REGNUM)) <= (V15_REGNUM - V0_REGNUM))
405 | ||
406 | \f | |
/* Register and constant classes.  */

enum reg_class
{
  NO_REGS,
  CORE_REGS,
  GENERAL_REGS,
  STACK_REG,
  POINTER_REGS,
  FP_LO_REGS,
  FP_REGS,
  ALL_REGS,
  LIM_REG_CLASSES		/* Last */
};

#define N_REG_CLASSES	((int) LIM_REG_CLASSES)

#define REG_CLASS_NAMES				\
{						\
  "NO_REGS",					\
  "CORE_REGS",					\
  "GENERAL_REGS",				\
  "STACK_REG",					\
  "POINTER_REGS",				\
  "FP_LO_REGS",					\
  "FP_REGS",					\
  "ALL_REGS"					\
}

/* Three 32-bit words per class: GP regs + SP, V regs, and the
   SFP/AP/CC pseudo registers.  */
#define REG_CLASS_CONTENTS						\
{									\
  { 0x00000000, 0x00000000, 0x00000000 },	/* NO_REGS */		\
  { 0x7fffffff, 0x00000000, 0x00000003 },	/* CORE_REGS */		\
  { 0x7fffffff, 0x00000000, 0x00000003 },	/* GENERAL_REGS */	\
  { 0x80000000, 0x00000000, 0x00000000 },	/* STACK_REG */		\
  { 0xffffffff, 0x00000000, 0x00000003 },	/* POINTER_REGS */	\
  { 0x00000000, 0x0000ffff, 0x00000000 },       /* FP_LO_REGS  */	\
  { 0x00000000, 0xffffffff, 0x00000000 },       /* FP_REGS  */		\
  { 0xffffffff, 0xffffffff, 0x00000007 }	/* ALL_REGS */		\
}

#define REGNO_REG_CLASS(REGNO)	aarch64_regno_regclass (REGNO)

#define INDEX_REG_CLASS	CORE_REGS
#define BASE_REG_CLASS  POINTER_REGS
452 | ||
6991c977 | 453 | /* Register pairs used to eliminate unneeded registers that point into |
43e9d192 IB |
454 | the stack frame. */ |
455 | #define ELIMINABLE_REGS \ | |
456 | { \ | |
457 | { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \ | |
458 | { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM }, \ | |
459 | { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \ | |
460 | { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM }, \ | |
461 | } | |
462 | ||
463 | #define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ | |
464 | (OFFSET) = aarch64_initial_elimination_offset (FROM, TO) | |
465 | ||
466 | /* CPU/ARCH option handling. */ | |
467 | #include "config/aarch64/aarch64-opts.h" | |
468 | ||
469 | enum target_cpus | |
470 | { | |
192ed1dd JG |
471 | #define AARCH64_CORE(NAME, INTERNAL_IDENT, IDENT, ARCH, FLAGS, COSTS) \ |
472 | TARGET_CPU_##INTERNAL_IDENT, | |
43e9d192 IB |
473 | #include "aarch64-cores.def" |
474 | #undef AARCH64_CORE | |
475 | TARGET_CPU_generic | |
476 | }; | |
477 | ||
02fdbd5b | 478 | /* If there is no CPU defined at configure, use "cortex-a53" as default. */ |
43e9d192 IB |
479 | #ifndef TARGET_CPU_DEFAULT |
480 | #define TARGET_CPU_DEFAULT \ | |
02fdbd5b | 481 | (TARGET_CPU_cortexa53 | (AARCH64_CPU_DEFAULT_FLAGS << 6)) |
43e9d192 IB |
482 | #endif |
483 | ||
484 | /* The processor for which instructions should be scheduled. */ | |
485 | extern enum aarch64_processor aarch64_tune; | |
486 | ||
487 | /* RTL generation support. */ | |
488 | #define INIT_EXPANDERS aarch64_init_expanders () | |
489 | \f | |
490 | ||
/* Stack layout; function entry, exit and calling.  */
#define STACK_GROWS_DOWNWARD	1

#define FRAME_GROWS_DOWNWARD	1

#define STARTING_FRAME_OFFSET	0

#define ACCUMULATE_OUTGOING_ARGS	1

#define FIRST_PARM_OFFSET(FNDECL) 0

/* Fix for VFP */
#define LIBCALL_VALUE(MODE)  \
  gen_rtx_REG (MODE, FLOAT_MODE_P (MODE) ? V0_REGNUM : R0_REGNUM)

#define DEFAULT_PCC_STRUCT_RETURN 0

/* Round X up/down to a multiple of ALIGNMENT, which must be a power of
   two.  */
#define AARCH64_ROUND_UP(X, ALIGNMENT) \
  (((X) + ((ALIGNMENT) - 1)) & ~((ALIGNMENT) - 1))

#define AARCH64_ROUND_DOWN(X, ALIGNMENT) \
  ((X) & ~((ALIGNMENT) - 1))

#ifdef HOST_WIDE_INT
struct GTY (()) aarch64_frame
{
  HOST_WIDE_INT reg_offset[FIRST_PSEUDO_REGISTER];
  HOST_WIDE_INT saved_regs_size;
  /* Padding if needed after the all the callee save registers have
     been saved.  */
  HOST_WIDE_INT padding0;
  HOST_WIDE_INT hardfp_offset;	/* HARD_FRAME_POINTER_REGNUM */
  HOST_WIDE_INT fp_lr_offset;	/* Space needed for saving fp and/or lr */

  bool laid_out;
};

typedef struct GTY (()) machine_function
{
  struct aarch64_frame frame;

  /* The number of extra stack bytes taken up by register varargs.
     This area is allocated by the callee at the very top of the frame.  */
  HOST_WIDE_INT saved_varargs_size;

} machine_function;
#endif
538 | ||
/* Which ABI to use.  */
enum aarch64_abi_type
{
  AARCH64_ABI_LP64 = 0,
  AARCH64_ABI_ILP32 = 1
};

#ifndef AARCH64_ABI_DEFAULT
#define AARCH64_ABI_DEFAULT AARCH64_ABI_LP64
#endif

#define TARGET_ILP32	(aarch64_abi & AARCH64_ABI_ILP32)
43e9d192 | 551 | |
43e9d192 IB |
552 | enum arm_pcs |
553 | { | |
554 | ARM_PCS_AAPCS64, /* Base standard AAPCS for 64 bit. */ | |
555 | ARM_PCS_UNKNOWN | |
556 | }; | |
557 | ||
558 | ||
43e9d192 | 559 | extern enum arm_pcs arm_pcs_variant; |
43e9d192 IB |
560 | |
561 | #ifndef ARM_DEFAULT_PCS | |
562 | #define ARM_DEFAULT_PCS ARM_PCS_AAPCS64 | |
563 | #endif | |
564 | ||
565 | /* We can't use enum machine_mode inside a generator file because it | |
566 | hasn't been created yet; we shouldn't be using any code that | |
567 | needs the real definition though, so this ought to be safe. */ | |
568 | #ifdef GENERATOR_FILE | |
569 | #define MACHMODE int | |
570 | #else | |
571 | #include "insn-modes.h" | |
572 | #define MACHMODE enum machine_mode | |
573 | #endif | |
574 | ||
575 | ||
576 | /* AAPCS related state tracking. */ | |
577 | typedef struct | |
578 | { | |
579 | enum arm_pcs pcs_variant; | |
580 | int aapcs_arg_processed; /* No need to lay out this argument again. */ | |
581 | int aapcs_ncrn; /* Next Core register number. */ | |
582 | int aapcs_nextncrn; /* Next next core register number. */ | |
583 | int aapcs_nvrn; /* Next Vector register number. */ | |
584 | int aapcs_nextnvrn; /* Next Next Vector register number. */ | |
585 | rtx aapcs_reg; /* Register assigned to this argument. This | |
586 | is NULL_RTX if this parameter goes on | |
587 | the stack. */ | |
588 | MACHMODE aapcs_vfp_rmode; | |
589 | int aapcs_stack_words; /* If the argument is passed on the stack, this | |
590 | is the number of words needed, after rounding | |
591 | up. Only meaningful when | |
592 | aapcs_reg == NULL_RTX. */ | |
593 | int aapcs_stack_size; /* The total size (in words, per 8 byte) of the | |
594 | stack arg area so far. */ | |
595 | } CUMULATIVE_ARGS; | |
596 | ||
597 | #define FUNCTION_ARG_PADDING(MODE, TYPE) \ | |
598 | (aarch64_pad_arg_upward (MODE, TYPE) ? upward : downward) | |
599 | ||
600 | #define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \ | |
601 | (aarch64_pad_reg_upward (MODE, TYPE, FIRST) ? upward : downward) | |
602 | ||
603 | #define PAD_VARARGS_DOWN 0 | |
604 | ||
605 | #define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \ | |
606 | aarch64_init_cumulative_args (&(CUM), FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) | |
607 | ||
608 | #define FUNCTION_ARG_REGNO_P(REGNO) \ | |
609 | aarch64_function_arg_regno_p(REGNO) | |
610 | \f | |
611 | ||
/* ISA Features.  */

/* Addressing modes, etc.  */
#define HAVE_POST_INCREMENT	1
#define HAVE_PRE_INCREMENT	1
#define HAVE_POST_DECREMENT	1
#define HAVE_PRE_DECREMENT	1
#define HAVE_POST_MODIFY_DISP	1
#define HAVE_PRE_MODIFY_DISP	1

#define MAX_REGS_PER_ADDRESS	2

#define CONSTANT_ADDRESS_P(X)		aarch64_constant_address_p(X)

/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and jump to WIN.  This
   macro is used in only one place: `find_reloads_address' in reload.c.  */

#define LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, IND_L, WIN)	     \
do {									     \
  rtx new_x = aarch64_legitimize_reload_address (&(X), MODE, OPNUM, TYPE,    \
						 IND_L);		     \
  if (new_x)								     \
    {									     \
      X = new_x;							     \
      goto WIN;								     \
    }									     \
} while (0)

#define REGNO_OK_FOR_BASE_P(REGNO)	\
  aarch64_regno_ok_for_base_p (REGNO, true)

#define REGNO_OK_FOR_INDEX_P(REGNO) \
  aarch64_regno_ok_for_index_p (REGNO, true)

#define LEGITIMATE_PIC_OPERAND_P(X) \
  aarch64_legitimate_pic_operand_p (X)

#define CASE_VECTOR_MODE Pmode

#define DEFAULT_SIGNED_CHAR 0

/* An integer expression for the size in bits of the largest integer machine
   mode that should actually be used.  We allow pairs of registers.  */
#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TImode)

/* Maximum bytes moved by a single instruction (load/store pair).  */
#define MOVE_MAX (UNITS_PER_WORD * 2)

/* The base cost overhead of a memcpy call, for MOVE_RATIO and friends.  */
#define AARCH64_CALL_RATIO 8

/* When optimizing for size, give a better estimate of the length of a memcpy
   call, but use the default otherwise.  But move_by_pieces_ninsns() counts
   memory-to-memory moves, and we'll have to generate a load & store for each,
   so halve the value to take that into account.  */
#define MOVE_RATIO(speed) \
  (((speed) ? 15 : AARCH64_CALL_RATIO) / 2)

/* For CLEAR_RATIO, when optimizing for size, give a better estimate
   of the length of a memset call, but use the default otherwise.  */
#define CLEAR_RATIO(speed) \
  ((speed) ? 15 : AARCH64_CALL_RATIO)

/* SET_RATIO is similar to CLEAR_RATIO, but for a non-zero constant, so when
   optimizing for size adjust the ratio to account for the overhead of loading
   the constant.  */
#define SET_RATIO(speed) \
  ((speed) ? 15 : AARCH64_CALL_RATIO - 2)

/* STORE_BY_PIECES_P can be used when copying a constant string, but
   in that case each 64-bit chunk takes 5 insns instead of 2 (LDR/STR).
   For now we always fail this and let the move_by_pieces code copy
   the string from read-only memory.  */
#define STORE_BY_PIECES_P(SIZE, ALIGN) 0

/* Disable auto-increment in move_by_pieces et al.  Use of auto-increment is
   rarely a good idea in straight-line code since it adds an extra address
   dependency between each instruction.  Better to use incrementing offsets.  */
#define USE_LOAD_POST_INCREMENT(MODE)   0
#define USE_LOAD_POST_DECREMENT(MODE)   0
#define USE_LOAD_PRE_INCREMENT(MODE)    0
#define USE_LOAD_PRE_DECREMENT(MODE)    0
#define USE_STORE_POST_INCREMENT(MODE)  0
#define USE_STORE_POST_DECREMENT(MODE)  0
#define USE_STORE_PRE_INCREMENT(MODE)   0
#define USE_STORE_PRE_DECREMENT(MODE)   0

/* ?? #define WORD_REGISTER_OPERATIONS  */

/* Define if loading from memory in MODE, an integral mode narrower than
   BITS_PER_WORD will either zero-extend or sign-extend.  The value of this
   macro should be the code that says which one of the two operations is
   implicitly done, or UNKNOWN if none.  */
#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND

/* Define this macro to be non-zero if instructions will fail to work
   if given data not on the nominal alignment.  */
#define STRICT_ALIGNMENT		TARGET_STRICT_ALIGN

/* Define this macro to be non-zero if accessing less than a word of
   memory is no faster than accessing a word of memory, i.e., if such
   accesses require more than one instruction or if there is no
   difference in cost.
   Although there's no difference in instruction count or cycles,
   in AArch64 we don't want to expand to a sub-word to a 64-bit access
   if we don't have to, for power-saving reasons.  */
#define SLOW_BYTE_ACCESS		0

#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1

#define NO_FUNCTION_CSE	1
724 | ||
/* Specify the machine mode that the hardware addresses have.
   After generation of rtl, the compiler makes no further distinction
   between pointers and any other objects of this machine mode.  */
#define Pmode		DImode

/* A C expression whose value is zero if pointers that need to be extended
   from being `POINTER_SIZE' bits wide to `Pmode' are sign-extended and
   greater then zero if they are zero-extended and less then zero if the
   ptr_extend instruction should be used.  */
#define POINTERS_EXTEND_UNSIGNED 1

/* Mode of a function address in a call instruction (for indexing purposes).  */
#define FUNCTION_MODE	Pmode

#define SELECT_CC_MODE(OP, X, Y)	aarch64_select_cc_mode (OP, X, Y)

#define REVERSIBLE_CC_MODE(MODE) 1

/* FP comparisons need the unordered-aware reversal to preserve NaN
   semantics; integer comparisons use the plain reversal.  */
#define REVERSE_CONDITION(CODE, MODE)		\
  (((MODE) == CCFPmode || (MODE) == CCFPEmode)	\
   ? reverse_condition_maybe_unordered (CODE)	\
   : reverse_condition (CODE))

#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
  ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE))
#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
  ((VALUE) = ((MODE) == SImode ? 32 : 64), 2)

#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)

#define RETURN_ADDR_RTX aarch64_return_addr

/* 3 insns + padding + 2 pointer-sized entries.  */
#define TRAMPOLINE_SIZE	(TARGET_ILP32 ? 24 : 32)

/* Trampolines contain dwords, so must be dword aligned.  */
#define TRAMPOLINE_ALIGNMENT 64

/* Put trampolines in the text section so that mapping symbols work
   correctly.  */
#define TRAMPOLINE_SECTION text_section
766 | \f | |
767 | /* Costs, etc. */ | |
768 | #define MEMORY_MOVE_COST(M, CLASS, IN) \ | |
769 | (GET_MODE_SIZE (M) < 8 ? 8 : GET_MODE_SIZE (M)) | |
770 | ||
771 | /* To start with. */ | |
772 | #define BRANCH_COST(SPEED_P, PREDICTABLE_P) 2 | |
773 | \f | |
774 | ||
/* Assembly output.  */

/* For now we'll make all jump tables pc-relative.  */
#define CASE_VECTOR_PC_RELATIVE 1

/* Shrink case-vector entries when the label offset range allows it:
   32-bit, then 16-bit, then 8-bit entries.  */
#define CASE_VECTOR_SHORTEN_MODE(min, max, body) \
  ((min < -0x1fff0 || max > 0x1fff0) ? SImode \
   : (min < -0x1f0 || max > 0x1f0) ? HImode \
   : QImode)

/* Jump table alignment is explicit in ASM_OUTPUT_CASE_LABEL.  */
#define ADDR_VEC_ALIGN(JUMPTABLE) 0

/* Operand and address printing are handled by backend routines.  */
#define PRINT_OPERAND(STREAM, X, CODE) aarch64_print_operand (STREAM, X, CODE)

#define PRINT_OPERAND_ADDRESS(STREAM, X) \
  aarch64_print_operand_address (STREAM, X)
792 | ||
92d649c4 VK |
#define MCOUNT_NAME "_mcount"

#define NO_PROFILE_COUNTERS 1

/* Emit rtl for profiling.  Output assembler code to FILE
   to call "_mcount" for profiling a function entry.
   Wrapped in do { } while (0) so the multi-statement expansion behaves
   as a single statement (safe under an unbraced if/else and requires the
   trailing semicolon at the use site).  */
#define PROFILE_HOOK(LABEL) \
  do \
    { \
      rtx fun, lr; \
      lr = get_hard_reg_initial_val (Pmode, LR_REGNUM); \
      fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME); \
      emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lr, Pmode); \
    } \
  while (0)

/* All the work done in PROFILE_HOOK, but still required.  */
#define FUNCTION_PROFILER(STREAM, LABELNO) do { } while (0)
43e9d192 IB |
809 | |
/* For some reason, the Linux headers think they know how to define
   these macros.  They don't!!!  */
#undef ASM_APP_ON
#undef ASM_APP_OFF
#define ASM_APP_ON "\t" ASM_COMMENT_START " Start of user assembly\n"
#define ASM_APP_OFF "\t" ASM_COMMENT_START " End of user assembly\n"
816 | ||
43e9d192 IB |
#define CONSTANT_POOL_BEFORE_FUNCTION 0

/* This definition should be relocated to aarch64-elf-raw.h.  This macro
   should be undefined in aarch64-linux.h and a clear_cache pattern
   implemented to emit either the call to __aarch64_sync_cache_range()
   directly or preferably the appropriate syscall or cache clear
   instructions inline.  */
#define CLEAR_INSN_CACHE(beg, end) \
  extern void __aarch64_sync_cache_range (void *, void *); \
  __aarch64_sync_cache_range (beg, end)
827 | ||
/* Ask the backend whether changing modes FROM -> TO is unsafe for
   registers in CLASS.  */
#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
  aarch64_cannot_change_mode_class (FROM, TO, CLASS)

/* Shift counts are truncated only when SIMD is disabled.  */
#define SHIFT_COUNT_TRUNCATED !TARGET_SIMD
832 | ||
/* Callee only saves lower 64-bits of a 128-bit register.  Tell the
   compiler the callee clobbers the top 64-bits when restoring the
   bottom 64-bits.  */
#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) \
  (FP_REGNUM_P (REGNO) && GET_MODE_SIZE (MODE) > 8)

/* Check TLS Descriptors mechanism is selected.  */
#define TARGET_TLS_DESC (aarch64_tls_dialect == TLS_DESCRIPTORS)
841 | ||
/* Current code model, set from command-line options.  */
extern enum aarch64_code_model aarch64_cmodel;

/* When using the tiny addressing model conditional and unconditional branches
   can span the whole of the available address space (1MB).  */
#define HAS_LONG_COND_BRANCH \
  (aarch64_cmodel == AARCH64_CMODEL_TINY \
   || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)

#define HAS_LONG_UNCOND_BRANCH \
  (aarch64_cmodel == AARCH64_CMODEL_TINY \
   || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)
853 | ||
/* Modes valid for AdvSIMD Q registers.  Fix: the V2DFmode comparand used
   lowercase 'mode' instead of the macro parameter (MODE), which only
   compiled when the caller's argument expression happened to be a
   variable literally named 'mode'.  */
#define AARCH64_VALID_SIMD_QREG_MODE(MODE) \
  ((MODE) == V4SImode || (MODE) == V8HImode || (MODE) == V16QImode \
   || (MODE) == V4SFmode || (MODE) == V2DImode || (MODE) == V2DFmode)
858 | ||
e58bf20a TB |
/* Map lane number N to its architectural lane in MODE: on big-endian the
   register lane order is reversed.  Arguments are parenthesized so that
   expression arguments (e.g. "i + 1") group correctly — the unparenthesized
   form expanded "NUNITS - 1 - i + 1", flipping the sign of the added term.  */
#define ENDIAN_LANE_N(mode, n) \
  (BYTES_BIG_ENDIAN ? GET_MODE_NUNITS (mode) - 1 - (n) : (n))
861 | ||
/* Spec fragment that rewrites -mcpu=... for the assembler via the
   rewrite_mcpu spec function (handles big.LITTLE combined cpu names).  */
#define BIG_LITTLE_SPEC \
   " %{mcpu=*:-mcpu=%:rewrite_mcpu(%{mcpu=*:%*})}"

extern const char *aarch64_rewrite_mcpu (int argc, const char **argv);
#define BIG_LITTLE_CPU_SPEC_FUNCTIONS \
  { "rewrite_mcpu", aarch64_rewrite_mcpu },

#define ASM_CPU_SPEC \
   BIG_LITTLE_SPEC

#define EXTRA_SPEC_FUNCTIONS BIG_LITTLE_CPU_SPEC_FUNCTIONS

#define EXTRA_SPECS \
  { "asm_cpu_spec", ASM_CPU_SPEC }
876 | ||
43e9d192 | 877 | #endif /* GCC_AARCH64_H */ |