/* Definitions of target machine for GCC for IA-32.
   Copyright (C) 1988-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* The purpose of this file is to define the characteristics of the i386,
   independent of assembler syntax or operating system.

   Three other files build on this one to describe a specific assembler syntax:
   bsd386.h, att386.h, and sun386.h.

   The actual tm.h file for a particular system should include
   this file, and then the file for the appropriate assembler syntax.

   Many macros that specify assembler syntax are omitted entirely from
   this file because they really belong in the files for particular
   assemblers.  These include RP, IP, LPREFIX, PUT_OP_SIZE, USE_STAR,
   ADDR_BEG, ADDR_END, PRINT_IREG, PRINT_SCALE, PRINT_B_I_S, and many
   that start with ASM_ or end in ASM_OP.  */

/* Redefines for option macros.  */

#define TARGET_64BIT TARGET_ISA_64BIT
#define TARGET_64BIT_P(x) TARGET_ISA_64BIT_P(x)
#define TARGET_MMX TARGET_ISA_MMX
#define TARGET_MMX_P(x) TARGET_ISA_MMX_P(x)
#define TARGET_3DNOW TARGET_ISA_3DNOW
#define TARGET_3DNOW_P(x) TARGET_ISA_3DNOW_P(x)
#define TARGET_3DNOW_A TARGET_ISA_3DNOW_A
#define TARGET_3DNOW_A_P(x) TARGET_ISA_3DNOW_A_P(x)
#define TARGET_SSE TARGET_ISA_SSE
#define TARGET_SSE_P(x) TARGET_ISA_SSE_P(x)
#define TARGET_SSE2 TARGET_ISA_SSE2
#define TARGET_SSE2_P(x) TARGET_ISA_SSE2_P(x)
#define TARGET_SSE3 TARGET_ISA_SSE3
#define TARGET_SSE3_P(x) TARGET_ISA_SSE3_P(x)
#define TARGET_SSSE3 TARGET_ISA_SSSE3
#define TARGET_SSSE3_P(x) TARGET_ISA_SSSE3_P(x)
#define TARGET_SSE4_1 TARGET_ISA_SSE4_1
#define TARGET_SSE4_1_P(x) TARGET_ISA_SSE4_1_P(x)
#define TARGET_SSE4_2 TARGET_ISA_SSE4_2
#define TARGET_SSE4_2_P(x) TARGET_ISA_SSE4_2_P(x)
#define TARGET_AVX TARGET_ISA_AVX
#define TARGET_AVX_P(x) TARGET_ISA_AVX_P(x)
#define TARGET_AVX2 TARGET_ISA_AVX2
#define TARGET_AVX2_P(x) TARGET_ISA_AVX2_P(x)
#define TARGET_AVX512F TARGET_ISA_AVX512F
#define TARGET_AVX512F_P(x) TARGET_ISA_AVX512F_P(x)
#define TARGET_AVX512PF TARGET_ISA_AVX512PF
#define TARGET_AVX512PF_P(x) TARGET_ISA_AVX512PF_P(x)
#define TARGET_AVX512ER TARGET_ISA_AVX512ER
#define TARGET_AVX512ER_P(x) TARGET_ISA_AVX512ER_P(x)
#define TARGET_AVX512CD TARGET_ISA_AVX512CD
#define TARGET_AVX512CD_P(x) TARGET_ISA_AVX512CD_P(x)
#define TARGET_AVX512DQ TARGET_ISA_AVX512DQ
#define TARGET_AVX512DQ_P(x) TARGET_ISA_AVX512DQ_P(x)
#define TARGET_AVX512BW TARGET_ISA_AVX512BW
#define TARGET_AVX512BW_P(x) TARGET_ISA_AVX512BW_P(x)
#define TARGET_AVX512VL TARGET_ISA_AVX512VL
#define TARGET_AVX512VL_P(x) TARGET_ISA_AVX512VL_P(x)
#define TARGET_AVX512VBMI TARGET_ISA_AVX512VBMI
#define TARGET_AVX512VBMI_P(x) TARGET_ISA_AVX512VBMI_P(x)
#define TARGET_AVX512IFMA TARGET_ISA_AVX512IFMA
#define TARGET_AVX512IFMA_P(x) TARGET_ISA_AVX512IFMA_P(x)
#define TARGET_AVX5124FMAPS TARGET_ISA_AVX5124FMAPS
#define TARGET_AVX5124FMAPS_P(x) TARGET_ISA_AVX5124FMAPS_P(x)
#define TARGET_AVX5124VNNIW TARGET_ISA_AVX5124VNNIW
#define TARGET_AVX5124VNNIW_P(x) TARGET_ISA_AVX5124VNNIW_P(x)
#define TARGET_AVX512VBMI2 TARGET_ISA_AVX512VBMI2
#define TARGET_AVX512VBMI2_P(x) TARGET_ISA_AVX512VBMI2_P(x)
#define TARGET_AVX512VPOPCNTDQ TARGET_ISA_AVX512VPOPCNTDQ
#define TARGET_AVX512VPOPCNTDQ_P(x) TARGET_ISA_AVX512VPOPCNTDQ_P(x)
#define TARGET_AVX512VNNI TARGET_ISA_AVX512VNNI
#define TARGET_AVX512VNNI_P(x) TARGET_ISA_AVX512VNNI_P(x)
#define TARGET_AVX512BITALG TARGET_ISA_AVX512BITALG
#define TARGET_AVX512BITALG_P(x) TARGET_ISA_AVX512BITALG_P(x)
#define TARGET_AVX512VP2INTERSECT TARGET_ISA_AVX512VP2INTERSECT
#define TARGET_AVX512VP2INTERSECT_P(x) TARGET_ISA_AVX512VP2INTERSECT_P(x)
#define TARGET_FMA TARGET_ISA_FMA
#define TARGET_FMA_P(x) TARGET_ISA_FMA_P(x)
#define TARGET_SSE4A TARGET_ISA_SSE4A
#define TARGET_SSE4A_P(x) TARGET_ISA_SSE4A_P(x)
#define TARGET_FMA4 TARGET_ISA_FMA4
#define TARGET_FMA4_P(x) TARGET_ISA_FMA4_P(x)
#define TARGET_XOP TARGET_ISA_XOP
#define TARGET_XOP_P(x) TARGET_ISA_XOP_P(x)
#define TARGET_LWP TARGET_ISA_LWP
#define TARGET_LWP_P(x) TARGET_ISA_LWP_P(x)
#define TARGET_ABM TARGET_ISA_ABM
#define TARGET_ABM_P(x) TARGET_ISA_ABM_P(x)
#define TARGET_PCONFIG TARGET_ISA_PCONFIG
#define TARGET_PCONFIG_P(x) TARGET_ISA_PCONFIG_P(x)
#define TARGET_WBNOINVD TARGET_ISA_WBNOINVD
#define TARGET_WBNOINVD_P(x) TARGET_ISA_WBNOINVD_P(x)
#define TARGET_SGX TARGET_ISA_SGX
#define TARGET_SGX_P(x) TARGET_ISA_SGX_P(x)
#define TARGET_RDPID TARGET_ISA_RDPID
#define TARGET_RDPID_P(x) TARGET_ISA_RDPID_P(x)
#define TARGET_GFNI TARGET_ISA_GFNI
#define TARGET_GFNI_P(x) TARGET_ISA_GFNI_P(x)
#define TARGET_VAES TARGET_ISA_VAES
#define TARGET_VAES_P(x) TARGET_ISA_VAES_P(x)
#define TARGET_VPCLMULQDQ TARGET_ISA_VPCLMULQDQ
#define TARGET_VPCLMULQDQ_P(x) TARGET_ISA_VPCLMULQDQ_P(x)
#define TARGET_BMI TARGET_ISA_BMI
#define TARGET_BMI_P(x) TARGET_ISA_BMI_P(x)
#define TARGET_BMI2 TARGET_ISA_BMI2
#define TARGET_BMI2_P(x) TARGET_ISA_BMI2_P(x)
#define TARGET_LZCNT TARGET_ISA_LZCNT
#define TARGET_LZCNT_P(x) TARGET_ISA_LZCNT_P(x)
#define TARGET_TBM TARGET_ISA_TBM
#define TARGET_TBM_P(x) TARGET_ISA_TBM_P(x)
#define TARGET_POPCNT TARGET_ISA_POPCNT
#define TARGET_POPCNT_P(x) TARGET_ISA_POPCNT_P(x)
#define TARGET_SAHF TARGET_ISA_SAHF
#define TARGET_SAHF_P(x) TARGET_ISA_SAHF_P(x)
#define TARGET_MOVBE TARGET_ISA_MOVBE
#define TARGET_MOVBE_P(x) TARGET_ISA_MOVBE_P(x)
#define TARGET_CRC32 TARGET_ISA_CRC32
#define TARGET_CRC32_P(x) TARGET_ISA_CRC32_P(x)
#define TARGET_AES TARGET_ISA_AES
#define TARGET_AES_P(x) TARGET_ISA_AES_P(x)
#define TARGET_SHA TARGET_ISA_SHA
#define TARGET_SHA_P(x) TARGET_ISA_SHA_P(x)
#define TARGET_CLFLUSHOPT TARGET_ISA_CLFLUSHOPT
#define TARGET_CLFLUSHOPT_P(x) TARGET_ISA_CLFLUSHOPT_P(x)
#define TARGET_CLZERO TARGET_ISA_CLZERO
#define TARGET_CLZERO_P(x) TARGET_ISA_CLZERO_P(x)
#define TARGET_XSAVEC TARGET_ISA_XSAVEC
#define TARGET_XSAVEC_P(x) TARGET_ISA_XSAVEC_P(x)
#define TARGET_XSAVES TARGET_ISA_XSAVES
#define TARGET_XSAVES_P(x) TARGET_ISA_XSAVES_P(x)
#define TARGET_PCLMUL TARGET_ISA_PCLMUL
#define TARGET_PCLMUL_P(x) TARGET_ISA_PCLMUL_P(x)
#define TARGET_CMPXCHG16B TARGET_ISA_CX16
#define TARGET_CMPXCHG16B_P(x) TARGET_ISA_CX16_P(x)
#define TARGET_FSGSBASE TARGET_ISA_FSGSBASE
#define TARGET_FSGSBASE_P(x) TARGET_ISA_FSGSBASE_P(x)
#define TARGET_RDRND TARGET_ISA_RDRND
#define TARGET_RDRND_P(x) TARGET_ISA_RDRND_P(x)
#define TARGET_F16C TARGET_ISA_F16C
#define TARGET_F16C_P(x) TARGET_ISA_F16C_P(x)
#define TARGET_RTM TARGET_ISA_RTM
#define TARGET_RTM_P(x) TARGET_ISA_RTM_P(x)
#define TARGET_HLE TARGET_ISA_HLE
#define TARGET_HLE_P(x) TARGET_ISA_HLE_P(x)
#define TARGET_RDSEED TARGET_ISA_RDSEED
#define TARGET_RDSEED_P(x) TARGET_ISA_RDSEED_P(x)
#define TARGET_PRFCHW TARGET_ISA_PRFCHW
#define TARGET_PRFCHW_P(x) TARGET_ISA_PRFCHW_P(x)
#define TARGET_ADX TARGET_ISA_ADX
#define TARGET_ADX_P(x) TARGET_ISA_ADX_P(x)
#define TARGET_FXSR TARGET_ISA_FXSR
#define TARGET_FXSR_P(x) TARGET_ISA_FXSR_P(x)
#define TARGET_XSAVE TARGET_ISA_XSAVE
#define TARGET_XSAVE_P(x) TARGET_ISA_XSAVE_P(x)
#define TARGET_XSAVEOPT TARGET_ISA_XSAVEOPT
#define TARGET_XSAVEOPT_P(x) TARGET_ISA_XSAVEOPT_P(x)
#define TARGET_PREFETCHWT1 TARGET_ISA_PREFETCHWT1
#define TARGET_PREFETCHWT1_P(x) TARGET_ISA_PREFETCHWT1_P(x)
#define TARGET_CLWB TARGET_ISA_CLWB
#define TARGET_CLWB_P(x) TARGET_ISA_CLWB_P(x)
#define TARGET_MWAITX TARGET_ISA_MWAITX
#define TARGET_MWAITX_P(x) TARGET_ISA_MWAITX_P(x)
#define TARGET_PKU TARGET_ISA_PKU
#define TARGET_PKU_P(x) TARGET_ISA_PKU_P(x)
#define TARGET_SHSTK TARGET_ISA_SHSTK
#define TARGET_SHSTK_P(x) TARGET_ISA_SHSTK_P(x)
#define TARGET_MOVDIRI TARGET_ISA_MOVDIRI
#define TARGET_MOVDIRI_P(x) TARGET_ISA_MOVDIRI_P(x)
#define TARGET_MOVDIR64B TARGET_ISA_MOVDIR64B
#define TARGET_MOVDIR64B_P(x) TARGET_ISA_MOVDIR64B_P(x)
#define TARGET_WAITPKG TARGET_ISA_WAITPKG
#define TARGET_WAITPKG_P(x) TARGET_ISA_WAITPKG_P(x)
#define TARGET_CLDEMOTE TARGET_ISA_CLDEMOTE
#define TARGET_CLDEMOTE_P(x) TARGET_ISA_CLDEMOTE_P(x)
#define TARGET_PTWRITE TARGET_ISA_PTWRITE
#define TARGET_PTWRITE_P(x) TARGET_ISA_PTWRITE_P(x)
#define TARGET_AVX512BF16 TARGET_ISA_AVX512BF16
#define TARGET_AVX512BF16_P(x) TARGET_ISA_AVX512BF16_P(x)
#define TARGET_ENQCMD TARGET_ISA_ENQCMD
#define TARGET_ENQCMD_P(x) TARGET_ISA_ENQCMD_P(x)

#define TARGET_LP64 TARGET_ABI_64
#define TARGET_LP64_P(x) TARGET_ABI_64_P(x)
#define TARGET_X32 TARGET_ABI_X32
#define TARGET_X32_P(x) TARGET_ABI_X32_P(x)
#define TARGET_16BIT TARGET_CODE16
#define TARGET_16BIT_P(x) TARGET_CODE16_P(x)

#define TARGET_MMX_WITH_SSE (TARGET_64BIT && TARGET_SSE2)

#include "config/vxworks-dummy.h"

#include "config/i386/i386-opts.h"

#define MAX_STRINGOP_ALGS 4

/* Specify what algorithm to use for stringops on known size.
   When size is unknown, the UNKNOWN_SIZE alg is used.  When size is
   known at compile time or estimated via feedback, the SIZE array
   is walked in order until MAX is greater than the estimate (or -1
   means infinity).  The corresponding ALG is used then.
   When NOALIGN is true, the code guaranteeing the alignment of the
   memory block is skipped.

   For example, the initializer:
    {{256, loop}, {-1, rep_prefix_4_byte}}
   will use loop for blocks smaller than or equal to 256 bytes, and
   rep prefix will be used otherwise.  */
struct stringop_algs
{
  const enum stringop_alg unknown_size;
  const struct stringop_strategy {
    const int max;
    const enum stringop_alg alg;
    int noalign;
  } size [MAX_STRINGOP_ALGS];
};

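/* Illustrative sketch (not part of this header): a tuning table could
   describe its memcpy expansion with an initializer along these lines,
   falling back to a library call when the size is unknown, using an
   inline loop for blocks of up to 256 bytes and a rep-prefixed move for
   anything larger.  The array name and the concrete strategy values are
   made up for this example; the two elements correspond to the 32-bit
   and 64-bit variants such tables usually carry.

   static stringop_algs example_memcpy[2] = {
     {libcall, {{256, loop, false}, {-1, rep_prefix_4_byte, false}}},
     {libcall, {{256, loop, false}, {-1, rep_prefix_8_byte, false}}}};
*/
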
/* Define the specific costs for a given cpu.  NB: hard_register is used
   by TARGET_REGISTER_MOVE_COST and TARGET_MEMORY_MOVE_COST to compute
   hard register move costs by the register allocator.  Relative costs of
   pseudo register load and store versus pseudo register moves in RTL
   expressions for TARGET_RTX_COSTS can be different from relative
   costs of hard registers to get the most efficient operations with
   pseudo registers.  */

struct processor_costs {
  /* Costs used by register allocator.  integer->integer register move
     cost is 2.  */
  struct
  {
    const int movzbl_load;    /* cost of loading using movzbl */
    const int int_load[3];    /* cost of loading integer registers
                                 in QImode, HImode and SImode relative
                                 to reg-reg move (2).  */
    const int int_store[3];   /* cost of storing integer register
                                 in QImode, HImode and SImode */
    const int fp_move;        /* cost of reg,reg fld/fst */
    const int fp_load[3];     /* cost of loading FP register
                                 in SFmode, DFmode and XFmode */
    const int fp_store[3];    /* cost of storing FP register
                                 in SFmode, DFmode and XFmode */
    const int mmx_move;       /* cost of moving MMX register.  */
    const int mmx_load[2];    /* cost of loading MMX register
                                 in SImode and DImode */
    const int mmx_store[2];   /* cost of storing MMX register
                                 in SImode and DImode */
    const int xmm_move;       /* cost of moving XMM register.  */
    const int ymm_move;       /* cost of moving YMM register.  */
    const int zmm_move;       /* cost of moving ZMM register.  */
    const int sse_load[5];    /* cost of loading SSE register
                                 in 32bit, 64bit, 128bit, 256bit and 512bit */
    const int sse_store[5];   /* cost of storing SSE register
                                 in 32bit, 64bit, 128bit, 256bit and 512bit */
    const int sse_to_integer; /* cost of moving SSE register to integer.  */
    const int integer_to_sse; /* cost of moving integer register to SSE.  */
  } hard_register;
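  /* Worked example (not from the original sources): the costs above are
     relative to the integer reg-reg move cost of 2, so an int_load[2]
     (SImode) value of 6 makes the register allocator treat a 32-bit load
     from memory as three times as expensive as a register-register move.  */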

  const int add;              /* cost of an add instruction */
  const int lea;              /* cost of a lea instruction */
  const int shift_var;        /* variable shift costs */
  const int shift_const;      /* constant shift costs */
  const int mult_init[5];     /* cost of starting a multiply
                                 in QImode, HImode, SImode, DImode, TImode */
  const int mult_bit;         /* cost of multiply per each bit set */
  const int divide[5];        /* cost of a divide/mod
                                 in QImode, HImode, SImode, DImode, TImode */
  int movsx;                  /* The cost of movsx operation.  */
  int movzx;                  /* The cost of movzx operation.  */
  const int large_insn;       /* insns larger than this cost more */
  const int move_ratio;       /* The threshold of number of scalar
                                 memory-to-memory move insns.  */
  const int int_load[3];      /* cost of loading integer registers
                                 in QImode, HImode and SImode relative
                                 to reg-reg move (2).  */
  const int int_store[3];     /* cost of storing integer register
                                 in QImode, HImode and SImode */
  const int sse_load[5];      /* cost of loading SSE register
                                 in 32bit, 64bit, 128bit, 256bit and 512bit */
  const int sse_store[5];     /* cost of storing SSE register
                                 in 32bit, 64bit, 128bit, 256bit and 512bit */
  const int sse_unaligned_load[5];  /* cost of unaligned load.  */
  const int sse_unaligned_store[5]; /* cost of unaligned store.  */
  const int xmm_move, ymm_move, zmm_move;  /* cost of moving XMM, YMM and
                                              ZMM registers.  */
  const int sse_to_integer;   /* cost of moving SSE register to integer.  */
  const int gather_static, gather_per_elt;   /* Cost of gather load is computed
                                                as static + per_item * nelts.  */
  const int scatter_static, scatter_per_elt; /* Cost of scatter store is
                                                computed as static + per_item * nelts.  */
  const int l1_cache_size;    /* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;    /* size of l2 cache, in kilobytes.  */
  const int prefetch_block;   /* bytes moved to cache for prefetch.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
                                        operations.  */
  const int branch_cost;      /* Default value for BRANCH_COST.  */
  const int fadd;             /* cost of FADD and FSUB instructions.  */
  const int fmul;             /* cost of FMUL instruction.  */
  const int fdiv;             /* cost of FDIV instruction.  */
  const int fabs;             /* cost of FABS instruction.  */
  const int fchs;             /* cost of FCHS instruction.  */
  const int fsqrt;            /* cost of FSQRT instruction.  */
                              /* Specify what algorithm
                                 to use for stringops on unknown size.  */
  const int sse_op;           /* cost of cheap SSE instruction.  */
  const int addss;            /* cost of ADDSS/SD SUBSS/SD instructions.  */
  const int mulss;            /* cost of MULSS instructions.  */
  const int mulsd;            /* cost of MULSD instructions.  */
  const int fmass;            /* cost of FMASS instructions.  */
  const int fmasd;            /* cost of FMASD instructions.  */
  const int divss;            /* cost of DIVSS instructions.  */
  const int divsd;            /* cost of DIVSD instructions.  */
  const int sqrtss;           /* cost of SQRTSS instructions.  */
  const int sqrtsd;           /* cost of SQRTSD instructions.  */
  const int reassoc_int, reassoc_fp, reassoc_vec_int, reassoc_vec_fp;
                              /* Specify reassociation width for integer,
                                 fp, vector integer and vector fp
                                 operations.  Generally should correspond
                                 to number of instructions executed in
                                 parallel.  See also
                                 ix86_reassociation_width.  */
  struct stringop_algs *memcpy, *memset;
  const int cond_taken_branch_cost;    /* Cost of taken branch for vectorizer
                                          cost model.  */
  const int cond_not_taken_branch_cost;/* Cost of not taken branch for
                                          vectorizer cost model.  */

  /* The "0:0:8" label alignment specified for some processors generates
     secondary 8-byte alignment only for those label/jump/loop targets
     which have primary alignment.  */
  const char *const align_loop;   /* Loop alignment.  */
  const char *const align_jump;   /* Jump alignment.  */
  const char *const align_label;  /* Label alignment.  */
  const char *const align_func;   /* Function alignment.  */
};

extern const struct processor_costs *ix86_cost;
extern const struct processor_costs ix86_size_cost;

#define ix86_cur_cost() \
  (optimize_insn_for_size_p () ? &ix86_size_cost : ix86_cost)

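/* Illustrative sketch (not part of this header): backend code reads the
   active cost table through ix86_cur_cost (), so a cost query could look
   like the helper below; the function name is made up for the example.

   static int
   example_add_cost (void)
   {
     return ix86_cur_cost ()->add;
   }
*/
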
/* Macros used in the machine description to test the flags.  */

/* configure can arrange to change it.  */

#ifndef TARGET_CPU_DEFAULT
#define TARGET_CPU_DEFAULT PROCESSOR_GENERIC
#endif

#ifndef TARGET_FPMATH_DEFAULT
#define TARGET_FPMATH_DEFAULT \
  (TARGET_64BIT && TARGET_SSE ? FPMATH_SSE : FPMATH_387)
#endif

#ifndef TARGET_FPMATH_DEFAULT_P
#define TARGET_FPMATH_DEFAULT_P(x) \
  (TARGET_64BIT_P(x) && TARGET_SSE_P(x) ? FPMATH_SSE : FPMATH_387)
#endif

/* If the i387 is disabled or -miamcu is used, then do not return
   values in it.  */
#define TARGET_FLOAT_RETURNS_IN_80387 \
  (TARGET_FLOAT_RETURNS && TARGET_80387 && !TARGET_IAMCU)
#define TARGET_FLOAT_RETURNS_IN_80387_P(x) \
  (TARGET_FLOAT_RETURNS_P(x) && TARGET_80387_P(x) && !TARGET_IAMCU_P(x))

/* 64bit Sledgehammer mode.  For libgcc2 we make sure this is a
   compile-time constant.  */
#ifdef IN_LIBGCC2
#undef TARGET_64BIT
#ifdef __x86_64__
#define TARGET_64BIT 1
#else
#define TARGET_64BIT 0
#endif
#else
#ifndef TARGET_BI_ARCH
#undef TARGET_64BIT
#undef TARGET_64BIT_P
#if TARGET_64BIT_DEFAULT
#define TARGET_64BIT 1
#define TARGET_64BIT_P(x) 1
#else
#define TARGET_64BIT 0
#define TARGET_64BIT_P(x) 0
#endif
#endif
#endif

#define HAS_LONG_COND_BRANCH 1
#define HAS_LONG_UNCOND_BRANCH 1

#define TARGET_386 (ix86_tune == PROCESSOR_I386)
#define TARGET_486 (ix86_tune == PROCESSOR_I486)
#define TARGET_PENTIUM (ix86_tune == PROCESSOR_PENTIUM)
#define TARGET_PENTIUMPRO (ix86_tune == PROCESSOR_PENTIUMPRO)
#define TARGET_GEODE (ix86_tune == PROCESSOR_GEODE)
#define TARGET_K6 (ix86_tune == PROCESSOR_K6)
#define TARGET_ATHLON (ix86_tune == PROCESSOR_ATHLON)
#define TARGET_PENTIUM4 (ix86_tune == PROCESSOR_PENTIUM4)
#define TARGET_K8 (ix86_tune == PROCESSOR_K8)
#define TARGET_ATHLON_K8 (TARGET_K8 || TARGET_ATHLON)
#define TARGET_NOCONA (ix86_tune == PROCESSOR_NOCONA)
#define TARGET_CORE2 (ix86_tune == PROCESSOR_CORE2)
#define TARGET_NEHALEM (ix86_tune == PROCESSOR_NEHALEM)
#define TARGET_SANDYBRIDGE (ix86_tune == PROCESSOR_SANDYBRIDGE)
#define TARGET_HASWELL (ix86_tune == PROCESSOR_HASWELL)
#define TARGET_BONNELL (ix86_tune == PROCESSOR_BONNELL)
#define TARGET_SILVERMONT (ix86_tune == PROCESSOR_SILVERMONT)
#define TARGET_GOLDMONT (ix86_tune == PROCESSOR_GOLDMONT)
#define TARGET_GOLDMONT_PLUS (ix86_tune == PROCESSOR_GOLDMONT_PLUS)
#define TARGET_TREMONT (ix86_tune == PROCESSOR_TREMONT)
#define TARGET_KNL (ix86_tune == PROCESSOR_KNL)
#define TARGET_KNM (ix86_tune == PROCESSOR_KNM)
#define TARGET_SKYLAKE (ix86_tune == PROCESSOR_SKYLAKE)
#define TARGET_SKYLAKE_AVX512 (ix86_tune == PROCESSOR_SKYLAKE_AVX512)
#define TARGET_CANNONLAKE (ix86_tune == PROCESSOR_CANNONLAKE)
#define TARGET_ICELAKE_CLIENT (ix86_tune == PROCESSOR_ICELAKE_CLIENT)
#define TARGET_ICELAKE_SERVER (ix86_tune == PROCESSOR_ICELAKE_SERVER)
#define TARGET_CASCADELAKE (ix86_tune == PROCESSOR_CASCADELAKE)
#define TARGET_TIGERLAKE (ix86_tune == PROCESSOR_TIGERLAKE)
#define TARGET_COOPERLAKE (ix86_tune == PROCESSOR_COOPERLAKE)
#define TARGET_INTEL (ix86_tune == PROCESSOR_INTEL)
#define TARGET_GENERIC (ix86_tune == PROCESSOR_GENERIC)
#define TARGET_AMDFAM10 (ix86_tune == PROCESSOR_AMDFAM10)
#define TARGET_BDVER1 (ix86_tune == PROCESSOR_BDVER1)
#define TARGET_BDVER2 (ix86_tune == PROCESSOR_BDVER2)
#define TARGET_BDVER3 (ix86_tune == PROCESSOR_BDVER3)
#define TARGET_BDVER4 (ix86_tune == PROCESSOR_BDVER4)
#define TARGET_BTVER1 (ix86_tune == PROCESSOR_BTVER1)
#define TARGET_BTVER2 (ix86_tune == PROCESSOR_BTVER2)
#define TARGET_ZNVER1 (ix86_tune == PROCESSOR_ZNVER1)
#define TARGET_ZNVER2 (ix86_tune == PROCESSOR_ZNVER2)

/* Feature tests against the various tunings.  */
enum ix86_tune_indices {
#undef DEF_TUNE
#define DEF_TUNE(tune, name, selector) tune,
#include "x86-tune.def"
#undef DEF_TUNE
X86_TUNE_LAST
};

extern unsigned char ix86_tune_features[X86_TUNE_LAST];

#define TARGET_USE_LEAVE ix86_tune_features[X86_TUNE_USE_LEAVE]
#define TARGET_PUSH_MEMORY ix86_tune_features[X86_TUNE_PUSH_MEMORY]
#define TARGET_ZERO_EXTEND_WITH_AND \
  ix86_tune_features[X86_TUNE_ZERO_EXTEND_WITH_AND]
#define TARGET_UNROLL_STRLEN ix86_tune_features[X86_TUNE_UNROLL_STRLEN]
#define TARGET_BRANCH_PREDICTION_HINTS \
  ix86_tune_features[X86_TUNE_BRANCH_PREDICTION_HINTS]
#define TARGET_DOUBLE_WITH_ADD ix86_tune_features[X86_TUNE_DOUBLE_WITH_ADD]
#define TARGET_USE_SAHF ix86_tune_features[X86_TUNE_USE_SAHF]
#define TARGET_MOVX ix86_tune_features[X86_TUNE_MOVX]
#define TARGET_PARTIAL_REG_STALL ix86_tune_features[X86_TUNE_PARTIAL_REG_STALL]
#define TARGET_PARTIAL_FLAG_REG_STALL \
  ix86_tune_features[X86_TUNE_PARTIAL_FLAG_REG_STALL]
#define TARGET_LCP_STALL \
  ix86_tune_features[X86_TUNE_LCP_STALL]
#define TARGET_USE_HIMODE_FIOP ix86_tune_features[X86_TUNE_USE_HIMODE_FIOP]
#define TARGET_USE_SIMODE_FIOP ix86_tune_features[X86_TUNE_USE_SIMODE_FIOP]
#define TARGET_USE_MOV0 ix86_tune_features[X86_TUNE_USE_MOV0]
#define TARGET_USE_CLTD ix86_tune_features[X86_TUNE_USE_CLTD]
#define TARGET_USE_XCHGB ix86_tune_features[X86_TUNE_USE_XCHGB]
#define TARGET_SPLIT_LONG_MOVES ix86_tune_features[X86_TUNE_SPLIT_LONG_MOVES]
#define TARGET_READ_MODIFY_WRITE ix86_tune_features[X86_TUNE_READ_MODIFY_WRITE]
#define TARGET_READ_MODIFY ix86_tune_features[X86_TUNE_READ_MODIFY]
#define TARGET_PROMOTE_QImode ix86_tune_features[X86_TUNE_PROMOTE_QIMODE]
#define TARGET_FAST_PREFIX ix86_tune_features[X86_TUNE_FAST_PREFIX]
#define TARGET_SINGLE_STRINGOP ix86_tune_features[X86_TUNE_SINGLE_STRINGOP]
#define TARGET_MISALIGNED_MOVE_STRING_PRO_EPILOGUES \
  ix86_tune_features[X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES]
#define TARGET_QIMODE_MATH ix86_tune_features[X86_TUNE_QIMODE_MATH]
#define TARGET_HIMODE_MATH ix86_tune_features[X86_TUNE_HIMODE_MATH]
#define TARGET_PROMOTE_QI_REGS ix86_tune_features[X86_TUNE_PROMOTE_QI_REGS]
#define TARGET_PROMOTE_HI_REGS ix86_tune_features[X86_TUNE_PROMOTE_HI_REGS]
#define TARGET_SINGLE_POP ix86_tune_features[X86_TUNE_SINGLE_POP]
#define TARGET_DOUBLE_POP ix86_tune_features[X86_TUNE_DOUBLE_POP]
#define TARGET_SINGLE_PUSH ix86_tune_features[X86_TUNE_SINGLE_PUSH]
#define TARGET_DOUBLE_PUSH ix86_tune_features[X86_TUNE_DOUBLE_PUSH]
#define TARGET_INTEGER_DFMODE_MOVES \
  ix86_tune_features[X86_TUNE_INTEGER_DFMODE_MOVES]
#define TARGET_PARTIAL_REG_DEPENDENCY \
  ix86_tune_features[X86_TUNE_PARTIAL_REG_DEPENDENCY]
#define TARGET_SSE_PARTIAL_REG_DEPENDENCY \
  ix86_tune_features[X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY]
#define TARGET_SSE_UNALIGNED_LOAD_OPTIMAL \
  ix86_tune_features[X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL]
#define TARGET_SSE_UNALIGNED_STORE_OPTIMAL \
  ix86_tune_features[X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL]
#define TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL \
  ix86_tune_features[X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL]
#define TARGET_SSE_SPLIT_REGS ix86_tune_features[X86_TUNE_SSE_SPLIT_REGS]
#define TARGET_SSE_TYPELESS_STORES \
  ix86_tune_features[X86_TUNE_SSE_TYPELESS_STORES]
#define TARGET_SSE_LOAD0_BY_PXOR ix86_tune_features[X86_TUNE_SSE_LOAD0_BY_PXOR]
#define TARGET_MEMORY_MISMATCH_STALL \
  ix86_tune_features[X86_TUNE_MEMORY_MISMATCH_STALL]
#define TARGET_PROLOGUE_USING_MOVE \
  ix86_tune_features[X86_TUNE_PROLOGUE_USING_MOVE]
#define TARGET_EPILOGUE_USING_MOVE \
  ix86_tune_features[X86_TUNE_EPILOGUE_USING_MOVE]
#define TARGET_SHIFT1 ix86_tune_features[X86_TUNE_SHIFT1]
#define TARGET_USE_FFREEP ix86_tune_features[X86_TUNE_USE_FFREEP]
#define TARGET_INTER_UNIT_MOVES_TO_VEC \
  ix86_tune_features[X86_TUNE_INTER_UNIT_MOVES_TO_VEC]
#define TARGET_INTER_UNIT_MOVES_FROM_VEC \
  ix86_tune_features[X86_TUNE_INTER_UNIT_MOVES_FROM_VEC]
#define TARGET_INTER_UNIT_CONVERSIONS \
  ix86_tune_features[X86_TUNE_INTER_UNIT_CONVERSIONS]
#define TARGET_FOUR_JUMP_LIMIT ix86_tune_features[X86_TUNE_FOUR_JUMP_LIMIT]
#define TARGET_SCHEDULE ix86_tune_features[X86_TUNE_SCHEDULE]
#define TARGET_USE_BT ix86_tune_features[X86_TUNE_USE_BT]
#define TARGET_USE_INCDEC ix86_tune_features[X86_TUNE_USE_INCDEC]
#define TARGET_PAD_RETURNS ix86_tune_features[X86_TUNE_PAD_RETURNS]
#define TARGET_PAD_SHORT_FUNCTION \
  ix86_tune_features[X86_TUNE_PAD_SHORT_FUNCTION]
#define TARGET_EXT_80387_CONSTANTS \
  ix86_tune_features[X86_TUNE_EXT_80387_CONSTANTS]
#define TARGET_AVOID_VECTOR_DECODE \
  ix86_tune_features[X86_TUNE_AVOID_VECTOR_DECODE]
#define TARGET_TUNE_PROMOTE_HIMODE_IMUL \
  ix86_tune_features[X86_TUNE_PROMOTE_HIMODE_IMUL]
#define TARGET_SLOW_IMUL_IMM32_MEM \
  ix86_tune_features[X86_TUNE_SLOW_IMUL_IMM32_MEM]
#define TARGET_SLOW_IMUL_IMM8 ix86_tune_features[X86_TUNE_SLOW_IMUL_IMM8]
#define TARGET_MOVE_M1_VIA_OR ix86_tune_features[X86_TUNE_MOVE_M1_VIA_OR]
#define TARGET_NOT_UNPAIRABLE ix86_tune_features[X86_TUNE_NOT_UNPAIRABLE]
#define TARGET_NOT_VECTORMODE ix86_tune_features[X86_TUNE_NOT_VECTORMODE]
#define TARGET_USE_VECTOR_FP_CONVERTS \
  ix86_tune_features[X86_TUNE_USE_VECTOR_FP_CONVERTS]
#define TARGET_USE_VECTOR_CONVERTS \
  ix86_tune_features[X86_TUNE_USE_VECTOR_CONVERTS]
#define TARGET_SLOW_PSHUFB \
  ix86_tune_features[X86_TUNE_SLOW_PSHUFB]
#define TARGET_AVOID_4BYTE_PREFIXES \
  ix86_tune_features[X86_TUNE_AVOID_4BYTE_PREFIXES]
#define TARGET_USE_GATHER \
  ix86_tune_features[X86_TUNE_USE_GATHER]
#define TARGET_FUSE_CMP_AND_BRANCH_32 \
  ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_32]
#define TARGET_FUSE_CMP_AND_BRANCH_64 \
  ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_64]
#define TARGET_FUSE_CMP_AND_BRANCH \
  (TARGET_64BIT ? TARGET_FUSE_CMP_AND_BRANCH_64 \
   : TARGET_FUSE_CMP_AND_BRANCH_32)
#define TARGET_FUSE_CMP_AND_BRANCH_SOFLAGS \
  ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS]
#define TARGET_FUSE_ALU_AND_BRANCH \
  ix86_tune_features[X86_TUNE_FUSE_ALU_AND_BRANCH]
#define TARGET_OPT_AGU ix86_tune_features[X86_TUNE_OPT_AGU]
#define TARGET_AVOID_LEA_FOR_ADDR \
  ix86_tune_features[X86_TUNE_AVOID_LEA_FOR_ADDR]
#define TARGET_SOFTWARE_PREFETCHING_BENEFICIAL \
  ix86_tune_features[X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL]
#define TARGET_AVX128_OPTIMAL \
  ix86_tune_features[X86_TUNE_AVX128_OPTIMAL]
#define TARGET_GENERAL_REGS_SSE_SPILL \
  ix86_tune_features[X86_TUNE_GENERAL_REGS_SSE_SPILL]
#define TARGET_AVOID_MEM_OPND_FOR_CMOVE \
  ix86_tune_features[X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE]
#define TARGET_SPLIT_MEM_OPND_FOR_FP_CONVERTS \
  ix86_tune_features[X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS]
#define TARGET_ADJUST_UNROLL \
  ix86_tune_features[X86_TUNE_ADJUST_UNROLL]
#define TARGET_AVOID_FALSE_DEP_FOR_BMI \
  ix86_tune_features[X86_TUNE_AVOID_FALSE_DEP_FOR_BMI]
#define TARGET_ONE_IF_CONV_INSN \
  ix86_tune_features[X86_TUNE_ONE_IF_CONV_INSN]
#define TARGET_EMIT_VZEROUPPER \
  ix86_tune_features[X86_TUNE_EMIT_VZEROUPPER]

/* Feature tests against the various architecture variations.  */
enum ix86_arch_indices {
  X86_ARCH_CMOV,
  X86_ARCH_CMPXCHG,
  X86_ARCH_CMPXCHG8B,
  X86_ARCH_XADD,
  X86_ARCH_BSWAP,

  X86_ARCH_LAST
};

extern unsigned char ix86_arch_features[X86_ARCH_LAST];

#define TARGET_CMOV ix86_arch_features[X86_ARCH_CMOV]
#define TARGET_CMPXCHG ix86_arch_features[X86_ARCH_CMPXCHG]
#define TARGET_CMPXCHG8B ix86_arch_features[X86_ARCH_CMPXCHG8B]
#define TARGET_XADD ix86_arch_features[X86_ARCH_XADD]
#define TARGET_BSWAP ix86_arch_features[X86_ARCH_BSWAP]

/* For sane SSE instruction set generation we need the fcomi instruction.
   It is safe to enable all CMOVE instructions.  Also, the RDRAND intrinsic
   expands to a sequence that includes conditional move.  */
#define TARGET_CMOVE (TARGET_CMOV || TARGET_SSE || TARGET_RDRND)

#define TARGET_FISTTP (TARGET_SSE3 && TARGET_80387)

extern unsigned char x86_prefetch_sse;
#define TARGET_PREFETCH_SSE x86_prefetch_sse

#define ASSEMBLER_DIALECT (ix86_asm_dialect)

#define TARGET_SSE_MATH ((ix86_fpmath & FPMATH_SSE) != 0)
#define TARGET_MIX_SSE_I387 \
  ((ix86_fpmath & (FPMATH_SSE | FPMATH_387)) == (FPMATH_SSE | FPMATH_387))

#define TARGET_HARD_SF_REGS (TARGET_80387 || TARGET_MMX || TARGET_SSE)
#define TARGET_HARD_DF_REGS (TARGET_80387 || TARGET_SSE)
#define TARGET_HARD_XF_REGS (TARGET_80387)

#define TARGET_GNU_TLS (ix86_tls_dialect == TLS_DIALECT_GNU)
#define TARGET_GNU2_TLS (ix86_tls_dialect == TLS_DIALECT_GNU2)
#define TARGET_ANY_GNU_TLS (TARGET_GNU_TLS || TARGET_GNU2_TLS)
#define TARGET_SUN_TLS 0

#ifndef TARGET_64BIT_DEFAULT
#define TARGET_64BIT_DEFAULT 0
#endif
#ifndef TARGET_TLS_DIRECT_SEG_REFS_DEFAULT
#define TARGET_TLS_DIRECT_SEG_REFS_DEFAULT 0
#endif

#define TARGET_SSP_GLOBAL_GUARD (ix86_stack_protector_guard == SSP_GLOBAL)
#define TARGET_SSP_TLS_GUARD (ix86_stack_protector_guard == SSP_TLS)

/* Fence to use after loop using storent.  */

extern tree x86_mfence;
#define FENCE_FOLLOWING_MOVNT x86_mfence

/* Once GDB has been enhanced to deal with functions without frame
   pointers, we can change this to allow for elimination of
   the frame pointer in leaf functions.  */
#define TARGET_DEFAULT 0

/* Extra bits to force.  */
#define TARGET_SUBTARGET_DEFAULT 0
#define TARGET_SUBTARGET_ISA_DEFAULT 0

/* Extra bits to force on w/ 32-bit mode.  */
#define TARGET_SUBTARGET32_DEFAULT 0
#define TARGET_SUBTARGET32_ISA_DEFAULT 0

/* Extra bits to force on w/ 64-bit mode.  */
#define TARGET_SUBTARGET64_DEFAULT 0
/* Enable MMX, SSE and SSE2 by default.  */
#define TARGET_SUBTARGET64_ISA_DEFAULT \
  (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2)

/* Replace MACH-O ifdefs by in-line tests, where possible.
   (a) Macros defined in config/i386/darwin.h */
#define TARGET_MACHO 0
#define TARGET_MACHO_SYMBOL_STUBS 0
#define MACHOPIC_ATT_STUB 0
/* (b) Macros defined in config/darwin.h */
#define MACHO_DYNAMIC_NO_PIC_P 0
#define MACHOPIC_INDIRECT 0
#define MACHOPIC_PURE 0

/* For the RDOS */
#define TARGET_RDOS 0

/* For the Windows 64-bit ABI.  */
#define TARGET_64BIT_MS_ABI (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)

/* For the Windows 32-bit ABI.  */
#define TARGET_32BIT_MS_ABI (!TARGET_64BIT && ix86_cfun_abi () == MS_ABI)

/* This is re-defined by cygming.h.  */
#define TARGET_SEH 0

/* The default abi used by target.  */
#define DEFAULT_ABI SYSV_ABI

/* The default TLS segment register used by target.  */
#define DEFAULT_TLS_SEG_REG \
  (TARGET_64BIT ? ADDR_SPACE_SEG_FS : ADDR_SPACE_SEG_GS)

/* Subtargets may reset this to 1 in order to enable 96-bit long double
   with the rounding mode forced to 53 bits.  */
#define TARGET_96_ROUND_53_LONG_DOUBLE 0

#ifndef SUBTARGET_DRIVER_SELF_SPECS
# define SUBTARGET_DRIVER_SELF_SPECS ""
#endif

#define DRIVER_SELF_SPECS SUBTARGET_DRIVER_SELF_SPECS

/* -march=native handling only makes sense with compiler running on
   an x86 or x86_64 chip.  If changing this condition, also change
   the condition in driver-i386.c.  */
#if defined(__i386__) || defined(__x86_64__)
/* In driver-i386.c.  */
extern const char *host_detect_local_cpu (int argc, const char **argv);
#define EXTRA_SPEC_FUNCTIONS \
  { "local_cpu_detect", host_detect_local_cpu },
#define HAVE_LOCAL_CPU_DETECT
#endif

#if TARGET_64BIT_DEFAULT
#define OPT_ARCH64 "!m32"
#define OPT_ARCH32 "m32"
#else
#define OPT_ARCH64 "m64|mx32"
#define OPT_ARCH32 "m64|mx32:;"
#endif

/* Support for configure-time defaults of some command line options.
   The order here is important so that -march doesn't squash the
   tune or cpu values.  */
#define OPTION_DEFAULT_SPECS \
  {"tune", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \
  {"tune_32", "%{" OPT_ARCH32 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"tune_64", "%{" OPT_ARCH64 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"cpu", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \
  {"cpu_32", "%{" OPT_ARCH32 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"cpu_64", "%{" OPT_ARCH64 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"arch", "%{!march=*:-march=%(VALUE)}"}, \
  {"arch_32", "%{" OPT_ARCH32 ":%{!march=*:-march=%(VALUE)}}"}, \
  {"arch_64", "%{" OPT_ARCH64 ":%{!march=*:-march=%(VALUE)}}"},

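/* Worked example (not part of this header): configuring GCC with
   --with-arch=skylake makes the "arch" entry above expand
   "%{!march=*:-march=%(VALUE)}" to "%{!march=*:-march=skylake}", i.e. the
   driver adds -march=skylake unless the user already passed a -march=
   option.  The chosen CPU name is only an illustration.  */
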
/* Specs for the compiler proper */

#ifndef CC1_CPU_SPEC
#define CC1_CPU_SPEC_1 ""

#ifndef HAVE_LOCAL_CPU_DETECT
#define CC1_CPU_SPEC CC1_CPU_SPEC_1
#else
#define CC1_CPU_SPEC CC1_CPU_SPEC_1 \
"%{march=native:%>march=native %:local_cpu_detect(arch) \
  %{!mtune=*:%>mtune=native %:local_cpu_detect(tune)}} \
%{mtune=native:%>mtune=native %:local_cpu_detect(tune)}"
#endif
#endif

/* Target CPU builtins.  */
#define TARGET_CPU_CPP_BUILTINS() ix86_target_macros ()

/* Target Pragmas.  */
#define REGISTER_TARGET_PRAGMAS() ix86_register_pragmas ()

/* Target CPU versions for D.  */
#define TARGET_D_CPU_VERSIONS ix86_d_target_versions

#ifndef CC1_SPEC
#define CC1_SPEC "%(cc1_cpu) "
#endif

/* This macro defines names of additional specifications to put in the
   specs that can be used in various specifications like CC1_SPEC.  Its
   definition is an initializer with a subgrouping for each command option.

   Each subgrouping contains a string constant that defines the
   specification name, and a string constant that is used by the GCC
   driver program.

   Do not define this macro if it does not need to do anything.  */

#ifndef SUBTARGET_EXTRA_SPECS
#define SUBTARGET_EXTRA_SPECS
#endif

#define EXTRA_SPECS \
  { "cc1_cpu", CC1_CPU_SPEC }, \
  SUBTARGET_EXTRA_SPECS


/* Whether to allow x87 floating-point arithmetic on MODE (one of
   SFmode, DFmode and XFmode) in the current excess precision
   configuration.  */
#define X87_ENABLE_ARITH(MODE) \
  (flag_unsafe_math_optimizations \
   || flag_excess_precision == EXCESS_PRECISION_FAST \
   || (MODE) == XFmode)

/* Likewise, whether to allow direct conversions from integer mode
   IMODE (HImode, SImode or DImode) to MODE.  */
#define X87_ENABLE_FLOAT(MODE, IMODE) \
  (flag_unsafe_math_optimizations \
   || flag_excess_precision == EXCESS_PRECISION_FAST \
   || (MODE) == XFmode \
   || ((MODE) == DFmode && (IMODE) == SImode) \
   || (IMODE) == HImode)

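/* Illustrative sketch (not part of this header): a machine-description
   insn condition would typically combine these tests with the FP-unit
   check, e.g. "TARGET_80387 && X87_ENABLE_ARITH (DFmode)", so x87 DFmode
   arithmetic is only generated when the excess-precision settings above
   allow it.  The exact condition text is an assumption, not a quote from
   i386.md.  */
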
/* target machine storage layout */

#define SHORT_TYPE_SIZE 16
#define INT_TYPE_SIZE 32
#define LONG_TYPE_SIZE (TARGET_X32 ? 32 : BITS_PER_WORD)
#define POINTER_SIZE (TARGET_X32 ? 32 : BITS_PER_WORD)
#define LONG_LONG_TYPE_SIZE 64
#define FLOAT_TYPE_SIZE 32
#define DOUBLE_TYPE_SIZE 64
#define LONG_DOUBLE_TYPE_SIZE \
  (TARGET_LONG_DOUBLE_64 ? 64 : (TARGET_LONG_DOUBLE_128 ? 128 : 80))

#define WIDEST_HARDWARE_FP_SIZE 80

#if defined (TARGET_BI_ARCH) || TARGET_64BIT_DEFAULT
#define MAX_BITS_PER_WORD 64
#else
#define MAX_BITS_PER_WORD 32
#endif

/* Define this if most significant byte of a word is the lowest numbered.  */
/* That is true on the 80386.  */

#define BITS_BIG_ENDIAN 0

/* Define this if most significant byte of a word is the lowest numbered.  */
/* That is not true on the 80386.  */
#define BYTES_BIG_ENDIAN 0

/* Define this if most significant word of a multiword number is the lowest
   numbered.  */
/* Not true for 80386 */
#define WORDS_BIG_ENDIAN 0

/* Width of a word, in units (bytes).  */
#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)

#ifndef IN_LIBGCC2
#define MIN_UNITS_PER_WORD 4
#endif

/* Allocation boundary (in *bits*) for storing arguments in argument list.  */
#define PARM_BOUNDARY BITS_PER_WORD

/* Boundary (in *bits*) on which stack pointer should be aligned.  */
#define STACK_BOUNDARY (TARGET_64BIT_MS_ABI ? 128 : BITS_PER_WORD)

/* Stack boundary of the main function guaranteed by OS.  */
#define MAIN_STACK_BOUNDARY (TARGET_64BIT ? 128 : 32)

/* Minimum stack boundary.  */
#define MIN_STACK_BOUNDARY BITS_PER_WORD

/* Boundary (in *bits*) on which the stack pointer prefers to be
   aligned; the compiler cannot rely on having this alignment.  */
#define PREFERRED_STACK_BOUNDARY ix86_preferred_stack_boundary

/* It should be MIN_STACK_BOUNDARY.  But we set it to 128 bits for
   both 32bit and 64bit, to support codes that need 128 bit stack
   alignment for SSE instructions, but can't realign the stack.  */
#define PREFERRED_STACK_BOUNDARY_DEFAULT \
  (TARGET_IAMCU ? MIN_STACK_BOUNDARY : 128)

/* 1 if -mstackrealign should be turned on by default.  It will
   generate an alternate prologue and epilogue that realigns the
   runtime stack if necessary.  This supports mixing codes that keep a
   4-byte aligned stack, as specified by the i386 psABI, with codes that
   need a 16-byte aligned stack, as required by SSE instructions.  */
#define STACK_REALIGN_DEFAULT 0

/* Boundary (in *bits*) on which the incoming stack is aligned.  */
#define INCOMING_STACK_BOUNDARY ix86_incoming_stack_boundary

/* According to the Windows x64 software convention, the maximum stack
   allocatable in the prologue is 4G - 8 bytes.  Furthermore, there is a
   limited set of instructions allowed to adjust the stack pointer in the
   epilogue, forcing the use of a frame pointer for frames larger than 2 GB.
   This theoretical limit is reduced by 256, an over-estimated upper bound
   for the stack use by the prologue.
   We define only one threshold for both the prologue and the epilogue.
   When the frame size is larger than this threshold, we allocate the area
   to save SSE regs, then save them, and then allocate the remaining.  There
   is no SEH unwind info for this later allocation.  */
#define SEH_MAX_FRAME_SIZE ((2U << 30) - 256)

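/* Worked arithmetic (not in the original sources): (2U << 30) is 2**31
   = 2147483648, so SEH_MAX_FRAME_SIZE evaluates to 2147483392 (0x7FFFFF00)
   bytes, i.e. 2 GB minus the 256-byte allowance described above.  */
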
/* Target OS keeps a vector-aligned (128-bit, 16-byte) stack.  This is
   mandatory for the 64-bit ABI, and may or may not be true for other
   operating systems.  */
#define TARGET_KEEPS_VECTOR_ALIGNED_STACK TARGET_64BIT

/* Minimum allocation boundary for the code of a function.  */
#define FUNCTION_BOUNDARY 8

/* C++ stores the virtual bit in the lowest bit of function pointers.  */
#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_pfn

/* Minimum size in bits of the largest boundary to which any
   and all fundamental data types supported by the hardware
   might need to be aligned.  No data type wants to be aligned
   rounder than this.

   Pentium+ prefers DFmode values to be aligned to 64 bit boundary
   and Pentium Pro XFmode values at 128 bit boundaries.

   When increasing the maximum, also update
   TARGET_ABSOLUTE_BIGGEST_ALIGNMENT.  */

#define BIGGEST_ALIGNMENT \
  (TARGET_IAMCU ? 32 : (TARGET_AVX512F ? 512 : (TARGET_AVX ? 256 : 128)))

/* Maximum stack alignment.  */
#define MAX_STACK_ALIGNMENT MAX_OFILE_ALIGNMENT

/* Alignment value for attribute ((aligned)).  It is a constant since
   it is the part of the ABI.  We shouldn't change it with -mavx.  */
#define ATTRIBUTE_ALIGNED_VALUE (TARGET_IAMCU ? 32 : 128)

/* Decide whether a variable of mode MODE should be 128 bit aligned.  */
#define ALIGN_MODE_128(MODE) \
  ((MODE) == XFmode || SSE_REG_MODE_P (MODE))

/* The published ABIs say that doubles should be aligned on word
   boundaries, so lower the alignment for structure fields unless
   -malign-double is set.  */

/* ??? Blah -- this macro is used directly by libobjc.  Since it
   supports no vector modes, cut out the complexity and fall back
   on BIGGEST_FIELD_ALIGNMENT.  */
#ifdef IN_TARGET_LIBS
#ifdef __x86_64__
#define BIGGEST_FIELD_ALIGNMENT 128
#else
#define BIGGEST_FIELD_ALIGNMENT 32
#endif
#else
#define ADJUST_FIELD_ALIGN(FIELD, TYPE, COMPUTED) \
  x86_field_alignment ((TYPE), (COMPUTED))
#endif

/* If defined, a C expression to compute the alignment for a static
   variable.  TYPE is the data type, and ALIGN is the alignment that
   the object would ordinarily have.  The value of this macro is used
   instead of that alignment to align the object.

   If this macro is not defined, then ALIGN is used.

   One use of this macro is to increase alignment of medium-size
   data to make it all fit in fewer cache lines.  Another is to
   cause character arrays to be word-aligned so that `strcpy' calls
   that copy constants to character arrays can be done inline.  */

#define DATA_ALIGNMENT(TYPE, ALIGN) \
  ix86_data_alignment ((TYPE), (ALIGN), true)

/* Similar to DATA_ALIGNMENT, but for the cases where the ABI mandates
   some alignment increase, instead of optimization only purposes.  E.g.
   AMD x86-64 psABI says that variables with array type larger than 15 bytes
   must be aligned to 16 byte boundaries.

   If this macro is not defined, then ALIGN is used.  */

#define DATA_ABI_ALIGNMENT(TYPE, ALIGN) \
  ix86_data_alignment ((TYPE), (ALIGN), false)

/* If defined, a C expression to compute the alignment for a local
   variable.  TYPE is the data type, and ALIGN is the alignment that
   the object would ordinarily have.  The value of this macro is used
   instead of that alignment to align the object.

   If this macro is not defined, then ALIGN is used.

   One use of this macro is to increase alignment of medium-size
   data to make it all fit in fewer cache lines.  */

#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
  ix86_local_alignment ((TYPE), VOIDmode, (ALIGN))

/* If defined, a C expression to compute the alignment for stack slot.
   TYPE is the data type, MODE is the widest mode available, and ALIGN
   is the alignment that the slot would ordinarily have.  The value of
   this macro is used instead of that alignment to align the slot.

   If this macro is not defined, then ALIGN is used when TYPE is NULL;
   otherwise, LOCAL_ALIGNMENT will be used.

   One use of this macro is to set alignment of stack slot to the
   maximum alignment of all possible modes which the slot may have.  */

#define STACK_SLOT_ALIGNMENT(TYPE, MODE, ALIGN) \
  ix86_local_alignment ((TYPE), (MODE), (ALIGN))

/* If defined, a C expression to compute the alignment for a local
   variable DECL.

   If this macro is not defined, then
   LOCAL_ALIGNMENT (TREE_TYPE (DECL), DECL_ALIGN (DECL)) will be used.

   One use of this macro is to increase alignment of medium-size
   data to make it all fit in fewer cache lines.  */

#define LOCAL_DECL_ALIGNMENT(DECL) \
  ix86_local_alignment ((DECL), VOIDmode, DECL_ALIGN (DECL))

/* If defined, a C expression to compute the minimum required alignment
   for dynamic stack realignment purposes for EXP (a TYPE or DECL),
   MODE, assuming normal alignment ALIGN.

   If this macro is not defined, then (ALIGN) will be used.  */

#define MINIMUM_ALIGNMENT(EXP, MODE, ALIGN) \
  ix86_minimum_alignment ((EXP), (MODE), (ALIGN))


/* Set this nonzero if move instructions will actually fail to work
   when given unaligned data.  */
#define STRICT_ALIGNMENT 0

/* If bit field type is int, don't let it cross an int,
   and give entire struct the alignment of an int.  */
/* Required on the 386 since it doesn't have bit-field insns.  */
#define PCC_BITFIELD_TYPE_MATTERS 1

/* Standard register usage.  */

/* This processor has special stack-like registers.  See reg-stack.c
   for details.  */

#define STACK_REGS

#define IS_STACK_MODE(MODE) \
  (X87_FLOAT_MODE_P (MODE) \
   && (!(SSE_FLOAT_MODE_P (MODE) && TARGET_SSE_MATH) \
       || TARGET_MIX_SSE_I387))

/* Number of actual hardware registers.
   The hardware registers are assigned numbers for the compiler
   from 0 to just below FIRST_PSEUDO_REGISTER.
   All registers that the compiler knows about must be given numbers,
   even those that are not normally considered general registers.

   In the 80386 we give the 8 general purpose registers the numbers 0-7.
   We number the floating point registers 8-15.
   Note that registers 0-7 can be accessed as a short or int,
   while only 0-3 may be used with byte `mov' instructions.

   Reg 16 does not correspond to any hardware register, but instead
   appears in the RTL as an argument pointer prior to reload, and is
   eliminated during reloading in favor of either the stack or frame
   pointer.  */

#define FIRST_PSEUDO_REGISTER FIRST_PSEUDO_REG

/* Number of hardware registers that go into the DWARF-2 unwind info.
   If not defined, equals FIRST_PSEUDO_REGISTER.  */

#define DWARF_FRAME_REGISTERS 17

/* 1 for registers that have pervasive standard uses
   and are not available for the register allocator.
   On the 80386, the stack pointer is such, as is the arg pointer.

   REX registers are disabled for 32bit targets in
   TARGET_CONDITIONAL_REGISTER_USAGE.  */

#define FIXED_REGISTERS \
/*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \
{ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, \
/*arg,flags,fpsr,frame*/ \
  1, 1, 1, 1, \
/*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \
  0, 0, 0, 0, 0, 0, 0, 0, \
/* mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7*/ \
  0, 0, 0, 0, 0, 0, 0, 0, \
/* r8, r9, r10, r11, r12, r13, r14, r15*/ \
  0, 0, 0, 0, 0, 0, 0, 0, \
/*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \
  0, 0, 0, 0, 0, 0, 0, 0, \
/*xmm16,xmm17,xmm18,xmm19,xmm20,xmm21,xmm22,xmm23*/ \
  0, 0, 0, 0, 0, 0, 0, 0, \
/*xmm24,xmm25,xmm26,xmm27,xmm28,xmm29,xmm30,xmm31*/ \
  0, 0, 0, 0, 0, 0, 0, 0, \
/* k0, k1, k2, k3, k4, k5, k6, k7*/ \
  0, 0, 0, 0, 0, 0, 0, 0 }

/* 1 for registers not available across function calls.
   These must include the FIXED_REGISTERS and also any
   registers that can be used without being saved.
   The latter must include the registers where values are returned
   and the register where structure-value addresses are passed.
   Aside from that, you can include as many other registers as you like.

   Value is set to 1 if the register is call used unconditionally.
   Bit one is set if the register is call used on TARGET_32BIT ABI.
   Bit two is set if the register is call used on TARGET_64BIT ABI.
   Bit three is set if the register is call used on TARGET_64BIT_MS_ABI.

   Proper values are computed in TARGET_CONDITIONAL_REGISTER_USAGE.  */

#define CALL_USED_REGISTERS_MASK(IS_64BIT_MS_ABI) \
  ((IS_64BIT_MS_ABI) ? (1 << 3) : TARGET_64BIT ? (1 << 2) : (1 << 1))

#define CALL_USED_REGISTERS \
/*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \
{ 1, 1, 1, 0, 4, 4, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
/*arg,flags,fpsr,frame*/ \
  1, 1, 1, 1, \
/*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \
  1, 1, 1, 1, 1, 1, 6, 6, \
/* mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7*/ \
  1, 1, 1, 1, 1, 1, 1, 1, \
/* r8, r9, r10, r11, r12, r13, r14, r15*/ \
  1, 1, 1, 1, 2, 2, 2, 2, \
/*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \
  6, 6, 6, 6, 6, 6, 6, 6, \
/*xmm16,xmm17,xmm18,xmm19,xmm20,xmm21,xmm22,xmm23*/ \
  6, 6, 6, 6, 6, 6, 6, 6, \
/*xmm24,xmm25,xmm26,xmm27,xmm28,xmm29,xmm30,xmm31*/ \
  6, 6, 6, 6, 6, 6, 6, 6, \
/* k0, k1, k2, k3, k4, k5, k6, k7*/ \
  1, 1, 1, 1, 1, 1, 1, 1 }

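/* Worked example (not in the original sources): the value 6 for xmm6 and
   xmm7 above sets bits one and two, so those registers are call used under
   the 32-bit and 64-bit SysV ABIs but preserved across calls under the
   64-bit MS ABI, matching the Windows convention that xmm6-xmm15 are
   callee-saved.  */
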
/* Order in which to allocate registers.  Each register must be
   listed once, even those in FIXED_REGISTERS.  List frame pointer
   late and fixed registers last.  Note that, in general, we prefer
   registers listed in CALL_USED_REGISTERS, keeping the others
   available for storage of persistent values.

   The ADJUST_REG_ALLOC_ORDER actually overwrites the order,
   so this is just an empty initializer for the array.  */
3b3c6a3f 1139
eaa17c21
UB
1140#define REG_ALLOC_ORDER \
1141{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, \
1142 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, \
1143 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \
1144 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, \
1145 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75 }
3b3c6a3f 1146
5a733826 1147/* ADJUST_REG_ALLOC_ORDER is a macro which permits reg_alloc_order
162f023b 1148 to be rearranged based on a particular function. When using sse math,
03c259ad 1149 we want to allocate SSE before x87 registers and vice versa. */
3b3c6a3f 1150
5a733826 1151#define ADJUST_REG_ALLOC_ORDER x86_order_regs_for_local_alloc ()
3b3c6a3f 1152
f5316dfe 1153
7c800926
KT
1154#define OVERRIDE_ABI_FORMAT(FNDECL) ix86_call_abi_override (FNDECL)
1155
8521c414 1156#define HARD_REGNO_NREGS_HAS_PADDING(REGNO, MODE) \
7bf65250
UB
1157 (TARGET_128BIT_LONG_DOUBLE && !TARGET_64BIT \
1158 && GENERAL_REGNO_P (REGNO) \
1159 && ((MODE) == XFmode || (MODE) == XCmode))
8521c414
JM
1160
1161#define HARD_REGNO_NREGS_WITH_PADDING(REGNO, MODE) ((MODE) == XFmode ? 4 : 8)
1162
e21b52af
HL
1163#define REGMODE_NATURAL_SIZE(MODE) ix86_regmode_natural_size (MODE)
1164
95879c72
L
1165#define VALID_AVX256_REG_MODE(MODE) \
1166 ((MODE) == V32QImode || (MODE) == V16HImode || (MODE) == V8SImode \
8a0436cb
JJ
1167 || (MODE) == V4DImode || (MODE) == V2TImode || (MODE) == V8SFmode \
1168 || (MODE) == V4DFmode)
95879c72 1169
4ac005ba 1170#define VALID_AVX256_REG_OR_OI_MODE(MODE) \
ff97910d
VY
1171 (VALID_AVX256_REG_MODE (MODE) || (MODE) == OImode)
1172
3f97cb0b
AI
1173#define VALID_AVX512F_SCALAR_MODE(MODE) \
1174 ((MODE) == DImode || (MODE) == DFmode || (MODE) == SImode \
1175 || (MODE) == SFmode)
1176
1177#define VALID_AVX512F_REG_MODE(MODE) \
1178 ((MODE) == V8DImode || (MODE) == V8DFmode || (MODE) == V64QImode \
1179 || (MODE) == V16SImode || (MODE) == V16SFmode || (MODE) == V32HImode \
1180 || (MODE) == V4TImode)
1181
1182#define VALID_AVX512F_REG_OR_XI_MODE(MODE) \
1183 (VALID_AVX512F_REG_MODE (MODE) || (MODE) == XImode)
1184
05416670 1185#define VALID_AVX512VL_128_REG_MODE(MODE) \
9e4a4dd6 1186 ((MODE) == V2DImode || (MODE) == V2DFmode || (MODE) == V16QImode \
1187 || (MODE) == V4SImode || (MODE) == V4SFmode || (MODE) == V8HImode \
1188 || (MODE) == TFmode || (MODE) == V1TImode)
3f97cb0b 1189
1190#define VALID_SSE2_REG_MODE(MODE) \
1191 ((MODE) == V16QImode || (MODE) == V8HImode || (MODE) == V2DFmode \
1192 || (MODE) == V2DImode || (MODE) == DFmode)
fbe5eb6d 1193
d9a5f180 1194#define VALID_SSE_REG_MODE(MODE) \
1195 ((MODE) == V1TImode || (MODE) == TImode \
1196 || (MODE) == V4SFmode || (MODE) == V4SImode \
ce998900 1197 || (MODE) == SFmode || (MODE) == TFmode)
a7180f70 1198
47f339cf 1199#define VALID_MMX_REG_MODE_3DNOW(MODE) \
ce998900 1200 ((MODE) == V2SFmode || (MODE) == SFmode)
47f339cf 1201
d9a5f180 1202#define VALID_MMX_REG_MODE(MODE) \
879f9d0b 1203 ((MODE) == V1DImode || (MODE) == DImode \
1204 || (MODE) == V2SImode || (MODE) == SImode \
1205 || (MODE) == V4HImode || (MODE) == V8QImode)
a7180f70 1206
1207#define VALID_MASK_REG_MODE(MODE) ((MODE) == HImode || (MODE) == QImode)
1208
1209#define VALID_MASK_AVX512BW_MODE(MODE) ((MODE) == SImode || (MODE) == DImode)
1210
1211#define VALID_DFP_MODE_P(MODE) \
1212 ((MODE) == SDmode || (MODE) == DDmode || (MODE) == TDmode)
62d75179 1213
d9a5f180 1214#define VALID_FP_MODE_P(MODE) \
1215 ((MODE) == SFmode || (MODE) == DFmode || (MODE) == XFmode \
   || (MODE) == SCmode || (MODE) == DCmode || (MODE) == XCmode)
a946dd00 1217
d9a5f180 1218#define VALID_INT_MODE_P(MODE) \
1219 ((MODE) == QImode || (MODE) == HImode || (MODE) == SImode \
1220 || (MODE) == DImode \
1221 || (MODE) == CQImode || (MODE) == CHImode || (MODE) == CSImode \
1222 || (MODE) == CDImode \
1223 || (TARGET_64BIT && ((MODE) == TImode || (MODE) == CTImode \
1224 || (MODE) == TFmode || (MODE) == TCmode)))
a946dd00 1225
822eda12 1226/* Return true for modes passed in SSE registers. */
ce998900 1227#define SSE_REG_MODE_P(MODE) \
1228 ((MODE) == V1TImode || (MODE) == TImode || (MODE) == V16QImode \
1229 || (MODE) == TFmode || (MODE) == V8HImode || (MODE) == V2DFmode \
1230 || (MODE) == V2DImode || (MODE) == V4SFmode || (MODE) == V4SImode \
1231 || (MODE) == V32QImode || (MODE) == V16HImode || (MODE) == V8SImode \
8a0436cb 1232 || (MODE) == V4DImode || (MODE) == V8SFmode || (MODE) == V4DFmode \
1233 || (MODE) == V2TImode || (MODE) == V8DImode || (MODE) == V64QImode \
1234 || (MODE) == V16SImode || (MODE) == V32HImode || (MODE) == V8DFmode \
1235 || (MODE) == V16SFmode)
822eda12 1236
1237#define X87_FLOAT_MODE_P(MODE) \
1238 (TARGET_80387 && ((MODE) == SFmode || (MODE) == DFmode || (MODE) == XFmode))
85a77221 1239
1240#define SSE_FLOAT_MODE_P(MODE) \
1241 ((TARGET_SSE && (MODE) == SFmode) || (TARGET_SSE2 && (MODE) == DFmode))
1242
1243#define FMA4_VEC_FLOAT_MODE_P(MODE) \
1244 (TARGET_FMA4 && ((MODE) == V4SFmode || (MODE) == V2DFmode \
1245 || (MODE) == V8SFmode || (MODE) == V4DFmode))
9e4a4dd6 1246
/* It is possible to write patterns to move flags; but until someone
   does it, avoid CCmode copies.  */
1249#define AVOID_CCMODE_COPIES
c98f8742 1250
e075ae69 1251/* Specify the modes required to caller save a given hard regno.
787dc842 1252 We do this on i386 to prevent flags from being saved at all.
e075ae69 1253
1254 Kill any attempts to combine saving of modes. */
1255
1256#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \
1257 (CC_REGNO_P (REGNO) ? VOIDmode \
1258 : (MODE) == VOIDmode && (NREGS) != 1 ? VOIDmode \
ce998900 1259 : (MODE) == VOIDmode ? choose_hard_reg_mode ((REGNO), (NREGS), false) \
1260 : (MODE) == HImode && !((GENERAL_REGNO_P (REGNO) \
1261 && TARGET_PARTIAL_REG_STALL) \
85a77221 1262 || MASK_REGNO_P (REGNO)) ? SImode \
a60c3351 1263 : (MODE) == QImode && !(ANY_QI_REGNO_P (REGNO) \
85a77221 1264 || MASK_REGNO_P (REGNO)) ? SImode \
d2836273 1265 : (MODE))
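
/* Worked example (added for clarity): the flags register is never saved
   around calls (VOIDmode), and a QImode value living in a register with no
   byte form -- e.g. %esi on a 32-bit target -- is saved and restored as
   SImode instead.  */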
ce998900 1266
1267/* Specify the registers used for certain standard purposes.
1268 The values of these macros are register numbers. */
1269
/* On the 386 the pc register is %eip, and is not usable as a general
   register.  The ordinary mov instructions won't work.  */
1272/* #define PC_REGNUM */
1273
1274/* Base register for access to arguments of the function. */
1275#define ARG_POINTER_REGNUM ARGP_REG
1276
c98f8742 1277/* Register to use for pushing function arguments. */
05416670 1278#define STACK_POINTER_REGNUM SP_REG
c98f8742
JVA
1279
1280/* Base register for access to local variables of the function. */
05416670
UB
1281#define FRAME_POINTER_REGNUM FRAME_REG
1282#define HARD_FRAME_POINTER_REGNUM BP_REG
564d80f4 1283
05416670
UB
1284#define FIRST_INT_REG AX_REG
1285#define LAST_INT_REG SP_REG
c98f8742 1286
05416670
UB
1287#define FIRST_QI_REG AX_REG
1288#define LAST_QI_REG BX_REG
c98f8742
JVA
1289
1290/* First & last stack-like regs */
05416670
UB
1291#define FIRST_STACK_REG ST0_REG
1292#define LAST_STACK_REG ST7_REG
c98f8742 1293
05416670
UB
1294#define FIRST_SSE_REG XMM0_REG
1295#define LAST_SSE_REG XMM7_REG
fce5a9f2 1296
05416670
UB
1297#define FIRST_MMX_REG MM0_REG
1298#define LAST_MMX_REG MM7_REG
a7180f70 1299
05416670
UB
1300#define FIRST_REX_INT_REG R8_REG
1301#define LAST_REX_INT_REG R15_REG
3f3f2124 1302
05416670
UB
1303#define FIRST_REX_SSE_REG XMM8_REG
1304#define LAST_REX_SSE_REG XMM15_REG
3f3f2124 1305
05416670
UB
1306#define FIRST_EXT_REX_SSE_REG XMM16_REG
1307#define LAST_EXT_REX_SSE_REG XMM31_REG
3f97cb0b 1308
05416670
UB
1309#define FIRST_MASK_REG MASK0_REG
1310#define LAST_MASK_REG MASK7_REG
85a77221 1311
aabcd309 1312/* Override this in other tm.h files to cope with various OS lossage
6fca22eb
RH
1313 requiring a frame pointer. */
1314#ifndef SUBTARGET_FRAME_POINTER_REQUIRED
1315#define SUBTARGET_FRAME_POINTER_REQUIRED 0
1316#endif
1317
1318/* Make sure we can access arbitrary call frames. */
1319#define SETUP_FRAME_ADDRESSES() ix86_setup_frame_addresses ()
c98f8742 1320
/* Register to hold the addressing base for position independent
   code access to data items.  We don't use the PIC pointer for 64-bit
   mode.  Define the regnum to a dummy value to prevent gcc from
   pessimizing code dealing with EBX.

   To avoid clobbering a call-saved register unnecessarily, we renumber
   the pic register when possible.  The change is visible after the
   prologue has been emitted.  */
1329
e8b5eb25 1330#define REAL_PIC_OFFSET_TABLE_REGNUM (TARGET_64BIT ? R15_REG : BX_REG)
bd09bdeb 1331
bcb21886 1332#define PIC_OFFSET_TABLE_REGNUM \
d290bb1d
IE
1333 (ix86_use_pseudo_pic_reg () \
1334 ? (pic_offset_table_rtx \
1335 ? INVALID_REGNUM \
1336 : REAL_PIC_OFFSET_TABLE_REGNUM) \
1337 : INVALID_REGNUM)
c98f8742 1338
5fc0e5df
KW
1339#define GOT_SYMBOL_NAME "_GLOBAL_OFFSET_TABLE_"
1340
c51e6d85 1341/* This is overridden by <cygwin.h>. */
5e062767
DS
1342#define MS_AGGREGATE_RETURN 0
1343
61fec9ff 1344#define KEEP_AGGREGATE_RETURN_POINTER 0
c98f8742
JVA
1345\f
1346/* Define the classes of registers for register constraints in the
1347 machine description. Also define ranges of constants.
1348
1349 One of the classes must always be named ALL_REGS and include all hard regs.
1350 If there is more than one class, another class must be named NO_REGS
1351 and contain no registers.
1352
1353 The name GENERAL_REGS must be the name of a class (or an alias for
1354 another name such as ALL_REGS). This is the class of registers
1355 that is allowed by "g" or "r" in a register constraint.
1356 Also, registers outside this class are allocated only when
1357 instructions express preferences for them.
1358
1359 The classes must be numbered in nondecreasing order; that is,
1360 a larger-numbered class must never be contained completely
1361 in a smaller-numbered class. This is why CLOBBERED_REGS class
1362 is listed early, even though in 64-bit mode it contains more
1363 registers than just %eax, %ecx, %edx.
1364
1365 For any two classes, it is very desirable that there be another
1366 class that represents their union.
1367
eaa17c21 1368 The flags and fpsr registers are in no class. */
1369
enum reg_class
{
  NO_REGS,
  AREG, DREG, CREG, BREG, SIREG, DIREG,
  AD_REGS,			/* %eax/%edx for DImode */
  CLOBBERED_REGS,		/* call-clobbered integer registers */
  Q_REGS,			/* %eax %ebx %ecx %edx */
  NON_Q_REGS,			/* %esi %edi %ebp %esp */
  TLS_GOTBASE_REGS,		/* %ebx %ecx %edx %esi %edi %ebp */
  INDEX_REGS,			/* %eax %ebx %ecx %edx %esi %edi %ebp */
  LEGACY_REGS,			/* %eax %ebx %ecx %edx %esi %edi %ebp %esp */
  GENERAL_REGS,			/* %eax %ebx %ecx %edx %esi %edi %ebp %esp
				   %r8 %r9 %r10 %r11 %r12 %r13 %r14 %r15 */
  FP_TOP_REG, FP_SECOND_REG,	/* %st(0) %st(1) */
  FLOAT_REGS,
  SSE_FIRST_REG,
  NO_REX_SSE_REGS,
  SSE_REGS,
  ALL_SSE_REGS,
  MMX_REGS,
  FLOAT_SSE_REGS,
  FLOAT_INT_REGS,
  INT_SSE_REGS,
  FLOAT_INT_SSE_REGS,
  MASK_REGS,
  ALL_MASK_REGS,
  ALL_REGS,
  LIM_REG_CLASSES
};
1399
d9a5f180
GS
1400#define N_REG_CLASSES ((int) LIM_REG_CLASSES)
1401
1402#define INTEGER_CLASS_P(CLASS) \
1403 reg_class_subset_p ((CLASS), GENERAL_REGS)
1404#define FLOAT_CLASS_P(CLASS) \
1405 reg_class_subset_p ((CLASS), FLOAT_REGS)
1406#define SSE_CLASS_P(CLASS) \
3f97cb0b 1407 reg_class_subset_p ((CLASS), ALL_SSE_REGS)
d9a5f180 1408#define MMX_CLASS_P(CLASS) \
f75959a6 1409 ((CLASS) == MMX_REGS)
4ed04e93 1410#define MASK_CLASS_P(CLASS) \
d18cbbf6 1411 reg_class_subset_p ((CLASS), ALL_MASK_REGS)
d9a5f180
GS
1412#define MAYBE_INTEGER_CLASS_P(CLASS) \
1413 reg_classes_intersect_p ((CLASS), GENERAL_REGS)
1414#define MAYBE_FLOAT_CLASS_P(CLASS) \
1415 reg_classes_intersect_p ((CLASS), FLOAT_REGS)
1416#define MAYBE_SSE_CLASS_P(CLASS) \
3f97cb0b 1417 reg_classes_intersect_p ((CLASS), ALL_SSE_REGS)
d9a5f180 1418#define MAYBE_MMX_CLASS_P(CLASS) \
0bd72901 1419 reg_classes_intersect_p ((CLASS), MMX_REGS)
85a77221 1420#define MAYBE_MASK_CLASS_P(CLASS) \
d18cbbf6 1421 reg_classes_intersect_p ((CLASS), ALL_MASK_REGS)
d9a5f180
GS
1422
1423#define Q_CLASS_P(CLASS) \
1424 reg_class_subset_p ((CLASS), Q_REGS)
7c6b971d 1425
0bd72901
UB
1426#define MAYBE_NON_Q_CLASS_P(CLASS) \
1427 reg_classes_intersect_p ((CLASS), NON_Q_REGS)
1428
43f3a59d 1429/* Give names of register classes as strings for dump file. */
1430
#define REG_CLASS_NAMES \
{ "NO_REGS", \
  "AREG", "DREG", "CREG", "BREG", \
  "SIREG", "DIREG", \
  "AD_REGS", \
  "CLOBBERED_REGS", \
  "Q_REGS", "NON_Q_REGS", \
  "TLS_GOTBASE_REGS", \
  "INDEX_REGS", \
  "LEGACY_REGS", \
  "GENERAL_REGS", \
  "FP_TOP_REG", "FP_SECOND_REG", \
  "FLOAT_REGS", \
  "SSE_FIRST_REG", \
  "NO_REX_SSE_REGS", \
  "SSE_REGS", \
  "ALL_SSE_REGS", \
  "MMX_REGS", \
  "FLOAT_SSE_REGS", \
  "FLOAT_INT_REGS", \
  "INT_SSE_REGS", \
  "FLOAT_INT_SSE_REGS", \
  "MASK_REGS", \
  "ALL_MASK_REGS", \
  "ALL_REGS" }
1456
1457/* Define which registers fit in which classes. This is an initializer
1458 for a vector of HARD_REG_SET of length N_REG_CLASSES.
1459
1460 Note that CLOBBERED_REGS are calculated by
1461 TARGET_CONDITIONAL_REGISTER_USAGE. */
c98f8742 1462
#define REG_CLASS_CONTENTS						\
{     { 0x0,        0x0,        0x0 },	  /* NO_REGS */			\
      { 0x01,       0x0,        0x0 },	  /* AREG */			\
      { 0x02,       0x0,        0x0 },	  /* DREG */			\
      { 0x04,       0x0,        0x0 },	  /* CREG */			\
      { 0x08,       0x0,        0x0 },	  /* BREG */			\
      { 0x10,       0x0,        0x0 },	  /* SIREG */			\
      { 0x20,       0x0,        0x0 },	  /* DIREG */			\
      { 0x03,       0x0,        0x0 },	  /* AD_REGS */			\
      { 0x07,       0x0,        0x0 },	  /* CLOBBERED_REGS */		\
      { 0x0f,       0x0,        0x0 },	  /* Q_REGS */			\
      { 0x900f0,    0x0,        0x0 },	  /* NON_Q_REGS */		\
      { 0x7e,       0xff0,      0x0 },	  /* TLS_GOTBASE_REGS */	\
      { 0x7f,       0xff0,      0x0 },	  /* INDEX_REGS */		\
      { 0x900ff,    0x0,        0x0 },	  /* LEGACY_REGS */		\
      { 0x900ff,    0xff0,      0x0 },	  /* GENERAL_REGS */		\
      { 0x100,      0x0,        0x0 },	  /* FP_TOP_REG */		\
      { 0x200,      0x0,        0x0 },	  /* FP_SECOND_REG */		\
      { 0xff00,     0x0,        0x0 },	  /* FLOAT_REGS */		\
      { 0x100000,   0x0,        0x0 },	  /* SSE_FIRST_REG */		\
      { 0xff00000,  0x0,        0x0 },	  /* NO_REX_SSE_REGS */		\
      { 0xff00000,  0xff000,    0x0 },	  /* SSE_REGS */		\
      { 0xff00000,  0xfffff000, 0xf },	  /* ALL_SSE_REGS */		\
      { 0xf0000000, 0xf,        0x0 },	  /* MMX_REGS */		\
      { 0xff0ff00,  0xfffff000, 0xf },	  /* FLOAT_SSE_REGS */		\
      { 0x9ffff,    0xff0,      0x0 },	  /* FLOAT_INT_REGS */		\
      { 0xff900ff,  0xfffffff0, 0xf },	  /* INT_SSE_REGS */		\
      { 0xff9ffff,  0xfffffff0, 0xf },	  /* FLOAT_INT_SSE_REGS */	\
      { 0x0,        0x0,        0xfe0 },  /* MASK_REGS */		\
      { 0x0,        0x0,        0xff0 },  /* ALL_MASK_REGS */		\
      { 0xffffffff, 0xffffffff, 0xfff }	  /* ALL_REGS */		\
}
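
/* Illustrative sketch (added for exposition, not part of GCC): each
   initializer above is a HARD_REG_SET spelled as three 32-bit words indexed
   by hard register number.  %xmm0 is hard register 20, so it is bit 20 of
   word 0 -- exactly the 0x100000 in SSE_FIRST_REG -- and %k0 is hard
   register 68, i.e. bit 4 of word 2.  */
#if 0
static bool
example_class_contains_p (const unsigned int contents[3], unsigned int regno)
{
  return (contents[regno / 32] >> (regno % 32)) & 1;
}
#endif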
1495
1496/* The same information, inverted:
1497 Return the class number of the smallest class containing
1498 reg number REGNO. This could be a conditional expression
1499 or could index an array. */
1500
1a6e82b8 1501#define REGNO_REG_CLASS(REGNO) (regclass_map[(REGNO)])
c98f8742 1502
42db504c
SB
1503/* When this hook returns true for MODE, the compiler allows
1504 registers explicitly used in the rtl to be used as spill registers
1505 but prevents the compiler from extending the lifetime of these
1506 registers. */
1507#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P hook_bool_mode_true
c98f8742 1508
fc27f749 1509#define QI_REG_P(X) (REG_P (X) && QI_REGNO_P (REGNO (X)))
05416670
UB
1510#define QI_REGNO_P(N) IN_RANGE ((N), FIRST_QI_REG, LAST_QI_REG)
1511
1512#define LEGACY_INT_REG_P(X) (REG_P (X) && LEGACY_INT_REGNO_P (REGNO (X)))
1513#define LEGACY_INT_REGNO_P(N) (IN_RANGE ((N), FIRST_INT_REG, LAST_INT_REG))
1514
1515#define REX_INT_REG_P(X) (REG_P (X) && REX_INT_REGNO_P (REGNO (X)))
1516#define REX_INT_REGNO_P(N) \
1517 IN_RANGE ((N), FIRST_REX_INT_REG, LAST_REX_INT_REG)
3f3f2124 1518
58b0b34c 1519#define GENERAL_REG_P(X) (REG_P (X) && GENERAL_REGNO_P (REGNO (X)))
fc27f749 1520#define GENERAL_REGNO_P(N) \
58b0b34c 1521 (LEGACY_INT_REGNO_P (N) || REX_INT_REGNO_P (N))
3f3f2124 1522
fc27f749
UB
1523#define ANY_QI_REG_P(X) (REG_P (X) && ANY_QI_REGNO_P (REGNO (X)))
1524#define ANY_QI_REGNO_P(N) \
1525 (TARGET_64BIT ? GENERAL_REGNO_P (N) : QI_REGNO_P (N))
3f3f2124 1526
66aaf16f
UB
1527#define STACK_REG_P(X) (REG_P (X) && STACK_REGNO_P (REGNO (X)))
1528#define STACK_REGNO_P(N) IN_RANGE ((N), FIRST_STACK_REG, LAST_STACK_REG)
fc27f749 1529
fc27f749 1530#define SSE_REG_P(X) (REG_P (X) && SSE_REGNO_P (REGNO (X)))
fb84c7a0
UB
1531#define SSE_REGNO_P(N) \
1532 (IN_RANGE ((N), FIRST_SSE_REG, LAST_SSE_REG) \
3f97cb0b
AI
1533 || REX_SSE_REGNO_P (N) \
1534 || EXT_REX_SSE_REGNO_P (N))
3f3f2124 1535
4977bab6 1536#define REX_SSE_REGNO_P(N) \
fb84c7a0 1537 IN_RANGE ((N), FIRST_REX_SSE_REG, LAST_REX_SSE_REG)
4977bab6 1538
0a48088a
IT
1539#define EXT_REX_SSE_REG_P(X) (REG_P (X) && EXT_REX_SSE_REGNO_P (REGNO (X)))
1540
3f97cb0b
AI
1541#define EXT_REX_SSE_REGNO_P(N) \
1542 IN_RANGE ((N), FIRST_EXT_REX_SSE_REG, LAST_EXT_REX_SSE_REG)
1543
05416670
UB
1544#define ANY_FP_REG_P(X) (REG_P (X) && ANY_FP_REGNO_P (REGNO (X)))
1545#define ANY_FP_REGNO_P(N) (STACK_REGNO_P (N) || SSE_REGNO_P (N))
3f97cb0b 1546
9e4a4dd6 1547#define MASK_REG_P(X) (REG_P (X) && MASK_REGNO_P (REGNO (X)))
85a77221 1548#define MASK_REGNO_P(N) IN_RANGE ((N), FIRST_MASK_REG, LAST_MASK_REG)
e21b52af 1549#define MASK_PAIR_REGNO_P(N) ((((N) - FIRST_MASK_REG) & 1) == 0)
446988df 1550
fc27f749 1551#define MMX_REG_P(X) (REG_P (X) && MMX_REGNO_P (REGNO (X)))
fb84c7a0 1552#define MMX_REGNO_P(N) IN_RANGE ((N), FIRST_MMX_REG, LAST_MMX_REG)
fce5a9f2 1553
e075ae69 1554#define CC_REG_P(X) (REG_P (X) && CC_REGNO_P (REGNO (X)))
adb67ffb 1555#define CC_REGNO_P(X) ((X) == FLAGS_REG)
e075ae69 1556
5fbb13a7
KY
1557#define MOD4_SSE_REG_P(X) (REG_P (X) && MOD4_SSE_REGNO_P (REGNO (X)))
1558#define MOD4_SSE_REGNO_P(N) ((N) == XMM0_REG \
1559 || (N) == XMM4_REG \
1560 || (N) == XMM8_REG \
1561 || (N) == XMM12_REG \
1562 || (N) == XMM16_REG \
1563 || (N) == XMM20_REG \
1564 || (N) == XMM24_REG \
1565 || (N) == XMM28_REG)
1566
05416670
UB
1567/* First floating point reg */
1568#define FIRST_FLOAT_REG FIRST_STACK_REG
1569#define STACK_TOP_P(X) (REG_P (X) && REGNO (X) == FIRST_FLOAT_REG)
1570
1571#define GET_SSE_REGNO(N) \
1572 ((N) < 8 ? FIRST_SSE_REG + (N) \
1573 : (N) < 16 ? FIRST_REX_SSE_REG + (N) - 8 \
1574 : FIRST_EXT_REX_SSE_REG + (N) - 16)
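
/* Worked example (added for clarity): GET_SSE_REGNO (2) is the hard
   register number of %xmm2, GET_SSE_REGNO (10) maps to %xmm10 via
   FIRST_REX_SSE_REG, and GET_SSE_REGNO (20) maps to %xmm20 via
   FIRST_EXT_REX_SSE_REG.  */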
05416670 1575
c98f8742
JVA
1576/* The class value for index registers, and the one for base regs. */
1577
1578#define INDEX_REG_CLASS INDEX_REGS
1579#define BASE_REG_CLASS GENERAL_REGS
c98f8742
JVA
1580\f
1581/* Stack layout; function entry, exit and calling. */
1582
1583/* Define this if pushing a word on the stack
1584 makes the stack pointer a smaller address. */
62f9f30b 1585#define STACK_GROWS_DOWNWARD 1
c98f8742 1586
a4d05547 1587/* Define this to nonzero if the nominal address of the stack frame
c98f8742
JVA
1588 is at the high-address end of the local variables;
1589 that is, each additional local variable allocated
1590 goes at a more negative offset in the frame. */
f62c8a5c 1591#define FRAME_GROWS_DOWNWARD 1
c98f8742 1592
7b4df2bf 1593#define PUSH_ROUNDING(BYTES) ix86_push_rounding (BYTES)
8c2b2fae
UB
1594
1595/* If defined, the maximum amount of space required for outgoing arguments
1596 will be computed and placed into the variable `crtl->outgoing_args_size'.
1597 No space will be pushed onto the stack for each call; instead, the
1598 function prologue should increase the stack frame size by this amount.
1599
   In 32-bit mode enabling argument accumulation results in about 5% code size
   growth because move instructions are less compact than push.  In 64-bit
   mode the difference is less drastic but visible.

   FIXME: Unlike earlier implementations, the size of unwind info seems to
   actually grow with accumulation.  Is that because accumulated args
   unwind info became unnecessarily bloated?
1607
1608 With the 64-bit MS ABI, we can generate correct code with or without
1609 accumulated args, but because of OUTGOING_REG_PARM_STACK_SPACE the code
1610 generated without accumulated args is terrible.
1611
1612 If stack probes are required, the space used for large function
1613 arguments on the stack must also be probed, so enable
1614 -maccumulate-outgoing-args so this happens in the prologue.
1615
   We must use argument accumulation in an interrupt function if the stack
   may be realigned to avoid DRAP.  */
f73ad30e 1618
6c6094f1 1619#define ACCUMULATE_OUTGOING_ARGS \
1620 ((TARGET_ACCUMULATE_OUTGOING_ARGS \
1621 && optimize_function_for_speed_p (cfun)) \
1622 || (cfun->machine->func_type != TYPE_NORMAL \
1623 && crtl->stack_realign_needed) \
1624 || TARGET_STACK_PROBE \
1625 || TARGET_64BIT_MS_ABI \
ff734e26 1626 || (TARGET_MACHO && crtl->profile))
f73ad30e
JH
1627
1628/* If defined, a C expression whose value is nonzero when we want to use PUSH
1629 instructions to pass outgoing arguments. */
1630
1631#define PUSH_ARGS (TARGET_PUSH_ARGS && !ACCUMULATE_OUTGOING_ARGS)
1632
2da4124d
L
1633/* We want the stack and args grow in opposite directions, even if
1634 PUSH_ARGS is 0. */
1635#define PUSH_ARGS_REVERSED 1
1636
c98f8742
JVA
1637/* Offset of first parameter from the argument pointer register value. */
1638#define FIRST_PARM_OFFSET(FNDECL) 0
1639
a7180f70
BS
1640/* Define this macro if functions should assume that stack space has been
1641 allocated for arguments even when their values are passed in registers.
1642
1643 The value of this macro is the size, in bytes, of the area reserved for
1644 arguments passed in registers for the function represented by FNDECL.
1645
1646 This space can be allocated by the caller, or be a part of the
1647 machine-dependent stack frame: `OUTGOING_REG_PARM_STACK_SPACE' says
1648 which. */
7c800926
KT
1649#define REG_PARM_STACK_SPACE(FNDECL) ix86_reg_parm_stack_space (FNDECL)
1650
4ae8027b 1651#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) \
6510e8bb 1652 (TARGET_64BIT && ix86_function_type_abi (FNTYPE) == MS_ABI)
7c800926 1653
c98f8742
JVA
1654/* Define how to find the value returned by a library function
1655 assuming the value has mode MODE. */
1656
4ae8027b 1657#define LIBCALL_VALUE(MODE) ix86_libcall_value (MODE)
c98f8742 1658
e9125c09
TW
1659/* Define the size of the result block used for communication between
1660 untyped_call and untyped_return. The block contains a DImode value
1661 followed by the block used by fnsave and frstor. */
1662
1663#define APPLY_RESULT_SIZE (8+108)
1664
b08de47e 1665/* 1 if N is a possible register number for function argument passing. */
53c17031 1666#define FUNCTION_ARG_REGNO_P(N) ix86_function_arg_regno_p (N)
c98f8742
JVA
1667
1668/* Define a data type for recording info about an argument list
1669 during the scan of that argument list. This data type should
1670 hold all necessary information about the function itself
1671 and about the args processed so far, enough to enable macros
b08de47e 1672 such as FUNCTION_ARG to determine where the next arg should go. */
c98f8742 1673
typedef struct ix86_args {
  int words;			/* # words passed so far */
  int nregs;			/* # registers available for passing */
  int regno;			/* next available register number */
  int fastcall;			/* fastcall or thiscall calling convention
				   is used */
  int sse_words;		/* # sse words passed so far */
  int sse_nregs;		/* # sse registers available for passing */
  int warn_avx512f;		/* True when we want to warn
				   about AVX512F ABI.  */
  int warn_avx;			/* True when we want to warn about AVX ABI.  */
  int warn_sse;			/* True when we want to warn about SSE ABI.  */
  int warn_mmx;			/* True when we want to warn about MMX ABI.  */
  int warn_empty;		/* True when we want to warn about empty classes
				   passing ABI change.  */
  int sse_regno;		/* next available sse register number */
  int mmx_words;		/* # mmx words passed so far */
  int mmx_nregs;		/* # mmx registers available for passing */
  int mmx_regno;		/* next available mmx register number */
  int maybe_vaarg;		/* true for calls to possibly variadic
				   functions.  */
  int caller;			/* true if it is the caller.  */
  int float_in_sse;		/* Set to 1 or 2 for 32-bit targets if
				   SFmode/DFmode arguments should be passed
				   in SSE registers.  Otherwise 0.  */
  int stdarg;			/* Set to 1 if function is stdarg.  */
  enum calling_abi call_abi;	/* Set to SYSV_ABI for sysv abi.  Otherwise
				   MS_ABI for ms abi.  */
  tree decl;			/* Callee decl.  */
} CUMULATIVE_ARGS;
c98f8742
JVA
1703
1704/* Initialize a variable CUM of type CUMULATIVE_ARGS
1705 for a call to a function whose data type is FNTYPE.
b08de47e 1706 For a library call, FNTYPE is 0. */
c98f8742 1707
0f6937fe 1708#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
2767a7f2
L
1709 init_cumulative_args (&(CUM), (FNTYPE), (LIBNAME), (FNDECL), \
1710 (N_NAMED_ARGS) != -1)
c98f8742 1711
c98f8742
JVA
1712/* Output assembler code to FILE to increment profiler label # LABELNO
1713 for profiling a function entry. */
1714
1a6e82b8
UB
1715#define FUNCTION_PROFILER(FILE, LABELNO) \
1716 x86_function_profiler ((FILE), (LABELNO))
a5fa1ecd
JH
1717
1718#define MCOUNT_NAME "_mcount"
1719
3c5273a9
KT
1720#define MCOUNT_NAME_BEFORE_PROLOGUE "__fentry__"
1721
a5fa1ecd 1722#define PROFILE_COUNT_REGISTER "edx"
c98f8742
JVA
1723
1724/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
1725 the stack pointer does not matter. The value is tested only in
1726 functions that have frame pointers.
1727 No definition is equivalent to always zero. */
/* Note that on the 386 it might be more efficient not to define this since
   we have to restore it ourselves from the frame pointer, in order to
   use pop.  */
1731
1732#define EXIT_IGNORE_STACK 1
1733
f8071c05
L
1734/* Define this macro as a C expression that is nonzero for registers
1735 used by the epilogue or the `return' pattern. */
1736
1737#define EPILOGUE_USES(REGNO) ix86_epilogue_uses (REGNO)
1738
c98f8742
JVA
1739/* Output assembler code for a block containing the constant parts
1740 of a trampoline, leaving space for the variable parts. */
1741
a269a03c 1742/* On the 386, the trampoline contains two instructions:
c98f8742 1743 mov #STATIC,ecx
a269a03c
JC
1744 jmp FUNCTION
1745 The trampoline is generated entirely at runtime. The operand of JMP
1746 is the address of FUNCTION relative to the instruction following the
1747 JMP (which is 5 bytes long). */
c98f8742
JVA
1748
1749/* Length in units of the trampoline for entering a nested function. */
1750
6514899f 1751#define TRAMPOLINE_SIZE (TARGET_64BIT ? 28 : 14)
c98f8742
JVA
1752\f
1753/* Definitions for register eliminations.
1754
1755 This is an array of structures. Each structure initializes one pair
1756 of eliminable registers. The "from" register number is given first,
1757 followed by "to". Eliminations of the same "from" register are listed
1758 in order of preference.
1759
afc2cd05
NC
1760 There are two registers that can always be eliminated on the i386.
1761 The frame pointer and the arg pointer can be replaced by either the
1762 hard frame pointer or to the stack pointer, depending upon the
1763 circumstances. The hard frame pointer is not used before reload and
1764 so it is not eligible for elimination. */
c98f8742 1765
564d80f4
JH
1766#define ELIMINABLE_REGS \
1767{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
1768 { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
1769 { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
1770 { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} \
c98f8742 1771
c98f8742
JVA
1772/* Define the offset between two registers, one to be eliminated, and the other
1773 its replacement, at the start of a routine. */
1774
d9a5f180
GS
1775#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
1776 ((OFFSET) = ix86_initial_elimination_offset ((FROM), (TO)))
c98f8742
JVA
1777\f
1778/* Addressing modes, and classification of registers for them. */
1779
c98f8742
JVA
1780/* Macros to check register numbers against specific register classes. */
1781
1782/* These assume that REGNO is a hard or pseudo reg number.
1783 They give nonzero only if REGNO is a hard reg of the suitable class
1784 or a pseudo reg currently allocated to a suitable hard reg.
1785 Since they use reg_renumber, they are safe only once reg_renumber
aeb9f7cf
SB
1786 has been allocated, which happens in reginfo.c during register
1787 allocation. */
c98f8742 1788
3f3f2124
JH
1789#define REGNO_OK_FOR_INDEX_P(REGNO) \
1790 ((REGNO) < STACK_POINTER_REGNUM \
fb84c7a0
UB
1791 || REX_INT_REGNO_P (REGNO) \
1792 || (unsigned) reg_renumber[(REGNO)] < STACK_POINTER_REGNUM \
1793 || REX_INT_REGNO_P ((unsigned) reg_renumber[(REGNO)]))
c98f8742 1794
3f3f2124 1795#define REGNO_OK_FOR_BASE_P(REGNO) \
fb84c7a0 1796 (GENERAL_REGNO_P (REGNO) \
3f3f2124
JH
1797 || (REGNO) == ARG_POINTER_REGNUM \
1798 || (REGNO) == FRAME_POINTER_REGNUM \
fb84c7a0 1799 || GENERAL_REGNO_P ((unsigned) reg_renumber[(REGNO)]))
c98f8742 1800
c98f8742
JVA
1801/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
1802 and check its validity for a certain class.
1803 We have two alternate definitions for each of them.
1804 The usual definition accepts all pseudo regs; the other rejects
1805 them unless they have been allocated suitable hard regs.
1806 The symbol REG_OK_STRICT causes the latter definition to be used.
1807
1808 Most source files want to accept pseudo regs in the hope that
1809 they will get allocated to the class that the insn wants them to be in.
1810 Source files for reload pass need to be strict.
1811 After reload, it makes no difference, since pseudo regs have
1812 been eliminated by then. */
1813
c98f8742 1814
ff482c8d 1815/* Non strict versions, pseudos are ok. */
3b3c6a3f
MM
1816#define REG_OK_FOR_INDEX_NONSTRICT_P(X) \
1817 (REGNO (X) < STACK_POINTER_REGNUM \
fb84c7a0 1818 || REX_INT_REGNO_P (REGNO (X)) \
c98f8742
JVA
1819 || REGNO (X) >= FIRST_PSEUDO_REGISTER)
1820
3b3c6a3f 1821#define REG_OK_FOR_BASE_NONSTRICT_P(X) \
fb84c7a0 1822 (GENERAL_REGNO_P (REGNO (X)) \
3b3c6a3f 1823 || REGNO (X) == ARG_POINTER_REGNUM \
3f3f2124 1824 || REGNO (X) == FRAME_POINTER_REGNUM \
3b3c6a3f 1825 || REGNO (X) >= FIRST_PSEUDO_REGISTER)
c98f8742 1826
3b3c6a3f
MM
1827/* Strict versions, hard registers only */
1828#define REG_OK_FOR_INDEX_STRICT_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
1829#define REG_OK_FOR_BASE_STRICT_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
c98f8742 1830
3b3c6a3f 1831#ifndef REG_OK_STRICT
d9a5f180
GS
1832#define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_INDEX_NONSTRICT_P (X)
1833#define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_NONSTRICT_P (X)
3b3c6a3f
MM
1834
1835#else
d9a5f180
GS
1836#define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_INDEX_STRICT_P (X)
1837#define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_STRICT_P (X)
c98f8742
JVA
1838#endif
1839
331d9186 1840/* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
c98f8742
JVA
1841 that is a valid memory address for an instruction.
1842 The MODE argument is the machine mode for the MEM expression
1843 that wants to use this address.
1844
331d9186 1845 The other macros defined here are used only in TARGET_LEGITIMATE_ADDRESS_P,
c98f8742
JVA
1846 except for CONSTANT_ADDRESS_P which is usually machine-independent.
1847
1848 See legitimize_pic_address in i386.c for details as to what
1849 constitutes a legitimate address when -fpic is used. */
1850
1851#define MAX_REGS_PER_ADDRESS 2
1852
f996902d 1853#define CONSTANT_ADDRESS_P(X) constant_address_p (X)
c98f8742 1854
b949ea8b
JW
1855/* If defined, a C expression to determine the base term of address X.
1856 This macro is used in only one place: `find_base_term' in alias.c.
1857
1858 It is always safe for this macro to not be defined. It exists so
1859 that alias analysis can understand machine-dependent addresses.
1860
1861 The typical use of this macro is to handle addresses containing
1862 a label_ref or symbol_ref within an UNSPEC. */
1863
d9a5f180 1864#define FIND_BASE_TERM(X) ix86_find_base_term (X)
b949ea8b 1865
c98f8742 1866/* Nonzero if the constant value X is a legitimate general operand
fce5a9f2 1867 when generating PIC code. It is given that flag_pic is on and
c98f8742
JVA
1868 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
1869
f996902d 1870#define LEGITIMATE_PIC_OPERAND_P(X) legitimate_pic_operand_p (X)
c98f8742
JVA
1871
1872#define SYMBOLIC_CONST(X) \
d9a5f180
GS
1873 (GET_CODE (X) == SYMBOL_REF \
1874 || GET_CODE (X) == LABEL_REF \
1875 || (GET_CODE (X) == CONST && symbolic_reference_mentioned_p (X)))
c98f8742 1876\f
b08de47e
MM
1877/* Max number of args passed in registers. If this is more than 3, we will
1878 have problems with ebx (register #4), since it is a caller save register and
1879 is also used as the pic register in ELF. So for now, don't allow more than
1880 3 registers to be passed in registers. */
1881
7c800926
KT
1882/* Abi specific values for REGPARM_MAX and SSE_REGPARM_MAX */
1883#define X86_64_REGPARM_MAX 6
72fa3605 1884#define X86_64_MS_REGPARM_MAX 4
7c800926 1885
72fa3605 1886#define X86_32_REGPARM_MAX 3
7c800926 1887
4ae8027b 1888#define REGPARM_MAX \
2824d6e5
UB
1889 (TARGET_64BIT \
1890 ? (TARGET_64BIT_MS_ABI \
1891 ? X86_64_MS_REGPARM_MAX \
1892 : X86_64_REGPARM_MAX) \
4ae8027b 1893 : X86_32_REGPARM_MAX)
d2836273 1894
72fa3605
UB
1895#define X86_64_SSE_REGPARM_MAX 8
1896#define X86_64_MS_SSE_REGPARM_MAX 4
1897
b6010cab 1898#define X86_32_SSE_REGPARM_MAX (TARGET_SSE ? (TARGET_MACHO ? 4 : 3) : 0)
72fa3605 1899
4ae8027b 1900#define SSE_REGPARM_MAX \
2824d6e5
UB
1901 (TARGET_64BIT \
1902 ? (TARGET_64BIT_MS_ABI \
1903 ? X86_64_MS_SSE_REGPARM_MAX \
1904 : X86_64_SSE_REGPARM_MAX) \
4ae8027b 1905 : X86_32_SSE_REGPARM_MAX)
1906
1907#define MMX_REGPARM_MAX (TARGET_64BIT ? 0 : (TARGET_MMX ? 3 : 0))
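
/* Summary example (added for clarity): with the definitions above, the
   64-bit SysV ABI passes up to 6 integer and 8 SSE arguments in registers,
   the 64-bit MS ABI up to 4 of each, and the 32-bit ABI up to 3 integer
   arguments plus, when SSE is enabled, 3 SSE arguments (4 on Mach-O).  */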
c98f8742
JVA
1908\f
1909/* Specify the machine mode that this machine uses
1910 for the index in the tablejump instruction. */
dc4d7240 1911#define CASE_VECTOR_MODE \
6025b127 1912 (!TARGET_LP64 || (flag_pic && ix86_cmodel != CM_LARGE_PIC) ? SImode : DImode)
c98f8742 1913
c98f8742
JVA
1914/* Define this as 1 if `char' should by default be signed; else as 0. */
1915#define DEFAULT_SIGNED_CHAR 1
1916
1917/* Max number of bytes we can move from memory to memory
1918 in one reasonably fast instruction. */
65d9c0ab
JH
1919#define MOVE_MAX 16
1920
1921/* MOVE_MAX_PIECES is the number of bytes at a time which we can
1922 move efficiently, as opposed to MOVE_MAX which is the maximum
df7ec09f
L
1923 number of bytes we can move with a single instruction.
1924
1925 ??? We should use TImode in 32-bit mode and use OImode or XImode
1926 if they are available. But since by_pieces_ninsns determines the
1927 widest mode with MAX_FIXED_MODE_SIZE, we can only use TImode in
1928 64-bit mode. */
1929#define MOVE_MAX_PIECES \
1930 ((TARGET_64BIT \
1931 && TARGET_SSE2 \
1932 && TARGET_SSE_UNALIGNED_LOAD_OPTIMAL \
1933 && TARGET_SSE_UNALIGNED_STORE_OPTIMAL) \
1934 ? GET_MODE_SIZE (TImode) : UNITS_PER_WORD)
c98f8742 1935
7e24ffc9 1936/* If a memory-to-memory move would take MOVE_RATIO or more simple
76715c32 1937 move-instruction pairs, we will do a cpymem or libcall instead.
7e24ffc9
HPN
1938 Increasing the value will always make code faster, but eventually
1939 incurs high cost in increased code size.
c98f8742 1940
e2e52e1b 1941 If you don't define this, a reasonable default is used. */
c98f8742 1942
e04ad03d 1943#define MOVE_RATIO(speed) ((speed) ? ix86_cost->move_ratio : 3)
c98f8742 1944
45d78e7f
JJ
1945/* If a clear memory operation would take CLEAR_RATIO or more simple
1946 move-instruction sequences, we will do a clrmem or libcall instead. */
1947
e04ad03d 1948#define CLEAR_RATIO(speed) ((speed) ? MIN (6, ix86_cost->move_ratio) : 2)
45d78e7f 1949
53f00dde
UB
1950/* Define if shifts truncate the shift count which implies one can
1951 omit a sign-extension or zero-extension of a shift count.
1952
1953 On i386, shifts do truncate the count. But bit test instructions
1954 take the modulo of the bit offset operand. */
c98f8742
JVA
1955
1956/* #define SHIFT_COUNT_TRUNCATED */
1957
d9f32422
JH
1958/* A macro to update M and UNSIGNEDP when an object whose type is
1959 TYPE and which has the specified mode and signedness is to be
1960 stored in a register. This macro is only called when TYPE is a
1961 scalar type.
1962
f710504c 1963 On i386 it is sometimes useful to promote HImode and QImode
d9f32422
JH
1964 quantities to SImode. The choice depends on target type. */
1965
1966#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
d9a5f180 1967do { \
d9f32422
JH
1968 if (((MODE) == HImode && TARGET_PROMOTE_HI_REGS) \
1969 || ((MODE) == QImode && TARGET_PROMOTE_QI_REGS)) \
d9a5f180
GS
1970 (MODE) = SImode; \
1971} while (0)
d9f32422 1972
c98f8742
JVA
1973/* Specify the machine mode that pointers have.
1974 After generation of rtl, the compiler makes no further distinction
1975 between pointers and any other objects of this machine mode. */
28968d91 1976#define Pmode (ix86_pmode == PMODE_DI ? DImode : SImode)
c98f8742 1977
5e1e91c4
L
1978/* Supply a definition of STACK_SAVEAREA_MODE for emit_stack_save.
1979 NONLOCAL needs space to save both shadow stack and stack pointers.
1980
1981 FIXME: We only need to save and restore stack pointer in ptr_mode.
1982 But expand_builtin_setjmp_setup and expand_builtin_longjmp use Pmode
1983 to save and restore stack pointer. See
1984 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84150
1985 */
1986#define STACK_SAVEAREA_MODE(LEVEL) \
1987 ((LEVEL) == SAVE_NONLOCAL ? (TARGET_64BIT ? TImode : DImode) : Pmode)
1988
d16b9d1c
UB
1989/* Specify the machine_mode of the size increment
1990 operand of an 'allocate_stack' named pattern. */
1991#define STACK_SIZE_MODE Pmode
1992
f0ea7581
L
1993/* A C expression whose value is zero if pointers that need to be extended
1994 from being `POINTER_SIZE' bits wide to `Pmode' are sign-extended and
   greater than zero if they are zero-extended and less than zero if the
1996 ptr_extend instruction should be used. */
1997
1998#define POINTERS_EXTEND_UNSIGNED 1
1999
c98f8742
JVA
2000/* A function address in a call instruction
2001 is a byte address (for indexing purposes)
2002 so give the MEM rtx a byte's mode. */
2003#define FUNCTION_MODE QImode
d4ba09c0 2004\f
d4ba09c0 2005
d4ba09c0
SC
2006/* A C expression for the cost of a branch instruction. A value of 1
2007 is the default; other values are interpreted relative to that. */
2008
3a4fd356
JH
2009#define BRANCH_COST(speed_p, predictable_p) \
2010 (!(speed_p) ? 2 : (predictable_p) ? 0 : ix86_branch_cost)
d4ba09c0 2011
e327d1a3
L
2012/* An integer expression for the size in bits of the largest integer machine
2013 mode that should actually be used. We allow pairs of registers. */
2014#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TARGET_64BIT ? TImode : DImode)
2015
d4ba09c0
SC
2016/* Define this macro as a C expression which is nonzero if accessing
2017 less than a word of memory (i.e. a `char' or a `short') is no
2018 faster than accessing a word of memory, i.e., if such access
2019 require more than one instruction or if there is no difference in
2020 cost between byte and (aligned) word loads.
2021
2022 When this macro is not defined, the compiler will access a field by
2023 finding the smallest containing object; when it is defined, a
2024 fullword load will be used if alignment permits. Unless bytes
2025 accesses are faster than word accesses, using word accesses is
2026 preferable since it may eliminate subsequent memory access if
2027 subsequent accesses occur to other fields in the same word of the
2028 structure, but to different bytes. */
2029
2030#define SLOW_BYTE_ACCESS 0
2031
2032/* Nonzero if access to memory by shorts is slow and undesirable. */
2033#define SLOW_SHORT_ACCESS 0
2034
d4ba09c0
SC
2035/* Define this macro if it is as good or better to call a constant
2036 function address than to call an address kept in a register.
2037
2038 Desirable on the 386 because a CALL with a constant address is
2039 faster than one with a register address. */
2040
1e8552c2 2041#define NO_FUNCTION_CSE 1
c98f8742 2042\f
c572e5ba
JVA
2043/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2044 return the mode to be used for the comparison.
2045
2046 For floating-point equality comparisons, CCFPEQmode should be used.
e075ae69 2047 VOIDmode should be used in all other cases.
c572e5ba 2048
16189740 2049 For integer comparisons against zero, reduce to CCNOmode or CCZmode if
e075ae69 2050 possible, to allow for more combinations. */
c98f8742 2051
d9a5f180 2052#define SELECT_CC_MODE(OP, X, Y) ix86_cc_mode ((OP), (X), (Y))
9e7adcb3 2053
9cd10576 2054/* Return nonzero if MODE implies a floating point inequality can be
9e7adcb3
JH
2055 reversed. */
2056
2057#define REVERSIBLE_CC_MODE(MODE) 1
2058
2059/* A C expression whose value is reversed condition code of the CODE for
2060 comparison done in CC_MODE mode. */
3c5cb3e4 2061#define REVERSE_CONDITION(CODE, MODE) ix86_reverse_condition ((CODE), (MODE))
9e7adcb3 2062
c98f8742
JVA
2063\f
2064/* Control the assembler format that we output, to the extent
2065 this does not vary between assemblers. */
2066
2067/* How to refer to registers in assembler output.
   This sequence is indexed by the compiler's hard-register-number (see above).  */
c98f8742 2069
a7b376ee 2070/* In order to refer to the first 8 regs as 32-bit regs, prefix an "e".
c98f8742
JVA
2071 For non floating point regs, the following are the HImode names.
2072
2073 For float regs, the stack top is sometimes referred to as "%st(0)"
6e2188e0
NF
2074 instead of just "%st". TARGET_PRINT_OPERAND handles this with the
2075 "y" code. */
c98f8742 2076
a7180f70
BS
2077#define HI_REGISTER_NAMES \
2078{"ax","dx","cx","bx","si","di","bp","sp", \
480feac0 2079 "st","st(1)","st(2)","st(3)","st(4)","st(5)","st(6)","st(7)", \
eaa17c21 2080 "argp", "flags", "fpsr", "frame", \
a7180f70 2081 "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7", \
03c259ad 2082 "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", \
3f3f2124 2083 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
2084 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", \
2085 "xmm16", "xmm17", "xmm18", "xmm19", \
2086 "xmm20", "xmm21", "xmm22", "xmm23", \
2087 "xmm24", "xmm25", "xmm26", "xmm27", \
85a77221 2088 "xmm28", "xmm29", "xmm30", "xmm31", \
eafa30ef 2089 "k0", "k1", "k2", "k3", "k4", "k5", "k6", "k7" }
a7180f70 2090
c98f8742
JVA
2091#define REGISTER_NAMES HI_REGISTER_NAMES
2092
50bec228
UB
2093#define QI_REGISTER_NAMES \
2094{"al", "dl", "cl", "bl", "sil", "dil", "bpl", "spl"}
2095
2096#define QI_HIGH_REGISTER_NAMES \
2097{"ah", "dh", "ch", "bh"}
2098
c98f8742
JVA
2099/* Table of additional register names to use in user input. */
2100
eaa17c21
UB
2101#define ADDITIONAL_REGISTER_NAMES \
2102{ \
2103 { "eax", AX_REG }, { "edx", DX_REG }, { "ecx", CX_REG }, { "ebx", BX_REG }, \
2104 { "esi", SI_REG }, { "edi", DI_REG }, { "ebp", BP_REG }, { "esp", SP_REG }, \
2105 { "rax", AX_REG }, { "rdx", DX_REG }, { "rcx", CX_REG }, { "rbx", BX_REG }, \
2106 { "rsi", SI_REG }, { "rdi", DI_REG }, { "rbp", BP_REG }, { "rsp", SP_REG }, \
2107 { "al", AX_REG }, { "dl", DX_REG }, { "cl", CX_REG }, { "bl", BX_REG }, \
50bec228 2108 { "sil", SI_REG }, { "dil", DI_REG }, { "bpl", BP_REG }, { "spl", SP_REG }, \
2109 { "ah", AX_REG }, { "dh", DX_REG }, { "ch", CX_REG }, { "bh", BX_REG }, \
2110 { "ymm0", XMM0_REG }, { "ymm1", XMM1_REG }, { "ymm2", XMM2_REG }, { "ymm3", XMM3_REG }, \
2111 { "ymm4", XMM4_REG }, { "ymm5", XMM5_REG }, { "ymm6", XMM6_REG }, { "ymm7", XMM7_REG }, \
2112 { "ymm8", XMM8_REG }, { "ymm9", XMM9_REG }, { "ymm10", XMM10_REG }, { "ymm11", XMM11_REG }, \
2113 { "ymm12", XMM12_REG }, { "ymm13", XMM13_REG }, { "ymm14", XMM14_REG }, { "ymm15", XMM15_REG }, \
2114 { "ymm16", XMM16_REG }, { "ymm17", XMM17_REG }, { "ymm18", XMM18_REG }, { "ymm19", XMM19_REG }, \
2115 { "ymm20", XMM20_REG }, { "ymm21", XMM21_REG }, { "ymm22", XMM22_REG }, { "ymm23", XMM23_REG }, \
2116 { "ymm24", XMM24_REG }, { "ymm25", XMM25_REG }, { "ymm26", XMM26_REG }, { "ymm27", XMM27_REG }, \
2117 { "ymm28", XMM28_REG }, { "ymm29", XMM29_REG }, { "ymm30", XMM30_REG }, { "ymm31", XMM31_REG }, \
2118 { "zmm0", XMM0_REG }, { "zmm1", XMM1_REG }, { "zmm2", XMM2_REG }, { "zmm3", XMM3_REG }, \
2119 { "zmm4", XMM4_REG }, { "zmm5", XMM5_REG }, { "zmm6", XMM6_REG }, { "zmm7", XMM7_REG }, \
2120 { "zmm8", XMM8_REG }, { "zmm9", XMM9_REG }, { "zmm10", XMM10_REG }, { "zmm11", XMM11_REG }, \
2121 { "zmm12", XMM12_REG }, { "zmm13", XMM13_REG }, { "zmm14", XMM14_REG }, { "zmm15", XMM15_REG }, \
2122 { "zmm16", XMM16_REG }, { "zmm17", XMM17_REG }, { "zmm18", XMM18_REG }, { "zmm19", XMM19_REG }, \
2123 { "zmm20", XMM20_REG }, { "zmm21", XMM21_REG }, { "zmm22", XMM22_REG }, { "zmm23", XMM23_REG }, \
2124 { "zmm24", XMM24_REG }, { "zmm25", XMM25_REG }, { "zmm26", XMM26_REG }, { "zmm27", XMM27_REG }, \
2125 { "zmm28", XMM28_REG }, { "zmm29", XMM29_REG }, { "zmm30", XMM30_REG }, { "zmm31", XMM31_REG } \
2126}
c98f8742 2127
c98f8742
JVA
2128/* How to renumber registers for dbx and gdb. */
2129
d9a5f180
GS
2130#define DBX_REGISTER_NUMBER(N) \
2131 (TARGET_64BIT ? dbx64_register_map[(N)] : dbx_register_map[(N)])
83774849 2132
9a82e702
MS
2133extern int const dbx_register_map[FIRST_PSEUDO_REGISTER];
2134extern int const dbx64_register_map[FIRST_PSEUDO_REGISTER];
2135extern int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER];
c98f8742 2136
469ac993
JM
2137/* Before the prologue, RA is at 0(%esp). */
2138#define INCOMING_RETURN_ADDR_RTX \
2efb4214 2139 gen_rtx_MEM (Pmode, stack_pointer_rtx)
fce5a9f2 2140
e414ab29 2141/* After the prologue, RA is at -4(AP) in the current frame. */
2142#define RETURN_ADDR_RTX(COUNT, FRAME) \
2143 ((COUNT) == 0 \
2144 ? gen_rtx_MEM (Pmode, plus_constant (Pmode, arg_pointer_rtx, \
2145 -UNITS_PER_WORD)) \
2146 : gen_rtx_MEM (Pmode, plus_constant (Pmode, (FRAME), UNITS_PER_WORD)))
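
/* Worked example (added for clarity): RETURN_ADDR_RTX (0, frame) is the
   word just below the argument pointer (-4(%argp) on 32-bit targets, -8 on
   64-bit), while for an outer frame (COUNT > 0) the return address sits one
   word above the frame address passed in.  */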
e414ab29 2147
892a2d68 2148/* PC is dbx register 8; let's use that column for RA. */
0f7fa3d0 2149#define DWARF_FRAME_RETURN_COLUMN (TARGET_64BIT ? 16 : 8)
469ac993 2150
a10b3cf1
L
2151/* Before the prologue, there are return address and error code for
2152 exception handler on the top of the frame. */
2153#define INCOMING_FRAME_SP_OFFSET \
2154 (cfun->machine->func_type == TYPE_EXCEPTION \
2155 ? 2 * UNITS_PER_WORD : UNITS_PER_WORD)
a6ab3aad 2156
26fc730d
JJ
2157/* The value of INCOMING_FRAME_SP_OFFSET the assembler assumes in
2158 .cfi_startproc. */
2159#define DEFAULT_INCOMING_FRAME_SP_OFFSET UNITS_PER_WORD
2160
1020a5ab 2161/* Describe how we implement __builtin_eh_return. */
2824d6e5
UB
2162#define EH_RETURN_DATA_REGNO(N) ((N) <= DX_REG ? (N) : INVALID_REGNUM)
2163#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, CX_REG)
1020a5ab 2164
ad919812 2165
e4c4ebeb
RH
2166/* Select a format to encode pointers in exception handling data. CODE
2167 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
2168 true if the symbol may be affected by dynamic relocations.
2169
2170 ??? All x86 object file formats are capable of representing this.
2171 After all, the relocation needed is the same as for the call insn.
2172 Whether or not a particular assembler allows us to enter such, I
2173 guess we'll have to see. */
d9a5f180 2174#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
72ce3d4a 2175 asm_preferred_eh_data_format ((CODE), (GLOBAL))
e4c4ebeb 2176
ec1895c1
UB
2177/* These are a couple of extensions to the formats accepted
2178 by asm_fprintf:
2179 %z prints out opcode suffix for word-mode instruction
2180 %r prints out word-mode name for reg_names[arg] */
2181#define ASM_FPRINTF_EXTENSIONS(FILE, ARGS, P) \
2182 case 'z': \
2183 fputc (TARGET_64BIT ? 'q' : 'l', (FILE)); \
2184 break; \
2185 \
2186 case 'r': \
2187 { \
2188 unsigned int regno = va_arg ((ARGS), int); \
2189 if (LEGACY_INT_REGNO_P (regno)) \
2190 fputc (TARGET_64BIT ? 'r' : 'e', (FILE)); \
2191 fputs (reg_names[regno], (FILE)); \
2192 break; \
2193 }
2194
2195/* This is how to output an insn to push a register on the stack. */
2196
2197#define ASM_OUTPUT_REG_PUSH(FILE, REGNO) \
2198 asm_fprintf ((FILE), "\tpush%z\t%%%r\n", (REGNO))
2199
2200/* This is how to output an insn to pop a register from the stack. */
c98f8742 2201
d9a5f180 2202#define ASM_OUTPUT_REG_POP(FILE, REGNO) \
ec1895c1 2203 asm_fprintf ((FILE), "\tpop%z\t%%%r\n", (REGNO))
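
/* Worked example (added for clarity): with the %z and %r extensions above,
   ASM_OUTPUT_REG_PUSH (file, AX_REG) emits "pushq %rax" on 64-bit targets
   and "pushl %eax" on 32-bit targets; the pop variant is analogous.  */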
c98f8742 2204
f88c65f7 2205/* This is how to output an element of a case-vector that is absolute. */
c98f8742
JVA
2206
2207#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
d9a5f180 2208 ix86_output_addr_vec_elt ((FILE), (VALUE))
c98f8742 2209
f88c65f7 2210/* This is how to output an element of a case-vector that is relative. */
c98f8742 2211
33f7f353 2212#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
d9a5f180 2213 ix86_output_addr_diff_elt ((FILE), (VALUE), (REL))
f88c65f7 2214
63001560 2215/* When we see %v, we will print the 'v' prefix if TARGET_AVX is true. */
2216
2217#define ASM_OUTPUT_AVX_PREFIX(STREAM, PTR) \
2218{ \
2219 if ((PTR)[0] == '%' && (PTR)[1] == 'v') \
63001560 2220 (PTR) += TARGET_AVX ? 1 : 2; \
2221}
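
/* Worked example (added for clarity): for a template such as "%vmovaps",
   the opcode is printed as "vmovaps" when TARGET_AVX is enabled (only the
   '%' is skipped) and as plain "movaps" otherwise (both '%' and 'v' are
   skipped).  */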
2222
2223/* A C statement or statements which output an assembler instruction
2224 opcode to the stdio stream STREAM. The macro-operand PTR is a
2225 variable of type `char *' which points to the opcode name in
2226 its "internal" form--the form that is written in the machine
2227 description. */
2228
2229#define ASM_OUTPUT_OPCODE(STREAM, PTR) \
2230 ASM_OUTPUT_AVX_PREFIX ((STREAM), (PTR))
2231
2232/* A C statement to output to the stdio stream FILE an assembler
2233 command to pad the location counter to a multiple of 1<<LOG
2234 bytes if it is within MAX_SKIP bytes. */
2235
2236#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
2237#undef ASM_OUTPUT_MAX_SKIP_PAD
2238#define ASM_OUTPUT_MAX_SKIP_PAD(FILE, LOG, MAX_SKIP) \
2239 if ((LOG) != 0) \
2240 { \
dd047c67 2241 if ((MAX_SKIP) == 0 || (MAX_SKIP) >= (1 << (LOG)) - 1) \
2242 fprintf ((FILE), "\t.p2align %d\n", (LOG)); \
2243 else \
2244 fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP)); \
2245 }
2246#endif
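
/* Worked example (added for clarity): ASM_OUTPUT_MAX_SKIP_PAD (file, 4, 10)
   emits "\t.p2align 4,,10", i.e. align to 16 bytes but give up if more than
   10 padding bytes would be needed; with MAX_SKIP 0 or >= 15 the plain
   "\t.p2align 4" form is used.  */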
2247
135a687e
KT
2248/* Write the extra assembler code needed to declare a function
2249 properly. */
2250
2251#undef ASM_OUTPUT_FUNCTION_LABEL
2252#define ASM_OUTPUT_FUNCTION_LABEL(FILE, NAME, DECL) \
1a6e82b8 2253 ix86_asm_output_function_label ((FILE), (NAME), (DECL))
135a687e 2254
f7288899
EC
2255/* Under some conditions we need jump tables in the text section,
2256 because the assembler cannot handle label differences between
2257 sections. This is the case for x86_64 on Mach-O for example. */
f88c65f7
RH
2258
2259#define JUMP_TABLES_IN_TEXT_SECTION \
f7288899
EC
2260 (flag_pic && ((TARGET_MACHO && TARGET_64BIT) \
2261 || (!TARGET_64BIT && !HAVE_AS_GOTOFF_IN_DATA)))
c98f8742 2262
cea3bd3e
RH
2263/* Switch to init or fini section via SECTION_OP, emit a call to FUNC,
2264 and switch back. For x86 we do this only to save a few bytes that
2265 would otherwise be unused in the text section. */
ad211091
KT
2266#define CRT_MKSTR2(VAL) #VAL
2267#define CRT_MKSTR(x) CRT_MKSTR2(x)
2268
2269#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
2270 asm (SECTION_OP "\n\t" \
2271 "call " CRT_MKSTR(__USER_LABEL_PREFIX__) #FUNC "\n" \
cea3bd3e 2272 TEXT_SECTION_ASM_OP);
5a579c3b
LE
2273
2274/* Default threshold for putting data in large sections
2275 with x86-64 medium memory model */
2276#define DEFAULT_LARGE_SECTION_THRESHOLD 65536
74b42c8b 2277\f
2278/* Which processor to tune code generation for. These must be in sync
2279 with processor_target_table in i386.c. */
2280
enum processor_type
{
  PROCESSOR_GENERIC = 0,
  PROCESSOR_I386,			/* 80386 */
  PROCESSOR_I486,			/* 80486DX, 80486SX, 80486DX[24] */
  PROCESSOR_PENTIUM,
  PROCESSOR_LAKEMONT,
  PROCESSOR_PENTIUMPRO,
  PROCESSOR_PENTIUM4,
  PROCESSOR_NOCONA,
  PROCESSOR_CORE2,
  PROCESSOR_NEHALEM,
  PROCESSOR_SANDYBRIDGE,
  PROCESSOR_HASWELL,
  PROCESSOR_BONNELL,
  PROCESSOR_SILVERMONT,
  PROCESSOR_GOLDMONT,
  PROCESSOR_GOLDMONT_PLUS,
  PROCESSOR_TREMONT,
  PROCESSOR_KNL,
  PROCESSOR_KNM,
  PROCESSOR_SKYLAKE,
  PROCESSOR_SKYLAKE_AVX512,
  PROCESSOR_CANNONLAKE,
  PROCESSOR_ICELAKE_CLIENT,
  PROCESSOR_ICELAKE_SERVER,
  PROCESSOR_CASCADELAKE,
  PROCESSOR_TIGERLAKE,
  PROCESSOR_COOPERLAKE,
  PROCESSOR_INTEL,
  PROCESSOR_GEODE,
  PROCESSOR_K6,
  PROCESSOR_ATHLON,
  PROCESSOR_K8,
  PROCESSOR_AMDFAM10,
  PROCESSOR_BDVER1,
  PROCESSOR_BDVER2,
  PROCESSOR_BDVER3,
  PROCESSOR_BDVER4,
  PROCESSOR_BTVER1,
  PROCESSOR_BTVER2,
  PROCESSOR_ZNVER1,
  PROCESSOR_ZNVER2,
  PROCESSOR_max
};
2326
c98c2430 2327#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
2559ef9f 2328extern const char *const processor_names[];
c98c2430
ML
2329
2330#include "wide-int-bitmask.h"
2331
2332const wide_int_bitmask PTA_3DNOW (HOST_WIDE_INT_1U << 0);
2333const wide_int_bitmask PTA_3DNOW_A (HOST_WIDE_INT_1U << 1);
2334const wide_int_bitmask PTA_64BIT (HOST_WIDE_INT_1U << 2);
2335const wide_int_bitmask PTA_ABM (HOST_WIDE_INT_1U << 3);
2336const wide_int_bitmask PTA_AES (HOST_WIDE_INT_1U << 4);
2337const wide_int_bitmask PTA_AVX (HOST_WIDE_INT_1U << 5);
2338const wide_int_bitmask PTA_BMI (HOST_WIDE_INT_1U << 6);
2339const wide_int_bitmask PTA_CX16 (HOST_WIDE_INT_1U << 7);
2340const wide_int_bitmask PTA_F16C (HOST_WIDE_INT_1U << 8);
2341const wide_int_bitmask PTA_FMA (HOST_WIDE_INT_1U << 9);
2342const wide_int_bitmask PTA_FMA4 (HOST_WIDE_INT_1U << 10);
2343const wide_int_bitmask PTA_FSGSBASE (HOST_WIDE_INT_1U << 11);
2344const wide_int_bitmask PTA_LWP (HOST_WIDE_INT_1U << 12);
2345const wide_int_bitmask PTA_LZCNT (HOST_WIDE_INT_1U << 13);
2346const wide_int_bitmask PTA_MMX (HOST_WIDE_INT_1U << 14);
2347const wide_int_bitmask PTA_MOVBE (HOST_WIDE_INT_1U << 15);
2348const wide_int_bitmask PTA_NO_SAHF (HOST_WIDE_INT_1U << 16);
2349const wide_int_bitmask PTA_PCLMUL (HOST_WIDE_INT_1U << 17);
2350const wide_int_bitmask PTA_POPCNT (HOST_WIDE_INT_1U << 18);
2351const wide_int_bitmask PTA_PREFETCH_SSE (HOST_WIDE_INT_1U << 19);
2352const wide_int_bitmask PTA_RDRND (HOST_WIDE_INT_1U << 20);
2353const wide_int_bitmask PTA_SSE (HOST_WIDE_INT_1U << 21);
2354const wide_int_bitmask PTA_SSE2 (HOST_WIDE_INT_1U << 22);
2355const wide_int_bitmask PTA_SSE3 (HOST_WIDE_INT_1U << 23);
2356const wide_int_bitmask PTA_SSE4_1 (HOST_WIDE_INT_1U << 24);
2357const wide_int_bitmask PTA_SSE4_2 (HOST_WIDE_INT_1U << 25);
2358const wide_int_bitmask PTA_SSE4A (HOST_WIDE_INT_1U << 26);
2359const wide_int_bitmask PTA_SSSE3 (HOST_WIDE_INT_1U << 27);
2360const wide_int_bitmask PTA_TBM (HOST_WIDE_INT_1U << 28);
2361const wide_int_bitmask PTA_XOP (HOST_WIDE_INT_1U << 29);
2362const wide_int_bitmask PTA_AVX2 (HOST_WIDE_INT_1U << 30);
2363const wide_int_bitmask PTA_BMI2 (HOST_WIDE_INT_1U << 31);
2364const wide_int_bitmask PTA_RTM (HOST_WIDE_INT_1U << 32);
2365const wide_int_bitmask PTA_HLE (HOST_WIDE_INT_1U << 33);
2366const wide_int_bitmask PTA_PRFCHW (HOST_WIDE_INT_1U << 34);
2367const wide_int_bitmask PTA_RDSEED (HOST_WIDE_INT_1U << 35);
2368const wide_int_bitmask PTA_ADX (HOST_WIDE_INT_1U << 36);
2369const wide_int_bitmask PTA_FXSR (HOST_WIDE_INT_1U << 37);
2370const wide_int_bitmask PTA_XSAVE (HOST_WIDE_INT_1U << 38);
2371const wide_int_bitmask PTA_XSAVEOPT (HOST_WIDE_INT_1U << 39);
2372const wide_int_bitmask PTA_AVX512F (HOST_WIDE_INT_1U << 40);
2373const wide_int_bitmask PTA_AVX512ER (HOST_WIDE_INT_1U << 41);
2374const wide_int_bitmask PTA_AVX512PF (HOST_WIDE_INT_1U << 42);
2375const wide_int_bitmask PTA_AVX512CD (HOST_WIDE_INT_1U << 43);
2376/* Hole after PTA_MPX was removed. */
2377const wide_int_bitmask PTA_SHA (HOST_WIDE_INT_1U << 45);
2378const wide_int_bitmask PTA_PREFETCHWT1 (HOST_WIDE_INT_1U << 46);
2379const wide_int_bitmask PTA_CLFLUSHOPT (HOST_WIDE_INT_1U << 47);
2380const wide_int_bitmask PTA_XSAVEC (HOST_WIDE_INT_1U << 48);
2381const wide_int_bitmask PTA_XSAVES (HOST_WIDE_INT_1U << 49);
2382const wide_int_bitmask PTA_AVX512DQ (HOST_WIDE_INT_1U << 50);
2383const wide_int_bitmask PTA_AVX512BW (HOST_WIDE_INT_1U << 51);
2384const wide_int_bitmask PTA_AVX512VL (HOST_WIDE_INT_1U << 52);
2385const wide_int_bitmask PTA_AVX512IFMA (HOST_WIDE_INT_1U << 53);
2386const wide_int_bitmask PTA_AVX512VBMI (HOST_WIDE_INT_1U << 54);
2387const wide_int_bitmask PTA_CLWB (HOST_WIDE_INT_1U << 55);
2388const wide_int_bitmask PTA_MWAITX (HOST_WIDE_INT_1U << 56);
2389const wide_int_bitmask PTA_CLZERO (HOST_WIDE_INT_1U << 57);
2390const wide_int_bitmask PTA_NO_80387 (HOST_WIDE_INT_1U << 58);
2391const wide_int_bitmask PTA_PKU (HOST_WIDE_INT_1U << 59);
2392const wide_int_bitmask PTA_AVX5124VNNIW (HOST_WIDE_INT_1U << 60);
2393const wide_int_bitmask PTA_AVX5124FMAPS (HOST_WIDE_INT_1U << 61);
2394const wide_int_bitmask PTA_AVX512VPOPCNTDQ (HOST_WIDE_INT_1U << 62);
2395const wide_int_bitmask PTA_SGX (HOST_WIDE_INT_1U << 63);
2396const wide_int_bitmask PTA_AVX512VNNI (0, HOST_WIDE_INT_1U);
2397const wide_int_bitmask PTA_GFNI (0, HOST_WIDE_INT_1U << 1);
2398const wide_int_bitmask PTA_VAES (0, HOST_WIDE_INT_1U << 2);
2399const wide_int_bitmask PTA_AVX512VBMI2 (0, HOST_WIDE_INT_1U << 3);
2400const wide_int_bitmask PTA_VPCLMULQDQ (0, HOST_WIDE_INT_1U << 4);
2401const wide_int_bitmask PTA_AVX512BITALG (0, HOST_WIDE_INT_1U << 5);
2402const wide_int_bitmask PTA_RDPID (0, HOST_WIDE_INT_1U << 6);
2403const wide_int_bitmask PTA_PCONFIG (0, HOST_WIDE_INT_1U << 7);
2404const wide_int_bitmask PTA_WBNOINVD (0, HOST_WIDE_INT_1U << 8);
e21b52af 2405const wide_int_bitmask PTA_AVX512VP2INTERSECT (0, HOST_WIDE_INT_1U << 9);
c98c2430 2406const wide_int_bitmask PTA_WAITPKG (0, HOST_WIDE_INT_1U << 12);
41f8d1fc 2407const wide_int_bitmask PTA_PTWRITE (0, HOST_WIDE_INT_1U << 10);
4f0e90fa 2408const wide_int_bitmask PTA_AVX512BF16 (0, HOST_WIDE_INT_1U << 11);
a9fcfec3
HL
2409const wide_int_bitmask PTA_MOVDIRI (0, HOST_WIDE_INT_1U << 13);
2410const wide_int_bitmask PTA_MOVDIR64B (0, HOST_WIDE_INT_1U << 14);
c98c2430
ML
2411
2412const wide_int_bitmask PTA_CORE2 = PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2
2413 | PTA_SSE3 | PTA_SSSE3 | PTA_CX16 | PTA_FXSR;
2414const wide_int_bitmask PTA_NEHALEM = PTA_CORE2 | PTA_SSE4_1 | PTA_SSE4_2
2415 | PTA_POPCNT;
c9450033 2416const wide_int_bitmask PTA_WESTMERE = PTA_NEHALEM | PTA_PCLMUL;
c98c2430
ML
2417const wide_int_bitmask PTA_SANDYBRIDGE = PTA_WESTMERE | PTA_AVX | PTA_XSAVE
2418 | PTA_XSAVEOPT;
2419const wide_int_bitmask PTA_IVYBRIDGE = PTA_SANDYBRIDGE | PTA_FSGSBASE
2420 | PTA_RDRND | PTA_F16C;
2421const wide_int_bitmask PTA_HASWELL = PTA_IVYBRIDGE | PTA_AVX2 | PTA_BMI
2422 | PTA_BMI2 | PTA_LZCNT | PTA_FMA | PTA_MOVBE | PTA_HLE;
2423const wide_int_bitmask PTA_BROADWELL = PTA_HASWELL | PTA_ADX | PTA_PRFCHW
2424 | PTA_RDSEED;
c9450033 2425const wide_int_bitmask PTA_SKYLAKE = PTA_BROADWELL | PTA_AES | PTA_CLFLUSHOPT
c98c2430
ML
2426 | PTA_XSAVEC | PTA_XSAVES | PTA_SGX;
2427const wide_int_bitmask PTA_SKYLAKE_AVX512 = PTA_SKYLAKE | PTA_AVX512F
2428 | PTA_AVX512CD | PTA_AVX512VL | PTA_AVX512BW | PTA_AVX512DQ | PTA_PKU
2429 | PTA_CLWB;
7cab07f0 2430const wide_int_bitmask PTA_CASCADELAKE = PTA_SKYLAKE_AVX512 | PTA_AVX512VNNI;
a9fcfec3 2431const wide_int_bitmask PTA_COOPERLAKE = PTA_CASCADELAKE | PTA_AVX512BF16;
c98c2430
ML
2432const wide_int_bitmask PTA_CANNONLAKE = PTA_SKYLAKE | PTA_AVX512F
2433 | PTA_AVX512CD | PTA_AVX512VL | PTA_AVX512BW | PTA_AVX512DQ | PTA_PKU
2434 | PTA_AVX512VBMI | PTA_AVX512IFMA | PTA_SHA;
2435const wide_int_bitmask PTA_ICELAKE_CLIENT = PTA_CANNONLAKE | PTA_AVX512VNNI
2436 | PTA_GFNI | PTA_VAES | PTA_AVX512VBMI2 | PTA_VPCLMULQDQ | PTA_AVX512BITALG
2437 | PTA_RDPID | PTA_CLWB;
2438const wide_int_bitmask PTA_ICELAKE_SERVER = PTA_ICELAKE_CLIENT | PTA_PCONFIG
2439 | PTA_WBNOINVD;
a9fcfec3
HL
2440const wide_int_bitmask PTA_TIGERLAKE = PTA_ICELAKE_CLIENT | PTA_MOVDIRI
2441 | PTA_MOVDIR64B | PTA_AVX512VP2INTERSECT;
c98c2430
ML
2442const wide_int_bitmask PTA_KNL = PTA_BROADWELL | PTA_AVX512PF | PTA_AVX512ER
2443 | PTA_AVX512F | PTA_AVX512CD;
2444const wide_int_bitmask PTA_BONNELL = PTA_CORE2 | PTA_MOVBE;
2445const wide_int_bitmask PTA_SILVERMONT = PTA_WESTMERE | PTA_MOVBE | PTA_RDRND;
c9450033 2446const wide_int_bitmask PTA_GOLDMONT = PTA_SILVERMONT | PTA_AES | PTA_SHA | PTA_XSAVE
c98c2430
ML
2447 | PTA_RDSEED | PTA_XSAVEC | PTA_XSAVES | PTA_CLFLUSHOPT | PTA_XSAVEOPT
2448 | PTA_FSGSBASE;
2449const wide_int_bitmask PTA_GOLDMONT_PLUS = PTA_GOLDMONT | PTA_RDPID
41f8d1fc 2450 | PTA_SGX | PTA_PTWRITE;
c98c2430
ML
2451const wide_int_bitmask PTA_TREMONT = PTA_GOLDMONT_PLUS | PTA_CLWB
2452 | PTA_GFNI;
2453const wide_int_bitmask PTA_KNM = PTA_KNL | PTA_AVX5124VNNIW
2454 | PTA_AVX5124FMAPS | PTA_AVX512VPOPCNTDQ;
2455
2456#ifndef GENERATOR_FILE
2457
2458#include "insn-attr-common.h"
2459
6c1dae73 2460class pta
c98c2430 2461{
6c1dae73 2462public:
c98c2430
ML
2463 const char *const name; /* processor name or nickname. */
2464 const enum processor_type processor;
2465 const enum attr_cpu schedule;
2466 const wide_int_bitmask flags;
2467};
2468
2469extern const pta processor_alias_table[];
2470extern int const pta_size;
2471#endif
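/* Illustrative sketch, not part of the original header: roughly how the
   option-handling code elsewhere in the port walks processor_alias_table
   and tests PTA_* bits via the wide_int_bitmask operators declared in
   wide-int-bitmask.h.  The helper name example_arch_has_sse4_2 is
   hypothetical, and strcmp is assumed to come from the usual system.h
   includes.

     static bool
     example_arch_has_sse4_2 (const char *arch)
     {
       for (int i = 0; i < pta_size; i++)
	 if (strcmp (processor_alias_table[i].name, arch) == 0)
	   return (processor_alias_table[i].flags & PTA_SSE4_2) != 0;
       return false;
     }
*/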
2472
2473#endif
2474
9e555526 2475extern enum processor_type ix86_tune;
5bf0ebab 2476extern enum processor_type ix86_arch;
5bf0ebab 2477
8362f420
JH
2478/* Size of the RED_ZONE area. */
2479#define RED_ZONE_SIZE 128
2480/* Reserved area of the red zone for temporaries. */
2481#define RED_ZONE_RESERVE 8
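/* Illustrative note, based on how these two constants are combined by the
   frame-layout code (an assumption about usage, not stated here): at most
   RED_ZONE_SIZE - RED_ZONE_RESERVE = 128 - 8 = 120 bytes of a leaf
   function's locals can be placed in the red zone; the reserved 8 bytes
   are kept free for temporaries.  */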
c93e80a5 2482
95899b34 2483extern unsigned int ix86_preferred_stack_boundary;
2e3f842f 2484extern unsigned int ix86_incoming_stack_boundary;
5bf0ebab
RH
2485
2486/* Smallest class containing REGNO. */
2487extern enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER];
2488
0948ccb2
PB
2489enum ix86_fpcmp_strategy {
2490 IX86_FPCMP_SAHF,
2491 IX86_FPCMP_COMI,
2492 IX86_FPCMP_ARITH
2493};
22fb740d
JH
2494\f
2495/* To properly truncate FP values into integers, we need to set i387 control
2496 word. We can't emit proper mode switching code before reload, as spills
2497 generated by reload may truncate values incorrectly, but we still can avoid
2498 redundant computation of new control word by the mode switching pass.
2499 The fldcw instructions are still emitted redundantly, but this is probably
2500 not going to be a noticeable problem, as most CPUs do have a fast path for
fce5a9f2 2501 the sequence.
22fb740d
JH
2502
2503 The machinery is to emit simple truncation instructions and split them
2504 before reload to instructions having USEs of two memory locations that
2505 are filled by this code to old and new control word.
fce5a9f2 2506
22fb740d
JH
2507 A post-reload pass may later be used to eliminate the redundant fldcw if
2508 needed. */
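/* Illustrative sketch, not taken from this port's actual patterns: a single
   FP-to-integer truncation, expanded naively, looks roughly like

	fnstcw	saved_cw	# save the current control word
	fldcw	trunc_cw	# load a control word with RC = round-to-zero
	fistp	dest		# store the truncated integer
	fldcw	saved_cw	# restore the original control word

   and the mode-switching pass lets a run of such truncations share one
   control-word switch instead of repeating the fldcw pair each time.  */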
2509
c7ca8ef8
UB
2510enum ix86_stack_slot
2511{
2512 SLOT_TEMP = 0,
2513 SLOT_CW_STORED,
2514 SLOT_CW_TRUNC,
2515 SLOT_CW_FLOOR,
2516 SLOT_CW_CEIL,
80008279 2517 SLOT_STV_TEMP,
c7ca8ef8
UB
2518 MAX_386_STACK_LOCALS
2519};
2520
ff680eb1
UB
2521enum ix86_entity
2522{
c7ca8ef8
UB
2523 X86_DIRFLAG = 0,
2524 AVX_U128,
ff97910d 2525 I387_TRUNC,
ff680eb1
UB
2526 I387_FLOOR,
2527 I387_CEIL,
ff680eb1
UB
2528 MAX_386_ENTITIES
2529};
2530
c7ca8ef8 2531enum x86_dirflag_state
ff680eb1 2532{
c7ca8ef8
UB
2533 X86_DIRFLAG_RESET,
2534 X86_DIRFLAG_ANY
ff680eb1 2535};
22fb740d 2536
ff97910d
VY
2537enum avx_u128_state
2538{
2539 AVX_U128_CLEAN,
2540 AVX_U128_DIRTY,
2541 AVX_U128_ANY
2542};
2543
22fb740d
JH
2544/* Define this macro if the port needs extra instructions inserted
2545 for mode switching in an optimizing compilation. */
2546
ff680eb1
UB
2547#define OPTIMIZE_MODE_SWITCHING(ENTITY) \
2548 ix86_optimize_mode_switching[(ENTITY)]
22fb740d
JH
2549
2550/* If you define `OPTIMIZE_MODE_SWITCHING', you have to define this as
2551 an initializer for an array of integers. Each initializer element N
2552 refers to an entity that needs mode switching, and specifies the
2553 number of different modes that might need to be set for this
2554 entity. The position of the element in the initializer list -
2555 counting from zero - determines the integer that is used to
2556 refer to the mode-switched entity in question. */
2557
c7ca8ef8
UB
2558#define NUM_MODES_FOR_MODE_SWITCHING \
2559 { X86_DIRFLAG_ANY, AVX_U128_ANY, \
8c097065 2560 I387_CW_ANY, I387_CW_ANY, I387_CW_ANY }
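/* The initializer elements above correspond, in order, to the entries of
   enum ix86_entity: X86_DIRFLAG, AVX_U128, I387_TRUNC, I387_FLOOR and
   I387_CEIL.  Each element is the corresponding entity's *_ANY value,
   which the port uses as the number of different modes for that entity.  */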
22fb740d 2561
0f0138b6
JH
2562\f
2563/* Avoid renaming of stack registers, as doing so in combination with
2564 scheduling just increases the number of live registers at a time and,
2565 in turn, the number of fxch instructions needed.
2566
3f97cb0b
AI
2567 ??? Maybe Pentium chips benefit from renaming; someone could try....
2568
2569 Don't rename evex to non-evex sse registers. */
0f0138b6 2570
1a6e82b8
UB
2571#define HARD_REGNO_RENAME_OK(SRC, TARGET) \
2572 (!STACK_REGNO_P (SRC) \
2573 && EXT_REX_SSE_REGNO_P (SRC) == EXT_REX_SSE_REGNO_P (TARGET))
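/* Illustrative note, derived from the macro above: xmm0-xmm15 may only be
   renamed among themselves and xmm16-xmm31 (the EVEX-only registers) among
   themselves, and an x87 stack register is never accepted as the source of
   a rename.  */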
22fb740d 2574
3b3c6a3f 2575\f
e91f04de 2576#define FASTCALL_PREFIX '@'
fa1a0d02 2577\f
77560086
BE
2578#ifndef USED_FOR_TARGET
2579/* Structure describing stack frame layout.
2580 Stack grows downward:
2581
2582 [arguments]
2583 <- ARG_POINTER
2584 saved pc
2585
2586 saved static chain if ix86_static_chain_on_stack
2587
2588 saved frame pointer if frame_pointer_needed
2589 <- HARD_FRAME_POINTER
2590 [saved regs]
2591 <- reg_save_offset
2592 [padding0]
2593 <- stack_realign_offset
2594 [saved SSE regs]
2595 OR
2596 [stub-saved registers for ms x64 --> sysv clobbers
2597 <- Start of out-of-line, stub-saved/restored regs
2598 (see libgcc/config/i386/(sav|res)ms64*.S)
2599 [XMM6-15]
2600 [RSI]
2601 [RDI]
2602 [?RBX] only if RBX is clobbered
2603 [?RBP] only if RBP and RBX are clobbered
2604 [?R12] only if R12 and all previous regs are clobbered
2605 [?R13] only if R13 and all previous regs are clobbered
2606 [?R14] only if R14 and all previous regs are clobbered
2607 [?R15] only if R15 and all previous regs are clobbered
2608 <- end of stub-saved/restored regs
2609 [padding1]
2610 ]
5d9d834d 2611 <- sse_reg_save_offset
77560086
BE
2612 [padding2]
2613 | <- FRAME_POINTER
2614 [va_arg registers] |
2615 |
2616 [frame] |
2617 |
2618 [padding2] | = to_allocate
2619 <- STACK_POINTER
2620 */
2621struct GTY(()) ix86_frame
2622{
2623 int nsseregs;
2624 int nregs;
2625 int va_arg_size;
2626 int red_zone_size;
2627 int outgoing_arguments_size;
2628
2629 /* The offsets relative to ARG_POINTER. */
2630 HOST_WIDE_INT frame_pointer_offset;
2631 HOST_WIDE_INT hard_frame_pointer_offset;
2632 HOST_WIDE_INT stack_pointer_offset;
2633 HOST_WIDE_INT hfp_save_offset;
2634 HOST_WIDE_INT reg_save_offset;
122f9da1 2635 HOST_WIDE_INT stack_realign_allocate;
77560086 2636 HOST_WIDE_INT stack_realign_offset;
77560086
BE
2637 HOST_WIDE_INT sse_reg_save_offset;
2638
2639 /* When save_regs_using_mov is set, emit prologue using
2640 move instead of push instructions. */
2641 bool save_regs_using_mov;
2642};
2643
122f9da1
DS
2644/* Machine specific frame tracking during prologue/epilogue generation. All
2645 values are positive, but since the x86 stack grows downward, they are subtracted
2646 from the CFA to produce a valid address. */
cd9c1ca8 2647
ec7ded37 2648struct GTY(()) machine_frame_state
cd9c1ca8 2649{
ec7ded37
RH
2650 /* This pair tracks the currently active CFA as reg+offset. When reg
2651 is drap_reg, we don't bother trying to record here the real CFA when
2652 it might really be a DW_CFA_def_cfa_expression. */
2653 rtx cfa_reg;
2654 HOST_WIDE_INT cfa_offset;
2655
2656 /* The current offset (canonically from the CFA) of ESP and EBP.
2657 When stack frame re-alignment is active, these may not be relative
2658 to the CFA. However, in all cases they are relative to the offsets
2659 of the saved registers stored in ix86_frame. */
2660 HOST_WIDE_INT sp_offset;
2661 HOST_WIDE_INT fp_offset;
2662
2663 /* The size of the red-zone that may be assumed for the purposes of
2664 eliding register restore notes in the epilogue. This may be zero
2665 if no red-zone is in effect, or may be reduced from the real
2666 red-zone value by a maximum runtime stack re-alignment value. */
2667 int red_zone_offset;
2668
2669 /* Indicate whether each of ESP, EBP or DRAP currently holds a valid
2670 value within the frame. If false then the offset above should be
2671 ignored. Note that DRAP, if valid, *always* points to the CFA and
2672 thus has an offset of zero. */
2673 BOOL_BITFIELD sp_valid : 1;
2674 BOOL_BITFIELD fp_valid : 1;
2675 BOOL_BITFIELD drap_valid : 1;
c9f4c451
RH
2676
2677 /* Indicate whether the local stack frame has been re-aligned. When
2678 set, the SP/FP offsets above are relative to the aligned frame
2679 and not the CFA. */
2680 BOOL_BITFIELD realigned : 1;
d6d4d770
DS
2681
2682 /* Indicates whether the stack pointer has been re-aligned. When set,
2683 SP/FP continue to be relative to the CFA, but the stack pointer
122f9da1
DS
2684 should only be used for offsets > sp_realigned_offset, while
2685 the frame pointer should be used for offsets <= sp_realigned_fp_last.
d6d4d770
DS
2686 The flags realigned and sp_realigned are mutually exclusive. */
2687 BOOL_BITFIELD sp_realigned : 1;
2688
122f9da1
DS
2689 /* If sp_realigned is set, this is the last valid offset from the CFA
2690 that can be used for access with the frame pointer. */
2691 HOST_WIDE_INT sp_realigned_fp_last;
2692
2693 /* If sp_realigned is set, this is the offset from the CFA that the stack
2694 pointer was realigned, and may or may not be equal to sp_realigned_fp_last.
2695 Access via the stack pointer is only valid for offsets that are greater than
2696 this value. */
d6d4d770 2697 HOST_WIDE_INT sp_realigned_offset;
cd9c1ca8
RH
2698};
2699
f81c9774
RH
2700/* Private to winnt.c. */
2701struct seh_frame_state;
2702
f8071c05
L
2703enum function_type
2704{
2705 TYPE_UNKNOWN = 0,
2706 TYPE_NORMAL,
2707 /* The current function is an interrupt service routine with a
2708 pointer argument as specified by the "interrupt" attribute. */
2709 TYPE_INTERRUPT,
2710 /* The current function is an interrupt service routine with a
2711 pointer argument and an integer argument as specified by the
2712 "interrupt" attribute. */
2713 TYPE_EXCEPTION
2714};
2715
d1b38208 2716struct GTY(()) machine_function {
fa1a0d02 2717 struct stack_local_entry *stack_locals;
4aab97f9
L
2718 int varargs_gpr_size;
2719 int varargs_fpr_size;
ff680eb1 2720 int optimize_mode_switching[MAX_386_ENTITIES];
3452586b 2721
77560086
BE
2722 /* Cached initial frame layout for the current function. */
2723 struct ix86_frame frame;
3452586b 2724
7458026b
ILT
2725 /* For -fsplit-stack support: A stack local which holds a pointer to
2726 the stack arguments for a function with a variable number of
2727 arguments. This is set at the start of the function and is used
2728 to initialize the overflow_arg_area field of the va_list
2729 structure. */
2730 rtx split_stack_varargs_pointer;
2731
3452586b
RH
2732 /* This value is used for amd64 targets and specifies the current abi
2733 to be used. MS_ABI means the Microsoft ABI; SYSV_ABI means the System V ABI. */
25efe060 2734 ENUM_BITFIELD(calling_abi) call_abi : 8;
3452586b
RH
2735
2736 /* Nonzero if the function accesses a previous frame. */
2737 BOOL_BITFIELD accesses_prev_frame : 1;
2738
922e3e33
UB
2739 /* Set by ix86_compute_frame_layout and used by prologue/epilogue
2740 expander to determine the style used. */
3452586b
RH
2741 BOOL_BITFIELD use_fast_prologue_epilogue : 1;
2742
1e4490dc
UB
2743 /* Nonzero if the current function calls pc thunk and
2744 must not use the red zone. */
2745 BOOL_BITFIELD pc_thunk_call_expanded : 1;
2746
5bf5a10b
AO
2747 /* If true, the current function needs the default PIC register, not
2748 an alternate register (on x86) and must not use the red zone (on
2749 x86_64), even if it's a leaf function. We don't want the
2750 function to be regarded as non-leaf because TLS calls need not
2751 affect register allocation. This flag is set when a TLS call
2752 instruction is expanded within a function, and never reset, even
2753 if all such instructions are optimized away. Use the
2754 ix86_current_function_calls_tls_descriptor macro for a better
2755 approximation. */
3452586b
RH
2756 BOOL_BITFIELD tls_descriptor_call_expanded_p : 1;
2757
2758 /* If true, the current function's static chain is placed on the
2759 stack below the return address. */
2760 BOOL_BITFIELD static_chain_on_stack : 1;
25efe060 2761
529a6471
JJ
2762 /* If true, it is safe to not save/restore DRAP register. */
2763 BOOL_BITFIELD no_drap_save_restore : 1;
2764
f8071c05
L
2765 /* Function type. */
2766 ENUM_BITFIELD(function_type) func_type : 2;
2767
da99fd4a
L
2768 /* How to generate indirect branches. */
2769 ENUM_BITFIELD(indirect_branch) indirect_branch_type : 3;
2770
2771 /* If true, the current function has local indirect jumps, like
2772 "indirect_jump" or "tablejump". */
2773 BOOL_BITFIELD has_local_indirect_jump : 1;
2774
45e14019
L
2776 /* How to generate function returns. */
2776 ENUM_BITFIELD(indirect_branch) function_return_type : 3;
2777
f8071c05
L
2778 /* If true, the current function is a function specified with
2779 the "interrupt" or "no_caller_saved_registers" attribute. */
2780 BOOL_BITFIELD no_caller_saved_registers : 1;
2781
a0ff7835
L
2782 /* If true, there is a register available for argument passing. This
2783 is used only in ix86_function_ok_for_sibcall by 32-bit code to determine
2784 whether there is a scratch register available for an indirect sibcall. In
2785 64-bit code, rax, r10 and r11 are scratch registers which aren't used to
2786 pass arguments and can be used for an indirect sibcall. */
2787 BOOL_BITFIELD arg_reg_available : 1;
2788
d6d4d770 2789 /* If true, we're out-of-lining reg save/restore for regs clobbered
5d9d834d 2790 by 64-bit ms_abi functions calling a sysv_abi function. */
d6d4d770
DS
2791 BOOL_BITFIELD call_ms2sysv : 1;
2792
2793 /* If true, the incoming 16-byte aligned stack has an offset (of 8) and
5d9d834d 2794 needs padding prior to the out-of-line stub save/restore area. */
d6d4d770
DS
2795 BOOL_BITFIELD call_ms2sysv_pad_in : 1;
2796
d6d4d770
DS
2797 /* This is the number of extra registers saved by stub (valid range is
2798 0-6). Each additional register is only saved/restored by the stubs
2799 if all successive ones are. (Will always be zero when using a hard
2800 frame pointer.) */
2801 unsigned int call_ms2sysv_extra_regs:3;
2802
35c95658
L
2803 /* Nonzero if the function places outgoing arguments on stack. */
2804 BOOL_BITFIELD outgoing_args_on_stack : 1;
2805
708c728d
L
2806 /* If true, ENDBR is queued at function entrance. */
2807 BOOL_BITFIELD endbr_queued_at_entrance : 1;
2808
c2080a1f
L
2809 /* True if the function needs a stack frame. */
2810 BOOL_BITFIELD stack_frame_required : 1;
2811
cd3410cc
L
2812 /* The largest alignment, in bytes, of any stack slot actually used. */
2813 unsigned int max_used_stack_alignment;
2814
ec7ded37
RH
2815 /* During prologue/epilogue generation, the current frame state.
2816 Otherwise, the frame state at the end of the prologue. */
2817 struct machine_frame_state fs;
f81c9774
RH
2818
2819 /* During SEH output, this is non-null. */
2820 struct seh_frame_state * GTY((skip(""))) seh;
fa1a0d02 2821};
2bf6d935
ML
2822
2823extern GTY(()) tree sysv_va_list_type_node;
2824extern GTY(()) tree ms_va_list_type_node;
cd9c1ca8 2825#endif
fa1a0d02
JH
2826
2827#define ix86_stack_locals (cfun->machine->stack_locals)
4aab97f9
L
2828#define ix86_varargs_gpr_size (cfun->machine->varargs_gpr_size)
2829#define ix86_varargs_fpr_size (cfun->machine->varargs_fpr_size)
fa1a0d02 2830#define ix86_optimize_mode_switching (cfun->machine->optimize_mode_switching)
1e4490dc 2831#define ix86_pc_thunk_call_expanded (cfun->machine->pc_thunk_call_expanded)
5bf5a10b
AO
2832#define ix86_tls_descriptor_calls_expanded_in_cfun \
2833 (cfun->machine->tls_descriptor_call_expanded_p)
2834/* Since tls_descriptor_call_expanded is not cleared, even if all TLS
2835 calls are optimized away, we try to detect cases in which it was
2836 optimized away. Since such instructions contain a (use (reg REG_SP)), we can
2837 verify whether there's any such instruction live by testing that
2838 REG_SP is live. */
2839#define ix86_current_function_calls_tls_descriptor \
6fb5fa3c 2840 (ix86_tls_descriptor_calls_expanded_in_cfun && df_regs_ever_live_p (SP_REG))
3452586b 2841#define ix86_static_chain_on_stack (cfun->machine->static_chain_on_stack)
2ecf9ac7 2842#define ix86_red_zone_size (cfun->machine->frame.red_zone_size)
249e6b63 2843
1bc7c5b6
ZW
2844/* Control behavior of x86_file_start. */
2845#define X86_FILE_START_VERSION_DIRECTIVE false
2846#define X86_FILE_START_FLTUSED false
2847
7dcbf659
JH
2848/* Flag to mark data that is in the large address area. */
2849#define SYMBOL_FLAG_FAR_ADDR (SYMBOL_FLAG_MACH_DEP << 0)
2850#define SYMBOL_REF_FAR_ADDR_P(X) \
2851 ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_FAR_ADDR) != 0)
da489f73
RH
2852
2853/* Flags to mark dllimport/dllexport. Used by PE ports, but handy to
2854 have defined always, to avoid ifdefing. */
2855#define SYMBOL_FLAG_DLLIMPORT (SYMBOL_FLAG_MACH_DEP << 1)
2856#define SYMBOL_REF_DLLIMPORT_P(X) \
2857 ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_DLLIMPORT) != 0)
2858
2859#define SYMBOL_FLAG_DLLEXPORT (SYMBOL_FLAG_MACH_DEP << 2)
2860#define SYMBOL_REF_DLLEXPORT_P(X) \
2861 ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_DLLEXPORT) != 0)
2862
82c0e1a0
KT
2863#define SYMBOL_FLAG_STUBVAR (SYMBOL_FLAG_MACH_DEP << 4)
2864#define SYMBOL_REF_STUBVAR_P(X) \
2865 ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_STUBVAR) != 0)
2866
7942e47e
RY
2867extern void debug_ready_dispatch (void);
2868extern void debug_dispatch_window (int);
2869
91afcfa3
QN
2870/* The value at zero is only defined for the LZCNT and TZCNT
2871 instructions, not for the BSR/BSF insns in the original ISA. */
2872#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
1068ced5 2873 ((VALUE) = GET_MODE_BITSIZE (MODE), TARGET_BMI ? 1 : 0)
91afcfa3 2874#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
1068ced5 2875 ((VALUE) = GET_MODE_BITSIZE (MODE), TARGET_LZCNT ? 1 : 0)
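/* Illustrative note: LZCNT and TZCNT return the operand width when the
   source is zero (e.g. tzcnt of 0 in a 32-bit register yields 32), which
   is the value GET_MODE_BITSIZE supplies above; the legacy BSF/BSR
   instructions leave the destination undefined in that case.  */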
91afcfa3
QN
2876
2877
b8ce4e94
KT
2878/* Flags returned by ix86_get_callcvt (). */
2879#define IX86_CALLCVT_CDECL 0x1
2880#define IX86_CALLCVT_STDCALL 0x2
2881#define IX86_CALLCVT_FASTCALL 0x4
2882#define IX86_CALLCVT_THISCALL 0x8
2883#define IX86_CALLCVT_REGPARM 0x10
2884#define IX86_CALLCVT_SSEREGPARM 0x20
2885
2886#define IX86_BASE_CALLCVT(FLAGS) \
2887 ((FLAGS) & (IX86_CALLCVT_CDECL | IX86_CALLCVT_STDCALL \
2888 | IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL))
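/* For example, IX86_BASE_CALLCVT (IX86_CALLCVT_FASTCALL | IX86_CALLCVT_SSEREGPARM)
   yields IX86_CALLCVT_FASTCALL: the macro masks out the REGPARM/SSEREGPARM
   modifier bits and keeps only the base calling convention.  */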
2889
b86b9f44
MM
2890#define RECIP_MASK_NONE 0x00
2891#define RECIP_MASK_DIV 0x01
2892#define RECIP_MASK_SQRT 0x02
2893#define RECIP_MASK_VEC_DIV 0x04
2894#define RECIP_MASK_VEC_SQRT 0x08
2895#define RECIP_MASK_ALL (RECIP_MASK_DIV | RECIP_MASK_SQRT \
2896 | RECIP_MASK_VEC_DIV | RECIP_MASK_VEC_SQRT)
bbe996ec 2897#define RECIP_MASK_DEFAULT (RECIP_MASK_VEC_DIV | RECIP_MASK_VEC_SQRT)
b86b9f44
MM
2898
2899#define TARGET_RECIP_DIV ((recip_mask & RECIP_MASK_DIV) != 0)
2900#define TARGET_RECIP_SQRT ((recip_mask & RECIP_MASK_SQRT) != 0)
2901#define TARGET_RECIP_VEC_DIV ((recip_mask & RECIP_MASK_VEC_DIV) != 0)
2902#define TARGET_RECIP_VEC_SQRT ((recip_mask & RECIP_MASK_VEC_SQRT) != 0)
2903
ab2c4ec8
SS
2904/* Use 128-bit AVX instructions in the auto-vectorizer. */
2905#define TARGET_PREFER_AVX128 (prefer_vector_width_type == PVW_AVX128)
2906/* Use 256-bit AVX instructions in the auto-vectorizer. */
02a70367
SS
2907#define TARGET_PREFER_AVX256 (TARGET_PREFER_AVX128 \
2908 || prefer_vector_width_type == PVW_AVX256)
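/* Note that TARGET_PREFER_AVX128 implies TARGET_PREFER_AVX256: each macro
   asks whether the preferred vector width is at most the named size.  */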
ab2c4ec8 2909
c2c601b2
L
2910#define TARGET_INDIRECT_BRANCH_REGISTER \
2911 (ix86_indirect_branch_register \
2912 || cfun->machine->indirect_branch_type != indirect_branch_keep)
2913
5dcfdccd
KY
2914#define IX86_HLE_ACQUIRE (1 << 16)
2915#define IX86_HLE_RELEASE (1 << 17)
2916
e83b8e2e
JJ
2917/* For switching between functions with different target attributes. */
2918#define SWITCHABLE_TARGET 1
2919
44d0de8d
UB
2920#define TARGET_SUPPORTS_WIDE_INT 1
2921
2bf6d935
ML
2922#if !defined(GENERATOR_FILE) && !defined(IN_LIBGCC2)
2923extern enum attr_cpu ix86_schedule;
2924
2925#define NUM_X86_64_MS_CLOBBERED_REGS 12
2926#endif
2927
c98f8742
JVA
2928/*
2929Local variables:
2930version-control: t
2931End:
2932*/