/* Definitions of target machine for GCC for IA-32.
   Copyright (C) 1988-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* The purpose of this file is to define the characteristics of the i386,
   independent of assembler syntax or operating system.

   Three other files build on this one to describe a specific assembler syntax:
   bsd386.h, att386.h, and sun386.h.

   The actual tm.h file for a particular system should include
   this file, and then the file for the appropriate assembler syntax.

   Many macros that specify assembler syntax are omitted entirely from
   this file because they really belong in the files for particular
   assemblers.  These include RP, IP, LPREFIX, PUT_OP_SIZE, USE_STAR,
   ADDR_BEG, ADDR_END, PRINT_IREG, PRINT_SCALE, PRINT_B_I_S, and many
   that start with ASM_ or end in ASM_OP.  */

/* Redefines for option macros.  */

#define TARGET_64BIT TARGET_ISA_64BIT
#define TARGET_64BIT_P(x) TARGET_ISA_64BIT_P(x)
#define TARGET_MMX TARGET_ISA_MMX
#define TARGET_MMX_P(x) TARGET_ISA_MMX_P(x)
#define TARGET_3DNOW TARGET_ISA_3DNOW
#define TARGET_3DNOW_P(x) TARGET_ISA_3DNOW_P(x)
#define TARGET_3DNOW_A TARGET_ISA_3DNOW_A
#define TARGET_3DNOW_A_P(x) TARGET_ISA_3DNOW_A_P(x)
#define TARGET_SSE TARGET_ISA_SSE
#define TARGET_SSE_P(x) TARGET_ISA_SSE_P(x)
#define TARGET_SSE2 TARGET_ISA_SSE2
#define TARGET_SSE2_P(x) TARGET_ISA_SSE2_P(x)
#define TARGET_SSE3 TARGET_ISA_SSE3
#define TARGET_SSE3_P(x) TARGET_ISA_SSE3_P(x)
#define TARGET_SSSE3 TARGET_ISA_SSSE3
#define TARGET_SSSE3_P(x) TARGET_ISA_SSSE3_P(x)
#define TARGET_SSE4_1 TARGET_ISA_SSE4_1
#define TARGET_SSE4_1_P(x) TARGET_ISA_SSE4_1_P(x)
#define TARGET_SSE4_2 TARGET_ISA_SSE4_2
#define TARGET_SSE4_2_P(x) TARGET_ISA_SSE4_2_P(x)
#define TARGET_AVX TARGET_ISA_AVX
#define TARGET_AVX_P(x) TARGET_ISA_AVX_P(x)
#define TARGET_AVX2 TARGET_ISA_AVX2
#define TARGET_AVX2_P(x) TARGET_ISA_AVX2_P(x)
#define TARGET_AVX512F TARGET_ISA_AVX512F
#define TARGET_AVX512F_P(x) TARGET_ISA_AVX512F_P(x)
#define TARGET_AVX512PF TARGET_ISA_AVX512PF
#define TARGET_AVX512PF_P(x) TARGET_ISA_AVX512PF_P(x)
#define TARGET_AVX512ER TARGET_ISA_AVX512ER
#define TARGET_AVX512ER_P(x) TARGET_ISA_AVX512ER_P(x)
#define TARGET_AVX512CD TARGET_ISA_AVX512CD
#define TARGET_AVX512CD_P(x) TARGET_ISA_AVX512CD_P(x)
#define TARGET_AVX512DQ TARGET_ISA_AVX512DQ
#define TARGET_AVX512DQ_P(x) TARGET_ISA_AVX512DQ_P(x)
#define TARGET_AVX512BW TARGET_ISA_AVX512BW
#define TARGET_AVX512BW_P(x) TARGET_ISA_AVX512BW_P(x)
#define TARGET_AVX512VL TARGET_ISA_AVX512VL
#define TARGET_AVX512VL_P(x) TARGET_ISA_AVX512VL_P(x)
#define TARGET_AVX512VBMI TARGET_ISA_AVX512VBMI
#define TARGET_AVX512VBMI_P(x) TARGET_ISA_AVX512VBMI_P(x)
#define TARGET_AVX512IFMA TARGET_ISA_AVX512IFMA
#define TARGET_AVX512IFMA_P(x) TARGET_ISA_AVX512IFMA_P(x)
#define TARGET_AVX5124FMAPS TARGET_ISA2_AVX5124FMAPS
#define TARGET_AVX5124FMAPS_P(x) TARGET_ISA2_AVX5124FMAPS_P(x)
#define TARGET_AVX5124VNNIW TARGET_ISA2_AVX5124VNNIW
#define TARGET_AVX5124VNNIW_P(x) TARGET_ISA2_AVX5124VNNIW_P(x)
#define TARGET_AVX512VBMI2 TARGET_ISA_AVX512VBMI2
#define TARGET_AVX512VBMI2_P(x) TARGET_ISA_AVX512VBMI2_P(x)
#define TARGET_AVX512VPOPCNTDQ TARGET_ISA_AVX512VPOPCNTDQ
#define TARGET_AVX512VPOPCNTDQ_P(x) TARGET_ISA_AVX512VPOPCNTDQ_P(x)
#define TARGET_AVX512VNNI TARGET_ISA_AVX512VNNI
#define TARGET_AVX512VNNI_P(x) TARGET_ISA_AVX512VNNI_P(x)
#define TARGET_AVX512BITALG TARGET_ISA_AVX512BITALG
#define TARGET_AVX512BITALG_P(x) TARGET_ISA_AVX512BITALG_P(x)
#define TARGET_AVX512VP2INTERSECT TARGET_ISA2_AVX512VP2INTERSECT
#define TARGET_AVX512VP2INTERSECT_P(x) TARGET_ISA2_AVX512VP2INTERSECT_P(x)
#define TARGET_FMA TARGET_ISA_FMA
#define TARGET_FMA_P(x) TARGET_ISA_FMA_P(x)
#define TARGET_SSE4A TARGET_ISA_SSE4A
#define TARGET_SSE4A_P(x) TARGET_ISA_SSE4A_P(x)
#define TARGET_FMA4 TARGET_ISA_FMA4
#define TARGET_FMA4_P(x) TARGET_ISA_FMA4_P(x)
#define TARGET_XOP TARGET_ISA_XOP
#define TARGET_XOP_P(x) TARGET_ISA_XOP_P(x)
#define TARGET_LWP TARGET_ISA_LWP
#define TARGET_LWP_P(x) TARGET_ISA_LWP_P(x)
#define TARGET_ABM TARGET_ISA_ABM
#define TARGET_ABM_P(x) TARGET_ISA_ABM_P(x)
#define TARGET_PCONFIG TARGET_ISA2_PCONFIG
#define TARGET_PCONFIG_P(x) TARGET_ISA2_PCONFIG_P(x)
#define TARGET_WBNOINVD TARGET_ISA2_WBNOINVD
#define TARGET_WBNOINVD_P(x) TARGET_ISA2_WBNOINVD_P(x)
#define TARGET_SGX TARGET_ISA2_SGX
#define TARGET_SGX_P(x) TARGET_ISA2_SGX_P(x)
#define TARGET_RDPID TARGET_ISA2_RDPID
#define TARGET_RDPID_P(x) TARGET_ISA2_RDPID_P(x)
#define TARGET_GFNI TARGET_ISA_GFNI
#define TARGET_GFNI_P(x) TARGET_ISA_GFNI_P(x)
#define TARGET_VAES TARGET_ISA2_VAES
#define TARGET_VAES_P(x) TARGET_ISA2_VAES_P(x)
#define TARGET_VPCLMULQDQ TARGET_ISA_VPCLMULQDQ
#define TARGET_VPCLMULQDQ_P(x) TARGET_ISA_VPCLMULQDQ_P(x)
#define TARGET_BMI TARGET_ISA_BMI
#define TARGET_BMI_P(x) TARGET_ISA_BMI_P(x)
#define TARGET_BMI2 TARGET_ISA_BMI2
#define TARGET_BMI2_P(x) TARGET_ISA_BMI2_P(x)
#define TARGET_LZCNT TARGET_ISA_LZCNT
#define TARGET_LZCNT_P(x) TARGET_ISA_LZCNT_P(x)
#define TARGET_TBM TARGET_ISA_TBM
#define TARGET_TBM_P(x) TARGET_ISA_TBM_P(x)
#define TARGET_POPCNT TARGET_ISA_POPCNT
#define TARGET_POPCNT_P(x) TARGET_ISA_POPCNT_P(x)
#define TARGET_SAHF TARGET_ISA_SAHF
#define TARGET_SAHF_P(x) TARGET_ISA_SAHF_P(x)
#define TARGET_MOVBE TARGET_ISA2_MOVBE
#define TARGET_MOVBE_P(x) TARGET_ISA2_MOVBE_P(x)
#define TARGET_CRC32 TARGET_ISA_CRC32
#define TARGET_CRC32_P(x) TARGET_ISA_CRC32_P(x)
#define TARGET_AES TARGET_ISA_AES
#define TARGET_AES_P(x) TARGET_ISA_AES_P(x)
#define TARGET_SHA TARGET_ISA_SHA
#define TARGET_SHA_P(x) TARGET_ISA_SHA_P(x)
#define TARGET_CLFLUSHOPT TARGET_ISA_CLFLUSHOPT
#define TARGET_CLFLUSHOPT_P(x) TARGET_ISA_CLFLUSHOPT_P(x)
#define TARGET_CLZERO TARGET_ISA2_CLZERO
#define TARGET_CLZERO_P(x) TARGET_ISA2_CLZERO_P(x)
#define TARGET_XSAVEC TARGET_ISA_XSAVEC
#define TARGET_XSAVEC_P(x) TARGET_ISA_XSAVEC_P(x)
#define TARGET_XSAVES TARGET_ISA_XSAVES
#define TARGET_XSAVES_P(x) TARGET_ISA_XSAVES_P(x)
#define TARGET_PCLMUL TARGET_ISA_PCLMUL
#define TARGET_PCLMUL_P(x) TARGET_ISA_PCLMUL_P(x)
#define TARGET_CMPXCHG16B TARGET_ISA2_CX16
#define TARGET_CMPXCHG16B_P(x) TARGET_ISA2_CX16_P(x)
#define TARGET_FSGSBASE TARGET_ISA_FSGSBASE
#define TARGET_FSGSBASE_P(x) TARGET_ISA_FSGSBASE_P(x)
#define TARGET_RDRND TARGET_ISA_RDRND
#define TARGET_RDRND_P(x) TARGET_ISA_RDRND_P(x)
#define TARGET_F16C TARGET_ISA_F16C
#define TARGET_F16C_P(x) TARGET_ISA_F16C_P(x)
#define TARGET_RTM TARGET_ISA_RTM
#define TARGET_RTM_P(x) TARGET_ISA_RTM_P(x)
#define TARGET_HLE TARGET_ISA2_HLE
#define TARGET_HLE_P(x) TARGET_ISA2_HLE_P(x)
#define TARGET_RDSEED TARGET_ISA_RDSEED
#define TARGET_RDSEED_P(x) TARGET_ISA_RDSEED_P(x)
#define TARGET_PRFCHW TARGET_ISA_PRFCHW
#define TARGET_PRFCHW_P(x) TARGET_ISA_PRFCHW_P(x)
#define TARGET_ADX TARGET_ISA_ADX
#define TARGET_ADX_P(x) TARGET_ISA_ADX_P(x)
#define TARGET_FXSR TARGET_ISA_FXSR
#define TARGET_FXSR_P(x) TARGET_ISA_FXSR_P(x)
#define TARGET_XSAVE TARGET_ISA_XSAVE
#define TARGET_XSAVE_P(x) TARGET_ISA_XSAVE_P(x)
#define TARGET_XSAVEOPT TARGET_ISA_XSAVEOPT
#define TARGET_XSAVEOPT_P(x) TARGET_ISA_XSAVEOPT_P(x)
#define TARGET_PREFETCHWT1 TARGET_ISA_PREFETCHWT1
#define TARGET_PREFETCHWT1_P(x) TARGET_ISA_PREFETCHWT1_P(x)
#define TARGET_CLWB TARGET_ISA_CLWB
#define TARGET_CLWB_P(x) TARGET_ISA_CLWB_P(x)
#define TARGET_MWAITX TARGET_ISA2_MWAITX
#define TARGET_MWAITX_P(x) TARGET_ISA2_MWAITX_P(x)
#define TARGET_PKU TARGET_ISA_PKU
#define TARGET_PKU_P(x) TARGET_ISA_PKU_P(x)
#define TARGET_SHSTK TARGET_ISA_SHSTK
#define TARGET_SHSTK_P(x) TARGET_ISA_SHSTK_P(x)
#define TARGET_MOVDIRI TARGET_ISA_MOVDIRI
#define TARGET_MOVDIRI_P(x) TARGET_ISA_MOVDIRI_P(x)
#define TARGET_MOVDIR64B TARGET_ISA2_MOVDIR64B
#define TARGET_MOVDIR64B_P(x) TARGET_ISA2_MOVDIR64B_P(x)
#define TARGET_WAITPKG TARGET_ISA2_WAITPKG
#define TARGET_WAITPKG_P(x) TARGET_ISA2_WAITPKG_P(x)
#define TARGET_CLDEMOTE TARGET_ISA2_CLDEMOTE
#define TARGET_CLDEMOTE_P(x) TARGET_ISA2_CLDEMOTE_P(x)
#define TARGET_PTWRITE TARGET_ISA2_PTWRITE
#define TARGET_PTWRITE_P(x) TARGET_ISA2_PTWRITE_P(x)
#define TARGET_AVX512BF16 TARGET_ISA2_AVX512BF16
#define TARGET_AVX512BF16_P(x) TARGET_ISA2_AVX512BF16_P(x)
#define TARGET_ENQCMD TARGET_ISA2_ENQCMD
#define TARGET_ENQCMD_P(x) TARGET_ISA2_ENQCMD_P(x)

#define TARGET_LP64 TARGET_ABI_64
#define TARGET_LP64_P(x) TARGET_ABI_64_P(x)
#define TARGET_X32 TARGET_ABI_X32
#define TARGET_X32_P(x) TARGET_ABI_X32_P(x)
#define TARGET_16BIT TARGET_CODE16
#define TARGET_16BIT_P(x) TARGET_CODE16_P(x)

#define TARGET_MMX_WITH_SSE (TARGET_64BIT && TARGET_SSE2)

#include "config/vxworks-dummy.h"

#include "config/i386/i386-opts.h"

#define MAX_STRINGOP_ALGS 4

/* Specify what algorithm to use for stringops on known size.
   When size is unknown, the UNKNOWN_SIZE alg is used.  When size is
   known at compile time or estimated via feedback, the SIZE array
   is walked in order until MAX is greater than the estimate (or -1
   means infinity).  The corresponding ALG is used then.
   When NOALIGN is true the code guaranteeing the alignment of the memory
   block is skipped.

   For example the initializer:
    {{256, loop}, {-1, rep_prefix_4_byte}}
   will use loop for blocks smaller than or equal to 256 bytes; the rep
   prefix will be used otherwise.  */
struct stringop_algs
{
  const enum stringop_alg unknown_size;
  const struct stringop_strategy {
    const int max;
    const enum stringop_alg alg;
    int noalign;
  } size [MAX_STRINGOP_ALGS];
};

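/* Illustrative sketch, not part of the real interface: a per-CPU cost table
   could describe one memcpy strategy with an initializer along these lines,
   where the name example_memcpy is hypothetical and the real tables live in
   the processor cost definitions:

     static stringop_algs example_memcpy
       = {libcall, {{256, loop, false}, {-1, rep_prefix_4_byte, false}}};

   This says: for blocks of at most 256 bytes expand an inline loop, for
   larger sizes use rep_prefix_4_byte, and for unknown sizes call the
   library routine.  */
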
/* Define the specific costs for a given cpu.  NB: hard_register is used
   by TARGET_REGISTER_MOVE_COST and TARGET_MEMORY_MOVE_COST to compute
   hard register move costs by the register allocator.  Relative costs of
   pseudo register load and store versus pseudo register moves in RTL
   expressions for TARGET_RTX_COSTS can be different from relative
   costs of hard registers to get the most efficient operations with
   pseudo registers.  */

struct processor_costs {
  /* Costs used by register allocator.  integer->integer register move
     cost is 2.  */
  struct
  {
    const int movzbl_load;	/* cost of loading using movzbl */
    const int int_load[3];	/* cost of loading integer registers
				   in QImode, HImode and SImode relative
				   to reg-reg move (2).  */
    const int int_store[3];	/* cost of storing integer register
				   in QImode, HImode and SImode */
    const int fp_move;		/* cost of reg,reg fld/fst */
    const int fp_load[3];	/* cost of loading FP register
				   in SFmode, DFmode and XFmode */
    const int fp_store[3];	/* cost of storing FP register
				   in SFmode, DFmode and XFmode */
    const int mmx_move;		/* cost of moving MMX register.  */
    const int mmx_load[2];	/* cost of loading MMX register
				   in SImode and DImode */
    const int mmx_store[2];	/* cost of storing MMX register
				   in SImode and DImode */
    const int xmm_move;		/* cost of moving XMM register.  */
    const int ymm_move;		/* cost of moving YMM register.  */
    const int zmm_move;		/* cost of moving ZMM register.  */
    const int sse_load[5];	/* cost of loading SSE register
				   in 32bit, 64bit, 128bit, 256bit and 512bit */
    const int sse_store[5];	/* cost of storing SSE register
				   in 32bit, 64bit, 128bit, 256bit and 512bit */
    const int sse_to_integer;	/* cost of moving SSE register to integer.  */
    const int integer_to_sse;	/* cost of moving integer register to SSE.  */
  } hard_register;

  const int add;		/* cost of an add instruction */
  const int lea;		/* cost of a lea instruction */
  const int shift_var;		/* variable shift costs */
  const int shift_const;	/* constant shift costs */
  const int mult_init[5];	/* cost of starting a multiply
				   in QImode, HImode, SImode, DImode, TImode */
  const int mult_bit;		/* cost of multiply per each bit set */
  const int divide[5];		/* cost of a divide/mod
				   in QImode, HImode, SImode, DImode, TImode */
  int movsx;			/* The cost of movsx operation.  */
  int movzx;			/* The cost of movzx operation.  */
  const int large_insn;		/* insns larger than this cost more */
  const int move_ratio;		/* The threshold of number of scalar
				   memory-to-memory move insns.  */
  const int clear_ratio;	/* The threshold of number of scalar
				   memory clearing insns.  */
  const int int_load[3];	/* cost of loading integer registers
				   in QImode, HImode and SImode relative
				   to reg-reg move (2).  */
  const int int_store[3];	/* cost of storing integer register
				   in QImode, HImode and SImode */
  const int sse_load[5];	/* cost of loading SSE register
				   in 32bit, 64bit, 128bit, 256bit and 512bit */
  const int sse_store[5];	/* cost of storing SSE register
				   in 32bit, 64bit, 128bit, 256bit and 512bit */
  const int sse_unaligned_load[5];/* cost of unaligned load.  */
  const int sse_unaligned_store[5];/* cost of unaligned store.  */
  const int xmm_move, ymm_move, zmm_move;
				/* cost of moving XMM, YMM and ZMM register.  */
  const int sse_to_integer;	/* cost of moving SSE register to integer.  */
  const int gather_static, gather_per_elt; /* Cost of gather load is computed
				   as static + per_item * nelts.  */
  const int scatter_static, scatter_per_elt; /* Cost of scatter store is
				   computed as static + per_item * nelts.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int prefetch_block;	/* bytes moved to cache for prefetch.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
				   operations.  */
  const int branch_cost;	/* Default value for BRANCH_COST.  */
  const int fadd;		/* cost of FADD and FSUB instructions.  */
  const int fmul;		/* cost of FMUL instruction.  */
  const int fdiv;		/* cost of FDIV instruction.  */
  const int fabs;		/* cost of FABS instruction.  */
  const int fchs;		/* cost of FCHS instruction.  */
  const int fsqrt;		/* cost of FSQRT instruction.  */
  const int sse_op;		/* cost of cheap SSE instruction.  */
  const int addss;		/* cost of ADDSS/SD SUBSS/SD instructions.  */
  const int mulss;		/* cost of MULSS instructions.  */
  const int mulsd;		/* cost of MULSD instructions.  */
  const int fmass;		/* cost of FMASS instructions.  */
  const int fmasd;		/* cost of FMASD instructions.  */
  const int divss;		/* cost of DIVSS instructions.  */
  const int divsd;		/* cost of DIVSD instructions.  */
  const int sqrtss;		/* cost of SQRTSS instructions.  */
  const int sqrtsd;		/* cost of SQRTSD instructions.  */
  const int reassoc_int, reassoc_fp, reassoc_vec_int, reassoc_vec_fp;
				/* Specify reassociation width for integer,
				   fp, vector integer and vector fp
				   operations.  Generally should correspond
				   to number of instructions executed in
				   parallel.  See also
				   ix86_reassociation_width.  */
  struct stringop_algs *memcpy, *memset;
				/* Specify what algorithms to use for
				   stringops of known and unknown size
				   (see struct stringop_algs above).  */
  const int cond_taken_branch_cost;    /* Cost of taken branch for vectorizer
					  cost model.  */
  const int cond_not_taken_branch_cost;/* Cost of not taken branch for
					  vectorizer cost model.  */

  /* The "0:0:8" label alignment specified for some processors generates
     secondary 8-byte alignment only for those label/jump/loop targets
     which have primary alignment.  */
  const char *const align_loop;		/* Loop alignment.  */
  const char *const align_jump;		/* Jump alignment.  */
  const char *const align_label;	/* Label alignment.  */
  const char *const align_func;		/* Function alignment.  */
};

extern const struct processor_costs *ix86_cost;
extern const struct processor_costs ix86_size_cost;

#define ix86_cur_cost() \
  (optimize_insn_for_size_p () ? &ix86_size_cost : ix86_cost)

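/* Illustrative sketch, not code that exists in this header: backend code is
   expected to read the active table through ix86_cur_cost, for example

     const struct processor_costs *costs = ix86_cur_cost ();
     int add_cost = costs->add;
     int sse_cost = costs->sse_op;

   so functions optimized for size see ix86_size_cost while the rest use the
   cost table selected by -mtune.  */
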
/* Macros used in the machine description to test the flags.  */

/* configure can arrange to change it.  */

#ifndef TARGET_CPU_DEFAULT
#define TARGET_CPU_DEFAULT PROCESSOR_GENERIC
#endif

#ifndef TARGET_FPMATH_DEFAULT
#define TARGET_FPMATH_DEFAULT \
  (TARGET_64BIT && TARGET_SSE ? FPMATH_SSE : FPMATH_387)
#endif

#ifndef TARGET_FPMATH_DEFAULT_P
#define TARGET_FPMATH_DEFAULT_P(x) \
  (TARGET_64BIT_P(x) && TARGET_SSE_P(x) ? FPMATH_SSE : FPMATH_387)
#endif

/* If the i387 is disabled or -miamcu is used, then do not return
   values in it.  */
#define TARGET_FLOAT_RETURNS_IN_80387 \
  (TARGET_FLOAT_RETURNS && TARGET_80387 && !TARGET_IAMCU)
#define TARGET_FLOAT_RETURNS_IN_80387_P(x) \
  (TARGET_FLOAT_RETURNS_P(x) && TARGET_80387_P(x) && !TARGET_IAMCU_P(x))

/* 64bit Sledgehammer mode.  For libgcc2 we make sure this is a
   compile-time constant.  */
#ifdef IN_LIBGCC2
#undef TARGET_64BIT
#ifdef __x86_64__
#define TARGET_64BIT 1
#else
#define TARGET_64BIT 0
#endif
#else
#ifndef TARGET_BI_ARCH
#undef TARGET_64BIT
#undef TARGET_64BIT_P
#if TARGET_64BIT_DEFAULT
#define TARGET_64BIT 1
#define TARGET_64BIT_P(x) 1
#else
#define TARGET_64BIT 0
#define TARGET_64BIT_P(x) 0
#endif
#endif
#endif

#define HAS_LONG_COND_BRANCH 1
#define HAS_LONG_UNCOND_BRANCH 1

#define TARGET_386 (ix86_tune == PROCESSOR_I386)
#define TARGET_486 (ix86_tune == PROCESSOR_I486)
#define TARGET_PENTIUM (ix86_tune == PROCESSOR_PENTIUM)
#define TARGET_PENTIUMPRO (ix86_tune == PROCESSOR_PENTIUMPRO)
#define TARGET_GEODE (ix86_tune == PROCESSOR_GEODE)
#define TARGET_K6 (ix86_tune == PROCESSOR_K6)
#define TARGET_ATHLON (ix86_tune == PROCESSOR_ATHLON)
#define TARGET_PENTIUM4 (ix86_tune == PROCESSOR_PENTIUM4)
#define TARGET_K8 (ix86_tune == PROCESSOR_K8)
#define TARGET_ATHLON_K8 (TARGET_K8 || TARGET_ATHLON)
#define TARGET_NOCONA (ix86_tune == PROCESSOR_NOCONA)
#define TARGET_CORE2 (ix86_tune == PROCESSOR_CORE2)
#define TARGET_NEHALEM (ix86_tune == PROCESSOR_NEHALEM)
#define TARGET_SANDYBRIDGE (ix86_tune == PROCESSOR_SANDYBRIDGE)
#define TARGET_HASWELL (ix86_tune == PROCESSOR_HASWELL)
#define TARGET_BONNELL (ix86_tune == PROCESSOR_BONNELL)
#define TARGET_SILVERMONT (ix86_tune == PROCESSOR_SILVERMONT)
#define TARGET_GOLDMONT (ix86_tune == PROCESSOR_GOLDMONT)
#define TARGET_GOLDMONT_PLUS (ix86_tune == PROCESSOR_GOLDMONT_PLUS)
#define TARGET_TREMONT (ix86_tune == PROCESSOR_TREMONT)
#define TARGET_KNL (ix86_tune == PROCESSOR_KNL)
#define TARGET_KNM (ix86_tune == PROCESSOR_KNM)
#define TARGET_SKYLAKE (ix86_tune == PROCESSOR_SKYLAKE)
#define TARGET_SKYLAKE_AVX512 (ix86_tune == PROCESSOR_SKYLAKE_AVX512)
#define TARGET_CANNONLAKE (ix86_tune == PROCESSOR_CANNONLAKE)
#define TARGET_ICELAKE_CLIENT (ix86_tune == PROCESSOR_ICELAKE_CLIENT)
#define TARGET_ICELAKE_SERVER (ix86_tune == PROCESSOR_ICELAKE_SERVER)
#define TARGET_CASCADELAKE (ix86_tune == PROCESSOR_CASCADELAKE)
#define TARGET_TIGERLAKE (ix86_tune == PROCESSOR_TIGERLAKE)
#define TARGET_COOPERLAKE (ix86_tune == PROCESSOR_COOPERLAKE)
#define TARGET_INTEL (ix86_tune == PROCESSOR_INTEL)
#define TARGET_GENERIC (ix86_tune == PROCESSOR_GENERIC)
#define TARGET_AMDFAM10 (ix86_tune == PROCESSOR_AMDFAM10)
#define TARGET_BDVER1 (ix86_tune == PROCESSOR_BDVER1)
#define TARGET_BDVER2 (ix86_tune == PROCESSOR_BDVER2)
#define TARGET_BDVER3 (ix86_tune == PROCESSOR_BDVER3)
#define TARGET_BDVER4 (ix86_tune == PROCESSOR_BDVER4)
#define TARGET_BTVER1 (ix86_tune == PROCESSOR_BTVER1)
#define TARGET_BTVER2 (ix86_tune == PROCESSOR_BTVER2)
#define TARGET_ZNVER1 (ix86_tune == PROCESSOR_ZNVER1)
#define TARGET_ZNVER2 (ix86_tune == PROCESSOR_ZNVER2)

/* Feature tests against the various tunings.  */
enum ix86_tune_indices {
#undef DEF_TUNE
#define DEF_TUNE(tune, name, selector) tune,
#include "x86-tune.def"
#undef DEF_TUNE
X86_TUNE_LAST
};

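/* Illustrative note, not an exact quote from x86-tune.def: each DEF_TUNE
   entry there has roughly the shape

     DEF_TUNE (X86_TUNE_USE_LEAVE, "use_leave", m_386 | m_CORE_ALL | ...)

   so with the temporary definition above the #include expands into a plain
   enumerator list (X86_TUNE_USE_LEAVE, X86_TUNE_PUSH_MEMORY, ...), with
   X86_TUNE_LAST giving the number of tuning flags.  */
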
extern unsigned char ix86_tune_features[X86_TUNE_LAST];

#define TARGET_USE_LEAVE ix86_tune_features[X86_TUNE_USE_LEAVE]
#define TARGET_PUSH_MEMORY ix86_tune_features[X86_TUNE_PUSH_MEMORY]
#define TARGET_ZERO_EXTEND_WITH_AND \
  ix86_tune_features[X86_TUNE_ZERO_EXTEND_WITH_AND]
#define TARGET_UNROLL_STRLEN ix86_tune_features[X86_TUNE_UNROLL_STRLEN]
#define TARGET_BRANCH_PREDICTION_HINTS \
  ix86_tune_features[X86_TUNE_BRANCH_PREDICTION_HINTS]
#define TARGET_DOUBLE_WITH_ADD ix86_tune_features[X86_TUNE_DOUBLE_WITH_ADD]
#define TARGET_USE_SAHF ix86_tune_features[X86_TUNE_USE_SAHF]
#define TARGET_MOVX ix86_tune_features[X86_TUNE_MOVX]
#define TARGET_PARTIAL_REG_STALL ix86_tune_features[X86_TUNE_PARTIAL_REG_STALL]
#define TARGET_PARTIAL_FLAG_REG_STALL \
  ix86_tune_features[X86_TUNE_PARTIAL_FLAG_REG_STALL]
#define TARGET_LCP_STALL \
  ix86_tune_features[X86_TUNE_LCP_STALL]
#define TARGET_USE_HIMODE_FIOP ix86_tune_features[X86_TUNE_USE_HIMODE_FIOP]
#define TARGET_USE_SIMODE_FIOP ix86_tune_features[X86_TUNE_USE_SIMODE_FIOP]
#define TARGET_USE_MOV0 ix86_tune_features[X86_TUNE_USE_MOV0]
#define TARGET_USE_CLTD ix86_tune_features[X86_TUNE_USE_CLTD]
#define TARGET_USE_XCHGB ix86_tune_features[X86_TUNE_USE_XCHGB]
#define TARGET_SPLIT_LONG_MOVES ix86_tune_features[X86_TUNE_SPLIT_LONG_MOVES]
#define TARGET_READ_MODIFY_WRITE ix86_tune_features[X86_TUNE_READ_MODIFY_WRITE]
#define TARGET_READ_MODIFY ix86_tune_features[X86_TUNE_READ_MODIFY]
#define TARGET_PROMOTE_QImode ix86_tune_features[X86_TUNE_PROMOTE_QIMODE]
#define TARGET_FAST_PREFIX ix86_tune_features[X86_TUNE_FAST_PREFIX]
#define TARGET_SINGLE_STRINGOP ix86_tune_features[X86_TUNE_SINGLE_STRINGOP]
#define TARGET_MISALIGNED_MOVE_STRING_PRO_EPILOGUES \
  ix86_tune_features[X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES]
#define TARGET_QIMODE_MATH ix86_tune_features[X86_TUNE_QIMODE_MATH]
#define TARGET_HIMODE_MATH ix86_tune_features[X86_TUNE_HIMODE_MATH]
#define TARGET_PROMOTE_QI_REGS ix86_tune_features[X86_TUNE_PROMOTE_QI_REGS]
#define TARGET_PROMOTE_HI_REGS ix86_tune_features[X86_TUNE_PROMOTE_HI_REGS]
#define TARGET_SINGLE_POP ix86_tune_features[X86_TUNE_SINGLE_POP]
#define TARGET_DOUBLE_POP ix86_tune_features[X86_TUNE_DOUBLE_POP]
#define TARGET_SINGLE_PUSH ix86_tune_features[X86_TUNE_SINGLE_PUSH]
#define TARGET_DOUBLE_PUSH ix86_tune_features[X86_TUNE_DOUBLE_PUSH]
#define TARGET_INTEGER_DFMODE_MOVES \
  ix86_tune_features[X86_TUNE_INTEGER_DFMODE_MOVES]
#define TARGET_PARTIAL_REG_DEPENDENCY \
  ix86_tune_features[X86_TUNE_PARTIAL_REG_DEPENDENCY]
#define TARGET_SSE_PARTIAL_REG_DEPENDENCY \
  ix86_tune_features[X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY]
#define TARGET_SSE_UNALIGNED_LOAD_OPTIMAL \
  ix86_tune_features[X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL]
#define TARGET_SSE_UNALIGNED_STORE_OPTIMAL \
  ix86_tune_features[X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL]
#define TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL \
  ix86_tune_features[X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL]
#define TARGET_SSE_SPLIT_REGS ix86_tune_features[X86_TUNE_SSE_SPLIT_REGS]
#define TARGET_SSE_TYPELESS_STORES \
  ix86_tune_features[X86_TUNE_SSE_TYPELESS_STORES]
#define TARGET_SSE_LOAD0_BY_PXOR ix86_tune_features[X86_TUNE_SSE_LOAD0_BY_PXOR]
#define TARGET_MEMORY_MISMATCH_STALL \
  ix86_tune_features[X86_TUNE_MEMORY_MISMATCH_STALL]
#define TARGET_PROLOGUE_USING_MOVE \
  ix86_tune_features[X86_TUNE_PROLOGUE_USING_MOVE]
#define TARGET_EPILOGUE_USING_MOVE \
  ix86_tune_features[X86_TUNE_EPILOGUE_USING_MOVE]
#define TARGET_SHIFT1 ix86_tune_features[X86_TUNE_SHIFT1]
#define TARGET_USE_FFREEP ix86_tune_features[X86_TUNE_USE_FFREEP]
#define TARGET_INTER_UNIT_MOVES_TO_VEC \
  ix86_tune_features[X86_TUNE_INTER_UNIT_MOVES_TO_VEC]
#define TARGET_INTER_UNIT_MOVES_FROM_VEC \
  ix86_tune_features[X86_TUNE_INTER_UNIT_MOVES_FROM_VEC]
#define TARGET_INTER_UNIT_CONVERSIONS \
  ix86_tune_features[X86_TUNE_INTER_UNIT_CONVERSIONS]
#define TARGET_FOUR_JUMP_LIMIT ix86_tune_features[X86_TUNE_FOUR_JUMP_LIMIT]
#define TARGET_SCHEDULE ix86_tune_features[X86_TUNE_SCHEDULE]
#define TARGET_USE_BT ix86_tune_features[X86_TUNE_USE_BT]
#define TARGET_USE_INCDEC ix86_tune_features[X86_TUNE_USE_INCDEC]
#define TARGET_PAD_RETURNS ix86_tune_features[X86_TUNE_PAD_RETURNS]
#define TARGET_PAD_SHORT_FUNCTION \
  ix86_tune_features[X86_TUNE_PAD_SHORT_FUNCTION]
#define TARGET_EXT_80387_CONSTANTS \
  ix86_tune_features[X86_TUNE_EXT_80387_CONSTANTS]
#define TARGET_AVOID_VECTOR_DECODE \
  ix86_tune_features[X86_TUNE_AVOID_VECTOR_DECODE]
#define TARGET_TUNE_PROMOTE_HIMODE_IMUL \
  ix86_tune_features[X86_TUNE_PROMOTE_HIMODE_IMUL]
#define TARGET_SLOW_IMUL_IMM32_MEM \
  ix86_tune_features[X86_TUNE_SLOW_IMUL_IMM32_MEM]
#define TARGET_SLOW_IMUL_IMM8 ix86_tune_features[X86_TUNE_SLOW_IMUL_IMM8]
#define TARGET_MOVE_M1_VIA_OR ix86_tune_features[X86_TUNE_MOVE_M1_VIA_OR]
#define TARGET_NOT_UNPAIRABLE ix86_tune_features[X86_TUNE_NOT_UNPAIRABLE]
#define TARGET_NOT_VECTORMODE ix86_tune_features[X86_TUNE_NOT_VECTORMODE]
#define TARGET_USE_VECTOR_FP_CONVERTS \
  ix86_tune_features[X86_TUNE_USE_VECTOR_FP_CONVERTS]
#define TARGET_USE_VECTOR_CONVERTS \
  ix86_tune_features[X86_TUNE_USE_VECTOR_CONVERTS]
#define TARGET_SLOW_PSHUFB \
  ix86_tune_features[X86_TUNE_SLOW_PSHUFB]
#define TARGET_AVOID_4BYTE_PREFIXES \
  ix86_tune_features[X86_TUNE_AVOID_4BYTE_PREFIXES]
#define TARGET_USE_GATHER \
  ix86_tune_features[X86_TUNE_USE_GATHER]
#define TARGET_FUSE_CMP_AND_BRANCH_32 \
  ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_32]
#define TARGET_FUSE_CMP_AND_BRANCH_64 \
  ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_64]
#define TARGET_FUSE_CMP_AND_BRANCH \
  (TARGET_64BIT ? TARGET_FUSE_CMP_AND_BRANCH_64 \
   : TARGET_FUSE_CMP_AND_BRANCH_32)
#define TARGET_FUSE_CMP_AND_BRANCH_SOFLAGS \
  ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS]
#define TARGET_FUSE_ALU_AND_BRANCH \
  ix86_tune_features[X86_TUNE_FUSE_ALU_AND_BRANCH]
#define TARGET_OPT_AGU ix86_tune_features[X86_TUNE_OPT_AGU]
#define TARGET_AVOID_LEA_FOR_ADDR \
  ix86_tune_features[X86_TUNE_AVOID_LEA_FOR_ADDR]
#define TARGET_SOFTWARE_PREFETCHING_BENEFICIAL \
  ix86_tune_features[X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL]
#define TARGET_AVX256_SPLIT_REGS \
  ix86_tune_features[X86_TUNE_AVX256_SPLIT_REGS]
#define TARGET_GENERAL_REGS_SSE_SPILL \
  ix86_tune_features[X86_TUNE_GENERAL_REGS_SSE_SPILL]
#define TARGET_AVOID_MEM_OPND_FOR_CMOVE \
  ix86_tune_features[X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE]
#define TARGET_SPLIT_MEM_OPND_FOR_FP_CONVERTS \
  ix86_tune_features[X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS]
#define TARGET_ADJUST_UNROLL \
  ix86_tune_features[X86_TUNE_ADJUST_UNROLL]
#define TARGET_AVOID_FALSE_DEP_FOR_BMI \
  ix86_tune_features[X86_TUNE_AVOID_FALSE_DEP_FOR_BMI]
#define TARGET_ONE_IF_CONV_INSN \
  ix86_tune_features[X86_TUNE_ONE_IF_CONV_INSN]
#define TARGET_USE_XCHG_FOR_ATOMIC_STORE \
  ix86_tune_features[X86_TUNE_USE_XCHG_FOR_ATOMIC_STORE]
#define TARGET_EMIT_VZEROUPPER \
  ix86_tune_features[X86_TUNE_EMIT_VZEROUPPER]
#define TARGET_EXPAND_ABS \
  ix86_tune_features[X86_TUNE_EXPAND_ABS]

/* Feature tests against the various architecture variations.  */
enum ix86_arch_indices {
  X86_ARCH_CMOV,
  X86_ARCH_CMPXCHG,
  X86_ARCH_CMPXCHG8B,
  X86_ARCH_XADD,
  X86_ARCH_BSWAP,

  X86_ARCH_LAST
};

extern unsigned char ix86_arch_features[X86_ARCH_LAST];

#define TARGET_CMOV ix86_arch_features[X86_ARCH_CMOV]
#define TARGET_CMPXCHG ix86_arch_features[X86_ARCH_CMPXCHG]
#define TARGET_CMPXCHG8B ix86_arch_features[X86_ARCH_CMPXCHG8B]
#define TARGET_XADD ix86_arch_features[X86_ARCH_XADD]
#define TARGET_BSWAP ix86_arch_features[X86_ARCH_BSWAP]

/* For sane SSE instruction set generation we need fcomi instruction.
   It is safe to enable all CMOVE instructions.  Also, RDRAND intrinsic
   expands to a sequence that includes conditional move.  */
#define TARGET_CMOVE (TARGET_CMOV || TARGET_SSE || TARGET_RDRND)

#define TARGET_FISTTP (TARGET_SSE3 && TARGET_80387)

extern unsigned char x86_prefetch_sse;
#define TARGET_PREFETCH_SSE x86_prefetch_sse

#define ASSEMBLER_DIALECT (ix86_asm_dialect)

#define TARGET_SSE_MATH ((ix86_fpmath & FPMATH_SSE) != 0)
#define TARGET_MIX_SSE_I387 \
  ((ix86_fpmath & (FPMATH_SSE | FPMATH_387)) == (FPMATH_SSE | FPMATH_387))

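/* Illustrative note: these tests reflect the -mfpmath option;
   -mfpmath=sse sets FPMATH_SSE (so TARGET_SSE_MATH is true),
   -mfpmath=387 sets FPMATH_387, and -mfpmath=both sets both bits,
   which is exactly what TARGET_MIX_SSE_I387 checks for.  */
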
#define TARGET_HARD_SF_REGS (TARGET_80387 || TARGET_MMX || TARGET_SSE)
#define TARGET_HARD_DF_REGS (TARGET_80387 || TARGET_SSE)
#define TARGET_HARD_XF_REGS (TARGET_80387)

#define TARGET_GNU_TLS (ix86_tls_dialect == TLS_DIALECT_GNU)
#define TARGET_GNU2_TLS (ix86_tls_dialect == TLS_DIALECT_GNU2)
#define TARGET_ANY_GNU_TLS (TARGET_GNU_TLS || TARGET_GNU2_TLS)
#define TARGET_SUN_TLS 0

#ifndef TARGET_64BIT_DEFAULT
#define TARGET_64BIT_DEFAULT 0
#endif
#ifndef TARGET_TLS_DIRECT_SEG_REFS_DEFAULT
#define TARGET_TLS_DIRECT_SEG_REFS_DEFAULT 0
#endif

#define TARGET_SSP_GLOBAL_GUARD (ix86_stack_protector_guard == SSP_GLOBAL)
#define TARGET_SSP_TLS_GUARD    (ix86_stack_protector_guard == SSP_TLS)

/* Fence to use after loop using storent.  */

extern tree x86_mfence;
#define FENCE_FOLLOWING_MOVNT x86_mfence

/* Once GDB has been enhanced to deal with functions without frame
   pointers, we can change this to allow for elimination of
   the frame pointer in leaf functions.  */
#define TARGET_DEFAULT 0

/* Extra bits to force.  */
#define TARGET_SUBTARGET_DEFAULT 0
#define TARGET_SUBTARGET_ISA_DEFAULT 0

/* Extra bits to force on w/ 32-bit mode.  */
#define TARGET_SUBTARGET32_DEFAULT 0
#define TARGET_SUBTARGET32_ISA_DEFAULT 0

/* Extra bits to force on w/ 64-bit mode.  */
#define TARGET_SUBTARGET64_DEFAULT 0
/* Enable MMX, SSE and SSE2 by default.  */
#define TARGET_SUBTARGET64_ISA_DEFAULT \
  (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2)

/* Replace MACH-O ifdefs by in-line tests, where possible.
   (a) Macros defined in config/i386/darwin.h  */
#define TARGET_MACHO 0
#define TARGET_MACHO_SYMBOL_STUBS 0
#define MACHOPIC_ATT_STUB 0
/* (b) Macros defined in config/darwin.h  */
#define MACHO_DYNAMIC_NO_PIC_P 0
#define MACHOPIC_INDIRECT 0
#define MACHOPIC_PURE 0

/* For the RDOS.  */
#define TARGET_RDOS 0

/* For the Windows 64-bit ABI.  */
#define TARGET_64BIT_MS_ABI (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)

/* For the Windows 32-bit ABI.  */
#define TARGET_32BIT_MS_ABI (!TARGET_64BIT && ix86_cfun_abi () == MS_ABI)

/* This is re-defined by cygming.h.  */
#define TARGET_SEH 0

/* The default abi used by target.  */
#define DEFAULT_ABI SYSV_ABI

/* The default TLS segment register used by target.  */
#define DEFAULT_TLS_SEG_REG \
  (TARGET_64BIT ? ADDR_SPACE_SEG_FS : ADDR_SPACE_SEG_GS)

/* Subtargets may reset this to 1 in order to enable 96-bit long double
   with the rounding mode forced to 53 bits.  */
#define TARGET_96_ROUND_53_LONG_DOUBLE 0

#ifndef SUBTARGET_DRIVER_SELF_SPECS
# define SUBTARGET_DRIVER_SELF_SPECS ""
#endif

#define DRIVER_SELF_SPECS SUBTARGET_DRIVER_SELF_SPECS

/* -march=native handling only makes sense with compiler running on
   an x86 or x86_64 chip.  If changing this condition, also change
   the condition in driver-i386.c.  */
#if defined(__i386__) || defined(__x86_64__)
/* In driver-i386.c.  */
extern const char *host_detect_local_cpu (int argc, const char **argv);
#define EXTRA_SPEC_FUNCTIONS \
  { "local_cpu_detect", host_detect_local_cpu },
#define HAVE_LOCAL_CPU_DETECT
#endif

#if TARGET_64BIT_DEFAULT
#define OPT_ARCH64 "!m32"
#define OPT_ARCH32 "m32"
#else
#define OPT_ARCH64 "m64|mx32"
#define OPT_ARCH32 "m64|mx32:;"
#endif

/* Support for configure-time defaults of some command line options.
   The order here is important so that -march doesn't squash the
   tune or cpu values.  */
#define OPTION_DEFAULT_SPECS \
  {"tune", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \
  {"tune_32", "%{" OPT_ARCH32 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"tune_64", "%{" OPT_ARCH64 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"cpu", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \
  {"cpu_32", "%{" OPT_ARCH32 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"cpu_64", "%{" OPT_ARCH64 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"arch", "%{!march=*:-march=%(VALUE)}"}, \
  {"arch_32", "%{" OPT_ARCH32 ":%{!march=*:-march=%(VALUE)}}"}, \
  {"arch_64", "%{" OPT_ARCH64 ":%{!march=*:-march=%(VALUE)}}"},

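/* Illustrative note: these specs implement configure-time defaults such as
   --with-arch= and --with-tune=.  Reading the strings above, the "arch"
   entry adds -march=%(VALUE) only when no explicit -march= was given, and
   the "tune"/"cpu" entries add -mtune=%(VALUE) only when none of -mtune=,
   -mcpu= or -march= was passed on the command line.  */
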
/* Specs for the compiler proper.  */

#ifndef CC1_CPU_SPEC
#define CC1_CPU_SPEC_1 ""

#ifndef HAVE_LOCAL_CPU_DETECT
#define CC1_CPU_SPEC CC1_CPU_SPEC_1
#else
#define CC1_CPU_SPEC CC1_CPU_SPEC_1 \
"%{march=native:%>march=native %:local_cpu_detect(arch) \
  %{!mtune=*:%>mtune=native %:local_cpu_detect(tune)}} \
%{mtune=native:%>mtune=native %:local_cpu_detect(tune)}"
#endif
#endif

/* Target CPU builtins.  */
#define TARGET_CPU_CPP_BUILTINS() ix86_target_macros ()

/* Target Pragmas.  */
#define REGISTER_TARGET_PRAGMAS() ix86_register_pragmas ()

/* Target CPU versions for D.  */
#define TARGET_D_CPU_VERSIONS ix86_d_target_versions

#ifndef CC1_SPEC
#define CC1_SPEC "%(cc1_cpu) "
#endif

/* This macro defines names of additional specifications to put in the
   specs that can be used in various specifications like CC1_SPEC.  Its
   definition is an initializer with a subgrouping for each command option.

   Each subgrouping contains a string constant, that defines the
   specification name, and a string constant that is used by the GCC driver
   program.

   Do not define this macro if it does not need to do anything.  */

#ifndef SUBTARGET_EXTRA_SPECS
#define SUBTARGET_EXTRA_SPECS
#endif

#define EXTRA_SPECS \
  { "cc1_cpu", CC1_CPU_SPEC }, \
  SUBTARGET_EXTRA_SPECS

/* Whether to allow x87 floating-point arithmetic on MODE (one of
   SFmode, DFmode and XFmode) in the current excess precision
   configuration.  */
#define X87_ENABLE_ARITH(MODE) \
  (flag_unsafe_math_optimizations \
   || flag_excess_precision == EXCESS_PRECISION_FAST \
   || (MODE) == XFmode)

/* Likewise, whether to allow direct conversions from integer mode
   IMODE (HImode, SImode or DImode) to MODE.  */
#define X87_ENABLE_FLOAT(MODE, IMODE) \
  (flag_unsafe_math_optimizations \
   || flag_excess_precision == EXCESS_PRECISION_FAST \
   || (MODE) == XFmode \
   || ((MODE) == DFmode && (IMODE) == SImode) \
   || (IMODE) == HImode)

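/* Illustrative reading of the macro above (inferred, not normative): under
   the default standard-conforming excess precision settings,
   X87_ENABLE_FLOAT (DFmode, SImode) and X87_ENABLE_FLOAT (XFmode, DImode)
   hold because the x87 fild result is exact for those combinations, while
   X87_ENABLE_FLOAT (SFmode, DImode) does not, so that conversion has to go
   through a wider mode or SSE.  */
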
/* target machine storage layout */

#define SHORT_TYPE_SIZE 16
#define INT_TYPE_SIZE 32
#define LONG_TYPE_SIZE (TARGET_X32 ? 32 : BITS_PER_WORD)
#define POINTER_SIZE (TARGET_X32 ? 32 : BITS_PER_WORD)
#define LONG_LONG_TYPE_SIZE 64
#define FLOAT_TYPE_SIZE 32
#define DOUBLE_TYPE_SIZE 64
#define LONG_DOUBLE_TYPE_SIZE \
  (TARGET_LONG_DOUBLE_64 ? 64 : (TARGET_LONG_DOUBLE_128 ? 128 : 80))

#define WIDEST_HARDWARE_FP_SIZE 80

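/* Illustrative note: by default long double is the 80-bit x87 format
   (LONG_DOUBLE_TYPE_SIZE evaluates to 80), while -mlong-double-64 and
   -mlong-double-128 switch it to the 64-bit and 128-bit formats;
   WIDEST_HARDWARE_FP_SIZE stays 80 because the x87 unit is the widest
   floating-point hardware on the target.  */
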
#if defined (TARGET_BI_ARCH) || TARGET_64BIT_DEFAULT
#define MAX_BITS_PER_WORD 64
#else
#define MAX_BITS_PER_WORD 32
#endif

/* Define this if the most significant bit in a bit-field is the lowest
   numbered.  */
/* That is not true on the 80386.  */

#define BITS_BIG_ENDIAN 0

/* Define this if most significant byte of a word is the lowest numbered.  */
/* That is not true on the 80386.  */
#define BYTES_BIG_ENDIAN 0

/* Define this if most significant word of a multiword number is the lowest
   numbered.  */
/* Not true for 80386.  */
#define WORDS_BIG_ENDIAN 0

/* Width of a word, in units (bytes).  */
#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)

#ifndef IN_LIBGCC2
#define MIN_UNITS_PER_WORD 4
#endif

/* Allocation boundary (in *bits*) for storing arguments in argument list.  */
#define PARM_BOUNDARY BITS_PER_WORD

65d9c0ab 858#define PARM_BOUNDARY BITS_PER_WORD
c98f8742 859
e075ae69 860/* Boundary (in *bits*) on which stack pointer should be aligned. */
bd5d3961 861#define STACK_BOUNDARY (TARGET_64BIT_MS_ABI ? 128 : BITS_PER_WORD)
c98f8742 862
2e3f842f
L
863/* Stack boundary of the main function guaranteed by OS. */
864#define MAIN_STACK_BOUNDARY (TARGET_64BIT ? 128 : 32)
865
de1132d1 866/* Minimum stack boundary. */
cba9c789 867#define MIN_STACK_BOUNDARY BITS_PER_WORD
2e3f842f 868
d1f87653 869/* Boundary (in *bits*) on which the stack pointer prefers to be
3af4bd89 870 aligned; the compiler cannot rely on having this alignment. */
e075ae69 871#define PREFERRED_STACK_BOUNDARY ix86_preferred_stack_boundary
65954bd8 872
de1132d1 873/* It should be MIN_STACK_BOUNDARY. But we set it to 128 bits for
2e3f842f
L
874 both 32bit and 64bit, to support codes that need 128 bit stack
875 alignment for SSE instructions, but can't realign the stack. */
d9063947
L
876#define PREFERRED_STACK_BOUNDARY_DEFAULT \
877 (TARGET_IAMCU ? MIN_STACK_BOUNDARY : 128)
2e3f842f
L
878
879/* 1 if -mstackrealign should be turned on by default. It will
880 generate an alternate prologue and epilogue that realigns the
881 runtime stack if nessary. This supports mixing codes that keep a
882 4-byte aligned stack, as specified by i386 psABI, with codes that
890b9b96 883 need a 16-byte aligned stack, as required by SSE instructions. */
2e3f842f
L
884#define STACK_REALIGN_DEFAULT 0
885
886/* Boundary (in *bits*) on which the incoming stack is aligned. */
887#define INCOMING_STACK_BOUNDARY ix86_incoming_stack_boundary
1d482056 888
a2851b75
TG
889/* According to Windows x64 software convention, the maximum stack allocatable
890 in the prologue is 4G - 8 bytes. Furthermore, there is a limited set of
891 instructions allowed to adjust the stack pointer in the epilog, forcing the
892 use of frame pointer for frames larger than 2 GB. This theorical limit
893 is reduced by 256, an over-estimated upper bound for the stack use by the
894 prologue.
895 We define only one threshold for both the prolog and the epilog. When the
4e523f33 896 frame size is larger than this threshold, we allocate the area to save SSE
a2851b75
TG
897 regs, then save them, and then allocate the remaining. There is no SEH
898 unwind info for this later allocation. */
899#define SEH_MAX_FRAME_SIZE ((2U << 30) - 256)
900
ebff937c
SH
901/* Target OS keeps a vector-aligned (128-bit, 16-byte) stack. This is
902 mandatory for the 64-bit ABI, and may or may not be true for other
903 operating systems. */
904#define TARGET_KEEPS_VECTOR_ALIGNED_STACK TARGET_64BIT
905
f963b5d9
RS
906/* Minimum allocation boundary for the code of a function. */
907#define FUNCTION_BOUNDARY 8
908
909/* C++ stores the virtual bit in the lowest bit of function pointers. */
910#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_pfn
/* Minimum size in bits of the largest boundary to which any
   and all fundamental data types supported by the hardware
   might need to be aligned.  No data type wants to be aligned
   rounder than this.

   Pentium+ prefers DFmode values to be aligned to 64 bit boundary
   and Pentium Pro XFmode values at 128 bit boundaries.

   When increasing the maximum, also update
   TARGET_ABSOLUTE_BIGGEST_ALIGNMENT.  */

#define BIGGEST_ALIGNMENT \
  (TARGET_IAMCU ? 32 : (TARGET_AVX512F ? 512 : (TARGET_AVX ? 256 : 128)))

/* Maximum stack alignment.  */
#define MAX_STACK_ALIGNMENT MAX_OFILE_ALIGNMENT

/* Alignment value for attribute ((aligned)).  It is a constant since
   it is part of the ABI.  We shouldn't change it with -mavx.  */
#define ATTRIBUTE_ALIGNED_VALUE (TARGET_IAMCU ? 32 : 128)

/* Decide whether a variable of mode MODE should be 128 bit aligned.  */
#define ALIGN_MODE_128(MODE) \
  ((MODE) == XFmode || SSE_REG_MODE_P (MODE))

/* The published ABIs say that doubles should be aligned on word
   boundaries, so lower the alignment for structure fields unless
   -malign-double is set.  */

/* ??? Blah -- this macro is used directly by libobjc.  Since it
   supports no vector modes, cut out the complexity and fall back
   on BIGGEST_FIELD_ALIGNMENT.  */
#ifdef IN_TARGET_LIBS
#ifdef __x86_64__
#define BIGGEST_FIELD_ALIGNMENT 128
#else
#define BIGGEST_FIELD_ALIGNMENT 32
#endif
#else
#define ADJUST_FIELD_ALIGN(FIELD, TYPE, COMPUTED) \
  x86_field_alignment ((TYPE), (COMPUTED))
#endif

/* If defined, a C expression to compute the alignment for a static
   variable.  TYPE is the data type, and ALIGN is the alignment that
   the object would ordinarily have.  The value of this macro is used
   instead of that alignment to align the object.

   If this macro is not defined, then ALIGN is used.

   One use of this macro is to increase alignment of medium-size
   data to make it all fit in fewer cache lines.  Another is to
   cause character arrays to be word-aligned so that `strcpy' calls
   that copy constants to character arrays can be done inline.  */

#define DATA_ALIGNMENT(TYPE, ALIGN) \
  ix86_data_alignment ((TYPE), (ALIGN), true)

/* Similar to DATA_ALIGNMENT, but for the cases where the ABI mandates
   some alignment increase, instead of optimization only purposes.  E.g.
   AMD x86-64 psABI says that variables with array type larger than 15 bytes
   must be aligned to 16 byte boundaries.

   If this macro is not defined, then ALIGN is used.  */

#define DATA_ABI_ALIGNMENT(TYPE, ALIGN) \
  ix86_data_alignment ((TYPE), (ALIGN), false)

/* If defined, a C expression to compute the alignment for a local
   variable.  TYPE is the data type, and ALIGN is the alignment that
   the object would ordinarily have.  The value of this macro is used
   instead of that alignment to align the object.

   If this macro is not defined, then ALIGN is used.

   One use of this macro is to increase alignment of medium-size
   data to make it all fit in fewer cache lines.  */

#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
  ix86_local_alignment ((TYPE), VOIDmode, (ALIGN))

/* If defined, a C expression to compute the alignment for stack slot.
   TYPE is the data type, MODE is the widest mode available, and ALIGN
   is the alignment that the slot would ordinarily have.  The value of
   this macro is used instead of that alignment to align the slot.

   If this macro is not defined, then ALIGN is used when TYPE is NULL.
   Otherwise, LOCAL_ALIGNMENT will be used.

   One use of this macro is to set alignment of stack slot to the
   maximum alignment of all possible modes which the slot may have.  */

#define STACK_SLOT_ALIGNMENT(TYPE, MODE, ALIGN) \
  ix86_local_alignment ((TYPE), (MODE), (ALIGN))

/* If defined, a C expression to compute the alignment for a local
   variable DECL.

   If this macro is not defined, then
   LOCAL_ALIGNMENT (TREE_TYPE (DECL), DECL_ALIGN (DECL)) will be used.

   One use of this macro is to increase alignment of medium-size
   data to make it all fit in fewer cache lines.  */

#define LOCAL_DECL_ALIGNMENT(DECL) \
  ix86_local_alignment ((DECL), VOIDmode, DECL_ALIGN (DECL))

/* If defined, a C expression to compute the minimum required alignment
   for dynamic stack realignment purposes for EXP (a TYPE or DECL),
   MODE, assuming normal alignment ALIGN.

   If this macro is not defined, then (ALIGN) will be used.  */

#define MINIMUM_ALIGNMENT(EXP, MODE, ALIGN) \
  ix86_minimum_alignment ((EXP), (MODE), (ALIGN))


/* Set this nonzero if move instructions will actually fail to work
   when given unaligned data.  */
#define STRICT_ALIGNMENT 0

/* If bit field type is int, don't let it cross an int,
   and give entire struct the alignment of an int.  */
/* Required on the 386 since it doesn't have bit-field insns.  */
#define PCC_BITFIELD_TYPE_MATTERS 1

1039
1040/* This processor has special stack-like registers. See reg-stack.c
892a2d68 1041 for details. */
c98f8742
JVA
1042
1043#define STACK_REGS
ce998900 1044
f48b4284
UB
1045#define IS_STACK_MODE(MODE) \
1046 (X87_FLOAT_MODE_P (MODE) \
1047 && (!(SSE_FLOAT_MODE_P (MODE) && TARGET_SSE_MATH) \
1048 || TARGET_MIX_SSE_I387))
c98f8742
JVA
1049
1050/* Number of actual hardware registers.
1051 The hardware registers are assigned numbers for the compiler
1052 from 0 to just below FIRST_PSEUDO_REGISTER.
1053 All registers that the compiler knows about must be given numbers,
1054 even those that are not normally considered general registers.
1055
1056 In the 80386 we give the 8 general purpose registers the numbers 0-7.
1057 We number the floating point registers 8-15.
1058 Note that registers 0-7 can be accessed as a short or int,
1059 while only 0-3 may be used with byte `mov' instructions.
1060
1061 Reg 16 does not correspond to any hardware register, but instead
1062 appears in the RTL as an argument pointer prior to reload, and is
1063 eliminated during reloading in favor of either the stack or frame
892a2d68 1064 pointer. */
c98f8742 1065
05416670 1066#define FIRST_PSEUDO_REGISTER FIRST_PSEUDO_REG
c98f8742 1067
3073d01c
ML
1068/* Number of hardware registers that go into the DWARF-2 unwind info.
1069 If not defined, equals FIRST_PSEUDO_REGISTER. */
1070
1071#define DWARF_FRAME_REGISTERS 17
1072
c98f8742
JVA
1073/* 1 for registers that have pervasive standard uses
1074 and are not available for the register allocator.
3f3f2124 1075 On the 80386, the stack pointer is such, as is the arg pointer.
fce5a9f2 1076
621bc046
UB
1077 REX registers are disabled for 32bit targets in
1078 TARGET_CONDITIONAL_REGISTER_USAGE. */
1079
a7180f70
BS
1080#define FIXED_REGISTERS \
1081/*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \
3a4416fb 1082{ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, \
eaa17c21
UB
1083/*arg,flags,fpsr,frame*/ \
1084 1, 1, 1, 1, \
a7180f70
BS
1085/*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \
1086 0, 0, 0, 0, 0, 0, 0, 0, \
78168632 1087/* mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7*/ \
3f3f2124
JH
1088 0, 0, 0, 0, 0, 0, 0, 0, \
1089/* r8, r9, r10, r11, r12, r13, r14, r15*/ \
621bc046 1090 0, 0, 0, 0, 0, 0, 0, 0, \
3f3f2124 1091/*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \
3f97cb0b
AI
1092 0, 0, 0, 0, 0, 0, 0, 0, \
1093/*xmm16,xmm17,xmm18,xmm19,xmm20,xmm21,xmm22,xmm23*/ \
1094 0, 0, 0, 0, 0, 0, 0, 0, \
1095/*xmm24,xmm25,xmm26,xmm27,xmm28,xmm29,xmm30,xmm31*/ \
85a77221
AI
1096 0, 0, 0, 0, 0, 0, 0, 0, \
1097/* k0, k1, k2, k3, k4, k5, k6, k7*/ \
eafa30ef 1098 0, 0, 0, 0, 0, 0, 0, 0 }
c98f8742
JVA
1099
/* 1 for registers not available across function calls.
   These must include the FIXED_REGISTERS and also any
   registers that can be used without being saved.
   The latter must include the registers where values are returned
   and the register where structure-value addresses are passed.
   Aside from that, you can include as many other registers as you like.

   Value is set to 1 if the register is call used unconditionally.
   Bit one is set if the register is call used on TARGET_32BIT ABI.
   Bit two is set if the register is call used on TARGET_64BIT ABI.
   Bit three is set if the register is call used on TARGET_64BIT_MS_ABI.

   Proper values are computed in TARGET_CONDITIONAL_REGISTER_USAGE.  */

#define CALL_USED_REGISTERS_MASK(IS_64BIT_MS_ABI) \
  ((IS_64BIT_MS_ABI) ? (1 << 3) : TARGET_64BIT ? (1 << 2) : (1 << 1))

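/* Illustrative reading of the encoding above (not part of the interface):
   in the CALL_USED_REGISTERS table below, 6 = (1 << 1) | (1 << 2) marks a
   register such as xmm6 or xmm8 as call used on the 32-bit and 64-bit SysV
   ABIs but call saved on the 64-bit MS ABI, while 4 = (1 << 2) marks si/di
   as call used only on the 64-bit SysV ABI.  */
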
#define CALL_USED_REGISTERS \
/*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \
{  1, 1, 1, 0, 4, 4, 0, 1, 1,  1,  1,  1,  1,  1,  1,  1, \
/*arg,flags,fpsr,frame*/ \
    1,    1,   1,    1, \
/*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \
     1,   1,   1,   1,   1,   1,   6,   6, \
/* mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7*/ \
     1,   1,   1,   1,   1,   1,   1,   1, \
/*  r8,  r9, r10, r11, r12, r13, r14, r15*/ \
     1,   1,   1,   1,   2,   2,   2,   2, \
/*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \
     6,   6,    6,    6,    6,    6,    6,    6, \
/*xmm16,xmm17,xmm18,xmm19,xmm20,xmm21,xmm22,xmm23*/ \
     6,    6,    6,    6,    6,    6,    6,    6, \
/*xmm24,xmm25,xmm26,xmm27,xmm28,xmm29,xmm30,xmm31*/ \
     6,    6,    6,    6,    6,    6,    6,    6, \
/*  k0,  k1,  k2,  k3,  k4,  k5,  k6,  k7*/ \
     1,   1,   1,   1,   1,   1,   1,   1 }

/* Order in which to allocate registers.  Each register must be
   listed once, even those in FIXED_REGISTERS.  List frame pointer
   late and fixed registers last.  Note that, in general, we prefer
   registers listed in CALL_USED_REGISTERS, keeping the others
   available for storage of persistent values.

   ADJUST_REG_ALLOC_ORDER actually overwrites the order, so this is
   just an empty initializer for the array.  */

#define REG_ALLOC_ORDER \
{  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, \
  16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, \
  32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \
  48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, \
  64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75 }

/* ADJUST_REG_ALLOC_ORDER is a macro which permits reg_alloc_order
   to be rearranged based on a particular function.  When using sse math,
   we want to allocate SSE before x87 registers and vice versa.  */

#define ADJUST_REG_ALLOC_ORDER x86_order_regs_for_local_alloc ()


KT
1160#define OVERRIDE_ABI_FORMAT(FNDECL) ix86_call_abi_override (FNDECL)
1161
8521c414 1162#define HARD_REGNO_NREGS_HAS_PADDING(REGNO, MODE) \
7bf65250
UB
1163 (TARGET_128BIT_LONG_DOUBLE && !TARGET_64BIT \
1164 && GENERAL_REGNO_P (REGNO) \
1165 && ((MODE) == XFmode || (MODE) == XCmode))
8521c414
JM
1166
1167#define HARD_REGNO_NREGS_WITH_PADDING(REGNO, MODE) ((MODE) == XFmode ? 4 : 8)
1168
e21b52af
HL
1169#define REGMODE_NATURAL_SIZE(MODE) ix86_regmode_natural_size (MODE)
1170
95879c72
L
1171#define VALID_AVX256_REG_MODE(MODE) \
1172 ((MODE) == V32QImode || (MODE) == V16HImode || (MODE) == V8SImode \
8a0436cb
JJ
1173 || (MODE) == V4DImode || (MODE) == V2TImode || (MODE) == V8SFmode \
1174 || (MODE) == V4DFmode)
95879c72 1175
4ac005ba 1176#define VALID_AVX256_REG_OR_OI_MODE(MODE) \
ff97910d
VY
1177 (VALID_AVX256_REG_MODE (MODE) || (MODE) == OImode)
1178
3f97cb0b
AI
1179#define VALID_AVX512F_SCALAR_MODE(MODE) \
1180 ((MODE) == DImode || (MODE) == DFmode || (MODE) == SImode \
1181 || (MODE) == SFmode)
1182
1183#define VALID_AVX512F_REG_MODE(MODE) \
1184 ((MODE) == V8DImode || (MODE) == V8DFmode || (MODE) == V64QImode \
9e4a4dd6
AI
1185 || (MODE) == V16SImode || (MODE) == V16SFmode || (MODE) == V32HImode \
1186 || (MODE) == V4TImode)
1187
e6f146d2
SP
1188#define VALID_AVX512F_REG_OR_XI_MODE(MODE) \
1189 (VALID_AVX512F_REG_MODE (MODE) || (MODE) == XImode)
1190
05416670 1191#define VALID_AVX512VL_128_REG_MODE(MODE) \
9e4a4dd6 1192 ((MODE) == V2DImode || (MODE) == V2DFmode || (MODE) == V16QImode \
40bd4bf9
JJ
1193 || (MODE) == V4SImode || (MODE) == V4SFmode || (MODE) == V8HImode \
1194 || (MODE) == TFmode || (MODE) == V1TImode)
3f97cb0b 1195
ce998900
UB
1196#define VALID_SSE2_REG_MODE(MODE) \
1197 ((MODE) == V16QImode || (MODE) == V8HImode || (MODE) == V2DFmode \
1198 || (MODE) == V2DImode || (MODE) == DFmode)
fbe5eb6d 1199
d9a5f180 1200#define VALID_SSE_REG_MODE(MODE) \
fe6ae2da
UB
1201 ((MODE) == V1TImode || (MODE) == TImode \
1202 || (MODE) == V4SFmode || (MODE) == V4SImode \
ce998900 1203 || (MODE) == SFmode || (MODE) == TFmode)
a7180f70 1204
47f339cf 1205#define VALID_MMX_REG_MODE_3DNOW(MODE) \
ce998900 1206 ((MODE) == V2SFmode || (MODE) == SFmode)
47f339cf 1207
d9a5f180 1208#define VALID_MMX_REG_MODE(MODE) \
879f9d0b 1209 ((MODE) == V1DImode || (MODE) == DImode \
10a97ae6
UB
1210 || (MODE) == V2SImode || (MODE) == SImode \
1211 || (MODE) == V4HImode || (MODE) == V8QImode)
a7180f70 1212
05416670
UB
1213#define VALID_MASK_REG_MODE(MODE) ((MODE) == HImode || (MODE) == QImode)
1214
1215#define VALID_MASK_AVX512BW_MODE(MODE) ((MODE) == SImode || (MODE) == DImode)
1216
ce998900
UB
1217#define VALID_DFP_MODE_P(MODE) \
1218 ((MODE) == SDmode || (MODE) == DDmode || (MODE) == TDmode)
62d75179 1219
d9a5f180 1220#define VALID_FP_MODE_P(MODE) \
ce998900
UB
1221 ((MODE) == SFmode || (MODE) == DFmode || (MODE) == XFmode \
1222 || (MODE) == SCmode || (MODE) == DCmode || (MODE) == XCmode) \
a946dd00 1223
d9a5f180 1224#define VALID_INT_MODE_P(MODE) \
ce998900
UB
1225 ((MODE) == QImode || (MODE) == HImode || (MODE) == SImode \
1226 || (MODE) == DImode \
1227 || (MODE) == CQImode || (MODE) == CHImode || (MODE) == CSImode \
1228 || (MODE) == CDImode \
1229 || (TARGET_64BIT && ((MODE) == TImode || (MODE) == CTImode \
1230 || (MODE) == TFmode || (MODE) == TCmode)))
a946dd00 1231
822eda12 1232/* Return true for modes passed in SSE registers. */
ce998900 1233#define SSE_REG_MODE_P(MODE) \
fe6ae2da
UB
1234 ((MODE) == V1TImode || (MODE) == TImode || (MODE) == V16QImode \
1235 || (MODE) == TFmode || (MODE) == V8HImode || (MODE) == V2DFmode \
1236 || (MODE) == V2DImode || (MODE) == V4SFmode || (MODE) == V4SImode \
1237 || (MODE) == V32QImode || (MODE) == V16HImode || (MODE) == V8SImode \
8a0436cb 1238 || (MODE) == V4DImode || (MODE) == V8SFmode || (MODE) == V4DFmode \
3f97cb0b
AI
1239 || (MODE) == V2TImode || (MODE) == V8DImode || (MODE) == V64QImode \
1240 || (MODE) == V16SImode || (MODE) == V32HImode || (MODE) == V8DFmode \
1241 || (MODE) == V16SFmode)
822eda12 1242
05416670
UB
1243#define X87_FLOAT_MODE_P(MODE) \
1244 (TARGET_80387 && ((MODE) == SFmode || (MODE) == DFmode || (MODE) == XFmode))
85a77221 1245
05416670
UB
1246#define SSE_FLOAT_MODE_P(MODE) \
1247 ((TARGET_SSE && (MODE) == SFmode) || (TARGET_SSE2 && (MODE) == DFmode))
1248
1249#define FMA4_VEC_FLOAT_MODE_P(MODE) \
1250 (TARGET_FMA4 && ((MODE) == V4SFmode || (MODE) == V2DFmode \
1251 || (MODE) == V8SFmode || (MODE) == V4DFmode))
9e4a4dd6 1252
ff25ef99
ZD
1253/* It is possible to write patterns to move flags; but until someone
1254 does it, avoid CCmode register copies entirely. */
1255#define AVOID_CCMODE_COPIES
c98f8742 1256
e075ae69 1257/* Specify the modes required to caller save a given hard regno.
787dc842 1258 We do this on i386 to prevent flags from being saved at all.
e075ae69 1259
787dc842
JH
1260 Kill any attempts to combine saving of modes. */
1261
d9a5f180
GS
1262#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \
1263 (CC_REGNO_P (REGNO) ? VOIDmode \
1264 : (MODE) == VOIDmode && (NREGS) != 1 ? VOIDmode \
737d6a1a 1265 : (MODE) == VOIDmode ? choose_hard_reg_mode ((REGNO), (NREGS), NULL) \
a60c3351
UB
1266 : (MODE) == HImode && !((GENERAL_REGNO_P (REGNO) \
1267 && TARGET_PARTIAL_REG_STALL) \
85a77221 1268 || MASK_REGNO_P (REGNO)) ? SImode \
a60c3351 1269 : (MODE) == QImode && !(ANY_QI_REGNO_P (REGNO) \
85a77221 1270 || MASK_REGNO_P (REGNO)) ? SImode \
d2836273 1271 : (MODE))
ce998900 1272
c98f8742
JVA
1273/* Specify the registers used for certain standard purposes.
1274 The values of these macros are register numbers. */
1275
1276/* On the 386 the PC register is %eip, and it is not usable as a general
1277 register. The ordinary mov instructions won't work on it. */
1278/* #define PC_REGNUM */
1279
05416670
UB
1280/* Base register for access to arguments of the function. */
1281#define ARG_POINTER_REGNUM ARGP_REG
1282
c98f8742 1283/* Register to use for pushing function arguments. */
05416670 1284#define STACK_POINTER_REGNUM SP_REG
c98f8742
JVA
1285
1286/* Base register for access to local variables of the function. */
05416670
UB
1287#define FRAME_POINTER_REGNUM FRAME_REG
1288#define HARD_FRAME_POINTER_REGNUM BP_REG
564d80f4 1289
05416670
UB
1290#define FIRST_INT_REG AX_REG
1291#define LAST_INT_REG SP_REG
c98f8742 1292
05416670
UB
1293#define FIRST_QI_REG AX_REG
1294#define LAST_QI_REG BX_REG
c98f8742
JVA
1295
1296/* First & last stack-like regs */
05416670
UB
1297#define FIRST_STACK_REG ST0_REG
1298#define LAST_STACK_REG ST7_REG
c98f8742 1299
05416670
UB
1300#define FIRST_SSE_REG XMM0_REG
1301#define LAST_SSE_REG XMM7_REG
fce5a9f2 1302
05416670
UB
1303#define FIRST_MMX_REG MM0_REG
1304#define LAST_MMX_REG MM7_REG
a7180f70 1305
05416670
UB
1306#define FIRST_REX_INT_REG R8_REG
1307#define LAST_REX_INT_REG R15_REG
3f3f2124 1308
05416670
UB
1309#define FIRST_REX_SSE_REG XMM8_REG
1310#define LAST_REX_SSE_REG XMM15_REG
3f3f2124 1311
05416670
UB
1312#define FIRST_EXT_REX_SSE_REG XMM16_REG
1313#define LAST_EXT_REX_SSE_REG XMM31_REG
3f97cb0b 1314
05416670
UB
1315#define FIRST_MASK_REG MASK0_REG
1316#define LAST_MASK_REG MASK7_REG
85a77221 1317
aabcd309 1318/* Override this in other tm.h files to cope with various OS lossage
6fca22eb
RH
1319 requiring a frame pointer. */
1320#ifndef SUBTARGET_FRAME_POINTER_REQUIRED
1321#define SUBTARGET_FRAME_POINTER_REQUIRED 0
1322#endif
1323
1324/* Make sure we can access arbitrary call frames. */
1325#define SETUP_FRAME_ADDRESSES() ix86_setup_frame_addresses ()
c98f8742 1326
c98f8742 1327/* Register to hold the addressing base for position independent
5b43fed1
RH
1328 code access to data items. We don't use a PIC pointer in 64-bit
1329 mode. Define the regnum to a dummy value to prevent gcc from
fce5a9f2 1330 pessimizing code dealing with EBX.
bd09bdeb
RH
1331
1332 To avoid clobbering a call-saved register unnecessarily, we renumber
1333 the pic register when possible. The change is visible after the
1334 prologue has been emitted. */
1335
e8b5eb25 1336#define REAL_PIC_OFFSET_TABLE_REGNUM (TARGET_64BIT ? R15_REG : BX_REG)
bd09bdeb 1337
bcb21886 1338#define PIC_OFFSET_TABLE_REGNUM \
d290bb1d
IE
1339 (ix86_use_pseudo_pic_reg () \
1340 ? (pic_offset_table_rtx \
1341 ? INVALID_REGNUM \
1342 : REAL_PIC_OFFSET_TABLE_REGNUM) \
1343 : INVALID_REGNUM)
c98f8742 1344
5fc0e5df
KW
1345#define GOT_SYMBOL_NAME "_GLOBAL_OFFSET_TABLE_"
1346
c51e6d85 1347/* This is overridden by <cygwin.h>. */
5e062767
DS
1348#define MS_AGGREGATE_RETURN 0
1349
61fec9ff 1350#define KEEP_AGGREGATE_RETURN_POINTER 0
c98f8742
JVA
1351\f
1352/* Define the classes of registers for register constraints in the
1353 machine description. Also define ranges of constants.
1354
1355 One of the classes must always be named ALL_REGS and include all hard regs.
1356 If there is more than one class, another class must be named NO_REGS
1357 and contain no registers.
1358
1359 The name GENERAL_REGS must be the name of a class (or an alias for
1360 another name such as ALL_REGS). This is the class of registers
1361 that is allowed by "g" or "r" in a register constraint.
1362 Also, registers outside this class are allocated only when
1363 instructions express preferences for them.
1364
1365 The classes must be numbered in nondecreasing order; that is,
1366 a larger-numbered class must never be contained completely
2e24efd3
AM
1367 in a smaller-numbered class. This is why CLOBBERED_REGS class
1368 is listed early, even though in 64-bit mode it contains more
1369 registers than just %eax, %ecx, %edx.
c98f8742
JVA
1370
1371 For any two classes, it is very desirable that there be another
ab408a86
JVA
1372 class that represents their union.
1373
eaa17c21 1374 The flags and fpsr registers are in no class. */
c98f8742
JVA
1375
1376enum reg_class
1377{
1378 NO_REGS,
e075ae69 1379 AREG, DREG, CREG, BREG, SIREG, DIREG,
4b71cd6e 1380 AD_REGS, /* %eax/%edx for DImode */
2e24efd3 1381 CLOBBERED_REGS, /* call-clobbered integer registers */
c98f8742 1382 Q_REGS, /* %eax %ebx %ecx %edx */
564d80f4 1383 NON_Q_REGS, /* %esi %edi %ebp %esp */
de86ff8f 1384 TLS_GOTBASE_REGS, /* %ebx %ecx %edx %esi %edi %ebp */
c98f8742 1385 INDEX_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp */
3f3f2124 1386 LEGACY_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp %esp */
63001560
UB
1387 GENERAL_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp %esp
1388 %r8 %r9 %r10 %r11 %r12 %r13 %r14 %r15 */
c98f8742
JVA
1389 FP_TOP_REG, FP_SECOND_REG, /* %st(0) %st(1) */
1390 FLOAT_REGS,
06f4e35d 1391 SSE_FIRST_REG,
45392c76 1392 NO_REX_SSE_REGS,
a7180f70 1393 SSE_REGS,
3f97cb0b 1394 ALL_SSE_REGS,
a7180f70 1395 MMX_REGS,
446988df
JH
1396 FLOAT_SSE_REGS,
1397 FLOAT_INT_REGS,
1398 INT_SSE_REGS,
1399 FLOAT_INT_SSE_REGS,
85a77221 1400 MASK_REGS,
d18cbbf6
UB
1401 ALL_MASK_REGS,
1402 ALL_REGS,
1403 LIM_REG_CLASSES
c98f8742
JVA
1404};
1405
d9a5f180
GS
1406#define N_REG_CLASSES ((int) LIM_REG_CLASSES)
1407
1408#define INTEGER_CLASS_P(CLASS) \
1409 reg_class_subset_p ((CLASS), GENERAL_REGS)
1410#define FLOAT_CLASS_P(CLASS) \
1411 reg_class_subset_p ((CLASS), FLOAT_REGS)
1412#define SSE_CLASS_P(CLASS) \
3f97cb0b 1413 reg_class_subset_p ((CLASS), ALL_SSE_REGS)
d9a5f180 1414#define MMX_CLASS_P(CLASS) \
f75959a6 1415 ((CLASS) == MMX_REGS)
4ed04e93 1416#define MASK_CLASS_P(CLASS) \
d18cbbf6 1417 reg_class_subset_p ((CLASS), ALL_MASK_REGS)
d9a5f180
GS
1418#define MAYBE_INTEGER_CLASS_P(CLASS) \
1419 reg_classes_intersect_p ((CLASS), GENERAL_REGS)
1420#define MAYBE_FLOAT_CLASS_P(CLASS) \
1421 reg_classes_intersect_p ((CLASS), FLOAT_REGS)
1422#define MAYBE_SSE_CLASS_P(CLASS) \
3f97cb0b 1423 reg_classes_intersect_p ((CLASS), ALL_SSE_REGS)
d9a5f180 1424#define MAYBE_MMX_CLASS_P(CLASS) \
0bd72901 1425 reg_classes_intersect_p ((CLASS), MMX_REGS)
85a77221 1426#define MAYBE_MASK_CLASS_P(CLASS) \
d18cbbf6 1427 reg_classes_intersect_p ((CLASS), ALL_MASK_REGS)
d9a5f180
GS
1428
1429#define Q_CLASS_P(CLASS) \
1430 reg_class_subset_p ((CLASS), Q_REGS)
7c6b971d 1431
0bd72901
UB
1432#define MAYBE_NON_Q_CLASS_P(CLASS) \
1433 reg_classes_intersect_p ((CLASS), NON_Q_REGS)
1434
43f3a59d 1435/* Give names of register classes as strings for dump file. */
c98f8742
JVA
1436
1437#define REG_CLASS_NAMES \
1438{ "NO_REGS", \
ab408a86 1439 "AREG", "DREG", "CREG", "BREG", \
c98f8742 1440 "SIREG", "DIREG", \
e075ae69 1441 "AD_REGS", \
2e24efd3 1442 "CLOBBERED_REGS", \
e075ae69 1443 "Q_REGS", "NON_Q_REGS", \
de86ff8f 1444 "TLS_GOTBASE_REGS", \
c98f8742 1445 "INDEX_REGS", \
3f3f2124 1446 "LEGACY_REGS", \
c98f8742
JVA
1447 "GENERAL_REGS", \
1448 "FP_TOP_REG", "FP_SECOND_REG", \
1449 "FLOAT_REGS", \
cb482895 1450 "SSE_FIRST_REG", \
45392c76 1451 "NO_REX_SSE_REGS", \
a7180f70 1452 "SSE_REGS", \
3f97cb0b 1453 "ALL_SSE_REGS", \
a7180f70 1454 "MMX_REGS", \
446988df 1455 "FLOAT_SSE_REGS", \
8fcaaa80 1456 "FLOAT_INT_REGS", \
446988df
JH
1457 "INT_SSE_REGS", \
1458 "FLOAT_INT_SSE_REGS", \
85a77221 1459 "MASK_REGS", \
d18cbbf6 1460 "ALL_MASK_REGS", \
c98f8742
JVA
1461 "ALL_REGS" }
1462
ac2e563f
RH
1463/* Define which registers fit in which classes. This is an initializer
1464 for a vector of HARD_REG_SET of length N_REG_CLASSES.
1465
621bc046
UB
1466 Note that CLOBBERED_REGS are calculated by
1467 TARGET_CONDITIONAL_REGISTER_USAGE. */
c98f8742 1468
d18cbbf6 1469#define REG_CLASS_CONTENTS \
eaa17c21
UB
1470{ { 0x0, 0x0, 0x0 }, /* NO_REGS */ \
1471 { 0x01, 0x0, 0x0 }, /* AREG */ \
1472 { 0x02, 0x0, 0x0 }, /* DREG */ \
1473 { 0x04, 0x0, 0x0 }, /* CREG */ \
1474 { 0x08, 0x0, 0x0 }, /* BREG */ \
1475 { 0x10, 0x0, 0x0 }, /* SIREG */ \
1476 { 0x20, 0x0, 0x0 }, /* DIREG */ \
1477 { 0x03, 0x0, 0x0 }, /* AD_REGS */ \
1478 { 0x07, 0x0, 0x0 }, /* CLOBBERED_REGS */ \
1479 { 0x0f, 0x0, 0x0 }, /* Q_REGS */ \
1480 { 0x900f0, 0x0, 0x0 }, /* NON_Q_REGS */ \
1481 { 0x7e, 0xff0, 0x0 }, /* TLS_GOTBASE_REGS */ \
1482 { 0x7f, 0xff0, 0x0 }, /* INDEX_REGS */ \
1483 { 0x900ff, 0x0, 0x0 }, /* LEGACY_REGS */ \
1484 { 0x900ff, 0xff0, 0x0 }, /* GENERAL_REGS */ \
1485 { 0x100, 0x0, 0x0 }, /* FP_TOP_REG */ \
1486 { 0x200, 0x0, 0x0 }, /* FP_SECOND_REG */ \
1487 { 0xff00, 0x0, 0x0 }, /* FLOAT_REGS */ \
1488 { 0x100000, 0x0, 0x0 }, /* SSE_FIRST_REG */ \
1489 { 0xff00000, 0x0, 0x0 }, /* NO_REX_SSE_REGS */ \
1490 { 0xff00000, 0xff000, 0x0 }, /* SSE_REGS */ \
1491 { 0xff00000, 0xfffff000, 0xf }, /* ALL_SSE_REGS */ \
1492{ 0xf0000000, 0xf, 0x0 }, /* MMX_REGS */ \
1493 { 0xff0ff00, 0xfffff000, 0xf }, /* FLOAT_SSE_REGS */ \
1494 { 0x9ffff, 0xff0, 0x0 }, /* FLOAT_INT_REGS */ \
1495 { 0xff900ff, 0xfffffff0, 0xf }, /* INT_SSE_REGS */ \
1496 { 0xff9ffff, 0xfffffff0, 0xf }, /* FLOAT_INT_SSE_REGS */ \
1497 { 0x0, 0x0, 0xfe0 }, /* MASK_REGS */ \
1498 { 0x0, 0x0, 0xff0 }, /* ALL_MASK_REGS */ \
1499{ 0xffffffff, 0xffffffff, 0xfff } /* ALL_REGS */ \
e075ae69 1500}
c98f8742
JVA
1501
1502/* The same information, inverted:
1503 Return the class number of the smallest class containing
1504 reg number REGNO. This could be a conditional expression
1505 or could index an array. */
1506
1a6e82b8 1507#define REGNO_REG_CLASS(REGNO) (regclass_map[(REGNO)])
c98f8742 1508
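/* Illustrative sketch: each REG_CLASS_CONTENTS entry above is a bitmask
   over hard register numbers, 32 registers per integer, lowest-numbered
   register in the least significant bit.  So AD_REGS = { 0x03, ... }
   covers registers 0 and 1 (ax and dx), and the 0x90000 part of
   NON_Q_REGS covers registers 16 and 19 (argp and frame).  The helper
   below is invented for the example.  */
#if 0
static int
example_class_contains_p (const unsigned int contents[3], int regno)
{
  return (contents[regno / 32] >> (regno % 32)) & 1;
}
#endif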
42db504c
SB
1509/* When this hook returns true for MODE, the compiler allows
1510 registers explicitly used in the rtl to be used as spill registers
1511 but prevents the compiler from extending the lifetime of these
1512 registers. */
1513#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P hook_bool_mode_true
c98f8742 1514
fc27f749 1515#define QI_REG_P(X) (REG_P (X) && QI_REGNO_P (REGNO (X)))
05416670
UB
1516#define QI_REGNO_P(N) IN_RANGE ((N), FIRST_QI_REG, LAST_QI_REG)
1517
1518#define LEGACY_INT_REG_P(X) (REG_P (X) && LEGACY_INT_REGNO_P (REGNO (X)))
1519#define LEGACY_INT_REGNO_P(N) (IN_RANGE ((N), FIRST_INT_REG, LAST_INT_REG))
1520
1521#define REX_INT_REG_P(X) (REG_P (X) && REX_INT_REGNO_P (REGNO (X)))
1522#define REX_INT_REGNO_P(N) \
1523 IN_RANGE ((N), FIRST_REX_INT_REG, LAST_REX_INT_REG)
3f3f2124 1524
58b0b34c 1525#define GENERAL_REG_P(X) (REG_P (X) && GENERAL_REGNO_P (REGNO (X)))
fc27f749 1526#define GENERAL_REGNO_P(N) \
58b0b34c 1527 (LEGACY_INT_REGNO_P (N) || REX_INT_REGNO_P (N))
3f3f2124 1528
fc27f749
UB
1529#define ANY_QI_REG_P(X) (REG_P (X) && ANY_QI_REGNO_P (REGNO (X)))
1530#define ANY_QI_REGNO_P(N) \
1531 (TARGET_64BIT ? GENERAL_REGNO_P (N) : QI_REGNO_P (N))
3f3f2124 1532
66aaf16f
UB
1533#define STACK_REG_P(X) (REG_P (X) && STACK_REGNO_P (REGNO (X)))
1534#define STACK_REGNO_P(N) IN_RANGE ((N), FIRST_STACK_REG, LAST_STACK_REG)
fc27f749 1535
fc27f749 1536#define SSE_REG_P(X) (REG_P (X) && SSE_REGNO_P (REGNO (X)))
fb84c7a0
UB
1537#define SSE_REGNO_P(N) \
1538 (IN_RANGE ((N), FIRST_SSE_REG, LAST_SSE_REG) \
3f97cb0b
AI
1539 || REX_SSE_REGNO_P (N) \
1540 || EXT_REX_SSE_REGNO_P (N))
3f3f2124 1541
4977bab6 1542#define REX_SSE_REGNO_P(N) \
fb84c7a0 1543 IN_RANGE ((N), FIRST_REX_SSE_REG, LAST_REX_SSE_REG)
4977bab6 1544
0a48088a
IT
1545#define EXT_REX_SSE_REG_P(X) (REG_P (X) && EXT_REX_SSE_REGNO_P (REGNO (X)))
1546
3f97cb0b
AI
1547#define EXT_REX_SSE_REGNO_P(N) \
1548 IN_RANGE ((N), FIRST_EXT_REX_SSE_REG, LAST_EXT_REX_SSE_REG)
1549
05416670
UB
1550#define ANY_FP_REG_P(X) (REG_P (X) && ANY_FP_REGNO_P (REGNO (X)))
1551#define ANY_FP_REGNO_P(N) (STACK_REGNO_P (N) || SSE_REGNO_P (N))
3f97cb0b 1552
9e4a4dd6 1553#define MASK_REG_P(X) (REG_P (X) && MASK_REGNO_P (REGNO (X)))
85a77221 1554#define MASK_REGNO_P(N) IN_RANGE ((N), FIRST_MASK_REG, LAST_MASK_REG)
e21b52af 1555#define MASK_PAIR_REGNO_P(N) ((((N) - FIRST_MASK_REG) & 1) == 0)
446988df 1556
fc27f749 1557#define MMX_REG_P(X) (REG_P (X) && MMX_REGNO_P (REGNO (X)))
fb84c7a0 1558#define MMX_REGNO_P(N) IN_RANGE ((N), FIRST_MMX_REG, LAST_MMX_REG)
fce5a9f2 1559
e075ae69 1560#define CC_REG_P(X) (REG_P (X) && CC_REGNO_P (REGNO (X)))
adb67ffb 1561#define CC_REGNO_P(X) ((X) == FLAGS_REG)
e075ae69 1562
5fbb13a7
KY
1563#define MOD4_SSE_REG_P(X) (REG_P (X) && MOD4_SSE_REGNO_P (REGNO (X)))
1564#define MOD4_SSE_REGNO_P(N) ((N) == XMM0_REG \
1565 || (N) == XMM4_REG \
1566 || (N) == XMM8_REG \
1567 || (N) == XMM12_REG \
1568 || (N) == XMM16_REG \
1569 || (N) == XMM20_REG \
1570 || (N) == XMM24_REG \
1571 || (N) == XMM28_REG)
1572
05416670
UB
1573/* First floating point reg */
1574#define FIRST_FLOAT_REG FIRST_STACK_REG
1575#define STACK_TOP_P(X) (REG_P (X) && REGNO (X) == FIRST_FLOAT_REG)
1576
02469d3a
UB
1577#define GET_SSE_REGNO(N) \
1578 ((N) < 8 ? FIRST_SSE_REG + (N) \
1579 : (N) < 16 ? FIRST_REX_SSE_REG + (N) - 8 \
1580 : FIRST_EXT_REX_SSE_REG + (N) - 16)
05416670 1581
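/* Illustrative sketch: the 32 logical %xmmN registers live in three
   non-contiguous blocks of hard registers (xmm0-7, xmm8-15, xmm16-31),
   and GET_SSE_REGNO above splices those ranges back together.  The
   numeric bases below follow the register tables in this file (xmm0 is
   hard register 20, xmm8 is 44, xmm16 is 52); the function name is
   invented.  */
#if 0
static int
example_get_sse_regno (int n)
{
  const int first_sse = 20, first_rex_sse = 44, first_ext_rex_sse = 52;

  if (n < 8)
    return first_sse + n;                  /* %xmm0 .. %xmm7   */
  else if (n < 16)
    return first_rex_sse + (n - 8);        /* %xmm8 .. %xmm15  */
  else
    return first_ext_rex_sse + (n - 16);   /* %xmm16 .. %xmm31 */
}
#endif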
c98f8742
JVA
1582/* The class value for index registers, and the one for base regs. */
1583
1584#define INDEX_REG_CLASS INDEX_REGS
1585#define BASE_REG_CLASS GENERAL_REGS
c98f8742
JVA
1586\f
1587/* Stack layout; function entry, exit and calling. */
1588
1589/* Define this if pushing a word on the stack
1590 makes the stack pointer a smaller address. */
62f9f30b 1591#define STACK_GROWS_DOWNWARD 1
c98f8742 1592
a4d05547 1593/* Define this to nonzero if the nominal address of the stack frame
c98f8742
JVA
1594 is at the high-address end of the local variables;
1595 that is, each additional local variable allocated
1596 goes at a more negative offset in the frame. */
f62c8a5c 1597#define FRAME_GROWS_DOWNWARD 1
c98f8742 1598
7b4df2bf 1599#define PUSH_ROUNDING(BYTES) ix86_push_rounding (BYTES)
8c2b2fae
UB
1600
1601/* If defined, the maximum amount of space required for outgoing arguments
1602 will be computed and placed into the variable `crtl->outgoing_args_size'.
1603 No space will be pushed onto the stack for each call; instead, the
1604 function prologue should increase the stack frame size by this amount.
41ee845b
JH
1605
1606 In 32bit mode enabling argument accumulation results in about 5% code size
56aae4b7 1607 growth because move instructions are less compact than push. In 64bit
41ee845b
JH
1608 mode the difference is less drastic but visible.
1609
1610 FIXME: Unlike earlier implementations, the size of unwind info seems to
f830ddc2 1611 actually grow with accumulation. Is that because accumulated args
41ee845b 1612 unwind info became unnecessarily bloated?
f830ddc2
RH
1613
1614 With the 64-bit MS ABI, we can generate correct code with or without
1615 accumulated args, but because of OUTGOING_REG_PARM_STACK_SPACE the code
1616 generated without accumulated args is terrible.
41ee845b
JH
1617
1618 If stack probes are required, the space used for large function
1619 arguments on the stack must also be probed, so enable
f8071c05
L
1620 -maccumulate-outgoing-args so this happens in the prologue.
1621
1622 We must use argument accumulation in interrupt function if stack
1623 may be realigned to avoid DRAP. */
f73ad30e 1624
6c6094f1 1625#define ACCUMULATE_OUTGOING_ARGS \
f8071c05
L
1626 ((TARGET_ACCUMULATE_OUTGOING_ARGS \
1627 && optimize_function_for_speed_p (cfun)) \
1628 || (cfun->machine->func_type != TYPE_NORMAL \
1629 && crtl->stack_realign_needed) \
1630 || TARGET_STACK_PROBE \
1631 || TARGET_64BIT_MS_ABI \
ff734e26 1632 || (TARGET_MACHO && crtl->profile))
f73ad30e
JH
1633
1634/* If defined, a C expression whose value is nonzero when we want to use PUSH
1635 instructions to pass outgoing arguments. */
1636
1637#define PUSH_ARGS (TARGET_PUSH_ARGS && !ACCUMULATE_OUTGOING_ARGS)
1638
2da4124d
L
1639/* We want the stack and args to grow in opposite directions, even if
1640 PUSH_ARGS is 0. */
1641#define PUSH_ARGS_REVERSED 1
1642
c98f8742
JVA
1643/* Offset of first parameter from the argument pointer register value. */
1644#define FIRST_PARM_OFFSET(FNDECL) 0
1645
a7180f70
BS
1646/* Define this macro if functions should assume that stack space has been
1647 allocated for arguments even when their values are passed in registers.
1648
1649 The value of this macro is the size, in bytes, of the area reserved for
1650 arguments passed in registers for the function represented by FNDECL.
1651
1652 This space can be allocated by the caller, or be a part of the
1653 machine-dependent stack frame: `OUTGOING_REG_PARM_STACK_SPACE' says
1654 which. */
7c800926
KT
1655#define REG_PARM_STACK_SPACE(FNDECL) ix86_reg_parm_stack_space (FNDECL)
1656
4ae8027b 1657#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) \
6510e8bb 1658 (TARGET_64BIT && ix86_function_type_abi (FNTYPE) == MS_ABI)
7c800926 1659
c98f8742
JVA
1660/* Define how to find the value returned by a library function
1661 assuming the value has mode MODE. */
1662
4ae8027b 1663#define LIBCALL_VALUE(MODE) ix86_libcall_value (MODE)
c98f8742 1664
e9125c09
TW
1665/* Define the size of the result block used for communication between
1666 untyped_call and untyped_return. The block contains a DImode value
1667 followed by the block used by fnsave and frstor. */
1668
1669#define APPLY_RESULT_SIZE (8+108)
1670
b08de47e 1671/* 1 if N is a possible register number for function argument passing. */
53c17031 1672#define FUNCTION_ARG_REGNO_P(N) ix86_function_arg_regno_p (N)
c98f8742
JVA
1673
1674/* Define a data type for recording info about an argument list
1675 during the scan of that argument list. This data type should
1676 hold all necessary information about the function itself
1677 and about the args processed so far, enough to enable macros
b08de47e 1678 such as FUNCTION_ARG to determine where the next arg should go. */
c98f8742 1679
e075ae69 1680typedef struct ix86_args {
fa283935 1681 int words; /* # words passed so far */
b08de47e
MM
1682 int nregs; /* # registers available for passing */
1683 int regno; /* next available register number */
3e65f251
KT
1684 int fastcall; /* fastcall or thiscall calling convention
1685 is used */
fa283935 1686 int sse_words; /* # sse words passed so far */
a7180f70 1687 int sse_nregs; /* # sse registers available for passing */
223cdd15
UB
1688 int warn_avx512f; /* True when we want to warn
1689 about AVX512F ABI. */
95879c72 1690 int warn_avx; /* True when we want to warn about AVX ABI. */
47a37ce4 1691 int warn_sse; /* True when we want to warn about SSE ABI. */
fa283935 1692 int warn_mmx; /* True when we want to warn about MMX ABI. */
974aedcc
MP
1693 int warn_empty; /* True when we want to warn about empty classes
1694 passing ABI change. */
fa283935
UB
1695 int sse_regno; /* next available sse register number */
1696 int mmx_words; /* # mmx words passed so far */
bcf17554
JH
1697 int mmx_nregs; /* # mmx registers available for passing */
1698 int mmx_regno; /* next available mmx register number */
892a2d68 1699 int maybe_vaarg; /* true for calls to possibly variadic functions. */
2767a7f2 1700 int caller; /* true if it is caller. */
2824d6e5
UB
1701 int float_in_sse; /* Set to 1 or 2 for 32bit targets if
1702 SFmode/DFmode arguments should be passed
1703 in SSE registers. Otherwise 0. */
d5e254e1 1704 int stdarg; /* Set to 1 if function is stdarg. */
51212b32 1705 enum calling_abi call_abi; /* Set to SYSV_ABI for sysv abi. Otherwise
7c800926 1706 MS_ABI for ms abi. */
e66fc623 1707 tree decl; /* Callee decl. */
b08de47e 1708} CUMULATIVE_ARGS;
c98f8742
JVA
1709
1710/* Initialize a variable CUM of type CUMULATIVE_ARGS
1711 for a call to a function whose data type is FNTYPE.
b08de47e 1712 For a library call, FNTYPE is 0. */
c98f8742 1713
0f6937fe 1714#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
2767a7f2
L
1715 init_cumulative_args (&(CUM), (FNTYPE), (LIBNAME), (FNDECL), \
1716 (N_NAMED_ARGS) != -1)
c98f8742 1717
c98f8742
JVA
1718/* Output assembler code to FILE to increment profiler label # LABELNO
1719 for profiling a function entry. */
1720
1a6e82b8
UB
1721#define FUNCTION_PROFILER(FILE, LABELNO) \
1722 x86_function_profiler ((FILE), (LABELNO))
a5fa1ecd
JH
1723
1724#define MCOUNT_NAME "_mcount"
1725
3c5273a9
KT
1726#define MCOUNT_NAME_BEFORE_PROLOGUE "__fentry__"
1727
a5fa1ecd 1728#define PROFILE_COUNT_REGISTER "edx"
c98f8742
JVA
1729
1730/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
1731 the stack pointer does not matter. The value is tested only in
1732 functions that have frame pointers.
1733 No definition is equivalent to always zero. */
fce5a9f2 1734/* Note on the 386 it might be more efficient not to define this since
c98f8742
JVA
1735 we have to restore it ourselves from the frame pointer, in order to
1736 use pop. */
1737
1738#define EXIT_IGNORE_STACK 1
1739
f8071c05
L
1740/* Define this macro as a C expression that is nonzero for registers
1741 used by the epilogue or the `return' pattern. */
1742
1743#define EPILOGUE_USES(REGNO) ix86_epilogue_uses (REGNO)
1744
c98f8742
JVA
1745/* Output assembler code for a block containing the constant parts
1746 of a trampoline, leaving space for the variable parts. */
1747
a269a03c 1748/* On the 386, the trampoline contains two instructions:
c98f8742 1749 mov #STATIC,ecx
a269a03c
JC
1750 jmp FUNCTION
1751 The trampoline is generated entirely at runtime. The operand of JMP
1752 is the address of FUNCTION relative to the instruction following the
1753 JMP (which is 5 bytes long). */
c98f8742
JVA
1754
1755/* Length in units of the trampoline for entering a nested function. */
1756
6514899f 1757#define TRAMPOLINE_SIZE (TARGET_64BIT ? 28 : 14)
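/* Illustrative sketch: computing the jmp operand the way the comment
   above describes.  The displacement stored in the trampoline is the
   address of FUNCTION relative to the instruction following the 5-byte
   jmp, so it is measured from the end of the jmp.  Addresses and the
   helper name are invented.  */
#if 0
#include <stdint.h>

static int32_t
example_trampoline_jmp_disp (uint32_t jmp_insn_addr, uint32_t function_addr)
{
  return (int32_t) (function_addr - (jmp_insn_addr + 5));
}
#endif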
c98f8742
JVA
1758\f
1759/* Definitions for register eliminations.
1760
1761 This is an array of structures. Each structure initializes one pair
1762 of eliminable registers. The "from" register number is given first,
1763 followed by "to". Eliminations of the same "from" register are listed
1764 in order of preference.
1765
afc2cd05
NC
1766 There are two registers that can always be eliminated on the i386.
1767 The frame pointer and the arg pointer can be replaced by either the
1768 hard frame pointer or to the stack pointer, depending upon the
1769 circumstances. The hard frame pointer is not used before reload and
1770 so it is not eligible for elimination. */
c98f8742 1771
564d80f4
JH
1772#define ELIMINABLE_REGS \
1773{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
1774 { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
1775 { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
1776 { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} \
c98f8742 1777
c98f8742
JVA
1778/* Define the offset between two registers, one to be eliminated, and the other
1779 its replacement, at the start of a routine. */
1780
d9a5f180
GS
1781#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
1782 ((OFFSET) = ix86_initial_elimination_offset ((FROM), (TO)))
c98f8742
JVA
1783\f
1784/* Addressing modes, and classification of registers for them. */
1785
c98f8742
JVA
1786/* Macros to check register numbers against specific register classes. */
1787
1788/* These assume that REGNO is a hard or pseudo reg number.
1789 They give nonzero only if REGNO is a hard reg of the suitable class
1790 or a pseudo reg currently allocated to a suitable hard reg.
1791 Since they use reg_renumber, they are safe only once reg_renumber
aeb9f7cf
SB
1792 has been allocated, which happens in reginfo.c during register
1793 allocation. */
c98f8742 1794
3f3f2124
JH
1795#define REGNO_OK_FOR_INDEX_P(REGNO) \
1796 ((REGNO) < STACK_POINTER_REGNUM \
fb84c7a0
UB
1797 || REX_INT_REGNO_P (REGNO) \
1798 || (unsigned) reg_renumber[(REGNO)] < STACK_POINTER_REGNUM \
1799 || REX_INT_REGNO_P ((unsigned) reg_renumber[(REGNO)]))
c98f8742 1800
3f3f2124 1801#define REGNO_OK_FOR_BASE_P(REGNO) \
fb84c7a0 1802 (GENERAL_REGNO_P (REGNO) \
3f3f2124
JH
1803 || (REGNO) == ARG_POINTER_REGNUM \
1804 || (REGNO) == FRAME_POINTER_REGNUM \
fb84c7a0 1805 || GENERAL_REGNO_P ((unsigned) reg_renumber[(REGNO)]))
c98f8742 1806
c98f8742
JVA
1807/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
1808 and check its validity for a certain class.
1809 We have two alternate definitions for each of them.
1810 The usual definition accepts all pseudo regs; the other rejects
1811 them unless they have been allocated suitable hard regs.
1812 The symbol REG_OK_STRICT causes the latter definition to be used.
1813
1814 Most source files want to accept pseudo regs in the hope that
1815 they will get allocated to the class that the insn wants them to be in.
1816 Source files for reload pass need to be strict.
1817 After reload, it makes no difference, since pseudo regs have
1818 been eliminated by then. */
1819
c98f8742 1820
ff482c8d 1821/* Non strict versions, pseudos are ok. */
3b3c6a3f
MM
1822#define REG_OK_FOR_INDEX_NONSTRICT_P(X) \
1823 (REGNO (X) < STACK_POINTER_REGNUM \
fb84c7a0 1824 || REX_INT_REGNO_P (REGNO (X)) \
c98f8742
JVA
1825 || REGNO (X) >= FIRST_PSEUDO_REGISTER)
1826
3b3c6a3f 1827#define REG_OK_FOR_BASE_NONSTRICT_P(X) \
fb84c7a0 1828 (GENERAL_REGNO_P (REGNO (X)) \
3b3c6a3f 1829 || REGNO (X) == ARG_POINTER_REGNUM \
3f3f2124 1830 || REGNO (X) == FRAME_POINTER_REGNUM \
3b3c6a3f 1831 || REGNO (X) >= FIRST_PSEUDO_REGISTER)
c98f8742 1832
3b3c6a3f
MM
1833/* Strict versions, hard registers only */
1834#define REG_OK_FOR_INDEX_STRICT_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
1835#define REG_OK_FOR_BASE_STRICT_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
c98f8742 1836
3b3c6a3f 1837#ifndef REG_OK_STRICT
d9a5f180
GS
1838#define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_INDEX_NONSTRICT_P (X)
1839#define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_NONSTRICT_P (X)
3b3c6a3f
MM
1840
1841#else
d9a5f180
GS
1842#define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_INDEX_STRICT_P (X)
1843#define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_STRICT_P (X)
c98f8742
JVA
1844#endif
1845
331d9186 1846/* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
c98f8742
JVA
1847 that is a valid memory address for an instruction.
1848 The MODE argument is the machine mode for the MEM expression
1849 that wants to use this address.
1850
331d9186 1851 The other macros defined here are used only in TARGET_LEGITIMATE_ADDRESS_P,
c98f8742
JVA
1852 except for CONSTANT_ADDRESS_P which is usually machine-independent.
1853
1854 See legitimize_pic_address in i386.c for details as to what
1855 constitutes a legitimate address when -fpic is used. */
1856
1857#define MAX_REGS_PER_ADDRESS 2
1858
f996902d 1859#define CONSTANT_ADDRESS_P(X) constant_address_p (X)
c98f8742 1860
b949ea8b
JW
1861/* If defined, a C expression to determine the base term of address X.
1862 This macro is used in only one place: `find_base_term' in alias.c.
1863
1864 It is always safe for this macro to not be defined. It exists so
1865 that alias analysis can understand machine-dependent addresses.
1866
1867 The typical use of this macro is to handle addresses containing
1868 a label_ref or symbol_ref within an UNSPEC. */
1869
d9a5f180 1870#define FIND_BASE_TERM(X) ix86_find_base_term (X)
b949ea8b 1871
c98f8742 1872/* Nonzero if the constant value X is a legitimate general operand
fce5a9f2 1873 when generating PIC code. It is given that flag_pic is on and
c98f8742
JVA
1874 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
1875
f996902d 1876#define LEGITIMATE_PIC_OPERAND_P(X) legitimate_pic_operand_p (X)
c98f8742
JVA
1877
1878#define SYMBOLIC_CONST(X) \
d9a5f180
GS
1879 (GET_CODE (X) == SYMBOL_REF \
1880 || GET_CODE (X) == LABEL_REF \
1881 || (GET_CODE (X) == CONST && symbolic_reference_mentioned_p (X)))
c98f8742 1882\f
b08de47e
MM
1883/* Max number of args passed in registers. If this is more than 3, we will
1884 have problems with ebx (register #4), since it is a caller save register and
1885 is also used as the pic register in ELF. So for now, don't allow more than
1886 3 registers to be passed in registers. */
1887
7c800926
KT
1888/* Abi specific values for REGPARM_MAX and SSE_REGPARM_MAX */
1889#define X86_64_REGPARM_MAX 6
72fa3605 1890#define X86_64_MS_REGPARM_MAX 4
7c800926 1891
72fa3605 1892#define X86_32_REGPARM_MAX 3
7c800926 1893
4ae8027b 1894#define REGPARM_MAX \
2824d6e5
UB
1895 (TARGET_64BIT \
1896 ? (TARGET_64BIT_MS_ABI \
1897 ? X86_64_MS_REGPARM_MAX \
1898 : X86_64_REGPARM_MAX) \
4ae8027b 1899 : X86_32_REGPARM_MAX)
d2836273 1900
72fa3605
UB
1901#define X86_64_SSE_REGPARM_MAX 8
1902#define X86_64_MS_SSE_REGPARM_MAX 4
1903
b6010cab 1904#define X86_32_SSE_REGPARM_MAX (TARGET_SSE ? (TARGET_MACHO ? 4 : 3) : 0)
72fa3605 1905
4ae8027b 1906#define SSE_REGPARM_MAX \
2824d6e5
UB
1907 (TARGET_64BIT \
1908 ? (TARGET_64BIT_MS_ABI \
1909 ? X86_64_MS_SSE_REGPARM_MAX \
1910 : X86_64_SSE_REGPARM_MAX) \
4ae8027b 1911 : X86_32_SSE_REGPARM_MAX)
bcf17554
JH
1912
1913#define MMX_REGPARM_MAX (TARGET_64BIT ? 0 : (TARGET_MMX ? 3 : 0))
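/* Illustrative sketch of the limits encoded by REGPARM_MAX and
   SSE_REGPARM_MAX above: the 64-bit SysV ABI passes up to 6 integer and
   8 SSE arguments in registers, the 64-bit MS ABI 4 and 4, and the
   32-bit ABI 3 integer arguments (the SSE count there depends on -msse
   and on Darwin).  The function name is invented.  */
#if 0
static void
example_regparm_limits (int is_64bit, int is_ms_abi,
                        int *int_regs, int *sse_regs)
{
  if (is_64bit)
    {
      *int_regs = is_ms_abi ? 4 : 6;
      *sse_regs = is_ms_abi ? 4 : 8;
    }
  else
    {
      *int_regs = 3;
      *sse_regs = 3;   /* 4 on Darwin, 0 without SSE.  */
    }
}
#endif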
c98f8742
JVA
1914\f
1915/* Specify the machine mode that this machine uses
1916 for the index in the tablejump instruction. */
dc4d7240 1917#define CASE_VECTOR_MODE \
6025b127 1918 (!TARGET_LP64 || (flag_pic && ix86_cmodel != CM_LARGE_PIC) ? SImode : DImode)
c98f8742 1919
c98f8742
JVA
1920/* Define this as 1 if `char' should by default be signed; else as 0. */
1921#define DEFAULT_SIGNED_CHAR 1
1922
1923/* Max number of bytes we can move from memory to memory
1924 in one reasonably fast instruction. */
65d9c0ab
JH
1925#define MOVE_MAX 16
1926
1927/* MOVE_MAX_PIECES is the number of bytes at a time which we can
1928 move efficiently, as opposed to MOVE_MAX which is the maximum
df7ec09f
L
1929 number of bytes we can move with a single instruction.
1930
1931 ??? We should use TImode in 32-bit mode and use OImode or XImode
1932 if they are available. But since by_pieces_ninsns determines the
1933 widest mode with MAX_FIXED_MODE_SIZE, we can only use TImode in
1934 64-bit mode. */
1935#define MOVE_MAX_PIECES \
1936 ((TARGET_64BIT \
1937 && TARGET_SSE2 \
1938 && TARGET_SSE_UNALIGNED_LOAD_OPTIMAL \
1939 && TARGET_SSE_UNALIGNED_STORE_OPTIMAL) \
1940 ? GET_MODE_SIZE (TImode) : UNITS_PER_WORD)
c98f8742 1941
7e24ffc9 1942/* If a memory-to-memory move would take MOVE_RATIO or more simple
76715c32 1943 move-instruction pairs, we will do a cpymem or libcall instead.
7e24ffc9
HPN
1944 Increasing the value will always make code faster, but eventually
1945 incurs high cost in increased code size.
c98f8742 1946
e2e52e1b 1947 If you don't define this, a reasonable default is used. */
c98f8742 1948
e04ad03d 1949#define MOVE_RATIO(speed) ((speed) ? ix86_cost->move_ratio : 3)
c98f8742 1950
45d78e7f
JJ
1951/* If a clear memory operation would take CLEAR_RATIO or more simple
1952 move-instruction sequences, we will do a clrmem or libcall instead. */
1953
25e22b19 1954#define CLEAR_RATIO(speed) ((speed) ? ix86_cost->clear_ratio : 2)
45d78e7f 1955
53f00dde
UB
1956/* Define if shifts truncate the shift count which implies one can
1957 omit a sign-extension or zero-extension of a shift count.
1958
1959 On i386, shifts do truncate the count. But bit test instructions
1960 take the modulo of the bit offset operand. */
c98f8742
JVA
1961
1962/* #define SHIFT_COUNT_TRUNCATED */
1963
d9f32422
JH
1964/* A macro to update M and UNSIGNEDP when an object whose type is
1965 TYPE and which has the specified mode and signedness is to be
1966 stored in a register. This macro is only called when TYPE is a
1967 scalar type.
1968
f710504c 1969 On i386 it is sometimes useful to promote HImode and QImode
d9f32422
JH
1970 quantities to SImode. The choice depends on target type. */
1971
1972#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
d9a5f180 1973do { \
d9f32422
JH
1974 if (((MODE) == HImode && TARGET_PROMOTE_HI_REGS) \
1975 || ((MODE) == QImode && TARGET_PROMOTE_QI_REGS)) \
d9a5f180
GS
1976 (MODE) = SImode; \
1977} while (0)
d9f32422 1978
c98f8742
JVA
1979/* Specify the machine mode that pointers have.
1980 After generation of rtl, the compiler makes no further distinction
1981 between pointers and any other objects of this machine mode. */
28968d91 1982#define Pmode (ix86_pmode == PMODE_DI ? DImode : SImode)
c98f8742 1983
5e1e91c4
L
1984/* Supply a definition of STACK_SAVEAREA_MODE for emit_stack_save.
1985 NONLOCAL needs space to save both shadow stack and stack pointers.
1986
1987 FIXME: We only need to save and restore stack pointer in ptr_mode.
1988 But expand_builtin_setjmp_setup and expand_builtin_longjmp use Pmode
1989 to save and restore stack pointer. See
1990 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84150
1991 */
1992#define STACK_SAVEAREA_MODE(LEVEL) \
1993 ((LEVEL) == SAVE_NONLOCAL ? (TARGET_64BIT ? TImode : DImode) : Pmode)
1994
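/* Illustrative sketch: a SAVE_NONLOCAL area must hold both the shadow
   stack pointer and the stack pointer, i.e. two pointer-sized words,
   which is why the mode above is TImode (128 bits) in 64-bit mode and
   DImode (64 bits) in 32-bit mode.  The helper name is invented.  */
#if 0
static int
example_nonlocal_save_bits (int pointer_bits)
{
  return 2 * pointer_bits;   /* 2*64 = 128 (TImode), 2*32 = 64 (DImode) */
}
#endif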
d16b9d1c
UB
1995/* Specify the machine_mode of the size increment
1996 operand of an 'allocate_stack' named pattern. */
1997#define STACK_SIZE_MODE Pmode
1998
f0ea7581
L
1999/* A C expression whose value is zero if pointers that need to be extended
2000 from being `POINTER_SIZE' bits wide to `Pmode' are sign-extended and
2001 greater then zero if they are zero-extended and less then zero if the
2002 ptr_extend instruction should be used. */
2003
2004#define POINTERS_EXTEND_UNSIGNED 1
2005
c98f8742
JVA
2006/* A function address in a call instruction
2007 is a byte address (for indexing purposes)
2008 so give the MEM rtx a byte's mode. */
2009#define FUNCTION_MODE QImode
d4ba09c0 2010\f
d4ba09c0 2011
d4ba09c0
SC
2012/* A C expression for the cost of a branch instruction. A value of 1
2013 is the default; other values are interpreted relative to that. */
2014
3a4fd356
JH
2015#define BRANCH_COST(speed_p, predictable_p) \
2016 (!(speed_p) ? 2 : (predictable_p) ? 0 : ix86_branch_cost)
d4ba09c0 2017
e327d1a3
L
2018/* An integer expression for the size in bits of the largest integer machine
2019 mode that should actually be used. We allow pairs of registers. */
2020#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TARGET_64BIT ? TImode : DImode)
2021
d4ba09c0
SC
2022/* Define this macro as a C expression which is nonzero if accessing
2023 less than a word of memory (i.e. a `char' or a `short') is no
2024 faster than accessing a word of memory, i.e., if such an access
2025 requires more than one instruction or if there is no difference in
2026 cost between byte and (aligned) word loads.
2027
2028 When this macro is not defined, the compiler will access a field by
2029 finding the smallest containing object; when it is defined, a
2030 fullword load will be used if alignment permits. Unless byte
2031 accesses are faster than word accesses, using word accesses is
2032 preferable since it may eliminate subsequent memory access if
2033 subsequent accesses occur to other fields in the same word of the
2034 structure, but to different bytes. */
2035
2036#define SLOW_BYTE_ACCESS 0
2037
2038/* Nonzero if access to memory by shorts is slow and undesirable. */
2039#define SLOW_SHORT_ACCESS 0
2040
d4ba09c0
SC
2041/* Define this macro if it is as good or better to call a constant
2042 function address than to call an address kept in a register.
2043
2044 Desirable on the 386 because a CALL with a constant address is
2045 faster than one with a register address. */
2046
1e8552c2 2047#define NO_FUNCTION_CSE 1
c98f8742 2048\f
c572e5ba
JVA
2049/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2050 return the mode to be used for the comparison.
2051
2052 For floating-point equality comparisons, CCFPEQmode should be used.
e075ae69 2053 VOIDmode should be used in all other cases.
c572e5ba 2054
16189740 2055 For integer comparisons against zero, reduce to CCNOmode or CCZmode if
e075ae69 2056 possible, to allow for more combinations. */
c98f8742 2057
d9a5f180 2058#define SELECT_CC_MODE(OP, X, Y) ix86_cc_mode ((OP), (X), (Y))
9e7adcb3 2059
9cd10576 2060/* Return nonzero if MODE implies a floating point inequality can be
9e7adcb3
JH
2061 reversed. */
2062
2063#define REVERSIBLE_CC_MODE(MODE) 1
2064
2065/* A C expression whose value is reversed condition code of the CODE for
2066 comparison done in CC_MODE mode. */
3c5cb3e4 2067#define REVERSE_CONDITION(CODE, MODE) ix86_reverse_condition ((CODE), (MODE))
9e7adcb3 2068
c98f8742
JVA
2069\f
2070/* Control the assembler format that we output, to the extent
2071 this does not vary between assemblers. */
2072
2073/* How to refer to registers in assembler output.
892a2d68 2074 This sequence is indexed by compiler's hard-register-number (see above). */
c98f8742 2075
a7b376ee 2076/* In order to refer to the first 8 regs as 32-bit regs, prefix an "e".
c98f8742
JVA
2077 For non floating point regs, the following are the HImode names.
2078
2079 For float regs, the stack top is sometimes referred to as "%st(0)"
6e2188e0
NF
2080 instead of just "%st". TARGET_PRINT_OPERAND handles this with the
2081 "y" code. */
c98f8742 2082
a7180f70
BS
2083#define HI_REGISTER_NAMES \
2084{"ax","dx","cx","bx","si","di","bp","sp", \
480feac0 2085 "st","st(1)","st(2)","st(3)","st(4)","st(5)","st(6)","st(7)", \
eaa17c21 2086 "argp", "flags", "fpsr", "frame", \
a7180f70 2087 "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7", \
03c259ad 2088 "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", \
3f3f2124 2089 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
3f97cb0b
AI
2090 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", \
2091 "xmm16", "xmm17", "xmm18", "xmm19", \
2092 "xmm20", "xmm21", "xmm22", "xmm23", \
2093 "xmm24", "xmm25", "xmm26", "xmm27", \
85a77221 2094 "xmm28", "xmm29", "xmm30", "xmm31", \
eafa30ef 2095 "k0", "k1", "k2", "k3", "k4", "k5", "k6", "k7" }
a7180f70 2096
c98f8742
JVA
2097#define REGISTER_NAMES HI_REGISTER_NAMES
2098
50bec228
UB
2099#define QI_REGISTER_NAMES \
2100{"al", "dl", "cl", "bl", "sil", "dil", "bpl", "spl"}
2101
2102#define QI_HIGH_REGISTER_NAMES \
2103{"ah", "dh", "ch", "bh"}
2104
c98f8742
JVA
2105/* Table of additional register names to use in user input. */
2106
eaa17c21
UB
2107#define ADDITIONAL_REGISTER_NAMES \
2108{ \
2109 { "eax", AX_REG }, { "edx", DX_REG }, { "ecx", CX_REG }, { "ebx", BX_REG }, \
2110 { "esi", SI_REG }, { "edi", DI_REG }, { "ebp", BP_REG }, { "esp", SP_REG }, \
2111 { "rax", AX_REG }, { "rdx", DX_REG }, { "rcx", CX_REG }, { "rbx", BX_REG }, \
2112 { "rsi", SI_REG }, { "rdi", DI_REG }, { "rbp", BP_REG }, { "rsp", SP_REG }, \
2113 { "al", AX_REG }, { "dl", DX_REG }, { "cl", CX_REG }, { "bl", BX_REG }, \
50bec228 2114 { "sil", SI_REG }, { "dil", DI_REG }, { "bpl", BP_REG }, { "spl", SP_REG }, \
eaa17c21
UB
2115 { "ah", AX_REG }, { "dh", DX_REG }, { "ch", CX_REG }, { "bh", BX_REG }, \
2116 { "ymm0", XMM0_REG }, { "ymm1", XMM1_REG }, { "ymm2", XMM2_REG }, { "ymm3", XMM3_REG }, \
2117 { "ymm4", XMM4_REG }, { "ymm5", XMM5_REG }, { "ymm6", XMM6_REG }, { "ymm7", XMM7_REG }, \
2118 { "ymm8", XMM8_REG }, { "ymm9", XMM9_REG }, { "ymm10", XMM10_REG }, { "ymm11", XMM11_REG }, \
2119 { "ymm12", XMM12_REG }, { "ymm13", XMM13_REG }, { "ymm14", XMM14_REG }, { "ymm15", XMM15_REG }, \
2120 { "ymm16", XMM16_REG }, { "ymm17", XMM17_REG }, { "ymm18", XMM18_REG }, { "ymm19", XMM19_REG }, \
2121 { "ymm20", XMM20_REG }, { "ymm21", XMM21_REG }, { "ymm22", XMM22_REG }, { "ymm23", XMM23_REG }, \
2122 { "ymm24", XMM24_REG }, { "ymm25", XMM25_REG }, { "ymm26", XMM26_REG }, { "ymm27", XMM27_REG }, \
2123 { "ymm28", XMM28_REG }, { "ymm29", XMM29_REG }, { "ymm30", XMM30_REG }, { "ymm31", XMM31_REG }, \
2124 { "zmm0", XMM0_REG }, { "zmm1", XMM1_REG }, { "zmm2", XMM2_REG }, { "zmm3", XMM3_REG }, \
2125 { "zmm4", XMM4_REG }, { "zmm5", XMM5_REG }, { "zmm6", XMM6_REG }, { "zmm7", XMM7_REG }, \
2126 { "zmm8", XMM8_REG }, { "zmm9", XMM9_REG }, { "zmm10", XMM10_REG }, { "zmm11", XMM11_REG }, \
2127 { "zmm12", XMM12_REG }, { "zmm13", XMM13_REG }, { "zmm14", XMM14_REG }, { "zmm15", XMM15_REG }, \
2128 { "zmm16", XMM16_REG }, { "zmm17", XMM17_REG }, { "zmm18", XMM18_REG }, { "zmm19", XMM19_REG }, \
2129 { "zmm20", XMM20_REG }, { "zmm21", XMM21_REG }, { "zmm22", XMM22_REG }, { "zmm23", XMM23_REG }, \
2130 { "zmm24", XMM24_REG }, { "zmm25", XMM25_REG }, { "zmm26", XMM26_REG }, { "zmm27", XMM27_REG }, \
2131 { "zmm28", XMM28_REG }, { "zmm29", XMM29_REG }, { "zmm30", XMM30_REG }, { "zmm31", XMM31_REG } \
2132}
c98f8742 2133
c98f8742
JVA
2134/* How to renumber registers for dbx and gdb. */
2135
d9a5f180
GS
2136#define DBX_REGISTER_NUMBER(N) \
2137 (TARGET_64BIT ? dbx64_register_map[(N)] : dbx_register_map[(N)])
83774849 2138
9a82e702
MS
2139extern int const dbx_register_map[FIRST_PSEUDO_REGISTER];
2140extern int const dbx64_register_map[FIRST_PSEUDO_REGISTER];
2141extern int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER];
c98f8742 2142
469ac993
JM
2143/* Before the prologue, RA is at 0(%esp). */
2144#define INCOMING_RETURN_ADDR_RTX \
2efb4214 2145 gen_rtx_MEM (Pmode, stack_pointer_rtx)
fce5a9f2 2146
e414ab29 2147/* After the prologue, RA is at -4(AP) in the current frame. */
1a6e82b8
UB
2148#define RETURN_ADDR_RTX(COUNT, FRAME) \
2149 ((COUNT) == 0 \
2150 ? gen_rtx_MEM (Pmode, plus_constant (Pmode, arg_pointer_rtx, \
2151 -UNITS_PER_WORD)) \
2152 : gen_rtx_MEM (Pmode, plus_constant (Pmode, (FRAME), UNITS_PER_WORD)))
e414ab29 2153
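/* Illustrative sketch: where RETURN_ADDR_RTX above looks for the return
   address.  For the current frame (COUNT == 0) it is one word below the
   argument pointer; for an outer frame it is one word above that frame's
   frame address.  The helper name and address arithmetic are an invented
   simplification of the rtl built above.  */
#if 0
#include <stdint.h>

static uintptr_t
example_return_addr_slot (int count, uintptr_t arg_pointer,
                          uintptr_t frame, int word_size)
{
  return count == 0 ? arg_pointer - word_size : frame + word_size;
}
#endif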
892a2d68 2154/* PC is dbx register 8; let's use that column for RA. */
0f7fa3d0 2155#define DWARF_FRAME_RETURN_COLUMN (TARGET_64BIT ? 16 : 8)
469ac993 2156
a10b3cf1
L
2157/* Before the prologue, the return address and the error code for an
2158 exception handler are on the top of the frame. */
2159#define INCOMING_FRAME_SP_OFFSET \
2160 (cfun->machine->func_type == TYPE_EXCEPTION \
2161 ? 2 * UNITS_PER_WORD : UNITS_PER_WORD)
a6ab3aad 2162
26fc730d
JJ
2163/* The value of INCOMING_FRAME_SP_OFFSET the assembler assumes in
2164 .cfi_startproc. */
2165#define DEFAULT_INCOMING_FRAME_SP_OFFSET UNITS_PER_WORD
2166
1020a5ab 2167/* Describe how we implement __builtin_eh_return. */
2824d6e5
UB
2168#define EH_RETURN_DATA_REGNO(N) ((N) <= DX_REG ? (N) : INVALID_REGNUM)
2169#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, CX_REG)
1020a5ab 2170
ad919812 2171
e4c4ebeb
RH
2172/* Select a format to encode pointers in exception handling data. CODE
2173 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
2174 true if the symbol may be affected by dynamic relocations.
2175
2176 ??? All x86 object file formats are capable of representing this.
2177 After all, the relocation needed is the same as for the call insn.
2178 Whether or not a particular assembler allows us to enter such, I
2179 guess we'll have to see. */
d9a5f180 2180#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
72ce3d4a 2181 asm_preferred_eh_data_format ((CODE), (GLOBAL))
e4c4ebeb 2182
ec1895c1
UB
2183/* These are a couple of extensions to the formats accepted
2184 by asm_fprintf:
2185 %z prints out opcode suffix for word-mode instruction
2186 %r prints out word-mode name for reg_names[arg] */
2187#define ASM_FPRINTF_EXTENSIONS(FILE, ARGS, P) \
2188 case 'z': \
2189 fputc (TARGET_64BIT ? 'q' : 'l', (FILE)); \
2190 break; \
2191 \
2192 case 'r': \
2193 { \
2194 unsigned int regno = va_arg ((ARGS), int); \
2195 if (LEGACY_INT_REGNO_P (regno)) \
2196 fputc (TARGET_64BIT ? 'r' : 'e', (FILE)); \
2197 fputs (reg_names[regno], (FILE)); \
2198 break; \
2199 }
2200
2201/* This is how to output an insn to push a register on the stack. */
2202
2203#define ASM_OUTPUT_REG_PUSH(FILE, REGNO) \
2204 asm_fprintf ((FILE), "\tpush%z\t%%%r\n", (REGNO))
2205
2206/* This is how to output an insn to pop a register from the stack. */
c98f8742 2207
d9a5f180 2208#define ASM_OUTPUT_REG_POP(FILE, REGNO) \
ec1895c1 2209 asm_fprintf ((FILE), "\tpop%z\t%%%r\n", (REGNO))
c98f8742 2210
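/* Illustrative sketch: what the %z and %r extensions above expand to for
   a legacy integer register.  ASM_OUTPUT_REG_PUSH on "ax" prints
   "pushq %rax" when TARGET_64BIT and "pushl %eax" otherwise; for
   non-legacy registers %r adds no prefix.  This toy formatter (invented
   name) mimics only the legacy-register case.  */
#if 0
#include <stdio.h>

static void
example_output_reg_push (FILE *f, const char *reg_name, int is_64bit)
{
  fprintf (f, "\tpush%c\t%%%c%s\n",
           is_64bit ? 'q' : 'l',   /* %z: word-mode opcode suffix    */
           is_64bit ? 'r' : 'e',   /* %r: word-mode register prefix  */
           reg_name);              /* e.g. "ax" from reg_names[]     */
}
#endif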
f88c65f7 2211/* This is how to output an element of a case-vector that is absolute. */
c98f8742
JVA
2212
2213#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
d9a5f180 2214 ix86_output_addr_vec_elt ((FILE), (VALUE))
c98f8742 2215
f88c65f7 2216/* This is how to output an element of a case-vector that is relative. */
c98f8742 2217
33f7f353 2218#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
d9a5f180 2219 ix86_output_addr_diff_elt ((FILE), (VALUE), (REL))
f88c65f7 2220
63001560 2221/* When we see %v, we will print the 'v' prefix if TARGET_AVX is true. */
95879c72
L
2222
2223#define ASM_OUTPUT_AVX_PREFIX(STREAM, PTR) \
2224{ \
2225 if ((PTR)[0] == '%' && (PTR)[1] == 'v') \
63001560 2226 (PTR) += TARGET_AVX ? 1 : 2; \
95879c72
L
2227}
2228
2229/* A C statement or statements which output an assembler instruction
2230 opcode to the stdio stream STREAM. The macro-operand PTR is a
2231 variable of type `char *' which points to the opcode name in
2232 its "internal" form--the form that is written in the machine
2233 description. */
2234
2235#define ASM_OUTPUT_OPCODE(STREAM, PTR) \
2236 ASM_OUTPUT_AVX_PREFIX ((STREAM), (PTR))
2237
6a90d232
L
2238/* A C statement to output to the stdio stream FILE an assembler
2239 command to pad the location counter to a multiple of 1<<LOG
2240 bytes if it is within MAX_SKIP bytes. */
2241
2242#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
2243#undef ASM_OUTPUT_MAX_SKIP_PAD
2244#define ASM_OUTPUT_MAX_SKIP_PAD(FILE, LOG, MAX_SKIP) \
2245 if ((LOG) != 0) \
2246 { \
dd047c67 2247 if ((MAX_SKIP) == 0 || (MAX_SKIP) >= (1 << (LOG)) - 1) \
6a90d232
L
2248 fprintf ((FILE), "\t.p2align %d\n", (LOG)); \
2249 else \
2250 fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP)); \
2251 }
2252#endif
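/* Illustrative sketch of the output produced by ASM_OUTPUT_MAX_SKIP_PAD
   above: for LOG = 4 and MAX_SKIP = 10 it emits "\t.p2align 4,,10"
   (align to 16 bytes, but give up if more than 10 padding bytes would
   be needed); when MAX_SKIP covers the whole (1 << LOG) - 1 span it
   emits the unbounded "\t.p2align 4".  The function name is invented.  */
#if 0
#include <stdio.h>

static void
example_output_max_skip_pad (FILE *f, int log, int max_skip)
{
  if (log == 0)
    return;
  if (max_skip == 0 || max_skip >= (1 << log) - 1)
    fprintf (f, "\t.p2align %d\n", log);
  else
    fprintf (f, "\t.p2align %d,,%d\n", log, max_skip);
}
#endif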
2253
135a687e
KT
2254/* Write the extra assembler code needed to declare a function
2255 properly. */
2256
2257#undef ASM_OUTPUT_FUNCTION_LABEL
2258#define ASM_OUTPUT_FUNCTION_LABEL(FILE, NAME, DECL) \
1a6e82b8 2259 ix86_asm_output_function_label ((FILE), (NAME), (DECL))
135a687e 2260
f7288899
EC
2261/* Under some conditions we need jump tables in the text section,
2262 because the assembler cannot handle label differences between
2263 sections. This is the case for x86_64 on Mach-O for example. */
f88c65f7
RH
2264
2265#define JUMP_TABLES_IN_TEXT_SECTION \
f7288899
EC
2266 (flag_pic && ((TARGET_MACHO && TARGET_64BIT) \
2267 || (!TARGET_64BIT && !HAVE_AS_GOTOFF_IN_DATA)))
c98f8742 2268
cea3bd3e
RH
2269/* Switch to init or fini section via SECTION_OP, emit a call to FUNC,
2270 and switch back. For x86 we do this only to save a few bytes that
2271 would otherwise be unused in the text section. */
ad211091
KT
2272#define CRT_MKSTR2(VAL) #VAL
2273#define CRT_MKSTR(x) CRT_MKSTR2(x)
2274
2275#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
2276 asm (SECTION_OP "\n\t" \
2277 "call " CRT_MKSTR(__USER_LABEL_PREFIX__) #FUNC "\n" \
cea3bd3e 2278 TEXT_SECTION_ASM_OP);
5a579c3b
LE
2279
2280/* Default threshold for putting data in large sections
2281 with x86-64 medium memory model */
2282#define DEFAULT_LARGE_SECTION_THRESHOLD 65536
74b42c8b 2283\f
b97de419
L
2284/* Which processor to tune code generation for. These must be in sync
2285 with processor_target_table in i386.c. */
5bf0ebab
RH
2286
2287enum processor_type
2288{
b97de419
L
2289 PROCESSOR_GENERIC = 0,
2290 PROCESSOR_I386, /* 80386 */
5bf0ebab
RH
2291 PROCESSOR_I486, /* 80486DX, 80486SX, 80486DX[24] */
2292 PROCESSOR_PENTIUM,
2d6b2e28 2293 PROCESSOR_LAKEMONT,
5bf0ebab 2294 PROCESSOR_PENTIUMPRO,
5bf0ebab 2295 PROCESSOR_PENTIUM4,
89c43c0a 2296 PROCESSOR_NOCONA,
340ef734 2297 PROCESSOR_CORE2,
d3c11974
L
2298 PROCESSOR_NEHALEM,
2299 PROCESSOR_SANDYBRIDGE,
3a579e09 2300 PROCESSOR_HASWELL,
d3c11974
L
2301 PROCESSOR_BONNELL,
2302 PROCESSOR_SILVERMONT,
50e461df 2303 PROCESSOR_GOLDMONT,
74b2bb19 2304 PROCESSOR_GOLDMONT_PLUS,
a548a5a1 2305 PROCESSOR_TREMONT,
52747219 2306 PROCESSOR_KNL,
cace2309 2307 PROCESSOR_KNM,
176a3386 2308 PROCESSOR_SKYLAKE,
06caf59d 2309 PROCESSOR_SKYLAKE_AVX512,
c234d831 2310 PROCESSOR_CANNONLAKE,
79ab5364
JK
2311 PROCESSOR_ICELAKE_CLIENT,
2312 PROCESSOR_ICELAKE_SERVER,
7cab07f0 2313 PROCESSOR_CASCADELAKE,
a9fcfec3
HL
2314 PROCESSOR_TIGERLAKE,
2315 PROCESSOR_COOPERLAKE,
9a7f94d7 2316 PROCESSOR_INTEL,
b97de419
L
2317 PROCESSOR_GEODE,
2318 PROCESSOR_K6,
2319 PROCESSOR_ATHLON,
2320 PROCESSOR_K8,
21efb4d4 2321 PROCESSOR_AMDFAM10,
1133125e 2322 PROCESSOR_BDVER1,
4d652a18 2323 PROCESSOR_BDVER2,
eb2f2b44 2324 PROCESSOR_BDVER3,
ed97ad47 2325 PROCESSOR_BDVER4,
14b52538 2326 PROCESSOR_BTVER1,
e32bfc16 2327 PROCESSOR_BTVER2,
9ce29eb0 2328 PROCESSOR_ZNVER1,
2901f42f 2329 PROCESSOR_ZNVER2,
5bf0ebab
RH
2330 PROCESSOR_max
2331};
2332
c98c2430 2333#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
2559ef9f 2334extern const char *const processor_names[];
c98c2430
ML
2335
2336#include "wide-int-bitmask.h"
2337
2338const wide_int_bitmask PTA_3DNOW (HOST_WIDE_INT_1U << 0);
2339const wide_int_bitmask PTA_3DNOW_A (HOST_WIDE_INT_1U << 1);
2340const wide_int_bitmask PTA_64BIT (HOST_WIDE_INT_1U << 2);
2341const wide_int_bitmask PTA_ABM (HOST_WIDE_INT_1U << 3);
2342const wide_int_bitmask PTA_AES (HOST_WIDE_INT_1U << 4);
2343const wide_int_bitmask PTA_AVX (HOST_WIDE_INT_1U << 5);
2344const wide_int_bitmask PTA_BMI (HOST_WIDE_INT_1U << 6);
2345const wide_int_bitmask PTA_CX16 (HOST_WIDE_INT_1U << 7);
2346const wide_int_bitmask PTA_F16C (HOST_WIDE_INT_1U << 8);
2347const wide_int_bitmask PTA_FMA (HOST_WIDE_INT_1U << 9);
2348const wide_int_bitmask PTA_FMA4 (HOST_WIDE_INT_1U << 10);
2349const wide_int_bitmask PTA_FSGSBASE (HOST_WIDE_INT_1U << 11);
2350const wide_int_bitmask PTA_LWP (HOST_WIDE_INT_1U << 12);
2351const wide_int_bitmask PTA_LZCNT (HOST_WIDE_INT_1U << 13);
2352const wide_int_bitmask PTA_MMX (HOST_WIDE_INT_1U << 14);
2353const wide_int_bitmask PTA_MOVBE (HOST_WIDE_INT_1U << 15);
2354const wide_int_bitmask PTA_NO_SAHF (HOST_WIDE_INT_1U << 16);
2355const wide_int_bitmask PTA_PCLMUL (HOST_WIDE_INT_1U << 17);
2356const wide_int_bitmask PTA_POPCNT (HOST_WIDE_INT_1U << 18);
2357const wide_int_bitmask PTA_PREFETCH_SSE (HOST_WIDE_INT_1U << 19);
2358const wide_int_bitmask PTA_RDRND (HOST_WIDE_INT_1U << 20);
2359const wide_int_bitmask PTA_SSE (HOST_WIDE_INT_1U << 21);
2360const wide_int_bitmask PTA_SSE2 (HOST_WIDE_INT_1U << 22);
2361const wide_int_bitmask PTA_SSE3 (HOST_WIDE_INT_1U << 23);
2362const wide_int_bitmask PTA_SSE4_1 (HOST_WIDE_INT_1U << 24);
2363const wide_int_bitmask PTA_SSE4_2 (HOST_WIDE_INT_1U << 25);
2364const wide_int_bitmask PTA_SSE4A (HOST_WIDE_INT_1U << 26);
2365const wide_int_bitmask PTA_SSSE3 (HOST_WIDE_INT_1U << 27);
2366const wide_int_bitmask PTA_TBM (HOST_WIDE_INT_1U << 28);
2367const wide_int_bitmask PTA_XOP (HOST_WIDE_INT_1U << 29);
2368const wide_int_bitmask PTA_AVX2 (HOST_WIDE_INT_1U << 30);
2369const wide_int_bitmask PTA_BMI2 (HOST_WIDE_INT_1U << 31);
2370const wide_int_bitmask PTA_RTM (HOST_WIDE_INT_1U << 32);
2371const wide_int_bitmask PTA_HLE (HOST_WIDE_INT_1U << 33);
2372const wide_int_bitmask PTA_PRFCHW (HOST_WIDE_INT_1U << 34);
2373const wide_int_bitmask PTA_RDSEED (HOST_WIDE_INT_1U << 35);
2374const wide_int_bitmask PTA_ADX (HOST_WIDE_INT_1U << 36);
2375const wide_int_bitmask PTA_FXSR (HOST_WIDE_INT_1U << 37);
2376const wide_int_bitmask PTA_XSAVE (HOST_WIDE_INT_1U << 38);
2377const wide_int_bitmask PTA_XSAVEOPT (HOST_WIDE_INT_1U << 39);
2378const wide_int_bitmask PTA_AVX512F (HOST_WIDE_INT_1U << 40);
2379const wide_int_bitmask PTA_AVX512ER (HOST_WIDE_INT_1U << 41);
2380const wide_int_bitmask PTA_AVX512PF (HOST_WIDE_INT_1U << 42);
2381const wide_int_bitmask PTA_AVX512CD (HOST_WIDE_INT_1U << 43);
2382/* Hole after PTA_MPX was removed. */
2383const wide_int_bitmask PTA_SHA (HOST_WIDE_INT_1U << 45);
2384const wide_int_bitmask PTA_PREFETCHWT1 (HOST_WIDE_INT_1U << 46);
2385const wide_int_bitmask PTA_CLFLUSHOPT (HOST_WIDE_INT_1U << 47);
2386const wide_int_bitmask PTA_XSAVEC (HOST_WIDE_INT_1U << 48);
2387const wide_int_bitmask PTA_XSAVES (HOST_WIDE_INT_1U << 49);
2388const wide_int_bitmask PTA_AVX512DQ (HOST_WIDE_INT_1U << 50);
2389const wide_int_bitmask PTA_AVX512BW (HOST_WIDE_INT_1U << 51);
2390const wide_int_bitmask PTA_AVX512VL (HOST_WIDE_INT_1U << 52);
2391const wide_int_bitmask PTA_AVX512IFMA (HOST_WIDE_INT_1U << 53);
2392const wide_int_bitmask PTA_AVX512VBMI (HOST_WIDE_INT_1U << 54);
2393const wide_int_bitmask PTA_CLWB (HOST_WIDE_INT_1U << 55);
2394const wide_int_bitmask PTA_MWAITX (HOST_WIDE_INT_1U << 56);
2395const wide_int_bitmask PTA_CLZERO (HOST_WIDE_INT_1U << 57);
2396const wide_int_bitmask PTA_NO_80387 (HOST_WIDE_INT_1U << 58);
2397const wide_int_bitmask PTA_PKU (HOST_WIDE_INT_1U << 59);
2398const wide_int_bitmask PTA_AVX5124VNNIW (HOST_WIDE_INT_1U << 60);
2399const wide_int_bitmask PTA_AVX5124FMAPS (HOST_WIDE_INT_1U << 61);
2400const wide_int_bitmask PTA_AVX512VPOPCNTDQ (HOST_WIDE_INT_1U << 62);
2401const wide_int_bitmask PTA_SGX (HOST_WIDE_INT_1U << 63);
2402const wide_int_bitmask PTA_AVX512VNNI (0, HOST_WIDE_INT_1U);
2403const wide_int_bitmask PTA_GFNI (0, HOST_WIDE_INT_1U << 1);
2404const wide_int_bitmask PTA_VAES (0, HOST_WIDE_INT_1U << 2);
2405const wide_int_bitmask PTA_AVX512VBMI2 (0, HOST_WIDE_INT_1U << 3);
2406const wide_int_bitmask PTA_VPCLMULQDQ (0, HOST_WIDE_INT_1U << 4);
2407const wide_int_bitmask PTA_AVX512BITALG (0, HOST_WIDE_INT_1U << 5);
2408const wide_int_bitmask PTA_RDPID (0, HOST_WIDE_INT_1U << 6);
2409const wide_int_bitmask PTA_PCONFIG (0, HOST_WIDE_INT_1U << 7);
2410const wide_int_bitmask PTA_WBNOINVD (0, HOST_WIDE_INT_1U << 8);
e21b52af 2411const wide_int_bitmask PTA_AVX512VP2INTERSECT (0, HOST_WIDE_INT_1U << 9);
c98c2430 2412const wide_int_bitmask PTA_WAITPKG (0, HOST_WIDE_INT_1U << 12);
41f8d1fc 2413const wide_int_bitmask PTA_PTWRITE (0, HOST_WIDE_INT_1U << 10);
4f0e90fa 2414const wide_int_bitmask PTA_AVX512BF16 (0, HOST_WIDE_INT_1U << 11);
a9fcfec3
HL
2415const wide_int_bitmask PTA_MOVDIRI (0, HOST_WIDE_INT_1U << 13);
2416const wide_int_bitmask PTA_MOVDIR64B (0, HOST_WIDE_INT_1U << 14);
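/* Note on the encoding above: wide_int_bitmask (see wide-int-bitmask.h) holds
   two 64-bit words.  The single-argument constants occupy bits 0-63 of the
   low word; the two-argument form (0, HOST_WIDE_INT_1U << N) puts the flag at
   bit N of the high word, i.e. overall bit 64 + N, so PTA_AVX512VNNI is bit
   64, PTA_GFNI bit 65, and so on.  The type supports the usual |, & and ~
   operators, which is how the per-CPU PTA_* sets below are composed.  */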
c98c2430
ML
2417
2418const wide_int_bitmask PTA_CORE2 = PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2
2419 | PTA_SSE3 | PTA_SSSE3 | PTA_CX16 | PTA_FXSR;
2420const wide_int_bitmask PTA_NEHALEM = PTA_CORE2 | PTA_SSE4_1 | PTA_SSE4_2
2421 | PTA_POPCNT;
c9450033 2422const wide_int_bitmask PTA_WESTMERE = PTA_NEHALEM | PTA_PCLMUL;
c98c2430
ML
2423const wide_int_bitmask PTA_SANDYBRIDGE = PTA_WESTMERE | PTA_AVX | PTA_XSAVE
2424 | PTA_XSAVEOPT;
2425const wide_int_bitmask PTA_IVYBRIDGE = PTA_SANDYBRIDGE | PTA_FSGSBASE
2426 | PTA_RDRND | PTA_F16C;
2427const wide_int_bitmask PTA_HASWELL = PTA_IVYBRIDGE | PTA_AVX2 | PTA_BMI
2428 | PTA_BMI2 | PTA_LZCNT | PTA_FMA | PTA_MOVBE | PTA_HLE;
2429const wide_int_bitmask PTA_BROADWELL = PTA_HASWELL | PTA_ADX | PTA_PRFCHW
2430 | PTA_RDSEED;
c9450033 2431const wide_int_bitmask PTA_SKYLAKE = PTA_BROADWELL | PTA_AES | PTA_CLFLUSHOPT
c98c2430
ML
2432 | PTA_XSAVEC | PTA_XSAVES | PTA_SGX;
2433const wide_int_bitmask PTA_SKYLAKE_AVX512 = PTA_SKYLAKE | PTA_AVX512F
2434 | PTA_AVX512CD | PTA_AVX512VL | PTA_AVX512BW | PTA_AVX512DQ | PTA_PKU
2435 | PTA_CLWB;
7cab07f0 2436const wide_int_bitmask PTA_CASCADELAKE = PTA_SKYLAKE_AVX512 | PTA_AVX512VNNI;
a9fcfec3 2437const wide_int_bitmask PTA_COOPERLAKE = PTA_CASCADELAKE | PTA_AVX512BF16;
c98c2430
ML
2438const wide_int_bitmask PTA_CANNONLAKE = PTA_SKYLAKE | PTA_AVX512F
2439 | PTA_AVX512CD | PTA_AVX512VL | PTA_AVX512BW | PTA_AVX512DQ | PTA_PKU
2440 | PTA_AVX512VBMI | PTA_AVX512IFMA | PTA_SHA;
2441const wide_int_bitmask PTA_ICELAKE_CLIENT = PTA_CANNONLAKE | PTA_AVX512VNNI
2442 | PTA_GFNI | PTA_VAES | PTA_AVX512VBMI2 | PTA_VPCLMULQDQ | PTA_AVX512BITALG
7d5e6005 2443 | PTA_RDPID | PTA_CLWB | PTA_AVX512VPOPCNTDQ;
c98c2430
ML
2444const wide_int_bitmask PTA_ICELAKE_SERVER = PTA_ICELAKE_CLIENT | PTA_PCONFIG
2445 | PTA_WBNOINVD;
a9fcfec3
HL
2446const wide_int_bitmask PTA_TIGERLAKE = PTA_ICELAKE_CLIENT | PTA_MOVDIRI
2447 | PTA_MOVDIR64B | PTA_AVX512VP2INTERSECT;
c98c2430
ML
2448const wide_int_bitmask PTA_KNL = PTA_BROADWELL | PTA_AVX512PF | PTA_AVX512ER
2449 | PTA_AVX512F | PTA_AVX512CD;
2450const wide_int_bitmask PTA_BONNELL = PTA_CORE2 | PTA_MOVBE;
2451const wide_int_bitmask PTA_SILVERMONT = PTA_WESTMERE | PTA_MOVBE | PTA_RDRND;
c9450033 2452const wide_int_bitmask PTA_GOLDMONT = PTA_SILVERMONT | PTA_AES | PTA_SHA | PTA_XSAVE
c98c2430
ML
2453 | PTA_RDSEED | PTA_XSAVEC | PTA_XSAVES | PTA_CLFLUSHOPT | PTA_XSAVEOPT
2454 | PTA_FSGSBASE;
2455const wide_int_bitmask PTA_GOLDMONT_PLUS = PTA_GOLDMONT | PTA_RDPID
41f8d1fc 2456 | PTA_SGX | PTA_PTWRITE;
c98c2430
ML
2457const wide_int_bitmask PTA_TREMONT = PTA_GOLDMONT_PLUS | PTA_CLWB
2458 | PTA_GFNI;
2459const wide_int_bitmask PTA_KNM = PTA_KNL | PTA_AVX5124VNNIW
2460 | PTA_AVX5124FMAPS | PTA_AVX512VPOPCNTDQ;
2461
2462#ifndef GENERATOR_FILE
2463
2464#include "insn-attr-common.h"
2465
6c1dae73 2466class pta
c98c2430 2467{
6c1dae73 2468public:
c98c2430
ML
2469 const char *const name; /* processor name or nickname. */
2470 const enum processor_type processor;
2471 const enum attr_cpu schedule;
2472 const wide_int_bitmask flags;
2473};
2474
2475extern const pta processor_alias_table[];
2476extern int const pta_size;
2477#endif
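/* Illustration only (a sketch, not code from this port): option handling is
   expected to scan the table along these lines, where ARCH_NAME stands for
   the -march= string being processed:

     for (int i = 0; i < pta_size; i++)
       if (! strcmp (arch_name, processor_alias_table[i].name))
         {
           if ((processor_alias_table[i].flags & PTA_AVX2) != 0)
             ... enable AVX2 ...
           break;
         }

   The name selects a row, and the PTA_* bits in FLAGS decide which ISA
   options get turned on.  */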
2478
2479#endif
2480
9e555526 2481extern enum processor_type ix86_tune;
5bf0ebab 2482extern enum processor_type ix86_arch;
5bf0ebab 2483
8362f420
JH
2484/* Size of the RED_ZONE area. */
2485#define RED_ZONE_SIZE 128
2486/* Reserved area of the red zone for temporaries. */
2487#define RED_ZONE_RESERVE 8
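/* The red zone is the 128-byte area below the stack pointer that the x86-64
   SysV ABI guarantees will not be clobbered by signal or interrupt handlers,
   so leaf code may use it without adjusting %rsp.  RED_ZONE_RESERVE holds a
   few of those bytes back for temporaries used by the backend itself.  */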
c93e80a5 2488
95899b34 2489extern unsigned int ix86_preferred_stack_boundary;
2e3f842f 2490extern unsigned int ix86_incoming_stack_boundary;
5bf0ebab
RH
2491
2492/* Smallest class containing REGNO. */
2493extern enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER];
2494
0948ccb2
PB
2495enum ix86_fpcmp_strategy {
2496 IX86_FPCMP_SAHF,
2497 IX86_FPCMP_COMI,
2498 IX86_FPCMP_ARITH
2499};
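/* Roughly: IX86_FPCMP_SAHF stores the FPU status word with fnstsw and moves
   it into the flags with sahf; IX86_FPCMP_COMI uses fcomi/fucomi, which set
   EFLAGS directly; IX86_FPCMP_ARITH tests bits of the stored status word with
   integer arithmetic.  The choice depends on the available ISA and tuning.  */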
22fb740d
JH
2500\f
2501/* To properly truncate FP values into integers, we need to set the i387
2502 control word. We can't emit proper mode switching code before reload, as
2503 spills generated by reload may truncate values incorrectly, but we can still
2504 avoid redundant computation of the new control word by the mode switching pass.
2505 The fldcw instructions are still emitted redundantly, but this is probably
2506 not going to be a noticeable problem, as most CPUs have a fast path for
fce5a9f2 2507 the sequence.
22fb740d
JH
2508
2509 The machinery is to emit simple truncation instructions and split them
2510 before reload into instructions having USEs of two memory locations that
2511 are filled by this code with the old and new control words.
fce5a9f2 2512
22fb740d
JH
2513 A post-reload pass may later be used to eliminate the redundant fldcw if
2514 needed. */
2515
c7ca8ef8
UB
2516enum ix86_stack_slot
2517{
2518 SLOT_TEMP = 0,
2519 SLOT_CW_STORED,
d3b92f35 2520 SLOT_CW_ROUNDEVEN,
c7ca8ef8
UB
2521 SLOT_CW_TRUNC,
2522 SLOT_CW_FLOOR,
2523 SLOT_CW_CEIL,
80008279 2524 SLOT_STV_TEMP,
c7ca8ef8
UB
2525 MAX_386_STACK_LOCALS
2526};
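/* These values index the per-function stack slots handed out by
   assign_386_stack_local; the SLOT_CW_* entries hold i387 control words
   preset for the rounding modes named above, which the mode-switching
   machinery described below loads with fldcw when needed.  */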
2527
ff680eb1
UB
2528enum ix86_entity
2529{
c7ca8ef8
UB
2530 X86_DIRFLAG = 0,
2531 AVX_U128,
d3b92f35 2532 I387_ROUNDEVEN,
ff97910d 2533 I387_TRUNC,
ff680eb1
UB
2534 I387_FLOOR,
2535 I387_CEIL,
ff680eb1
UB
2536 MAX_386_ENTITIES
2537};
2538
c7ca8ef8 2539enum x86_dirflag_state
ff680eb1 2540{
c7ca8ef8
UB
2541 X86_DIRFLAG_RESET,
2542 X86_DIRFLAG_ANY
ff680eb1 2543};
22fb740d 2544
ff97910d
VY
2545enum avx_u128_state
2546{
2547 AVX_U128_CLEAN,
2548 AVX_U128_DIRTY,
2549 AVX_U128_ANY
2550};
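/* AVX_U128 tracks whether the upper 128 bits of the vector registers may be
   live (DIRTY) or are known to be zero (CLEAN).  The mode-switching pass uses
   these states to insert vzeroupper only where it is actually needed, e.g.
   before calls and returns, to avoid the SSE/AVX transition penalty.  */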
2551
22fb740d
JH
2552/* Define this macro if the port needs extra instructions inserted
2553 for mode switching in an optimizing compilation. */
2554
ff680eb1
UB
2555#define OPTIMIZE_MODE_SWITCHING(ENTITY) \
2556 ix86_optimize_mode_switching[(ENTITY)]
22fb740d
JH
2557
2558/* If you define `OPTIMIZE_MODE_SWITCHING', you have to define this as
2559 initializer for an array of integers. Each initializer element N
2560 refers to an entity that needs mode switching, and specifies the
2561 number of different modes that might need to be set for this
2562 entity. The position of the element in the initializer - counting
2563 from zero - determines the integer that is used to
2564 refer to the mode-switched entity in question. */
2565
c7ca8ef8
UB
2566#define NUM_MODES_FOR_MODE_SWITCHING \
2567 { X86_DIRFLAG_ANY, AVX_U128_ANY, \
d3b92f35 2568 I387_CW_ANY, I387_CW_ANY, I387_CW_ANY, I387_CW_ANY }
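/* The initializer above is indexed by enum ix86_entity: one entry for the
   direction flag, one for AVX_U128, and one for each i387 control-word entity
   (round-to-even, truncation, floor, ceil).  Each *_ANY enumerator is the
   largest value of its enum, so it doubles as the "no particular mode" marker
   and as the count of real modes for that entity.  */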
22fb740d 2569
0f0138b6
JH
2570\f
2571/* Avoid renaming of stack registers, as doing so in combination with
2572 scheduling just increases the number of registers live at a time and,
2573 in turn, the number of fxch instructions needed.
2574
3f97cb0b
AI
2575 ??? Maybe Pentium chips benefit from renaming; someone can try....
2576
2577 Don't rename evex to non-evex sse registers. */
0f0138b6 2578
1a6e82b8
UB
2579#define HARD_REGNO_RENAME_OK(SRC, TARGET) \
2580 (!STACK_REGNO_P (SRC) \
2581 && EXT_REX_SSE_REGNO_P (SRC) == EXT_REX_SSE_REGNO_P (TARGET))
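/* That is: never rename an x87 stack register, and keep the two SSE register
   banks separate; xmm16-xmm31 (EXT_REX_SSE_REGNO_P) are only encodable with
   an EVEX prefix, so a rename may stay within xmm0-xmm15 or within
   xmm16-xmm31, but never cross between the two groups.  */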
22fb740d 2582
3b3c6a3f 2583\f
e91f04de 2584#define FASTCALL_PREFIX '@'
fa1a0d02 2585\f
77560086
BE
2586#ifndef USED_FOR_TARGET
2587/* Structure describing stack frame layout.
2588 Stack grows downward:
2589
2590 [arguments]
2591 <- ARG_POINTER
2592 saved pc
2593
2594 saved static chain if ix86_static_chain_on_stack
2595
2596 saved frame pointer if frame_pointer_needed
2597 <- HARD_FRAME_POINTER
2598 [saved regs]
2599 <- reg_save_offset
2600 [padding0]
2601 <- stack_realign_offset
2602 [saved SSE regs]
2603 OR
2604 [stub-saved registers for ms x64 --> sysv clobbers
2605 <- Start of out-of-line, stub-saved/restored regs
2606 (see libgcc/config/i386/(sav|res)ms64*.S)
2607 [XMM6-15]
2608 [RSI]
2609 [RDI]
2610 [?RBX] only if RBX is clobbered
2611 [?RBP] only if RBP and RBX are clobbered
2612 [?R12] only if R12 and all previous regs are clobbered
2613 [?R13] only if R13 and all previous regs are clobbered
2614 [?R14] only if R14 and all previous regs are clobbered
2615 [?R15] only if R15 and all previous regs are clobbered
2616 <- end of stub-saved/restored regs
2617 [padding1]
2618 ]
5d9d834d 2619 <- sse_reg_save_offset
77560086
BE
2620 [padding2]
2621 | <- FRAME_POINTER
2622 [va_arg registers] |
2623 |
2624 [frame] |
2625 |
2626 [padding2] | = to_allocate
2627 <- STACK_POINTER
2628 */
2629struct GTY(()) ix86_frame
2630{
2631 int nsseregs;
2632 int nregs;
2633 int va_arg_size;
2634 int red_zone_size;
2635 int outgoing_arguments_size;
2636
2637 /* The offsets relative to ARG_POINTER. */
2638 HOST_WIDE_INT frame_pointer_offset;
2639 HOST_WIDE_INT hard_frame_pointer_offset;
2640 HOST_WIDE_INT stack_pointer_offset;
2641 HOST_WIDE_INT hfp_save_offset;
2642 HOST_WIDE_INT reg_save_offset;
122f9da1 2643 HOST_WIDE_INT stack_realign_allocate;
77560086 2644 HOST_WIDE_INT stack_realign_offset;
77560086
BE
2645 HOST_WIDE_INT sse_reg_save_offset;
2646
2647 /* When save_regs_using_mov is set, emit prologue using
2648 move instead of push instructions. */
2649 bool save_regs_using_mov;
2f007861
RS
2650
2651 /* Assume without checking that:
2652 EXPENSIVE_P = expensive_function_p (EXPENSIVE_COUNT). */
2653 bool expensive_p;
2654 int expensive_count;
77560086
BE
2655};
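/* All of the *_offset fields above are measured from the incoming argument
   pointer.  ix86_compute_frame_layout fills in this structure once per
   function, and the prologue/epilogue expanders then consume the cached
   copy stored in the machine_function below.  */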
2656
122f9da1
DS
2657/* Machine specific frame tracking during prologue/epilogue generation. All
2658 values are positive, but since the x86 stack grows downward, they are
2659 subtracted from the CFA to produce a valid address. */
cd9c1ca8 2660
ec7ded37 2661struct GTY(()) machine_frame_state
cd9c1ca8 2662{
ec7ded37
RH
2663 /* This pair tracks the currently active CFA as reg+offset. When reg
2664 is drap_reg, we don't bother trying to record here the real CFA when
2665 it might really be a DW_CFA_def_cfa_expression. */
2666 rtx cfa_reg;
2667 HOST_WIDE_INT cfa_offset;
2668
2669 /* The current offset (canonically from the CFA) of ESP and EBP.
2670 When stack frame re-alignment is active, these may not be relative
2671 to the CFA. However, in all cases they are relative to the offsets
2672 of the saved registers stored in ix86_frame. */
2673 HOST_WIDE_INT sp_offset;
2674 HOST_WIDE_INT fp_offset;
2675
2676 /* The size of the red-zone that may be assumed for the purposes of
2677 eliding register restore notes in the epilogue. This may be zero
2678 if no red-zone is in effect, or may be reduced from the real
2679 red-zone value by a maximum runtime stack re-alignment value. */
2680 int red_zone_offset;
2681
2682 /* Indicate whether each of ESP, EBP or DRAP currently holds a valid
2683 value within the frame. If false then the offset above should be
2684 ignored. Note that DRAP, if valid, *always* points to the CFA and
2685 thus has an offset of zero. */
2686 BOOL_BITFIELD sp_valid : 1;
2687 BOOL_BITFIELD fp_valid : 1;
2688 BOOL_BITFIELD drap_valid : 1;
c9f4c451
RH
2689
2690 /* Indicate whether the local stack frame has been re-aligned. When
2691 set, the SP/FP offsets above are relative to the aligned frame
2692 and not the CFA. */
2693 BOOL_BITFIELD realigned : 1;
d6d4d770
DS
2694
2695 /* Indicates whether the stack pointer has been re-aligned. When set,
2696 SP/FP continue to be relative to the CFA, but the stack pointer
122f9da1
DS
2697 should only be used for offsets > sp_realigned_offset, while
2698 the frame pointer should be used for offsets <= sp_realigned_fp_last.
d6d4d770
DS
2699 The flags realigned and sp_realigned are mutually exclusive. */
2700 BOOL_BITFIELD sp_realigned : 1;
2701
122f9da1
DS
2702 /* If sp_realigned is set, this is the last valid offset from the CFA
2703 that can be used for access with the frame pointer. */
2704 HOST_WIDE_INT sp_realigned_fp_last;
2705
2706 /* If sp_realigned is set, this is the offset from the CFA at which the stack
2707 pointer was realigned, and may or may not be equal to sp_realigned_fp_last.
2708 Access via the stack pointer is only valid for offsets that are greater than
2709 this value. */
d6d4d770 2710 HOST_WIDE_INT sp_realigned_offset;
cd9c1ca8
RH
2711};
2712
f81c9774
RH
2713/* Private to winnt.c. */
2714struct seh_frame_state;
2715
f8071c05
L
2716enum function_type
2717{
2718 TYPE_UNKNOWN = 0,
2719 TYPE_NORMAL,
2720 /* The current function is an interrupt service routine with a
2721 pointer argument as specified by the "interrupt" attribute. */
2722 TYPE_INTERRUPT,
2723 /* The current function is an interrupt service routine with a
2724 pointer argument and an integer argument as specified by the
2725 "interrupt" attribute. */
2726 TYPE_EXCEPTION
2727};
2728
d1b38208 2729struct GTY(()) machine_function {
fa1a0d02 2730 struct stack_local_entry *stack_locals;
4aab97f9
L
2731 int varargs_gpr_size;
2732 int varargs_fpr_size;
ff680eb1 2733 int optimize_mode_switching[MAX_386_ENTITIES];
3452586b 2734
77560086
BE
2735 /* Cached initial frame layout for the current function. */
2736 struct ix86_frame frame;
3452586b 2737
7458026b
ILT
2738 /* For -fsplit-stack support: A stack local which holds a pointer to
2739 the stack arguments for a function with a variable number of
2740 arguments. This is set at the start of the function and is used
2741 to initialize the overflow_arg_area field of the va_list
2742 structure. */
2743 rtx split_stack_varargs_pointer;
2744
3452586b
RH
2745 /* This value is used for amd64 targets and specifies the current ABI
2746 to be used: MS_ABI means the Microsoft ABI, SYSV_ABI the System V ABI. */
25efe060 2747 ENUM_BITFIELD(calling_abi) call_abi : 8;
3452586b
RH
2748
2749 /* Nonzero if the function accesses a previous frame. */
2750 BOOL_BITFIELD accesses_prev_frame : 1;
2751
922e3e33
UB
2752 /* Set by ix86_compute_frame_layout and used by prologue/epilogue
2753 expander to determine the style used. */
3452586b
RH
2754 BOOL_BITFIELD use_fast_prologue_epilogue : 1;
2755
1e4490dc
UB
2756 /* Nonzero if the current function calls the PC thunk and
2757 must not use the red zone. */
2758 BOOL_BITFIELD pc_thunk_call_expanded : 1;
2759
5bf5a10b
AO
2760 /* If true, the current function needs the default PIC register, not
2761 an alternate register (on x86) and must not use the red zone (on
2762 x86_64), even if it's a leaf function. We don't want the
2763 function to be regarded as non-leaf because TLS calls need not
2764 affect register allocation. This flag is set when a TLS call
2765 instruction is expanded within a function, and never reset, even
2766 if all such instructions are optimized away. Use the
2767 ix86_current_function_calls_tls_descriptor macro for a better
2768 approximation. */
3452586b
RH
2769 BOOL_BITFIELD tls_descriptor_call_expanded_p : 1;
2770
2771 /* If true, the static chain of the current function is placed on the
2772 stack below the return address. */
2773 BOOL_BITFIELD static_chain_on_stack : 1;
25efe060 2774
529a6471
JJ
2775 /* If true, it is safe to not save/restore DRAP register. */
2776 BOOL_BITFIELD no_drap_save_restore : 1;
2777
f8071c05
L
2778 /* Function type. */
2779 ENUM_BITFIELD(function_type) func_type : 2;
2780
da99fd4a
L
2781 /* How to generate indirect branches. */
2782 ENUM_BITFIELD(indirect_branch) indirect_branch_type : 3;
2783
2784 /* If true, the current function has local indirect jumps, like
2785 "indirect_jump" or "tablejump". */
2786 BOOL_BITFIELD has_local_indirect_jump : 1;
2787
45e14019
L
2788 /* How to generate function return. */
2789 ENUM_BITFIELD(indirect_branch) function_return_type : 3;
2790
f8071c05
L
2791 /* If true, the current function is a function specified with
2792 the "interrupt" or "no_caller_saved_registers" attribute. */
2793 BOOL_BITFIELD no_caller_saved_registers : 1;
2794
a0ff7835
L
2795 /* If true, there is a register available for argument passing. This
2796 is used only in ix86_function_ok_for_sibcall by 32-bit code to determine
2797 whether a scratch register is available for an indirect sibcall. In
2798 64-bit code, rax, r10 and r11 are scratch registers which aren't used to
2799 pass arguments and can be used for an indirect sibcall. */
2800 BOOL_BITFIELD arg_reg_available : 1;
2801
d6d4d770 2802 /* If true, we're out-of-lining reg save/restore for regs clobbered
5d9d834d 2803 by 64-bit ms_abi functions calling a sysv_abi function. */
d6d4d770
DS
2804 BOOL_BITFIELD call_ms2sysv : 1;
2805
2806 /* If true, the incoming 16-byte aligned stack has an offset (of 8) and
5d9d834d 2807 needs padding prior to the out-of-line stub save/restore area. */
d6d4d770
DS
2808 BOOL_BITFIELD call_ms2sysv_pad_in : 1;
2809
d6d4d770
DS
2810 /* This is the number of extra registers saved by the stub (valid range is
2811 0-6). Each additional register is only saved/restored by the stubs
2812 if all successive ones are. (Will always be zero when using a hard
2813 frame pointer.) */
2814 unsigned int call_ms2sysv_extra_regs:3;
2815
35c95658
L
2816 /* Nonzero if the function places outgoing arguments on the stack. */
2817 BOOL_BITFIELD outgoing_args_on_stack : 1;
2818
708c728d
L
2819 /* If true, ENDBR is queued at function entrance. */
2820 BOOL_BITFIELD endbr_queued_at_entrance : 1;
2821
c2080a1f
L
2822 /* True if the function needs a stack frame. */
2823 BOOL_BITFIELD stack_frame_required : 1;
2824
cd3410cc
L
2825 /* The largest alignment, in bytes, of any stack slot actually used. */
2826 unsigned int max_used_stack_alignment;
2827
ec7ded37
RH
2828 /* During prologue/epilogue generation, the current frame state.
2829 Otherwise, the frame state at the end of the prologue. */
2830 struct machine_frame_state fs;
f81c9774
RH
2831
2832 /* During SEH output, this is non-null. */
2833 struct seh_frame_state * GTY((skip(""))) seh;
fa1a0d02 2834};
2bf6d935
ML
2835
2836extern GTY(()) tree sysv_va_list_type_node;
2837extern GTY(()) tree ms_va_list_type_node;
cd9c1ca8 2838#endif
fa1a0d02
JH
2839
2840#define ix86_stack_locals (cfun->machine->stack_locals)
4aab97f9
L
2841#define ix86_varargs_gpr_size (cfun->machine->varargs_gpr_size)
2842#define ix86_varargs_fpr_size (cfun->machine->varargs_fpr_size)
fa1a0d02 2843#define ix86_optimize_mode_switching (cfun->machine->optimize_mode_switching)
1e4490dc 2844#define ix86_pc_thunk_call_expanded (cfun->machine->pc_thunk_call_expanded)
5bf5a10b
AO
2845#define ix86_tls_descriptor_calls_expanded_in_cfun \
2846 (cfun->machine->tls_descriptor_call_expanded_p)
2847/* Since tls_descriptor_call_expanded is not cleared, even if all TLS
2848 calls are optimized away, we try to detect cases in which it was
2849 optimized away. Since such instructions contain (use (reg REG_SP)), we can
2850 verify whether there's any such instruction live by testing that
2851 REG_SP is live. */
2852#define ix86_current_function_calls_tls_descriptor \
6fb5fa3c 2853 (ix86_tls_descriptor_calls_expanded_in_cfun && df_regs_ever_live_p (SP_REG))
3452586b 2854#define ix86_static_chain_on_stack (cfun->machine->static_chain_on_stack)
2ecf9ac7 2855#define ix86_red_zone_size (cfun->machine->frame.red_zone_size)
249e6b63 2856
1bc7c5b6
ZW
2857/* Control behavior of x86_file_start. */
2858#define X86_FILE_START_VERSION_DIRECTIVE false
2859#define X86_FILE_START_FLTUSED false
2860
7dcbf659
JH
2861/* Flag to mark data that is in the large address area. */
2862#define SYMBOL_FLAG_FAR_ADDR (SYMBOL_FLAG_MACH_DEP << 0)
2863#define SYMBOL_REF_FAR_ADDR_P(X) \
2864 ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_FAR_ADDR) != 0)
da489f73
RH
2865
2866/* Flags to mark dllimport/dllexport. Used by PE ports, but handy to
2867 have defined always, to avoid ifdefing. */
2868#define SYMBOL_FLAG_DLLIMPORT (SYMBOL_FLAG_MACH_DEP << 1)
2869#define SYMBOL_REF_DLLIMPORT_P(X) \
2870 ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_DLLIMPORT) != 0)
2871
2872#define SYMBOL_FLAG_DLLEXPORT (SYMBOL_FLAG_MACH_DEP << 2)
2873#define SYMBOL_REF_DLLEXPORT_P(X) \
2874 ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_DLLEXPORT) != 0)
2875
82c0e1a0
KT
2876#define SYMBOL_FLAG_STUBVAR (SYMBOL_FLAG_MACH_DEP << 4)
2877#define SYMBOL_REF_STUBVAR_P(X) \
2878 ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_STUBVAR) != 0)
2879
7942e47e
RY
2880extern void debug_ready_dispatch (void);
2881extern void debug_dispatch_window (int);
2882
91afcfa3
QN
2883/* The value at zero is only defined for the LZCNT and TZCNT instructions,
2884 not for the BSR/BSF insns in the original ISA. */
2885#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
1068ced5 2886 ((VALUE) = GET_MODE_BITSIZE (MODE), TARGET_BMI ? 1 : 0)
91afcfa3 2887#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
1068ced5 2888 ((VALUE) = GET_MODE_BITSIZE (MODE), TARGET_LZCNT ? 1 : 0)
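/* For example, with -mlzcnt a CLZ of a zero SImode operand is defined to be
   32 (the mode bitsize), matching the lzcnt instruction.  Without the
   corresponding ISA flag the macros return 0, i.e. the result at zero is
   undefined, since bsf/bsr leave their destination unspecified in that
   case.  */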
91afcfa3
QN
2889
2890
b8ce4e94
KT
2891/* Flags returned by ix86_get_callcvt (). */
2892#define IX86_CALLCVT_CDECL 0x1
2893#define IX86_CALLCVT_STDCALL 0x2
2894#define IX86_CALLCVT_FASTCALL 0x4
2895#define IX86_CALLCVT_THISCALL 0x8
2896#define IX86_CALLCVT_REGPARM 0x10
2897#define IX86_CALLCVT_SSEREGPARM 0x20
2898
2899#define IX86_BASE_CALLCVT(FLAGS) \
2900 ((FLAGS) & (IX86_CALLCVT_CDECL | IX86_CALLCVT_STDCALL \
2901 | IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL))
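/* IX86_BASE_CALLCVT strips the REGPARM/SSEREGPARM modifier bits so callers
   can compare against the underlying convention, e.g. (a sketch, not code
   from this port):

     unsigned base = IX86_BASE_CALLCVT (ix86_get_callcvt (fntype));
     if (base == IX86_CALLCVT_FASTCALL || base == IX86_CALLCVT_THISCALL)
       ... the first arguments travel in ECX (and EDX for fastcall) ...
   */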
2902
b86b9f44
MM
2903#define RECIP_MASK_NONE 0x00
2904#define RECIP_MASK_DIV 0x01
2905#define RECIP_MASK_SQRT 0x02
2906#define RECIP_MASK_VEC_DIV 0x04
2907#define RECIP_MASK_VEC_SQRT 0x08
2908#define RECIP_MASK_ALL (RECIP_MASK_DIV | RECIP_MASK_SQRT \
2909 | RECIP_MASK_VEC_DIV | RECIP_MASK_VEC_SQRT)
bbe996ec 2910#define RECIP_MASK_DEFAULT (RECIP_MASK_VEC_DIV | RECIP_MASK_VEC_SQRT)
b86b9f44
MM
2911
2912#define TARGET_RECIP_DIV ((recip_mask & RECIP_MASK_DIV) != 0)
2913#define TARGET_RECIP_SQRT ((recip_mask & RECIP_MASK_SQRT) != 0)
2914#define TARGET_RECIP_VEC_DIV ((recip_mask & RECIP_MASK_VEC_DIV) != 0)
2915#define TARGET_RECIP_VEC_SQRT ((recip_mask & RECIP_MASK_VEC_SQRT) != 0)
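/* recip_mask is controlled by the -mrecip/-mrecip= options.  The TARGET_RECIP_*
   tests above gate replacing full-precision division and square root with the
   RCP/RSQRT approximation instructions plus a Newton-Raphson refinement step;
   RECIP_MASK_DEFAULT enables only the vector forms.  */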
2916
ab2c4ec8
SS
2917/* Prefer 128-bit AVX instructions in the auto-vectorizer. */
2918#define TARGET_PREFER_AVX128 (prefer_vector_width_type == PVW_AVX128)
2919/* Prefer AVX instructions of at most 256 bits in the auto-vectorizer. */
02a70367
SS
2920#define TARGET_PREFER_AVX256 (TARGET_PREFER_AVX128 \
2921 || prefer_vector_width_type == PVW_AVX256)
ab2c4ec8 2922
c2c601b2
L
2923#define TARGET_INDIRECT_BRANCH_REGISTER \
2924 (ix86_indirect_branch_register \
2925 || cfun->machine->indirect_branch_type != indirect_branch_keep)
2926
5dcfdccd
KY
2927#define IX86_HLE_ACQUIRE (1 << 16)
2928#define IX86_HLE_RELEASE (1 << 17)
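/* These bits live above the __ATOMIC_* memory-model values; user code can OR
   them into the model argument of the atomic built-ins to request the
   XACQUIRE/XRELEASE (HLE) prefixes, e.g.

     __atomic_fetch_add (&lock, 1, __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE);

   where __ATOMIC_HLE_ACQUIRE and __ATOMIC_HLE_RELEASE expand to the two
   values defined here.  */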
2929
e83b8e2e
JJ
2930/* For switching between functions with different target attributes. */
2931#define SWITCHABLE_TARGET 1
2932
44d0de8d
UB
2933#define TARGET_SUPPORTS_WIDE_INT 1
2934
2bf6d935
ML
2935#if !defined(GENERATOR_FILE) && !defined(IN_LIBGCC2)
2936extern enum attr_cpu ix86_schedule;
2937
2938#define NUM_X86_64_MS_CLOBBERED_REGS 12
2939#endif
2940
c98f8742
JVA
2941/*
2942Local variables:
2943version-control: t
2944End:
2945*/