/* Definitions of x86 tunable features.
   Copyright (C) 2013-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* Tuning for a given CPU XXXX consists of:
    - adding new CPU into:
      - adding PROCESSOR_XXXX to processor_type (in i386.h)
      - possibly adding XXX into CPU attribute in i386.md
      - adding XXX to processor_alias_table (in i386.c)
      - introducing ix86_XXX_cost in i386.c
      - Stringop generation table can be built based on the test_stringop
        script (once the rest of the tuning is complete)
    - designing a scheduler model in
      - XXXX.md file
      - updating ix86_issue_rate and ix86_adjust_cost in i386.md
      - possibly updating ia32_multipass_dfa_lookahead, ix86_sched_reorder
        and ix86_sched_init_global if those tricks are needed
    - tuning the flags below.  Those are split into sections and each
      section is very roughly ordered by importance.  */

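/* For illustration only (not part of the original file): a new tuning
   entry follows the pattern below, where PROCESSOR_XXXX / m_XXXX are
   hypothetical names that must match the definitions added in i386.h
   and i386.c as described above:

   DEF_TUNE (X86_TUNE_EXAMPLE_FEATURE, "example_feature",
             m_XXXX | m_GENERIC)  */
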
/*****************************************************************************/
/* Scheduling flags.                                                         */
/*****************************************************************************/

/* X86_TUNE_SCHEDULE: Enable scheduling.  */
DEF_TUNE (X86_TUNE_SCHEDULE, "schedule",
          m_PENT | m_LAKEMONT | m_PPRO | m_CORE_ALL | m_BONNELL | m_SILVERMONT
          | m_INTEL | m_KNL | m_KNM | m_K6_GEODE | m_AMD_MULTIPLE | m_GOLDMONT
          | m_GOLDMONT_PLUS | m_TREMONT | m_GENERIC)

/* X86_TUNE_PARTIAL_REG_DEPENDENCY: Enable more register renaming
   on modern chips.  Prefer stores affecting the whole integer register
   over partial stores.  For example prefer MOVZBL or MOVQ to load an 8bit
   value over MOVB.  */
DEF_TUNE (X86_TUNE_PARTIAL_REG_DEPENDENCY, "partial_reg_dependency",
          m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2
          | m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_INTEL
          | m_KNL | m_KNM | m_AMD_MULTIPLE | m_TREMONT
          | m_GENERIC)

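/* Illustrative sketch (not from the original file): with this flag set,
   an 8bit load is emitted as

        movzbl  (%rsi), %eax

   rather than

        movb    (%rsi), %al

   so the whole destination register is written and no dependency on the
   previous contents of %eax is created.  */
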
/* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: This knob promotes all store
   destinations to be 128bit to allow register renaming on 128bit SSE units,
   but usually results in one extra microop on 64bit SSE units.
   Experimental results show that disabling this option on P4 brings over 20%
   SPECfp regression, while enabling it on K8 brings roughly 2.4% regression
   that can be partly masked by careful scheduling of moves.  */
DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY, "sse_partial_reg_dependency",
          m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_AMDFAM10
          | m_BDVER | m_ZNVER | m_GENERIC)

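/* Illustrative sketch (not from the original file): a scalar copy between
   SSE registers is emitted as

        movaps  %xmm1, %xmm0

   instead of

        movss   %xmm1, %xmm0

   because MOVSS writes only the low 32 bits of %xmm0 and therefore carries
   a dependency on the register's previous contents.  */
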
/* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
   are resolved on SSE register parts instead of whole registers, so we may
   maintain just the lower part of scalar values in proper format, leaving the
   upper part undefined.  */
DEF_TUNE (X86_TUNE_SSE_SPLIT_REGS, "sse_split_regs", m_ATHLON_K8)

/* X86_TUNE_PARTIAL_FLAG_REG_STALL: This flag disables use of flags
   set by instructions affecting just some flags (in particular shifts).
   This is because Core2 resolves dependencies on the whole flags register
   and such sequences introduce a false dependency on the previous
   instruction setting full flags.

   This flag does not affect generation of INC and DEC, which is controlled
   by X86_TUNE_USE_INCDEC.  */
DEF_TUNE (X86_TUNE_PARTIAL_FLAG_REG_STALL, "partial_flag_reg_stall",
          m_CORE2)

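/* Illustrative sketch (not from the original file): shifts leave some
   flags unmodified, so in

        shrl    $5, %eax
        jne     .L2

   the branch would depend on whichever older instruction last wrote the
   full flags register.  With this flag set, GCC emits a separate
   "testl %eax, %eax" before the branch instead.  */
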
/* X86_TUNE_MOVX: Enable zero extending of integer registers to avoid
   partial dependencies.  */
DEF_TUNE (X86_TUNE_MOVX, "movx",
          m_PPRO | m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE
          | m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_KNL | m_KNM | m_INTEL
          | m_GOLDMONT_PLUS | m_GEODE | m_AMD_MULTIPLE
          | m_CORE_AVX2 | m_TREMONT | m_GENERIC)

/* X86_TUNE_MEMORY_MISMATCH_STALL: Avoid partial stores that are followed by
   full sized loads.  */
DEF_TUNE (X86_TUNE_MEMORY_MISMATCH_STALL, "memory_mismatch_stall",
          m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_INTEL
          | m_KNL | m_KNM | m_GOLDMONT | m_GOLDMONT_PLUS | m_AMD_MULTIPLE
          | m_TREMONT | m_GENERIC)

/* X86_TUNE_FUSE_CMP_AND_BRANCH_32: Fuse compare with a subsequent
   conditional jump instruction for 32-bit targets.  */
DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_32, "fuse_cmp_and_branch_32",
          m_CORE_ALL | m_BDVER | m_ZNVER | m_GENERIC)

/* X86_TUNE_FUSE_CMP_AND_BRANCH_64: Fuse compare with a subsequent
   conditional jump instruction for TARGET_64BIT.  */
DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_64, "fuse_cmp_and_branch_64",
          m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_BDVER
          | m_ZNVER | m_GENERIC)

/* X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS: Fuse compare with a
   subsequent conditional jump instruction when the conditional jump
   checks the sign flag (SF) or overflow flag (OF).  */
DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS, "fuse_cmp_and_branch_soflags",
          m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_BDVER
          | m_ZNVER | m_GENERIC)

/* X86_TUNE_FUSE_ALU_AND_BRANCH: Fuse ALU with a subsequent conditional
   jump instruction when the ALU instruction produces the CC flags consumed
   by the conditional jump instruction.  */
DEF_TUNE (X86_TUNE_FUSE_ALU_AND_BRANCH, "fuse_alu_and_branch",
          m_SANDYBRIDGE | m_CORE_AVX2 | m_GENERIC)

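/* Illustrative sketch (not from the original file): on cores with
   macro-fusion the pair

        cmpl    %esi, %edi
        je      .L3

   decodes into a single fused compare-and-branch uop; the ALU variant
   additionally fuses pairs such as "addl $-1, %ecx" followed by
   "jne .L4".  */
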
/*****************************************************************************/
/* Function prologue, epilogue and function calling sequences.               */
/*****************************************************************************/

/* X86_TUNE_ACCUMULATE_OUTGOING_ARGS: Allocate stack space for outgoing
   arguments in function prologue/epilogue instead of separately for each call
   by push/pop instructions.
   This increases code size by about 5% in 32bit mode, less so in 64bit mode
   because parameters are passed in registers.  It is a considerable win
   for targets without a stack engine, where dependent stack-pointer updates
   prevent multiple push operations from happening in parallel.  */
DEF_TUNE (X86_TUNE_ACCUMULATE_OUTGOING_ARGS, "accumulate_outgoing_args",
          m_PPRO | m_P4_NOCONA | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM
          | m_INTEL | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ATHLON_K8)

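/* Illustrative sketch (not from the original file): with this flag the
   prologue reserves argument space once,

        subl    $16, %esp
        ...
        movl    %eax, (%esp)    # store outgoing argument
        call    foo

   instead of wrapping every call site in "pushl %eax" / "call foo" /
   "addl $4, %esp".  */
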
/* X86_TUNE_PROLOGUE_USING_MOVE: Do not use push/pop in prologues that are
   considered on critical path.  */
DEF_TUNE (X86_TUNE_PROLOGUE_USING_MOVE, "prologue_using_move",
          m_PPRO | m_ATHLON_K8)

/* X86_TUNE_EPILOGUE_USING_MOVE: Do not use push/pop in epilogues that are
   considered on critical path.  */
DEF_TUNE (X86_TUNE_EPILOGUE_USING_MOVE, "epilogue_using_move",
          m_PPRO | m_ATHLON_K8)

/* X86_TUNE_USE_LEAVE: Use "leave" instruction in epilogues where it fits.  */
DEF_TUNE (X86_TUNE_USE_LEAVE, "use_leave",
          m_386 | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC)

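/* Illustrative sketch (not from the original file): "leave" replaces the
   two-instruction frame teardown

        movl    %ebp, %esp
        popl    %ebp

   with a single one-byte instruction.  */
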
/* X86_TUNE_PUSH_MEMORY: Enable generation of "push mem" instructions.
   Some chips, like 486 and Pentium, work faster with separate load
   and push instructions.  */
DEF_TUNE (X86_TUNE_PUSH_MEMORY, "push_memory",
          m_386 | m_P4_NOCONA | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE
          | m_TREMONT | m_GENERIC)

/* X86_TUNE_SINGLE_PUSH: Enable if single push insn is preferred
   over esp subtraction.  */
DEF_TUNE (X86_TUNE_SINGLE_PUSH, "single_push", m_386 | m_486 | m_PENT
          | m_LAKEMONT | m_K6_GEODE)

/* X86_TUNE_DOUBLE_PUSH: Enable if double push insn is preferred
   over esp subtraction.  */
DEF_TUNE (X86_TUNE_DOUBLE_PUSH, "double_push", m_PENT | m_LAKEMONT
          | m_K6_GEODE)

/* X86_TUNE_SINGLE_POP: Enable if single pop insn is preferred
   over esp addition.  */
DEF_TUNE (X86_TUNE_SINGLE_POP, "single_pop", m_386 | m_486 | m_PENT
          | m_LAKEMONT | m_PPRO)

/* X86_TUNE_DOUBLE_POP: Enable if double pop insn is preferred
   over esp addition.  */
DEF_TUNE (X86_TUNE_DOUBLE_POP, "double_pop", m_PENT | m_LAKEMONT)

/*****************************************************************************/
/* Branch predictor tuning.                                                  */
/*****************************************************************************/

/* X86_TUNE_PAD_SHORT_FUNCTION: Make every function at least 4
   instructions long.  */
DEF_TUNE (X86_TUNE_PAD_SHORT_FUNCTION, "pad_short_function", m_BONNELL)

/* X86_TUNE_PAD_RETURNS: Place NOP before every RET that is a destination
   of a conditional jump or is directly preceded by another jump instruction.
   This is important for AMD K8-AMDFAM10 because the branch prediction
   architecture expects at most one jump per 2 byte window.  Failing to
   pad returns leads to a misaligned return stack.  */
DEF_TUNE (X86_TUNE_PAD_RETURNS, "pad_returns",
          m_ATHLON_K8 | m_AMDFAM10)

/* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
   than 4 branch instructions in the 16 byte window.  */
DEF_TUNE (X86_TUNE_FOUR_JUMP_LIMIT, "four_jump_limit",
          m_PPRO | m_P4_NOCONA | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM
          | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_INTEL | m_ATHLON_K8
          | m_AMDFAM10)

/*****************************************************************************/
/* Integer instruction selection tuning.                                     */
/*****************************************************************************/

/* X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL: Enable software prefetching
   at -O3.  For the moment, the prefetching seems badly tuned for Intel
   chips.  */
DEF_TUNE (X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL, "software_prefetching_beneficial",
          m_K6_GEODE | m_ATHLON_K8 | m_AMDFAM10 | m_BDVER | m_BTVER)

/* X86_TUNE_LCP_STALL: Avoid an expensive length-changing prefix stall
   on 16-bit immediate moves into memory on Core 2 and Core i7.  */
DEF_TUNE (X86_TUNE_LCP_STALL, "lcp_stall", m_CORE_ALL | m_GENERIC)

/* X86_TUNE_READ_MODIFY: Enable use of read-modify instructions such
   as "add mem, reg".  */
DEF_TUNE (X86_TUNE_READ_MODIFY, "read_modify", ~(m_PENT | m_LAKEMONT | m_PPRO))

/* X86_TUNE_USE_INCDEC: Enable use of inc/dec instructions.

   Core2 and Nehalem have a 7 cycle stall on partial flag register
   updates.  Sandy Bridge and Ivy Bridge generate an extra uop.  On
   Haswell this extra uop is output only when the flag values really need
   to be merged, which is not done by GCC generated code.  */
DEF_TUNE (X86_TUNE_USE_INCDEC, "use_incdec",
          ~(m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE
          | m_BONNELL | m_SILVERMONT | m_INTEL | m_KNL | m_KNM | m_GOLDMONT
          | m_GOLDMONT_PLUS | m_TREMONT | m_GENERIC))

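/* Illustrative sketch (not from the original file): where this flag is
   disabled, GCC emits

        addl    $1, %eax

   instead of "incl %eax", because INC/DEC update only a subset of the
   flags and the resulting partial flags merge is expensive on the
   excluded cores.  */
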
/* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
   for DFmode copies.  */
DEF_TUNE (X86_TUNE_INTEGER_DFMODE_MOVES, "integer_dfmode_moves",
          ~(m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT
          | m_KNL | m_KNM | m_INTEL | m_GEODE | m_AMD_MULTIPLE | m_GOLDMONT
          | m_GOLDMONT_PLUS | m_TREMONT | m_GENERIC))

/* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit.  This flag
   will impact LEA instruction selection.  */
DEF_TUNE (X86_TUNE_OPT_AGU, "opt_agu", m_BONNELL | m_SILVERMONT | m_KNL
          | m_KNM | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_INTEL)

/* X86_TUNE_AVOID_LEA_FOR_ADDR: Avoid LEA for address computation.  */
DEF_TUNE (X86_TUNE_AVOID_LEA_FOR_ADDR, "avoid_lea_for_addr",
          m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT
          | m_KNL | m_KNM)

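/* Illustrative sketch (not from the original file): an address computation

        leal    4(%edi,%esi,2), %eax

   folds base, scaled index and displacement into one instruction on
   AGU-friendly cores, while cores with a slow LEA unit prefer the
   equivalent shift/add sequence.  */
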
/* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
   vector path on AMD machines.
   FIXME: Do we need to enable this for core?  */
DEF_TUNE (X86_TUNE_SLOW_IMUL_IMM32_MEM, "slow_imul_imm32_mem",
          m_K8 | m_AMDFAM10)

/* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
   machines.
   FIXME: Do we need to enable this for core?  */
DEF_TUNE (X86_TUNE_SLOW_IMUL_IMM8, "slow_imul_imm8",
          m_K8 | m_AMDFAM10)

/* X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE: Try to avoid memory operands for
   a conditional move.  */
DEF_TUNE (X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE, "avoid_mem_opnd_for_cmove",
          m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_KNL
          | m_KNM | m_TREMONT | m_INTEL)

/* X86_TUNE_SINGLE_STRINGOP: Enable use of single string operations, such
   as MOVS and STOS (without a REP prefix) to move/set sequences of bytes.  */
DEF_TUNE (X86_TUNE_SINGLE_STRINGOP, "single_stringop", m_386 | m_P4_NOCONA)

/* X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES: Enable generation of
   compact prologues and epilogues by issuing misaligned moves.  This
   requires the target to handle misaligned moves and partial memory stalls
   reasonably well.
   FIXME: This may actually be a win on more targets than listed here.  */
DEF_TUNE (X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES,
          "misaligned_move_string_pro_epilogues",
          m_386 | m_486 | m_CORE_ALL | m_AMD_MULTIPLE | m_GENERIC)

/* X86_TUNE_USE_SAHF: Controls use of SAHF.  */
DEF_TUNE (X86_TUNE_USE_SAHF, "use_sahf",
          m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT
          | m_KNL | m_KNM | m_INTEL | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER
          | m_BTVER | m_ZNVER | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT
          | m_GENERIC)

/* X86_TUNE_USE_CLTD: Controls use of the CLTD and CQTO instructions.  */
DEF_TUNE (X86_TUNE_USE_CLTD, "use_cltd",
          ~(m_PENT | m_LAKEMONT | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM
          | m_INTEL | m_K6 | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT))

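/* Illustrative sketch (not from the original file): before a signed
   32-bit division GCC emits

        cltd                    # sign-extend %eax into %edx:%eax
        idivl   %ecx

   on these cores, and a "movl %eax, %edx" / "sarl $31, %edx" pair where
   CLTD is slow.  */
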
/* X86_TUNE_USE_BT: Enable use of BT (bit test) instructions.  */
DEF_TUNE (X86_TUNE_USE_BT, "use_bt",
          m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM | m_INTEL
          | m_LAKEMONT | m_AMD_MULTIPLE | m_GOLDMONT | m_GOLDMONT_PLUS
          | m_TREMONT | m_GENERIC)

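/* Illustrative sketch (not from the original file): testing a variable
   bit position,

        btl     %esi, %edi      # CF = bit %esi of %edi
        jc      .L5

   replaces a longer mov/shr-by-%cl/and sequence.  */
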
/* X86_TUNE_AVOID_FALSE_DEP_FOR_BMI: Avoid false dependency
   for bit-manipulation instructions.  */
DEF_TUNE (X86_TUNE_AVOID_FALSE_DEP_FOR_BMI, "avoid_false_dep_for_bmi",
          m_SANDYBRIDGE | m_CORE_AVX2 | m_GENERIC)

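/* Illustrative sketch (not from the original file): LZCNT, TZCNT and
   POPCNT have a false output dependency on the affected cores, so GCC
   clears the destination first:

        xorl    %eax, %eax      # break dependency on old %eax value
        popcntl %edi, %eax  */
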
/* X86_TUNE_ADJUST_UNROLL: This enables adjusting the unroll factor based
   on hardware capabilities.  Bdver3 hardware has a loop buffer which makes
   unrolling small loops less important.  For such architectures we adjust
   the unroll factor so that the unrolled loop fits the loop buffer.  */
DEF_TUNE (X86_TUNE_ADJUST_UNROLL, "adjust_unroll_factor", m_BDVER3 | m_BDVER4)

/* X86_TUNE_ONE_IF_CONV_INSN: Restrict the number of cmov insns in
   an if-converted sequence to one.  */
DEF_TUNE (X86_TUNE_ONE_IF_CONV_INSN, "one_if_conv_insn",
          m_SILVERMONT | m_KNL | m_KNM | m_INTEL | m_CORE_ALL | m_GOLDMONT
          | m_GOLDMONT_PLUS | m_TREMONT | m_GENERIC)

/* X86_TUNE_USE_XCHG_FOR_ATOMIC_STORE: Use xchg instead of mov+mfence.  */
DEF_TUNE (X86_TUNE_USE_XCHG_FOR_ATOMIC_STORE, "use_xchg_for_atomic_store",
          m_CORE_ALL | m_BDVER | m_ZNVER | m_GENERIC)

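/* Illustrative sketch (not from the original file): a sequentially
   consistent atomic store compiles to

        xchgl   %esi, (%rdi)    # implicitly locked, acts as a full barrier

   instead of "movl %esi, (%rdi)" followed by "mfence".  */
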
/*****************************************************************************/
/* 387 instruction selection tuning.                                         */
/*****************************************************************************/

/* X86_TUNE_USE_HIMODE_FIOP: Enables use of x87 instructions with 16bit
   integer operand.
   FIXME: Why is this disabled for modern chips?  */
DEF_TUNE (X86_TUNE_USE_HIMODE_FIOP, "use_himode_fiop",
          m_386 | m_486 | m_K6_GEODE)

/* X86_TUNE_USE_SIMODE_FIOP: Enables use of x87 instructions with 32bit
   integer operand.  */
DEF_TUNE (X86_TUNE_USE_SIMODE_FIOP, "use_simode_fiop",
          ~(m_PENT | m_LAKEMONT | m_PPRO | m_CORE_ALL | m_BONNELL
          | m_SILVERMONT | m_KNL | m_KNM | m_INTEL | m_AMD_MULTIPLE
          | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_GENERIC))

/* X86_TUNE_USE_FFREEP: Use ffreep instruction instead of fstp.  */
DEF_TUNE (X86_TUNE_USE_FFREEP, "use_ffreep", m_AMD_MULTIPLE)

/* X86_TUNE_EXT_80387_CONSTANTS: Use fancy 80387 constants, such as PI.  */
DEF_TUNE (X86_TUNE_EXT_80387_CONSTANTS, "ext_80387_constants",
          m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT
          | m_KNL | m_KNM | m_INTEL | m_K6_GEODE | m_ATHLON_K8 | m_GOLDMONT
          | m_GOLDMONT_PLUS | m_TREMONT | m_GENERIC)

/*****************************************************************************/
/* SSE instruction selection tuning.                                         */
/*****************************************************************************/

/* X86_TUNE_GENERAL_REGS_SSE_SPILL: Try to spill general regs to SSE
   regs instead of memory.  */
DEF_TUNE (X86_TUNE_GENERAL_REGS_SSE_SPILL, "general_regs_sse_spill",
          m_CORE_ALL)

/* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL: Use movups for misaligned loads
   instead of a sequence loading registers by parts.  */
DEF_TUNE (X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL, "sse_unaligned_load_optimal",
          m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_SILVERMONT | m_KNL | m_KNM
          | m_INTEL | m_GOLDMONT | m_GOLDMONT_PLUS
          | m_TREMONT | m_AMDFAM10 | m_BDVER | m_BTVER | m_ZNVER | m_GENERIC)

/* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL: Use movups for misaligned stores
   instead of a sequence storing registers by parts.  */
DEF_TUNE (X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL, "sse_unaligned_store_optimal",
          m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_SILVERMONT | m_KNL | m_KNM
          | m_INTEL | m_GOLDMONT | m_GOLDMONT_PLUS
          | m_TREMONT | m_BDVER | m_ZNVER | m_GENERIC)

/* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL: Use packed single precision
   instructions where possible, i.e. movups instead of movupd.  */
DEF_TUNE (X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL, "sse_packed_single_insn_optimal",
          m_BDVER | m_ZNVER)

/* X86_TUNE_SSE_TYPELESS_STORES: Always use movaps/movups for 128bit stores.  */
DEF_TUNE (X86_TUNE_SSE_TYPELESS_STORES, "sse_typeless_stores",
          m_AMD_MULTIPLE | m_CORE_ALL | m_GENERIC)

/* X86_TUNE_SSE_LOAD0_BY_PXOR: Always use pxor to load0 as opposed to
   xorps/xorpd and other variants.  */
DEF_TUNE (X86_TUNE_SSE_LOAD0_BY_PXOR, "sse_load0_by_pxor",
          m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BDVER | m_BTVER | m_ZNVER
          | m_GENERIC)

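/* Illustrative sketch (not from the original file): an SSE register is
   zeroed with

        pxor    %xmm0, %xmm0

   rather than "xorps %xmm0, %xmm0" or "xorpd %xmm0, %xmm0", which the
   listed cores handle less efficiently.  */
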
/* X86_TUNE_INTER_UNIT_MOVES_TO_VEC: Enable moves from integer
   to SSE registers.  If disabled, the moves will be done by storing
   the value to memory and reloading.
   Enable this flag for generic - the only relevant architecture preferring
   no inter-unit moves is Bulldozer.  While this causes a small regression on
   SPECfp scores (sub 0.3%), disabling inter-unit moves noticeably penalizes
   hand-written vectorized code which uses e.g. _mm_set_epi16.  */
DEF_TUNE (X86_TUNE_INTER_UNIT_MOVES_TO_VEC, "inter_unit_moves_to_vec",
          ~(m_ATHLON_K8 | m_AMDFAM10 | m_BDVER | m_BTVER))

/* X86_TUNE_INTER_UNIT_MOVES_FROM_VEC: Enable moves from SSE
   to integer registers.  If disabled, the moves will be done by storing
   the value to memory and reloading.  */
DEF_TUNE (X86_TUNE_INTER_UNIT_MOVES_FROM_VEC, "inter_unit_moves_from_vec",
          ~m_ATHLON_K8)

/* X86_TUNE_INTER_UNIT_CONVERSIONS: Enable float<->integer conversions
   to use both SSE and integer registers at the same time.  */
DEF_TUNE (X86_TUNE_INTER_UNIT_CONVERSIONS, "inter_unit_conversions",
          ~(m_AMDFAM10 | m_BDVER))

/* X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS: Try to split memory operands for
   FP converts to the destination register.  */
DEF_TUNE (X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS, "split_mem_opnd_for_fp_converts",
          m_SILVERMONT | m_KNL | m_KNM | m_GOLDMONT | m_GOLDMONT_PLUS
          | m_TREMONT | m_INTEL)

/* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
   from FP to FP.  This form of instructions avoids a partial write to the
   destination.  */
DEF_TUNE (X86_TUNE_USE_VECTOR_FP_CONVERTS, "use_vector_fp_converts",
          m_AMDFAM10)

/* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
   from integer to FP.  */
DEF_TUNE (X86_TUNE_USE_VECTOR_CONVERTS, "use_vector_converts", m_AMDFAM10)

/* X86_TUNE_SLOW_PSHUFB: Indicates tunings with slow pshufb instruction.  */
DEF_TUNE (X86_TUNE_SLOW_PSHUFB, "slow_pshufb",
          m_BONNELL | m_SILVERMONT | m_KNL | m_KNM | m_GOLDMONT
          | m_GOLDMONT_PLUS | m_TREMONT | m_INTEL)

/* X86_TUNE_AVOID_4BYTE_PREFIXES: Avoid instructions requiring 4+ bytes of
   prefixes.  */
DEF_TUNE (X86_TUNE_AVOID_4BYTE_PREFIXES, "avoid_4byte_prefixes",
          m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_INTEL)

/* X86_TUNE_USE_GATHER: Use gather instructions.  */
DEF_TUNE (X86_TUNE_USE_GATHER, "use_gather",
          ~(m_ZNVER | m_GENERIC))

/* X86_TUNE_AVOID_128FMA_CHAINS: Avoid creating loops with tight 128bit or
   smaller FMA chains.  */
DEF_TUNE (X86_TUNE_AVOID_128FMA_CHAINS, "avoid_fma_chains", m_ZNVER)

/* X86_TUNE_AVOID_256FMA_CHAINS: Avoid creating loops with tight 256bit or
   smaller FMA chains.  */
DEF_TUNE (X86_TUNE_AVOID_256FMA_CHAINS, "avoid_fma256_chains", m_ZNVER2)

/*****************************************************************************/
/* AVX instruction selection tuning (some of the SSE flags affect AVX, too). */
/*****************************************************************************/

/* X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL: If false, unaligned loads are
   split.  */
DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL, "256_unaligned_load_optimal",
          ~(m_NEHALEM | m_SANDYBRIDGE | m_GENERIC))

/* X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL: If false, unaligned stores are
   split.  */
DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL, "256_unaligned_store_optimal",
          ~(m_NEHALEM | m_SANDYBRIDGE | m_BDVER | m_ZNVER1 | m_GENERIC))

/* X86_TUNE_AVX128_OPTIMAL: Enable 128-bit AVX instruction generation for
   the auto-vectorizer.  */
DEF_TUNE (X86_TUNE_AVX128_OPTIMAL, "avx128_optimal", m_BDVER | m_BTVER2
          | m_ZNVER1)

/* X86_TUNE_AVX256_OPTIMAL: Use 256-bit AVX instructions instead of 512-bit AVX
   instructions in the auto-vectorizer.  */
DEF_TUNE (X86_TUNE_AVX256_OPTIMAL, "avx256_optimal", m_CORE_AVX512)

/*****************************************************************************/
/* Historical relics: tuning flags that help specific old CPU designs.       */
/*****************************************************************************/

/* X86_TUNE_DOUBLE_WITH_ADD: Use add instead of sal to double value in
   an integer register.  */
DEF_TUNE (X86_TUNE_DOUBLE_WITH_ADD, "double_with_add", ~m_386)

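/* Illustrative sketch (not from the original file): doubling a value is
   emitted as

        addl    %eax, %eax

   instead of "sall $1, %eax" on everything except the 386.  */
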
/* X86_TUNE_ALWAYS_FANCY_MATH_387: Controls use of fancy 387 operations,
   such as fsqrt, fprem, fsin, fcos, fsincos etc.
   Should be enabled for all targets that always have a coprocessor.  */
DEF_TUNE (X86_TUNE_ALWAYS_FANCY_MATH_387, "always_fancy_math_387",
          ~(m_386 | m_486 | m_LAKEMONT))

/* X86_TUNE_UNROLL_STRLEN: Produce (quite lame) unrolled sequence for
   inline strlen.  This affects only -minline-all-stringops mode.  By
   default we always dispatch to a library since our internal strlen
   is bad.  */
DEF_TUNE (X86_TUNE_UNROLL_STRLEN, "unroll_strlen", ~m_386)

/* X86_TUNE_SHIFT1: Enables use of the short encoding of "sal reg" instead of
   the longer "sal $1, reg".  */
DEF_TUNE (X86_TUNE_SHIFT1, "shift1", ~m_486)

/* X86_TUNE_ZERO_EXTEND_WITH_AND: Use AND instruction instead
   of MOVZX to zero extend.  */
DEF_TUNE (X86_TUNE_ZERO_EXTEND_WITH_AND, "zero_extend_with_and",
          m_486 | m_PENT)

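/* Illustrative sketch (not from the original file): on the 486 and
   Pentium a zero extension may be done in place as

        andl    $255, %eax

   rather than with "movzbl %al, %eax".  */
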
/* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for HImode
   and SImode multiply, but 386 and 486 do HImode multiply faster.  */
DEF_TUNE (X86_TUNE_PROMOTE_HIMODE_IMUL, "promote_himode_imul",
          ~(m_386 | m_486))

/* X86_TUNE_FAST_PREFIX: Enable demoting some 32bit or 64bit arithmetic
   into 16bit/8bit when the resulting sequence is shorter.  For example,
   "and $-65536, reg" may be demoted to a 16bit store of 0.  */
DEF_TUNE (X86_TUNE_FAST_PREFIX, "fast_prefix",
          ~(m_386 | m_486 | m_PENT | m_LAKEMONT))

/* X86_TUNE_READ_MODIFY_WRITE: Enable use of read-modify-write instructions
   such as "add $1, mem".  */
DEF_TUNE (X86_TUNE_READ_MODIFY_WRITE, "read_modify_write",
          ~(m_PENT | m_LAKEMONT))

/* X86_TUNE_MOVE_M1_VIA_OR: On Pentiums, it is faster to load -1 via OR
   than a MOV.  */
DEF_TUNE (X86_TUNE_MOVE_M1_VIA_OR, "move_m1_via_or", m_PENT | m_LAKEMONT)

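/* Illustrative sketch (not from the original file):

        orl     $-1, %eax       # 3 bytes

   instead of "movl $-1, %eax" (5 bytes); the OR form is shorter and no
   slower on these chips.  */
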
/* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
   but one byte longer.  */
DEF_TUNE (X86_TUNE_NOT_UNPAIRABLE, "not_unpairable", m_PENT | m_LAKEMONT)

/* X86_TUNE_PARTIAL_REG_STALL: Pentium Pro, unlike later chips, handled
   use of partial registers by renaming.  This improved performance of 16bit
   code where upper halves of registers are not used.  It also leads to
   a penalty whenever a 16bit store is followed by a 32bit use.  This flag
   disables production of such sequences in common cases.
   See also X86_TUNE_HIMODE_MATH.

   In the current implementation the partial register stalls are not
   eliminated very well - they can be introduced via subregs synthesized
   by combine and can happen in caller/callee saving sequences.  */
DEF_TUNE (X86_TUNE_PARTIAL_REG_STALL, "partial_reg_stall", m_PPRO)

/* X86_TUNE_PROMOTE_QIMODE: When it is cheap, turn 8bit arithmetic into
   corresponding 32bit arithmetic.  */
DEF_TUNE (X86_TUNE_PROMOTE_QIMODE, "promote_qimode",
          m_386 | m_486 | m_PENT | m_LAKEMONT | m_K6_GEODE)

/* X86_TUNE_PROMOTE_HI_REGS: Same, but for 16bit arithmetic.  Again we avoid
   partial register stalls on PentiumPro targets.  */
DEF_TUNE (X86_TUNE_PROMOTE_HI_REGS, "promote_hi_regs", m_PPRO)

/* X86_TUNE_HIMODE_MATH: Enable use of 16bit arithmetic.
   On PPro this flag is meant to avoid partial register stalls.  */
DEF_TUNE (X86_TUNE_HIMODE_MATH, "himode_math", ~m_PPRO)

/* X86_TUNE_SPLIT_LONG_MOVES: Avoid instructions moving immediates
   directly to memory.  */
DEF_TUNE (X86_TUNE_SPLIT_LONG_MOVES, "split_long_moves", m_PPRO)

/* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx.  */
DEF_TUNE (X86_TUNE_USE_XCHGB, "use_xchgb", m_PENT4)

/* X86_TUNE_USE_MOV0: Use "mov $0, reg" instead of "xor reg, reg" to clear
   an integer register.  */
DEF_TUNE (X86_TUNE_USE_MOV0, "use_mov0", m_K6)

/* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with a memory
   operand that cannot be represented using a modRM byte.  The XOR
   replacement is long decoded, so this split helps here as well.  */
DEF_TUNE (X86_TUNE_NOT_VECTORMODE, "not_vectormode", m_K6)

/* X86_TUNE_AVOID_VECTOR_DECODE: Enable splitters that avoid vector decoded
   forms of instructions on K8 targets.  */
DEF_TUNE (X86_TUNE_AVOID_VECTOR_DECODE, "avoid_vector_decode",
          m_K8)

/*****************************************************************************/
/* This never worked well before.                                            */
/*****************************************************************************/

/* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
   on simulation results.  But after P4 was made, no performance benefit
   was observed with branch hints.  They also increase code size.
   As a result, icc never generates branch hints.  */
DEF_TUNE (X86_TUNE_BRANCH_PREDICTION_HINTS, "branch_prediction_hints", 0U)

/* X86_TUNE_QIMODE_MATH: Enable use of 8bit arithmetic.  */
DEF_TUNE (X86_TUNE_QIMODE_MATH, "qimode_math", ~0U)

/* X86_TUNE_PROMOTE_QI_REGS: This enables generic code that promotes all 8bit
   arithmetic to 32bit via the PROMOTE_MODE macro.  This code generation
   scheme is usually used for RISC targets.  */
DEF_TUNE (X86_TUNE_PROMOTE_QI_REGS, "promote_qi_regs", 0U)

/* X86_TUNE_EMIT_VZEROUPPER: This enables vzeroupper instruction insertion
   before a transfer of control flow out of the function.  */
DEF_TUNE (X86_TUNE_EMIT_VZEROUPPER, "emit_vzeroupper", ~m_KNL)

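/* Illustrative sketch (not from the original file): after a function has
   used 256bit ymm registers, GCC inserts

        vzeroupper

   before returns and calls, avoiding the AVX/SSE transition penalty in
   code that subsequently executes legacy SSE instructions.  */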