/* Definitions of x86 tunable features.
   Copyright (C) 2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

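/* A minimal sketch (an assumption about usage, not part of this file) of how
   a .def file in this style is consumed: the includer defines DEF_TUNE before
   including the file, for example to build an enumeration of tuning features:

       #define DEF_TUNE(tune, name, selector) tune,
       enum example_tune_index {
       #include "x86-tune.def"
         EXAMPLE_TUNE_LAST
       };
       #undef DEF_TUNE

   The names "example_tune_index" and "EXAMPLE_TUNE_LAST" are hypothetical;
   the real consumers live elsewhere in the i386 backend.  */
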
/* X86_TUNE_USE_LEAVE: Use "leave" instruction in epilogues where it fits.  */
DEF_TUNE (X86_TUNE_USE_LEAVE, "use_leave",
          m_386 | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC)
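
/* Illustration (a sketch, not part of the original file): with
   X86_TUNE_USE_LEAVE enabled, the epilogue sequence

       movl  %ebp, %esp
       popl  %ebp

   can be emitted as the single instruction "leave", which performs the
   same frame teardown.  */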

/* X86_TUNE_PUSH_MEMORY: Enable generation of "push mem" instructions.
   Some chips, like the 486 and Pentium, work faster with separate load
   and push instructions.  */
DEF_TUNE (X86_TUNE_PUSH_MEMORY, "push_memory",
          m_386 | m_P4_NOCONA | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE
          | m_GENERIC)

/* X86_TUNE_ZERO_EXTEND_WITH_AND: Use AND instruction instead
   of movzbl/movzwl.  */
DEF_TUNE (X86_TUNE_ZERO_EXTEND_WITH_AND, "zero_extend_with_and", m_486 | m_PENT)
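
/* Illustration (a sketch, not part of the original file): on 486/Pentium the
   zero extension

       movzbl  %al, %eax

   is instead done with

       andl  $255, %eax

   when the byte already sits in the low part of the destination register.  */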

/* X86_TUNE_UNROLL_STRLEN: Produce a (quite lame) unrolled sequence for
   inline strlen.  This affects only -minline-all-stringops mode.  By
   default we always dispatch to a library since our internal strlen
   is bad.  */
DEF_TUNE (X86_TUNE_UNROLL_STRLEN, "unroll_strlen",
          m_486 | m_PENT | m_PPRO | m_ATOM | m_SLM | m_CORE_ALL | m_K6
          | m_AMD_MULTIPLE | m_GENERIC)

/* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
   on simulation results.  But after P4 was made, no performance benefit
   was observed with branch hints.  They also increase code size.
   As a result, icc never generates branch hints.  */
DEF_TUNE (X86_TUNE_BRANCH_PREDICTION_HINTS, "branch_prediction_hints", 0)

/* X86_TUNE_DOUBLE_WITH_ADD: Use add instead of sal to double a value in
   an integer register.  */
DEF_TUNE (X86_TUNE_DOUBLE_WITH_ADD, "double_with_add", ~m_386)
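
/* Illustration (a sketch, not part of the original file): doubling a value

       sall  $1, %eax

   becomes

       addl  %eax, %eax

   on targets with this flag set.  */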

/* X86_TUNE_USE_SAHF: Controls use of SAHF.  */
DEF_TUNE (X86_TUNE_USE_SAHF, "use_sahf",
          m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_K6_GEODE
          | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER | m_GENERIC)

/* X86_TUNE_MOVX: Enable zero extension of integer registers to avoid
   partial dependencies.  */
DEF_TUNE (X86_TUNE_MOVX, "movx",
          m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_GEODE
          | m_AMD_MULTIPLE | m_GENERIC)

/* X86_TUNE_PARTIAL_REG_STALL: Pentium pro, unlike later chips, handled
   use of partial registers by renaming.  This improved performance of 16bit
   code where upper halves of registers are not used.  It also leads to
   a penalty whenever a 16bit store is followed by 32bit use.  This flag
   disables production of such sequences in common cases.
   See also X86_TUNE_HIMODE_MATH.

   In the current implementation the partial register stalls are not eliminated
   very well - they can be introduced via subregs synthesized by combine
   and can happen in caller/callee saving sequences.  */
DEF_TUNE (X86_TUNE_PARTIAL_REG_STALL, "partial_reg_stall", m_PPRO)
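
/* Illustration (a sketch, not part of the original file) of the penalized
   pattern on PPro-class cores: a 16bit write such as

       movw  %dx, %ax

   followed by a 32bit read of the same register

       addl  %eax, %ebx

   stalls while the partial result is merged.  */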

/* X86_TUNE_PARTIAL_FLAG_REG_STALL: this flag disables use of flags
   set by instructions affecting just some flags (in particular shifts).
   This is because Core2 resolves dependencies on the whole flags register
   and such sequences introduce a false dependency on the previous instruction
   setting full flags.

   This flag does not affect generation of INC and DEC, which is controlled
   by X86_TUNE_USE_INCDEC.

   This flag may be dropped from generic once core2-corei5 machines are
   rare enough.  */
DEF_TUNE (X86_TUNE_PARTIAL_FLAG_REG_STALL, "partial_flag_reg_stall",
          m_CORE2 | m_GENERIC)
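
/* Illustration (a sketch, not part of the original file) of a sequence this
   flag avoids: a conditional jump consuming flags produced by a shift,

       shrl  %cl, %eax
       jne   .L1

   Instead, an explicit test (e.g. testl %eax, %eax) is kept between the
   shift and the jump, since the shift writes only some of the flag bits.  */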

/* X86_TUNE_LCP_STALL: Avoid an expensive length-changing prefix stall
   on 16-bit immediate moves into memory on Core2 and Corei7.  */
DEF_TUNE (X86_TUNE_LCP_STALL, "lcp_stall", m_CORE_ALL | m_GENERIC)
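
/* Illustration (a sketch, not part of the original file) of the stalling
   form: a 16bit immediate store such as

       movw  $1234, (%eax)

   carries a length-changing 0x66 operand-size prefix; materializing the
   constant in a register first avoids the predecoder stall.  */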

/* X86_TUNE_USE_HIMODE_FIOP: Enables use of x87 instructions with 16bit
   integer operand.
   FIXME: Why is this disabled for modern chips?  */
DEF_TUNE (X86_TUNE_USE_HIMODE_FIOP, "use_himode_fiop",
          m_386 | m_486 | m_K6_GEODE)

/* X86_TUNE_USE_SIMODE_FIOP: Enables use of x87 instructions with 32bit
   integer operand.  */
DEF_TUNE (X86_TUNE_USE_SIMODE_FIOP, "use_simode_fiop",
          ~(m_PENT | m_PPRO | m_CORE_ALL | m_ATOM
            | m_SLM | m_AMD_MULTIPLE | m_GENERIC))

/* X86_TUNE_USE_MOV0: Use "mov $0, reg" instead of "xor reg, reg" to clear
   integer register.  */
DEF_TUNE (X86_TUNE_USE_MOV0, "use_mov0", m_K6)

/* X86_TUNE_USE_CLTD: Controls use of CLTD and CQTO instructions.  */
DEF_TUNE (X86_TUNE_USE_CLTD, "use_cltd", ~(m_PENT | m_ATOM | m_SLM | m_K6))
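
/* Illustration (a sketch, not part of the original file): sign extending
   %eax into %edx before a signed division can be done either with

       cltd

   or, where this tuning is disabled, with the pair

       movl  %eax, %edx
       sarl  $31, %edx  */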

/* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx.  */
DEF_TUNE (X86_TUNE_USE_XCHGB, "use_xchgb", m_PENT4)

/* X86_TUNE_SPLIT_LONG_MOVES: Avoid instructions moving immediates
   directly to memory.  */
DEF_TUNE (X86_TUNE_SPLIT_LONG_MOVES, "split_long_moves", m_PPRO)

/* X86_TUNE_READ_MODIFY_WRITE: Enable use of read modify write instructions
   such as "add $1, mem".  */
DEF_TUNE (X86_TUNE_READ_MODIFY_WRITE, "read_modify_write", ~m_PENT)

/* X86_TUNE_READ_MODIFY: Enable use of read-modify instructions such
   as "add mem, reg".  */
DEF_TUNE (X86_TUNE_READ_MODIFY, "read_modify", ~(m_PENT | m_PPRO))

/* X86_TUNE_PROMOTE_QIMODE: When it is cheap, turn 8bit arithmetic into
   the corresponding 32bit arithmetic.  */
DEF_TUNE (X86_TUNE_PROMOTE_QIMODE, "promote_qimode",
          m_386 | m_486 | m_PENT | m_CORE_ALL | m_ATOM | m_SLM
          | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC)

/* X86_TUNE_FAST_PREFIX: Enable demoting some 32bit or 64bit arithmetic
   into 16bit/8bit when the resulting sequence is shorter.  For example,
   "and $-65536, reg" can become a 16bit store of 0.  */
DEF_TUNE (X86_TUNE_FAST_PREFIX, "fast_prefix", ~(m_386 | m_486 | m_PENT))
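
/* Illustration (a sketch, not part of the original file) of the demotion
   mentioned above: clearing the low 16 bits with

       andl  $-65536, (%eax)

   can be shortened to the 16bit store

       movw  $0, (%eax)  */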

/* X86_TUNE_SINGLE_STRINGOP: Enable use of single string operations, such
   as MOVS and STOS (without a REP prefix) to move/set sequences of bytes.  */
DEF_TUNE (X86_TUNE_SINGLE_STRINGOP, "single_stringop", m_386 | m_P4_NOCONA)

/* X86_TUNE_QIMODE_MATH: Enable use of 8bit arithmetic.  */
DEF_TUNE (X86_TUNE_QIMODE_MATH, "qimode_math", ~0)

/* X86_TUNE_HIMODE_MATH: Enable use of 16bit arithmetic.
   On PPro this flag is meant to avoid partial register stalls.  */
DEF_TUNE (X86_TUNE_HIMODE_MATH, "himode_math", ~m_PPRO)

/* X86_TUNE_PROMOTE_QI_REGS: This enables generic code that promotes all 8bit
   arithmetic to 32bit via PROMOTE_MODE macro.  This code generation scheme
   is usually used for RISC targets.  */
DEF_TUNE (X86_TUNE_PROMOTE_QI_REGS, "promote_qi_regs", 0)

/* X86_TUNE_PROMOTE_HI_REGS: Same, but for 16bit arithmetic.  Again we avoid
   partial register stalls on PentiumPro targets.  */
DEF_TUNE (X86_TUNE_PROMOTE_HI_REGS, "promote_hi_regs", m_PPRO)

/* X86_TUNE_SINGLE_POP: Enable if single pop insn is preferred
   over esp addition.  */
DEF_TUNE (X86_TUNE_SINGLE_POP, "single_pop", m_386 | m_486 | m_PENT | m_PPRO)
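
/* Illustration (a sketch, not part of the original file): releasing 4 bytes
   of stack with a single pop into a dead register,

       popl  %ecx

   instead of

       addl  $4, %esp  */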

/* X86_TUNE_DOUBLE_POP: Enable if double pop insn is preferred
   over esp addition.  */
DEF_TUNE (X86_TUNE_DOUBLE_POP, "double_pop", m_PENT)

/* X86_TUNE_SINGLE_PUSH: Enable if single push insn is preferred
   over esp subtraction.  */
DEF_TUNE (X86_TUNE_SINGLE_PUSH, "single_push", m_386 | m_486 | m_PENT
          | m_K6_GEODE)

/* X86_TUNE_DOUBLE_PUSH: Enable if double push insn is preferred
   over esp subtraction.  */
DEF_TUNE (X86_TUNE_DOUBLE_PUSH, "double_push", m_PENT | m_K6_GEODE)

/* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
   for DFmode copies.  */
DEF_TUNE (X86_TUNE_INTEGER_DFMODE_MOVES, "integer_dfmode_moves",
          ~(m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM
            | m_GEODE | m_AMD_MULTIPLE | m_GENERIC))

/* X86_TUNE_PARTIAL_REG_DEPENDENCY: Enable more register renaming
   on modern chips.  Prefer stores affecting the whole integer register
   over partial stores.  For example prefer MOVZBL or MOVQ to load an 8bit
   value over movb.  */
DEF_TUNE (X86_TUNE_PARTIAL_REG_DEPENDENCY, "partial_reg_dependency",
          m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_AMD_MULTIPLE
          | m_GENERIC)

/* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: This knob promotes all store
   destinations to be 128bit to allow register renaming on 128bit SSE units,
   but usually results in one extra microop on 64bit SSE units.
   Experimental results show that disabling this option on P4 brings over 20%
   SPECfp regression, while enabling it on K8 brings roughly 2.4% regression
   that can be partly masked by careful scheduling of moves.  */
DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY, "sse_partial_reg_dependency",
          m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_AMDFAM10
          | m_BDVER | m_GENERIC)

/* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL: Use movups for misaligned loads instead
   of a sequence loading registers by parts.  */
DEF_TUNE (X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL, "sse_unaligned_load_optimal",
          m_COREI7 | m_AMDFAM10 | m_BDVER | m_BTVER | m_SLM | m_GENERIC)

/* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL: Use movups for misaligned stores instead
   of a sequence storing registers by parts.  */
DEF_TUNE (X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL, "sse_unaligned_store_optimal",
          m_COREI7 | m_BDVER | m_SLM | m_GENERIC)

/* X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL: if true, unaligned loads are
   split.  */
DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL, "256_unaligned_load_optimal",
          ~(m_COREI7 | m_GENERIC))

/* X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL: if true, unaligned stores are
   split.  */
DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL, "256_unaligned_store_optimal",
          ~(m_COREI7 | m_BDVER | m_GENERIC))

/* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL: Use packed single precision
   instructions where possible, i.e. movups instead of movupd.  */
DEF_TUNE (X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL, "sse_packed_single_insn_optimal",
          m_BDVER)

/* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
   are resolved on SSE register parts instead of whole registers, so we may
   maintain just the lower part of scalar values in the proper format, leaving
   the upper part undefined.  */
DEF_TUNE (X86_TUNE_SSE_SPLIT_REGS, "sse_split_regs", m_ATHLON_K8)

/* X86_TUNE_SSE_TYPELESS_STORES: Always movaps/movups for 128bit stores.  */
DEF_TUNE (X86_TUNE_SSE_TYPELESS_STORES, "sse_typeless_stores",
          m_AMD_MULTIPLE | m_CORE_ALL | m_GENERIC)

/* X86_TUNE_SSE_LOAD0_BY_PXOR: Always use pxor to load0 as opposed to
   xorps/xorpd and other variants.  */
DEF_TUNE (X86_TUNE_SSE_LOAD0_BY_PXOR, "sse_load0_by_pxor",
          m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BDVER | m_BTVER | m_GENERIC)

/* X86_TUNE_MEMORY_MISMATCH_STALL: Avoid partial stores that are followed by
   full sized loads.  */
DEF_TUNE (X86_TUNE_MEMORY_MISMATCH_STALL, "memory_mismatch_stall",
          m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC)

/* X86_TUNE_PROLOGUE_USING_MOVE: Do not use push/pop in prologues that are
   considered on the critical path.  */
DEF_TUNE (X86_TUNE_PROLOGUE_USING_MOVE, "prologue_using_move",
          m_PPRO | m_ATHLON_K8)

/* X86_TUNE_EPILOGUE_USING_MOVE: Do not use push/pop in epilogues that are
   considered on the critical path.  */
DEF_TUNE (X86_TUNE_EPILOGUE_USING_MOVE, "epilogue_using_move",
          m_PPRO | m_ATHLON_K8)

/* X86_TUNE_SHIFT1: Enables use of short encoding of "sal reg" instead of
   longer "sal $1, reg".  */
DEF_TUNE (X86_TUNE_SHIFT1, "shift1", ~m_486)

/* X86_TUNE_USE_FFREEP: Use ffreep instruction instead of fstp.  */
DEF_TUNE (X86_TUNE_USE_FFREEP, "use_ffreep", m_AMD_MULTIPLE)

/* X86_TUNE_INTER_UNIT_MOVES_TO_VEC: Enable moves from integer
   to SSE registers.  If disabled, the moves will be done by storing
   the value to memory and reloading.  */
DEF_TUNE (X86_TUNE_INTER_UNIT_MOVES_TO_VEC, "inter_unit_moves_to_vec",
          ~(m_AMD_MULTIPLE | m_GENERIC))

/* X86_TUNE_INTER_UNIT_MOVES_FROM_VEC: Enable moves from SSE
   to integer registers.  If disabled, the moves will be done by storing
   the value to memory and reloading.  */
DEF_TUNE (X86_TUNE_INTER_UNIT_MOVES_FROM_VEC, "inter_unit_moves_from_vec",
          ~m_ATHLON_K8)

/* X86_TUNE_INTER_UNIT_CONVERSIONS: Enable float<->integer conversions
   to use both SSE and integer registers at the same time.
   FIXME: revisit importance of this for generic.  */
DEF_TUNE (X86_TUNE_INTER_UNIT_CONVERSIONS, "inter_unit_conversions",
          ~(m_AMDFAM10 | m_BDVER))

/* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
   than 4 branch instructions in the 16 byte window.  */
DEF_TUNE (X86_TUNE_FOUR_JUMP_LIMIT, "four_jump_limit",
          m_PPRO | m_P4_NOCONA | m_ATOM | m_SLM | m_ATHLON_K8 | m_AMDFAM10)

/* X86_TUNE_SCHEDULE: Enable scheduling.  */
DEF_TUNE (X86_TUNE_SCHEDULE, "schedule",
          m_PENT | m_PPRO | m_CORE_ALL | m_ATOM | m_SLM | m_K6_GEODE
          | m_AMD_MULTIPLE | m_GENERIC)

/* X86_TUNE_USE_BT: Enable use of BT (bit test) instructions.  */
DEF_TUNE (X86_TUNE_USE_BT, "use_bt",
          m_CORE_ALL | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC)

/* X86_TUNE_USE_INCDEC: Enable use of inc/dec instructions.  */
DEF_TUNE (X86_TUNE_USE_INCDEC, "use_incdec",
          ~(m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_GENERIC))
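
/* Illustration (a sketch, not part of the original file): where this flag is
   disabled,

       incl  %eax

   is emitted as

       addl  $1, %eax

   because inc/dec leave the carry flag untouched and so only partially
   update the flags register.  */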

/* X86_TUNE_PAD_RETURNS: Place NOP before every RET that is a destination
   of conditional jump or directly preceded by other jump instruction.
   This is important for AMD K8-AMDFAM10 because the branch prediction
   architecture expects at most one jump per 2 byte window.  Failing to
   pad returns leads to a misaligned return stack.  */
DEF_TUNE (X86_TUNE_PAD_RETURNS, "pad_returns",
          m_ATHLON_K8 | m_AMDFAM10 | m_GENERIC)

/* X86_TUNE_PAD_SHORT_FUNCTION: Make every function at least 4
   instructions long.  */
DEF_TUNE (X86_TUNE_PAD_SHORT_FUNCTION, "pad_short_function", m_ATOM)

/* X86_TUNE_EXT_80387_CONSTANTS: Use fancy 80387 constants, such as PI.  */
DEF_TUNE (X86_TUNE_EXT_80387_CONSTANTS, "ext_80387_constants",
          m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_K6_GEODE
          | m_ATHLON_K8 | m_GENERIC)

/* X86_TUNE_AVOID_VECTOR_DECODE: Enable splitters that avoid vector decoded
   forms of instructions on K8 targets.  */
DEF_TUNE (X86_TUNE_AVOID_VECTOR_DECODE, "avoid_vector_decode",
          m_K8)

/* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for HImode
   and SImode multiply, but 386 and 486 do HImode multiply faster.  */
DEF_TUNE (X86_TUNE_PROMOTE_HIMODE_IMUL, "promote_himode_imul",
          ~(m_386 | m_486))
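
/* Illustration (a sketch, not part of the original file): the promotion turns

       imulw  $10, %ax, %ax

   into

       imull  $10, %eax, %eax

   which is preferred wherever the SImode multiply is no slower.  */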

/* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of a 32-bit constant and memory
   operand uses the vector path on AMD machines.
   FIXME: Do we need to enable this for core?  */
DEF_TUNE (X86_TUNE_SLOW_IMUL_IMM32_MEM, "slow_imul_imm32_mem",
          m_K8 | m_AMDFAM10)

/* X86_TUNE_SLOW_IMUL_IMM8: Imul of an 8-bit constant uses the vector path
   on AMD machines.
   FIXME: Do we need to enable this for core?  */
DEF_TUNE (X86_TUNE_SLOW_IMUL_IMM8, "slow_imul_imm8",
          m_K8 | m_AMDFAM10)

/* X86_TUNE_MOVE_M1_VIA_OR: On Pentium, it is faster to load -1 via OR
   than a MOV.  */
DEF_TUNE (X86_TUNE_MOVE_M1_VIA_OR, "move_m1_via_or", m_PENT)
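
/* Illustration (a sketch, not part of the original file):

       orl  $-1, %eax         (3 bytes)

   is preferred over

       movl  $-1, %eax        (5 bytes)

   on Pentium when loading the constant -1.  */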

/* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
   but one byte longer.  */
DEF_TUNE (X86_TUNE_NOT_UNPAIRABLE, "not_unpairable", m_PENT)

/* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with a memory
   operand that cannot be represented using a modRM byte.  The XOR
   replacement is long decoded, so this split helps here as well.  */
DEF_TUNE (X86_TUNE_NOT_VECTORMODE, "not_vectormode", m_K6)

/* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
   from FP to FP.  This form of instruction avoids a partial write to the
   destination.  */
DEF_TUNE (X86_TUNE_USE_VECTOR_FP_CONVERTS, "use_vector_fp_converts",
          m_AMDFAM10)

/* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
   from integer to FP.  */
DEF_TUNE (X86_TUNE_USE_VECTOR_CONVERTS, "use_vector_converts", m_AMDFAM10)

/* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
   with a subsequent conditional jump instruction into a single
   compare-and-branch uop.
   FIXME: revisit for generic.  */
DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH, "fuse_cmp_and_branch", m_BDVER | m_CORE_ALL)

/* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit.  This flag
   will impact LEA instruction selection.  */
DEF_TUNE (X86_TUNE_OPT_AGU, "opt_agu", m_ATOM | m_SLM)

/* X86_TUNE_VECTORIZE_DOUBLE: Enable double precision vector
   instructions.  */
DEF_TUNE (X86_TUNE_VECTORIZE_DOUBLE, "vectorize_double", ~m_ATOM)

/* X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL: Enable software prefetching
   at -O3.  For the moment, the prefetching seems badly tuned for Intel
   chips.  */
DEF_TUNE (X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL, "software_prefetching_beneficial",
          m_K6_GEODE | m_AMD_MULTIPLE)

/* X86_TUNE_AVX128_OPTIMAL: Enable 128-bit AVX instruction generation for
   the auto-vectorizer.  */
DEF_TUNE (X86_TUNE_AVX128_OPTIMAL, "avx128_optimal", m_BDVER | m_BTVER2)

/* X86_TUNE_REASSOC_INT_TO_PARALLEL: Try to produce parallel computations
   during reassociation of integer computation.  */
DEF_TUNE (X86_TUNE_REASSOC_INT_TO_PARALLEL, "reassoc_int_to_parallel",
          m_ATOM)

/* X86_TUNE_REASSOC_FP_TO_PARALLEL: Try to produce parallel computations
   during reassociation of fp computation.  */
DEF_TUNE (X86_TUNE_REASSOC_FP_TO_PARALLEL, "reassoc_fp_to_parallel",
          m_ATOM | m_SLM | m_HASWELL | m_BDVER1 | m_BDVER2 | m_GENERIC)

/* X86_TUNE_GENERAL_REGS_SSE_SPILL: Try to spill general regs to SSE
   regs instead of memory.  */
DEF_TUNE (X86_TUNE_GENERAL_REGS_SSE_SPILL, "general_regs_sse_spill",
          m_CORE_ALL)

/* X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE: Try to avoid memory operands for
   a conditional move.  */
DEF_TUNE (X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE, "avoid_mem_opnd_for_cmove",
          m_ATOM | m_SLM)

/* X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS: Try to split memory operand for
   fp converts to destination register.  */
DEF_TUNE (X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS, "split_mem_opnd_for_fp_converts",
          m_SLM)

/* X86_TUNE_ACCUMULATE_OUTGOING_ARGS: Allocate stack space for outgoing
   arguments in prologue/epilogue instead of separately for each call
   by push/pop instructions.
   This increases code size by about 5% in 32bit mode, less so in 64bit mode
   because parameters are passed in registers.  It is a considerable
   win for targets without a stack engine, since there multiple push
   operations can not happen in parallel.

   FIXME: the flag is incorrectly enabled for amdfam10, Bulldozer,
   Bobcat and Generic.  This is because disabling it causes a large
   regression on mgrid due to an IRA limitation leading to unnecessary
   use of the frame pointer in 32bit mode.  */
DEF_TUNE (X86_TUNE_ACCUMULATE_OUTGOING_ARGS, "accumulate_outgoing_args",
          m_PPRO | m_P4_NOCONA | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC)

/* X86_TUNE_ALWAYS_FANCY_MATH_387: controls use of fancy 387 operations,
   such as fsqrt, fprem, fsin, fcos, fsincos etc.
   Should be enabled for all targets that always have a coprocessor.  */
DEF_TUNE (X86_TUNE_ALWAYS_FANCY_MATH_387, "always_fancy_math_387",
          ~(m_386 | m_486))