1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "tree-gimple.h"
51 #include "dwarf2.h"
52 #include "tm-constrs.h"
53 #include "params.h"
54
55 #ifndef CHECK_STACK_LIMIT
56 #define CHECK_STACK_LIMIT (-1)
57 #endif
58
59 /* Return index of given mode in mult and division cost tables. */
60 #define MODE_INDEX(mode) \
61 ((mode) == QImode ? 0 \
62 : (mode) == HImode ? 1 \
63 : (mode) == SImode ? 2 \
64 : (mode) == DImode ? 3 \
65 : 4)
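
/* Illustrative usage sketch (an assumption, not taken from this file): the
   per-mode rows of the cost tables below are picked with MODE_INDEX, roughly
       ix86_cost->mult_init[MODE_INDEX (SImode)]   -- the "SI" row, index 2
       ix86_cost->divide[MODE_INDEX (DImode)]      -- the "DI" row, index 3
   and any mode other than QI/HI/SI/DI lands in the "other" row (index 4).  */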
66
67 /* Processor costs (relative to an add) */
68 /* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
69 #define COSTS_N_BYTES(N) ((N) * 2)
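
/* Worked example: given the assumption above that COSTS_N_INSNS (N) is (N) * 4
   and an addition is 2 bytes, COSTS_N_BYTES (2) == 4 == COSTS_N_INSNS (1), so
   the size table below uses the same scale as the speed tables for a plain add.  */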
70
71 #define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
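
/* Reading aid (assumed stringop_algs layout): each descriptor below is
   { algorithm for unknown block size, {{max byte count, algorithm}, ...}}
   with max == -1 terminating the list, and each cost table carries a pair of
   such descriptors (32-bit code, 64-bit code) for memcpy and for memset.
   DUMMY_STRINGOP_ALGS thus means "always use a libcall" and fills whichever
   half of a pair a given tuning is not expected to exercise.  */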
72
73 static const
74 struct processor_costs size_cost = { /* costs for tuning for size */
75 COSTS_N_BYTES (2), /* cost of an add instruction */
76 COSTS_N_BYTES (3), /* cost of a lea instruction */
77 COSTS_N_BYTES (2), /* variable shift costs */
78 COSTS_N_BYTES (3), /* constant shift costs */
79 {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
80 COSTS_N_BYTES (3), /* HI */
81 COSTS_N_BYTES (3), /* SI */
82 COSTS_N_BYTES (3), /* DI */
83 COSTS_N_BYTES (5)}, /* other */
84 0, /* cost of multiply per each bit set */
85 {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
86 COSTS_N_BYTES (3), /* HI */
87 COSTS_N_BYTES (3), /* SI */
88 COSTS_N_BYTES (3), /* DI */
89 COSTS_N_BYTES (5)}, /* other */
90 COSTS_N_BYTES (3), /* cost of movsx */
91 COSTS_N_BYTES (3), /* cost of movzx */
92 0, /* "large" insn */
93 2, /* MOVE_RATIO */
94 2, /* cost for loading QImode using movzbl */
95 {2, 2, 2}, /* cost of loading integer registers
96 in QImode, HImode and SImode.
97 Relative to reg-reg move (2). */
98 {2, 2, 2}, /* cost of storing integer registers */
99 2, /* cost of reg,reg fld/fst */
100 {2, 2, 2}, /* cost of loading fp registers
101 in SFmode, DFmode and XFmode */
102 {2, 2, 2}, /* cost of storing fp registers
103 in SFmode, DFmode and XFmode */
104 3, /* cost of moving MMX register */
105 {3, 3}, /* cost of loading MMX registers
106 in SImode and DImode */
107 {3, 3}, /* cost of storing MMX registers
108 in SImode and DImode */
109 3, /* cost of moving SSE register */
110 {3, 3, 3}, /* cost of loading SSE registers
111 in SImode, DImode and TImode */
112 {3, 3, 3}, /* cost of storing SSE registers
113 in SImode, DImode and TImode */
114 3, /* MMX or SSE register to integer */
115 0, /* size of prefetch block */
116 0, /* number of parallel prefetches */
117 2, /* Branch cost */
118 COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
119 COSTS_N_BYTES (2), /* cost of FMUL instruction. */
120 COSTS_N_BYTES (2), /* cost of FDIV instruction. */
121 COSTS_N_BYTES (2), /* cost of FABS instruction. */
122 COSTS_N_BYTES (2), /* cost of FCHS instruction. */
123 COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
124 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
125 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
126 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
127 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}}
128 };
129
130 /* Processor costs (relative to an add) */
131 static const
132 struct processor_costs i386_cost = { /* 386 specific costs */
133 COSTS_N_INSNS (1), /* cost of an add instruction */
134 COSTS_N_INSNS (1), /* cost of a lea instruction */
135 COSTS_N_INSNS (3), /* variable shift costs */
136 COSTS_N_INSNS (2), /* constant shift costs */
137 {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
138 COSTS_N_INSNS (6), /* HI */
139 COSTS_N_INSNS (6), /* SI */
140 COSTS_N_INSNS (6), /* DI */
141 COSTS_N_INSNS (6)}, /* other */
142 COSTS_N_INSNS (1), /* cost of multiply per each bit set */
143 {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
144 COSTS_N_INSNS (23), /* HI */
145 COSTS_N_INSNS (23), /* SI */
146 COSTS_N_INSNS (23), /* DI */
147 COSTS_N_INSNS (23)}, /* other */
148 COSTS_N_INSNS (3), /* cost of movsx */
149 COSTS_N_INSNS (2), /* cost of movzx */
150 15, /* "large" insn */
151 3, /* MOVE_RATIO */
152 4, /* cost for loading QImode using movzbl */
153 {2, 4, 2}, /* cost of loading integer registers
154 in QImode, HImode and SImode.
155 Relative to reg-reg move (2). */
156 {2, 4, 2}, /* cost of storing integer registers */
157 2, /* cost of reg,reg fld/fst */
158 {8, 8, 8}, /* cost of loading fp registers
159 in SFmode, DFmode and XFmode */
160 {8, 8, 8}, /* cost of storing fp registers
161 in SFmode, DFmode and XFmode */
162 2, /* cost of moving MMX register */
163 {4, 8}, /* cost of loading MMX registers
164 in SImode and DImode */
165 {4, 8}, /* cost of storing MMX registers
166 in SImode and DImode */
167 2, /* cost of moving SSE register */
168 {4, 8, 16}, /* cost of loading SSE registers
169 in SImode, DImode and TImode */
170 {4, 8, 16}, /* cost of storing SSE registers
171 in SImode, DImode and TImode */
172 3, /* MMX or SSE register to integer */
173 0, /* size of prefetch block */
174 0, /* number of parallel prefetches */
175 1, /* Branch cost */
176 COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
177 COSTS_N_INSNS (27), /* cost of FMUL instruction. */
178 COSTS_N_INSNS (88), /* cost of FDIV instruction. */
179 COSTS_N_INSNS (22), /* cost of FABS instruction. */
180 COSTS_N_INSNS (24), /* cost of FCHS instruction. */
181 COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
182 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
183 DUMMY_STRINGOP_ALGS},
184 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
185 DUMMY_STRINGOP_ALGS},
186 };
187
188 static const
189 struct processor_costs i486_cost = { /* 486 specific costs */
190 COSTS_N_INSNS (1), /* cost of an add instruction */
191 COSTS_N_INSNS (1), /* cost of a lea instruction */
192 COSTS_N_INSNS (3), /* variable shift costs */
193 COSTS_N_INSNS (2), /* constant shift costs */
194 {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
195 COSTS_N_INSNS (12), /* HI */
196 COSTS_N_INSNS (12), /* SI */
197 COSTS_N_INSNS (12), /* DI */
198 COSTS_N_INSNS (12)}, /* other */
199 1, /* cost of multiply per each bit set */
200 {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
201 COSTS_N_INSNS (40), /* HI */
202 COSTS_N_INSNS (40), /* SI */
203 COSTS_N_INSNS (40), /* DI */
204 COSTS_N_INSNS (40)}, /* other */
205 COSTS_N_INSNS (3), /* cost of movsx */
206 COSTS_N_INSNS (2), /* cost of movzx */
207 15, /* "large" insn */
208 3, /* MOVE_RATIO */
209 4, /* cost for loading QImode using movzbl */
210 {2, 4, 2}, /* cost of loading integer registers
211 in QImode, HImode and SImode.
212 Relative to reg-reg move (2). */
213 {2, 4, 2}, /* cost of storing integer registers */
214 2, /* cost of reg,reg fld/fst */
215 {8, 8, 8}, /* cost of loading fp registers
216 in SFmode, DFmode and XFmode */
217 {8, 8, 8}, /* cost of storing fp registers
218 in SFmode, DFmode and XFmode */
219 2, /* cost of moving MMX register */
220 {4, 8}, /* cost of loading MMX registers
221 in SImode and DImode */
222 {4, 8}, /* cost of storing MMX registers
223 in SImode and DImode */
224 2, /* cost of moving SSE register */
225 {4, 8, 16}, /* cost of loading SSE registers
226 in SImode, DImode and TImode */
227 {4, 8, 16}, /* cost of storing SSE registers
228 in SImode, DImode and TImode */
229 3, /* MMX or SSE register to integer */
230 0, /* size of prefetch block */
231 0, /* number of parallel prefetches */
232 1, /* Branch cost */
233 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
234 COSTS_N_INSNS (16), /* cost of FMUL instruction. */
235 COSTS_N_INSNS (73), /* cost of FDIV instruction. */
236 COSTS_N_INSNS (3), /* cost of FABS instruction. */
237 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
238 COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
239 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
240 DUMMY_STRINGOP_ALGS},
241 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
242 DUMMY_STRINGOP_ALGS}
243 };
244
245 static const
246 struct processor_costs pentium_cost = {
247 COSTS_N_INSNS (1), /* cost of an add instruction */
248 COSTS_N_INSNS (1), /* cost of a lea instruction */
249 COSTS_N_INSNS (4), /* variable shift costs */
250 COSTS_N_INSNS (1), /* constant shift costs */
251 {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
252 COSTS_N_INSNS (11), /* HI */
253 COSTS_N_INSNS (11), /* SI */
254 COSTS_N_INSNS (11), /* DI */
255 COSTS_N_INSNS (11)}, /* other */
256 0, /* cost of multiply per each bit set */
257 {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
258 COSTS_N_INSNS (25), /* HI */
259 COSTS_N_INSNS (25), /* SI */
260 COSTS_N_INSNS (25), /* DI */
261 COSTS_N_INSNS (25)}, /* other */
262 COSTS_N_INSNS (3), /* cost of movsx */
263 COSTS_N_INSNS (2), /* cost of movzx */
264 8, /* "large" insn */
265 6, /* MOVE_RATIO */
266 6, /* cost for loading QImode using movzbl */
267 {2, 4, 2}, /* cost of loading integer registers
268 in QImode, HImode and SImode.
269 Relative to reg-reg move (2). */
270 {2, 4, 2}, /* cost of storing integer registers */
271 2, /* cost of reg,reg fld/fst */
272 {2, 2, 6}, /* cost of loading fp registers
273 in SFmode, DFmode and XFmode */
274 {4, 4, 6}, /* cost of storing fp registers
275 in SFmode, DFmode and XFmode */
276 8, /* cost of moving MMX register */
277 {8, 8}, /* cost of loading MMX registers
278 in SImode and DImode */
279 {8, 8}, /* cost of storing MMX registers
280 in SImode and DImode */
281 2, /* cost of moving SSE register */
282 {4, 8, 16}, /* cost of loading SSE registers
283 in SImode, DImode and TImode */
284 {4, 8, 16}, /* cost of storing SSE registers
285 in SImode, DImode and TImode */
286 3, /* MMX or SSE register to integer */
287 0, /* size of prefetch block */
288 0, /* number of parallel prefetches */
289 2, /* Branch cost */
290 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
291 COSTS_N_INSNS (3), /* cost of FMUL instruction. */
292 COSTS_N_INSNS (39), /* cost of FDIV instruction. */
293 COSTS_N_INSNS (1), /* cost of FABS instruction. */
294 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
295 COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
296 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
297 DUMMY_STRINGOP_ALGS},
298 {{libcall, {{-1, rep_prefix_4_byte}}},
299 DUMMY_STRINGOP_ALGS}
300 };
301
302 static const
303 struct processor_costs pentiumpro_cost = {
304 COSTS_N_INSNS (1), /* cost of an add instruction */
305 COSTS_N_INSNS (1), /* cost of a lea instruction */
306 COSTS_N_INSNS (1), /* variable shift costs */
307 COSTS_N_INSNS (1), /* constant shift costs */
308 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
309 COSTS_N_INSNS (4), /* HI */
310 COSTS_N_INSNS (4), /* SI */
311 COSTS_N_INSNS (4), /* DI */
312 COSTS_N_INSNS (4)}, /* other */
313 0, /* cost of multiply per each bit set */
314 {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
315 COSTS_N_INSNS (17), /* HI */
316 COSTS_N_INSNS (17), /* SI */
317 COSTS_N_INSNS (17), /* DI */
318 COSTS_N_INSNS (17)}, /* other */
319 COSTS_N_INSNS (1), /* cost of movsx */
320 COSTS_N_INSNS (1), /* cost of movzx */
321 8, /* "large" insn */
322 6, /* MOVE_RATIO */
323 2, /* cost for loading QImode using movzbl */
324 {4, 4, 4}, /* cost of loading integer registers
325 in QImode, HImode and SImode.
326 Relative to reg-reg move (2). */
327 {2, 2, 2}, /* cost of storing integer registers */
328 2, /* cost of reg,reg fld/fst */
329 {2, 2, 6}, /* cost of loading fp registers
330 in SFmode, DFmode and XFmode */
331 {4, 4, 6}, /* cost of storing fp registers
332 in SFmode, DFmode and XFmode */
333 2, /* cost of moving MMX register */
334 {2, 2}, /* cost of loading MMX registers
335 in SImode and DImode */
336 {2, 2}, /* cost of storing MMX registers
337 in SImode and DImode */
338 2, /* cost of moving SSE register */
339 {2, 2, 8}, /* cost of loading SSE registers
340 in SImode, DImode and TImode */
341 {2, 2, 8}, /* cost of storing SSE registers
342 in SImode, DImode and TImode */
343 3, /* MMX or SSE register to integer */
344 32, /* size of prefetch block */
345 6, /* number of parallel prefetches */
346 2, /* Branch cost */
347 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
348 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
349 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
350 COSTS_N_INSNS (2), /* cost of FABS instruction. */
351 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
352 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
 353   /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes (we
 354      ensure the alignment).  For small blocks an inline loop is still a noticeable
 355      win; for bigger blocks either rep movsl or rep movsb is the way to go.  Rep
 356      movsb apparently has a more expensive startup time in the CPU, but after 4K
 357      the difference is down in the noise.  */
358 {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
359 {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
360 DUMMY_STRINGOP_ALGS},
361 {{rep_prefix_4_byte, {{1024, unrolled_loop},
362 {8192, rep_prefix_4_byte}, {-1, libcall}}},
363 DUMMY_STRINGOP_ALGS}
364 };
365
366 static const
367 struct processor_costs geode_cost = {
368 COSTS_N_INSNS (1), /* cost of an add instruction */
369 COSTS_N_INSNS (1), /* cost of a lea instruction */
370 COSTS_N_INSNS (2), /* variable shift costs */
371 COSTS_N_INSNS (1), /* constant shift costs */
372 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
373 COSTS_N_INSNS (4), /* HI */
374 COSTS_N_INSNS (7), /* SI */
375 COSTS_N_INSNS (7), /* DI */
376 COSTS_N_INSNS (7)}, /* other */
377 0, /* cost of multiply per each bit set */
378 {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
379 COSTS_N_INSNS (23), /* HI */
380 COSTS_N_INSNS (39), /* SI */
381 COSTS_N_INSNS (39), /* DI */
382 COSTS_N_INSNS (39)}, /* other */
383 COSTS_N_INSNS (1), /* cost of movsx */
384 COSTS_N_INSNS (1), /* cost of movzx */
385 8, /* "large" insn */
386 4, /* MOVE_RATIO */
387 1, /* cost for loading QImode using movzbl */
388 {1, 1, 1}, /* cost of loading integer registers
389 in QImode, HImode and SImode.
390 Relative to reg-reg move (2). */
391 {1, 1, 1}, /* cost of storing integer registers */
392 1, /* cost of reg,reg fld/fst */
393 {1, 1, 1}, /* cost of loading fp registers
394 in SFmode, DFmode and XFmode */
395 {4, 6, 6}, /* cost of storing fp registers
396 in SFmode, DFmode and XFmode */
397
398 1, /* cost of moving MMX register */
399 {1, 1}, /* cost of loading MMX registers
400 in SImode and DImode */
401 {1, 1}, /* cost of storing MMX registers
402 in SImode and DImode */
403 1, /* cost of moving SSE register */
404 {1, 1, 1}, /* cost of loading SSE registers
405 in SImode, DImode and TImode */
406 {1, 1, 1}, /* cost of storing SSE registers
407 in SImode, DImode and TImode */
408 1, /* MMX or SSE register to integer */
409 32, /* size of prefetch block */
410 1, /* number of parallel prefetches */
411 1, /* Branch cost */
412 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
413 COSTS_N_INSNS (11), /* cost of FMUL instruction. */
414 COSTS_N_INSNS (47), /* cost of FDIV instruction. */
415 COSTS_N_INSNS (1), /* cost of FABS instruction. */
416 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
417 COSTS_N_INSNS (54), /* cost of FSQRT instruction. */
418 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
419 DUMMY_STRINGOP_ALGS},
420 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
421 DUMMY_STRINGOP_ALGS}
422 };
423
424 static const
425 struct processor_costs k6_cost = {
426 COSTS_N_INSNS (1), /* cost of an add instruction */
427 COSTS_N_INSNS (2), /* cost of a lea instruction */
428 COSTS_N_INSNS (1), /* variable shift costs */
429 COSTS_N_INSNS (1), /* constant shift costs */
430 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
431 COSTS_N_INSNS (3), /* HI */
432 COSTS_N_INSNS (3), /* SI */
433 COSTS_N_INSNS (3), /* DI */
434 COSTS_N_INSNS (3)}, /* other */
435 0, /* cost of multiply per each bit set */
436 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
437 COSTS_N_INSNS (18), /* HI */
438 COSTS_N_INSNS (18), /* SI */
439 COSTS_N_INSNS (18), /* DI */
440 COSTS_N_INSNS (18)}, /* other */
441 COSTS_N_INSNS (2), /* cost of movsx */
442 COSTS_N_INSNS (2), /* cost of movzx */
443 8, /* "large" insn */
444 4, /* MOVE_RATIO */
445 3, /* cost for loading QImode using movzbl */
446 {4, 5, 4}, /* cost of loading integer registers
447 in QImode, HImode and SImode.
448 Relative to reg-reg move (2). */
449 {2, 3, 2}, /* cost of storing integer registers */
450 4, /* cost of reg,reg fld/fst */
451 {6, 6, 6}, /* cost of loading fp registers
452 in SFmode, DFmode and XFmode */
453 {4, 4, 4}, /* cost of storing fp registers
454 in SFmode, DFmode and XFmode */
455 2, /* cost of moving MMX register */
456 {2, 2}, /* cost of loading MMX registers
457 in SImode and DImode */
458 {2, 2}, /* cost of storing MMX registers
459 in SImode and DImode */
460 2, /* cost of moving SSE register */
461 {2, 2, 8}, /* cost of loading SSE registers
462 in SImode, DImode and TImode */
463 {2, 2, 8}, /* cost of storing SSE registers
464 in SImode, DImode and TImode */
465 6, /* MMX or SSE register to integer */
466 32, /* size of prefetch block */
467 1, /* number of parallel prefetches */
468 1, /* Branch cost */
469 COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
470 COSTS_N_INSNS (2), /* cost of FMUL instruction. */
471 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
472 COSTS_N_INSNS (2), /* cost of FABS instruction. */
473 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
474 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
475 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
476 DUMMY_STRINGOP_ALGS},
477 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
478 DUMMY_STRINGOP_ALGS}
479 };
480
481 static const
482 struct processor_costs athlon_cost = {
483 COSTS_N_INSNS (1), /* cost of an add instruction */
484 COSTS_N_INSNS (2), /* cost of a lea instruction */
485 COSTS_N_INSNS (1), /* variable shift costs */
486 COSTS_N_INSNS (1), /* constant shift costs */
487 {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
488 COSTS_N_INSNS (5), /* HI */
489 COSTS_N_INSNS (5), /* SI */
490 COSTS_N_INSNS (5), /* DI */
491 COSTS_N_INSNS (5)}, /* other */
492 0, /* cost of multiply per each bit set */
493 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
494 COSTS_N_INSNS (26), /* HI */
495 COSTS_N_INSNS (42), /* SI */
496 COSTS_N_INSNS (74), /* DI */
497 COSTS_N_INSNS (74)}, /* other */
498 COSTS_N_INSNS (1), /* cost of movsx */
499 COSTS_N_INSNS (1), /* cost of movzx */
500 8, /* "large" insn */
501 9, /* MOVE_RATIO */
502 4, /* cost for loading QImode using movzbl */
503 {3, 4, 3}, /* cost of loading integer registers
504 in QImode, HImode and SImode.
505 Relative to reg-reg move (2). */
506 {3, 4, 3}, /* cost of storing integer registers */
507 4, /* cost of reg,reg fld/fst */
508 {4, 4, 12}, /* cost of loading fp registers
509 in SFmode, DFmode and XFmode */
510 {6, 6, 8}, /* cost of storing fp registers
511 in SFmode, DFmode and XFmode */
512 2, /* cost of moving MMX register */
513 {4, 4}, /* cost of loading MMX registers
514 in SImode and DImode */
515 {4, 4}, /* cost of storing MMX registers
516 in SImode and DImode */
517 2, /* cost of moving SSE register */
518 {4, 4, 6}, /* cost of loading SSE registers
519 in SImode, DImode and TImode */
520 {4, 4, 5}, /* cost of storing SSE registers
521 in SImode, DImode and TImode */
522 5, /* MMX or SSE register to integer */
523 64, /* size of prefetch block */
524 6, /* number of parallel prefetches */
525 5, /* Branch cost */
526 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
527 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
528 COSTS_N_INSNS (24), /* cost of FDIV instruction. */
529 COSTS_N_INSNS (2), /* cost of FABS instruction. */
530 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
531 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
 532   /* For some reason, Athlon deals better with the REP prefix (relative to loops)
 533      than K8 does.  Alignment becomes important after 8 bytes for memcpy and
 534      128 bytes for memset.  */
535 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
536 DUMMY_STRINGOP_ALGS},
537 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
538 DUMMY_STRINGOP_ALGS}
539 };
540
541 static const
542 struct processor_costs k8_cost = {
543 COSTS_N_INSNS (1), /* cost of an add instruction */
544 COSTS_N_INSNS (2), /* cost of a lea instruction */
545 COSTS_N_INSNS (1), /* variable shift costs */
546 COSTS_N_INSNS (1), /* constant shift costs */
547 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
548 COSTS_N_INSNS (4), /* HI */
549 COSTS_N_INSNS (3), /* SI */
550 COSTS_N_INSNS (4), /* DI */
551 COSTS_N_INSNS (5)}, /* other */
552 0, /* cost of multiply per each bit set */
553 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
554 COSTS_N_INSNS (26), /* HI */
555 COSTS_N_INSNS (42), /* SI */
556 COSTS_N_INSNS (74), /* DI */
557 COSTS_N_INSNS (74)}, /* other */
558 COSTS_N_INSNS (1), /* cost of movsx */
559 COSTS_N_INSNS (1), /* cost of movzx */
560 8, /* "large" insn */
561 9, /* MOVE_RATIO */
562 4, /* cost for loading QImode using movzbl */
563 {3, 4, 3}, /* cost of loading integer registers
564 in QImode, HImode and SImode.
565 Relative to reg-reg move (2). */
566 {3, 4, 3}, /* cost of storing integer registers */
567 4, /* cost of reg,reg fld/fst */
568 {4, 4, 12}, /* cost of loading fp registers
569 in SFmode, DFmode and XFmode */
570 {6, 6, 8}, /* cost of storing fp registers
571 in SFmode, DFmode and XFmode */
572 2, /* cost of moving MMX register */
573 {3, 3}, /* cost of loading MMX registers
574 in SImode and DImode */
575 {4, 4}, /* cost of storing MMX registers
576 in SImode and DImode */
577 2, /* cost of moving SSE register */
578 {4, 3, 6}, /* cost of loading SSE registers
579 in SImode, DImode and TImode */
580 {4, 4, 5}, /* cost of storing SSE registers
581 in SImode, DImode and TImode */
582 5, /* MMX or SSE register to integer */
583 64, /* size of prefetch block */
 584   /* New AMD processors never drop prefetches; if they cannot be performed
 585      immediately, they are queued.  We set the number of simultaneous prefetches
 586      to a large constant to reflect this (it is probably not a good idea to leave
 587      the number of prefetches entirely unlimited, as their execution also takes
 588      some time).  */
589 100, /* number of parallel prefetches */
590 5, /* Branch cost */
591 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
592 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
593 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
594 COSTS_N_INSNS (2), /* cost of FABS instruction. */
595 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
596 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
 597   /* K8 has an optimized REP instruction for medium-sized blocks, but for very
 598      small blocks it is better to use a loop.  For large blocks, a libcall can do
 599      nontemporal accesses and beat the inline expansion considerably.  */
600 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
601 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
602 {{libcall, {{8, loop}, {24, unrolled_loop},
603 {2048, rep_prefix_4_byte}, {-1, libcall}}},
604 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}
605 };
606
607 static const
608 struct processor_costs pentium4_cost = {
609 COSTS_N_INSNS (1), /* cost of an add instruction */
610 COSTS_N_INSNS (3), /* cost of a lea instruction */
611 COSTS_N_INSNS (4), /* variable shift costs */
612 COSTS_N_INSNS (4), /* constant shift costs */
613 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
614 COSTS_N_INSNS (15), /* HI */
615 COSTS_N_INSNS (15), /* SI */
616 COSTS_N_INSNS (15), /* DI */
617 COSTS_N_INSNS (15)}, /* other */
618 0, /* cost of multiply per each bit set */
619 {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
620 COSTS_N_INSNS (56), /* HI */
621 COSTS_N_INSNS (56), /* SI */
622 COSTS_N_INSNS (56), /* DI */
623 COSTS_N_INSNS (56)}, /* other */
624 COSTS_N_INSNS (1), /* cost of movsx */
625 COSTS_N_INSNS (1), /* cost of movzx */
626 16, /* "large" insn */
627 6, /* MOVE_RATIO */
628 2, /* cost for loading QImode using movzbl */
629 {4, 5, 4}, /* cost of loading integer registers
630 in QImode, HImode and SImode.
631 Relative to reg-reg move (2). */
632 {2, 3, 2}, /* cost of storing integer registers */
633 2, /* cost of reg,reg fld/fst */
634 {2, 2, 6}, /* cost of loading fp registers
635 in SFmode, DFmode and XFmode */
636 {4, 4, 6}, /* cost of storing fp registers
637 in SFmode, DFmode and XFmode */
638 2, /* cost of moving MMX register */
639 {2, 2}, /* cost of loading MMX registers
640 in SImode and DImode */
641 {2, 2}, /* cost of storing MMX registers
642 in SImode and DImode */
643 12, /* cost of moving SSE register */
644 {12, 12, 12}, /* cost of loading SSE registers
645 in SImode, DImode and TImode */
646 {2, 2, 8}, /* cost of storing SSE registers
647 in SImode, DImode and TImode */
648 10, /* MMX or SSE register to integer */
649 64, /* size of prefetch block */
650 6, /* number of parallel prefetches */
651 2, /* Branch cost */
652 COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
653 COSTS_N_INSNS (7), /* cost of FMUL instruction. */
654 COSTS_N_INSNS (43), /* cost of FDIV instruction. */
655 COSTS_N_INSNS (2), /* cost of FABS instruction. */
656 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
657 COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
658 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
659 DUMMY_STRINGOP_ALGS},
660 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
661 {-1, libcall}}},
662 DUMMY_STRINGOP_ALGS},
663 };
664
665 static const
666 struct processor_costs nocona_cost = {
667 COSTS_N_INSNS (1), /* cost of an add instruction */
668 COSTS_N_INSNS (1), /* cost of a lea instruction */
669 COSTS_N_INSNS (1), /* variable shift costs */
670 COSTS_N_INSNS (1), /* constant shift costs */
671 {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
672 COSTS_N_INSNS (10), /* HI */
673 COSTS_N_INSNS (10), /* SI */
674 COSTS_N_INSNS (10), /* DI */
675 COSTS_N_INSNS (10)}, /* other */
676 0, /* cost of multiply per each bit set */
677 {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
678 COSTS_N_INSNS (66), /* HI */
679 COSTS_N_INSNS (66), /* SI */
680 COSTS_N_INSNS (66), /* DI */
681 COSTS_N_INSNS (66)}, /* other */
682 COSTS_N_INSNS (1), /* cost of movsx */
683 COSTS_N_INSNS (1), /* cost of movzx */
684 16, /* "large" insn */
685 17, /* MOVE_RATIO */
686 4, /* cost for loading QImode using movzbl */
687 {4, 4, 4}, /* cost of loading integer registers
688 in QImode, HImode and SImode.
689 Relative to reg-reg move (2). */
690 {4, 4, 4}, /* cost of storing integer registers */
691 3, /* cost of reg,reg fld/fst */
692 {12, 12, 12}, /* cost of loading fp registers
693 in SFmode, DFmode and XFmode */
694 {4, 4, 4}, /* cost of storing fp registers
695 in SFmode, DFmode and XFmode */
696 6, /* cost of moving MMX register */
697 {12, 12}, /* cost of loading MMX registers
698 in SImode and DImode */
699 {12, 12}, /* cost of storing MMX registers
700 in SImode and DImode */
701 6, /* cost of moving SSE register */
702 {12, 12, 12}, /* cost of loading SSE registers
703 in SImode, DImode and TImode */
704 {12, 12, 12}, /* cost of storing SSE registers
705 in SImode, DImode and TImode */
706 8, /* MMX or SSE register to integer */
707 128, /* size of prefetch block */
708 8, /* number of parallel prefetches */
709 1, /* Branch cost */
710 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
711 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
712 COSTS_N_INSNS (40), /* cost of FDIV instruction. */
713 COSTS_N_INSNS (3), /* cost of FABS instruction. */
714 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
715 COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
716 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
717 {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
718 {100000, unrolled_loop}, {-1, libcall}}}},
719 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
720 {-1, libcall}}},
721 {libcall, {{24, loop}, {64, unrolled_loop},
722 {8192, rep_prefix_8_byte}, {-1, libcall}}}}
723 };
724
725 static const
726 struct processor_costs core2_cost = {
727 COSTS_N_INSNS (1), /* cost of an add instruction */
728 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
729 COSTS_N_INSNS (1), /* variable shift costs */
730 COSTS_N_INSNS (1), /* constant shift costs */
731 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
732 COSTS_N_INSNS (3), /* HI */
733 COSTS_N_INSNS (3), /* SI */
734 COSTS_N_INSNS (3), /* DI */
735 COSTS_N_INSNS (3)}, /* other */
736 0, /* cost of multiply per each bit set */
737 {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
738 COSTS_N_INSNS (22), /* HI */
739 COSTS_N_INSNS (22), /* SI */
740 COSTS_N_INSNS (22), /* DI */
741 COSTS_N_INSNS (22)}, /* other */
742 COSTS_N_INSNS (1), /* cost of movsx */
743 COSTS_N_INSNS (1), /* cost of movzx */
744 8, /* "large" insn */
745 16, /* MOVE_RATIO */
746 2, /* cost for loading QImode using movzbl */
747 {6, 6, 6}, /* cost of loading integer registers
748 in QImode, HImode and SImode.
749 Relative to reg-reg move (2). */
750 {4, 4, 4}, /* cost of storing integer registers */
751 2, /* cost of reg,reg fld/fst */
752 {6, 6, 6}, /* cost of loading fp registers
753 in SFmode, DFmode and XFmode */
 754   {4, 4, 4},				/* cost of storing fp registers in SFmode, DFmode and XFmode */
755 2, /* cost of moving MMX register */
756 {6, 6}, /* cost of loading MMX registers
757 in SImode and DImode */
758 {4, 4}, /* cost of storing MMX registers
759 in SImode and DImode */
760 2, /* cost of moving SSE register */
761 {6, 6, 6}, /* cost of loading SSE registers
762 in SImode, DImode and TImode */
763 {4, 4, 4}, /* cost of storing SSE registers
764 in SImode, DImode and TImode */
765 2, /* MMX or SSE register to integer */
766 128, /* size of prefetch block */
767 8, /* number of parallel prefetches */
768 3, /* Branch cost */
769 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
770 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
771 COSTS_N_INSNS (32), /* cost of FDIV instruction. */
772 COSTS_N_INSNS (1), /* cost of FABS instruction. */
773 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
774 COSTS_N_INSNS (58), /* cost of FSQRT instruction. */
775 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
776 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
777 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
778 {{libcall, {{8, loop}, {15, unrolled_loop},
779 {2048, rep_prefix_4_byte}, {-1, libcall}}},
780 {libcall, {{24, loop}, {32, unrolled_loop},
781 {8192, rep_prefix_8_byte}, {-1, libcall}}}}
782 };
783
784 /* Generic64 should produce code tuned for Nocona and K8. */
785 static const
786 struct processor_costs generic64_cost = {
787 COSTS_N_INSNS (1), /* cost of an add instruction */
 788   /* On all chips taken into consideration, lea is 2 cycles or more.  With
 789      that cost, however, our current implementation of synth_mult results in
 790      the use of unnecessary temporary registers, causing regressions on several
 791      SPECfp benchmarks.  */
792 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
793 COSTS_N_INSNS (1), /* variable shift costs */
794 COSTS_N_INSNS (1), /* constant shift costs */
795 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
796 COSTS_N_INSNS (4), /* HI */
797 COSTS_N_INSNS (3), /* SI */
798 COSTS_N_INSNS (4), /* DI */
799 COSTS_N_INSNS (2)}, /* other */
800 0, /* cost of multiply per each bit set */
801 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
802 COSTS_N_INSNS (26), /* HI */
803 COSTS_N_INSNS (42), /* SI */
804 COSTS_N_INSNS (74), /* DI */
805 COSTS_N_INSNS (74)}, /* other */
806 COSTS_N_INSNS (1), /* cost of movsx */
807 COSTS_N_INSNS (1), /* cost of movzx */
808 8, /* "large" insn */
809 17, /* MOVE_RATIO */
810 4, /* cost for loading QImode using movzbl */
811 {4, 4, 4}, /* cost of loading integer registers
812 in QImode, HImode and SImode.
813 Relative to reg-reg move (2). */
814 {4, 4, 4}, /* cost of storing integer registers */
815 4, /* cost of reg,reg fld/fst */
816 {12, 12, 12}, /* cost of loading fp registers
817 in SFmode, DFmode and XFmode */
818 {6, 6, 8}, /* cost of storing fp registers
819 in SFmode, DFmode and XFmode */
820 2, /* cost of moving MMX register */
821 {8, 8}, /* cost of loading MMX registers
822 in SImode and DImode */
823 {8, 8}, /* cost of storing MMX registers
824 in SImode and DImode */
825 2, /* cost of moving SSE register */
826 {8, 8, 8}, /* cost of loading SSE registers
827 in SImode, DImode and TImode */
828 {8, 8, 8}, /* cost of storing SSE registers
829 in SImode, DImode and TImode */
830 5, /* MMX or SSE register to integer */
831 64, /* size of prefetch block */
832 6, /* number of parallel prefetches */
 833   /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
 834      value is increased to the perhaps more appropriate value of 5.  */
835 3, /* Branch cost */
836 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
837 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
838 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
839 COSTS_N_INSNS (8), /* cost of FABS instruction. */
840 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
841 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
842 {DUMMY_STRINGOP_ALGS,
843 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
844 {DUMMY_STRINGOP_ALGS,
845 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}
846 };
847
848 /* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona and K8. */
849 static const
850 struct processor_costs generic32_cost = {
851 COSTS_N_INSNS (1), /* cost of an add instruction */
852 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
853 COSTS_N_INSNS (1), /* variable shift costs */
854 COSTS_N_INSNS (1), /* constant shift costs */
855 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
856 COSTS_N_INSNS (4), /* HI */
857 COSTS_N_INSNS (3), /* SI */
858 COSTS_N_INSNS (4), /* DI */
859 COSTS_N_INSNS (2)}, /* other */
860 0, /* cost of multiply per each bit set */
861 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
862 COSTS_N_INSNS (26), /* HI */
863 COSTS_N_INSNS (42), /* SI */
864 COSTS_N_INSNS (74), /* DI */
865 COSTS_N_INSNS (74)}, /* other */
866 COSTS_N_INSNS (1), /* cost of movsx */
867 COSTS_N_INSNS (1), /* cost of movzx */
868 8, /* "large" insn */
869 17, /* MOVE_RATIO */
870 4, /* cost for loading QImode using movzbl */
871 {4, 4, 4}, /* cost of loading integer registers
872 in QImode, HImode and SImode.
873 Relative to reg-reg move (2). */
874 {4, 4, 4}, /* cost of storing integer registers */
875 4, /* cost of reg,reg fld/fst */
876 {12, 12, 12}, /* cost of loading fp registers
877 in SFmode, DFmode and XFmode */
878 {6, 6, 8}, /* cost of storing fp registers
879 in SFmode, DFmode and XFmode */
880 2, /* cost of moving MMX register */
881 {8, 8}, /* cost of loading MMX registers
882 in SImode and DImode */
883 {8, 8}, /* cost of storing MMX registers
884 in SImode and DImode */
885 2, /* cost of moving SSE register */
886 {8, 8, 8}, /* cost of loading SSE registers
887 in SImode, DImode and TImode */
888 {8, 8, 8}, /* cost of storing SSE registers
889 in SImode, DImode and TImode */
890 5, /* MMX or SSE register to integer */
891 64, /* size of prefetch block */
892 6, /* number of parallel prefetches */
893 3, /* Branch cost */
894 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
895 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
896 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
897 COSTS_N_INSNS (8), /* cost of FABS instruction. */
898 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
899 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
900 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
901 DUMMY_STRINGOP_ALGS},
902 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
903 DUMMY_STRINGOP_ALGS},
904 };
905
906 const struct processor_costs *ix86_cost = &pentium_cost;
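
/* Note (assumed behaviour): this default is repointed at the cost table of the
   CPU selected by -mtune during option processing (override_options).  */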
907
908 /* Processor feature/optimization bitmasks. */
909 #define m_386 (1<<PROCESSOR_I386)
910 #define m_486 (1<<PROCESSOR_I486)
911 #define m_PENT (1<<PROCESSOR_PENTIUM)
912 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
913 #define m_GEODE (1<<PROCESSOR_GEODE)
914 #define m_K6_GEODE (m_K6 | m_GEODE)
915 #define m_K6 (1<<PROCESSOR_K6)
916 #define m_ATHLON (1<<PROCESSOR_ATHLON)
917 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
918 #define m_K8 (1<<PROCESSOR_K8)
919 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
920 #define m_NOCONA (1<<PROCESSOR_NOCONA)
921 #define m_CORE2 (1<<PROCESSOR_CORE2)
922 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
923 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
924 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
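
/* Consumption sketch (an assumption mirroring the TARGET_* macros in i386.h):
   each tuning flag below is tested against the bit of the active tuning,
   roughly
       #define TUNEMASK (1 << ix86_tune)
       #define TARGET_USE_LEAVE (x86_use_leave & TUNEMASK)
   so ORing m_* masks into a flag enables that optimization for those CPUs.  */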
925
 926 /* Generic instruction choice should be a common subset of the supported CPUs
 927    (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
 928 
 929 /* Leave does not affect Nocona SPEC2000 results negatively, so enabling it for
 930    Generic64 seems like a good code size tradeoff.  We can't enable it for 32-bit
 931    generic because it does not work well with PPro-based chips.  */
932 const int x86_use_leave = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_CORE2 | m_GENERIC64;
933 const int x86_push_memory = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
934 const int x86_zero_extend_with_and = m_486 | m_PENT;
935 const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */;
936 const int x86_double_with_add = ~m_386;
937 const int x86_use_bit_test = m_386;
938 const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6 | m_CORE2 | m_GENERIC;
939 const int x86_cmove = m_PPRO | m_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
940 const int x86_3dnow_a = m_ATHLON_K8;
941 const int x86_deep_branch = m_PPRO | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 942 /* Branch hints were put in P4 based on simulation results, but after P4
 943    was made no performance benefit was observed with branch hints; they
 944    also increase the code size.  As a result, icc never generates
 945    branch hints.  */
946 const int x86_branch_hints = 0;
947 const int x86_use_sahf = m_PPRO | m_K6_GEODE | m_PENT4 | m_NOCONA | m_GENERIC32; /*m_GENERIC | m_ATHLON_K8 ? */
 948 /* We probably ought to watch for partial register stalls on the Generic32
 949    compilation setting as well.  However, in the current implementation the
 950    partial register stalls are not eliminated very well - they can
 951    be introduced via subregs synthesized by combine and can happen
 952    in caller/callee saving sequences.
 953    Because this option pays back little on PPro-based chips and conflicts
 954    with the partial register dependencies used by Athlon/P4-based chips, it is
 955    better to leave it off for generic32 for now.  */
956 const int x86_partial_reg_stall = m_PPRO;
957 const int x86_partial_flag_reg_stall = m_CORE2 | m_GENERIC;
958 const int x86_use_himode_fiop = m_386 | m_486 | m_K6_GEODE;
959 const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT | m_CORE2 | m_GENERIC);
960 const int x86_use_mov0 = m_K6;
961 const int x86_use_cltd = ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC);
962 const int x86_read_modify_write = ~m_PENT;
963 const int x86_read_modify = ~(m_PENT | m_PPRO);
964 const int x86_split_long_moves = m_PPRO;
965 const int x86_promote_QImode = m_K6_GEODE | m_PENT | m_386 | m_486 | m_ATHLON_K8 | m_CORE2 | m_GENERIC; /* m_PENT4 ? */
966 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
967 const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
968 const int x86_qimode_math = ~(0);
969 const int x86_promote_qi_regs = 0;
 970 /* On PPro this flag is meant to avoid partial register stalls.  Just like
 971    x86_partial_reg_stall, this option might be considered for Generic32
 972    if our scheme for avoiding partial stalls were more effective.  */
973 const int x86_himode_math = ~(m_PPRO);
974 const int x86_promote_hi_regs = m_PPRO;
975 const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
976 const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
977 const int x86_add_esp_4 = m_ATHLON_K8 | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
978 const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6_GEODE | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
979 const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_GEODE);
980 const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
981 const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
982 const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
983 const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
984 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
985 const int x86_shift1 = ~m_486;
986 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 987 /* In the Generic model we have a conflict here between PPro/Pentium4-based chips
 988    that treat 128-bit SSE registers as single units and K8-based chips that
 989    divide SSE registers into two 64-bit halves.
 990    x86_sse_partial_reg_dependency promotes all store destinations to 128 bits
 991    to allow register renaming on 128-bit SSE units, but usually results in one
 992    extra micro-op on 64-bit SSE units.  Experimental results show that disabling
 993    this option on P4 brings over 20% SPECfp regression, while enabling it on
 994    K8 brings roughly 2.4% regression that can be partly masked by careful scheduling
 995    of moves.  */
996 const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
 997 /* Set for machines where the type and dependencies are resolved on SSE
 998    register parts instead of whole registers, so we may maintain just the
 999    lower part of scalar values in the proper format, leaving the upper part
1000    undefined.  */
1001 const int x86_sse_split_regs = m_ATHLON_K8;
1002 const int x86_sse_typeless_stores = m_ATHLON_K8;
1003 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
1004 const int x86_use_ffreep = m_ATHLON_K8;
1005 const int x86_use_incdec = ~(m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC);
1006
1007 /* ??? Allowing interunit moves makes it all too easy for the compiler to put
1008    integer data in xmm registers, which results in pretty abysmal code.  */
1009 const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;
1010
1011 const int x86_ext_80387_constants = m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
1012 /* Some CPU cores are not able to predict more than 4 branch instructions in
1013 the 16 byte window. */
1014 const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
1015 const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC;
1016 const int x86_use_bt = m_ATHLON_K8;
1017 /* Compare and exchange was added for 80486. */
1018 const int x86_cmpxchg = ~m_386;
1019 /* Compare and exchange 8 bytes was added for the Pentium.  */
1020 const int x86_cmpxchg8b = ~(m_386 | m_486);
1021 /* Exchange and add was added for 80486. */
1022 const int x86_xadd = ~m_386;
1023 /* Byteswap was added for 80486. */
1024 const int x86_bswap = ~m_386;
1025 const int x86_pad_returns = m_ATHLON_K8 | m_CORE2 | m_GENERIC;
1026
1027 static enum stringop_alg stringop_alg = no_stringop;
1028
1029 /* If the average insn count for a single function invocation is
1030    lower than this constant, emit fast (but longer) prologue and
1031    epilogue code.  */
1032 #define FAST_PROLOGUE_INSN_COUNT 20
1033
1034 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively.  */
1035 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1036 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1037 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1038
1039 /* Array of the smallest class containing reg number REGNO, indexed by
1040 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1041
1042 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1043 {
1044 /* ax, dx, cx, bx */
1045 AREG, DREG, CREG, BREG,
1046 /* si, di, bp, sp */
1047 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1048 /* FP registers */
1049 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1050 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1051 /* arg pointer */
1052 NON_Q_REGS,
1053 /* flags, fpsr, fpcr, frame */
1054 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1055 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1056 SSE_REGS, SSE_REGS,
1057 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1058 MMX_REGS, MMX_REGS,
1059 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1060 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1061 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1062 SSE_REGS, SSE_REGS,
1063 };
1064
1065 /* The "default" register map used in 32bit mode. */
1066
1067 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1068 {
1069 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1070 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1071 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1072 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1073 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1074 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1075 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1076 };
1077
1078 static int const x86_64_int_parameter_registers[6] =
1079 {
1080 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
1081 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
1082 };
1083
1084 static int const x86_64_int_return_registers[4] =
1085 {
1086   0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
1087 };
1088
1089 /* The "default" register map used in 64bit mode. */
1090 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1091 {
1092 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1093 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1094 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1095 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1096 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1097 8,9,10,11,12,13,14,15, /* extended integer registers */
1098 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1099 };
1100
1101 /* Define the register numbers to be used in Dwarf debugging information.
1102 The SVR4 reference port C compiler uses the following register numbers
1103 in its Dwarf output code:
1104 0 for %eax (gcc regno = 0)
1105 1 for %ecx (gcc regno = 2)
1106 2 for %edx (gcc regno = 1)
1107 3 for %ebx (gcc regno = 3)
1108 4 for %esp (gcc regno = 7)
1109 5 for %ebp (gcc regno = 6)
1110 6 for %esi (gcc regno = 4)
1111 7 for %edi (gcc regno = 5)
1112 The following three DWARF register numbers are never generated by
1113 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1114 believes these numbers have these meanings.
1115 8 for %eip (no gcc equivalent)
1116 9 for %eflags (gcc regno = 17)
1117 10 for %trapno (no gcc equivalent)
1118 It is not at all clear how we should number the FP stack registers
1119 for the x86 architecture. If the version of SDB on x86/svr4 were
1120 a bit less brain dead with respect to floating-point then we would
1121 have a precedent to follow with respect to DWARF register numbers
1122 for x86 FP registers, but the SDB on x86/svr4 is so completely
1123 broken with respect to FP registers that it is hardly worth thinking
1124 of it as something to strive for compatibility with.
1125 The version of x86/svr4 SDB I have at the moment does (partially)
1126 seem to believe that DWARF register number 11 is associated with
1127 the x86 register %st(0), but that's about all. Higher DWARF
1128 register numbers don't seem to be associated with anything in
1129 particular, and even for DWARF regno 11, SDB only seems to under-
1130 stand that it should say that a variable lives in %st(0) (when
1131 asked via an `=' command) if we said it was in DWARF regno 11,
1132 but SDB still prints garbage when asked for the value of the
1133 variable in question (via a `/' command).
1134 (Also note that the labels SDB prints for various FP stack regs
1135 when doing an `x' command are all wrong.)
1136 Note that these problems generally don't affect the native SVR4
1137 C compiler because it doesn't allow the use of -O with -g and
1138 because when it is *not* optimizing, it allocates a memory
1139 location for each floating-point variable, and the memory
1140 location is what gets described in the DWARF AT_location
1141 attribute for the variable in question.
1142 Regardless of the severe mental illness of the x86/svr4 SDB, we
1143 do something sensible here and we use the following DWARF
1144 register numbers. Note that these are all stack-top-relative
1145 numbers.
1146 11 for %st(0) (gcc regno = 8)
1147 12 for %st(1) (gcc regno = 9)
1148 13 for %st(2) (gcc regno = 10)
1149 14 for %st(3) (gcc regno = 11)
1150 15 for %st(4) (gcc regno = 12)
1151 16 for %st(5) (gcc regno = 13)
1152 17 for %st(6) (gcc regno = 14)
1153 18 for %st(7) (gcc regno = 15)
1154 */
1155 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1156 {
1157 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1158 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1159 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1160 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1161 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1162 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1163 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1164 };
1165
1166 /* Test and compare insns in i386.md store the information needed to
1167 generate branch and scc insns here. */
1168
1169 rtx ix86_compare_op0 = NULL_RTX;
1170 rtx ix86_compare_op1 = NULL_RTX;
1171 rtx ix86_compare_emitted = NULL_RTX;
1172
1173 /* Size of the register save area. */
1174 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
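
/* Worked example (illustrative, assuming REGPARM_MAX == 6, SSE_REGPARM_MAX == 8
   and UNITS_PER_WORD == 8 in 64-bit mode): 6 * 8 + 8 * 16 == 176 bytes, the
   register save area size mandated by the x86-64 psABI varargs convention.  */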
1175
1176 /* Define the structure for the machine field in struct function. */
1177
1178 struct stack_local_entry GTY(())
1179 {
1180 unsigned short mode;
1181 unsigned short n;
1182 rtx rtl;
1183 struct stack_local_entry *next;
1184 };
1185
1186 /* Structure describing stack frame layout.
1187 Stack grows downward:
1188
1189 [arguments]
1190 <- ARG_POINTER
1191 saved pc
1192
1193 saved frame pointer if frame_pointer_needed
1194 <- HARD_FRAME_POINTER
1195 [saved regs]
1196
1197 [padding1] \
1198 )
1199 [va_arg registers] (
1200 > to_allocate <- FRAME_POINTER
1201 [frame] (
1202 )
1203 [padding2] /
1204 */
1205 struct ix86_frame
1206 {
1207 int nregs;
1208 int padding1;
1209 int va_arg_size;
1210 HOST_WIDE_INT frame;
1211 int padding2;
1212 int outgoing_arguments_size;
1213 int red_zone_size;
1214
1215 HOST_WIDE_INT to_allocate;
1216 /* The offsets relative to ARG_POINTER. */
1217 HOST_WIDE_INT frame_pointer_offset;
1218 HOST_WIDE_INT hard_frame_pointer_offset;
1219 HOST_WIDE_INT stack_pointer_offset;
1220
1221 /* When save_regs_using_mov is set, emit prologue using
1222 move instead of push instructions. */
1223 bool save_regs_using_mov;
1224 };
1225
1226 /* Code model option. */
1227 enum cmodel ix86_cmodel;
1228 /* Asm dialect. */
1229 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1230 /* TLS dialects. */
1231 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1232
1233 /* Which unit we are generating floating point math for. */
1234 enum fpmath_unit ix86_fpmath;
1235
1236 /* Which cpu are we scheduling for. */
1237 enum processor_type ix86_tune;
1238 /* Which instruction set architecture to use. */
1239 enum processor_type ix86_arch;
1240
1241 /* true if sse prefetch instruction is not NOOP. */
1242 int x86_prefetch_sse;
1243
1244 /* true if cmpxchg16b is supported. */
1245 int x86_cmpxchg16b;
1246
1247 /* ix86_regparm_string as a number */
1248 static int ix86_regparm;
1249
1250 /* -mstackrealign option */
1251 extern int ix86_force_align_arg_pointer;
1252 static const char ix86_force_align_arg_pointer_string[] = "force_align_arg_pointer";
1253
1254 /* Preferred alignment for stack boundary in bits. */
1255 unsigned int ix86_preferred_stack_boundary;
1256
1257 /* Values 1-5: see jump.c */
1258 int ix86_branch_cost;
1259
1260 /* Variables which are this size or smaller are put in the data/bss
1261 or ldata/lbss sections. */
1262
1263 int ix86_section_threshold = 65536;
1264
1265 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1266 char internal_label_prefix[16];
1267 int internal_label_prefix_len;
1268 \f
1269 static bool ix86_handle_option (size_t, const char *, int);
1270 static void output_pic_addr_const (FILE *, rtx, int);
1271 static void put_condition_code (enum rtx_code, enum machine_mode,
1272 int, int, FILE *);
1273 static const char *get_some_local_dynamic_name (void);
1274 static int get_some_local_dynamic_name_1 (rtx *, void *);
1275 static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
1276 static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
1277 rtx *);
1278 static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
1279 static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
1280 enum machine_mode);
1281 static rtx get_thread_pointer (int);
1282 static rtx legitimize_tls_address (rtx, enum tls_model, int);
1283 static void get_pc_thunk_name (char [32], unsigned int);
1284 static rtx gen_push (rtx);
1285 static int ix86_flags_dependent (rtx, rtx, enum attr_type);
1286 static int ix86_agi_dependent (rtx, rtx, enum attr_type);
1287 static struct machine_function * ix86_init_machine_status (void);
1288 static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
1289 static int ix86_nsaved_regs (void);
1290 static void ix86_emit_save_regs (void);
1291 static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
1292 static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
1293 static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
1294 static HOST_WIDE_INT ix86_GOT_alias_set (void);
1295 static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
1296 static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
1297 static int ix86_issue_rate (void);
1298 static int ix86_adjust_cost (rtx, rtx, rtx, int);
1299 static int ia32_multipass_dfa_lookahead (void);
1300 static void ix86_init_mmx_sse_builtins (void);
1301 static rtx x86_this_parameter (tree);
1302 static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
1303 HOST_WIDE_INT, tree);
1304 static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
1305 static void x86_file_start (void);
1306 static void ix86_reorg (void);
1307 static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
1308 static tree ix86_build_builtin_va_list (void);
1309 static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
1310 tree, int *, int);
1311 static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
1312 static bool ix86_scalar_mode_supported_p (enum machine_mode);
1313 static bool ix86_vector_mode_supported_p (enum machine_mode);
1314
1315 static int ix86_address_cost (rtx);
1316 static bool ix86_cannot_force_const_mem (rtx);
1317 static rtx ix86_delegitimize_address (rtx);
1318
1319 static void i386_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
1320
1321 struct builtin_description;
1322 static rtx ix86_expand_sse_comi (const struct builtin_description *,
1323 tree, rtx);
1324 static rtx ix86_expand_sse_compare (const struct builtin_description *,
1325 tree, rtx);
1326 static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
1327 static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
1328 static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
1329 static rtx ix86_expand_store_builtin (enum insn_code, tree);
1330 static rtx safe_vector_operand (rtx, enum machine_mode);
1331 static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
1332 static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
1333 static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
1334 static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
1335 static int ix86_fp_comparison_cost (enum rtx_code code);
1336 static unsigned int ix86_select_alt_pic_regnum (void);
1337 static int ix86_save_reg (unsigned int, int);
1338 static void ix86_compute_frame_layout (struct ix86_frame *);
1339 static int ix86_comp_type_attributes (tree, tree);
1340 static int ix86_function_regparm (tree, tree);
1341 const struct attribute_spec ix86_attribute_table[];
1342 static bool ix86_function_ok_for_sibcall (tree, tree);
1343 static tree ix86_handle_cconv_attribute (tree *, tree, tree, int, bool *);
1344 static int ix86_value_regno (enum machine_mode, tree, tree);
1345 static bool contains_128bit_aligned_vector_p (tree);
1346 static rtx ix86_struct_value_rtx (tree, int);
1347 static bool ix86_ms_bitfield_layout_p (tree);
1348 static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
1349 static int extended_reg_mentioned_1 (rtx *, void *);
1350 static bool ix86_rtx_costs (rtx, int, int, int *);
1351 static int min_insn_size (rtx);
1352 static tree ix86_md_asm_clobbers (tree outputs, tree inputs, tree clobbers);
1353 static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
1354 static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
1355 tree, bool);
1356 static void ix86_init_builtins (void);
1357 static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
1358 static tree ix86_builtin_vectorized_function (enum built_in_function, tree);
1359 static const char *ix86_mangle_fundamental_type (tree);
1360 static tree ix86_stack_protect_fail (void);
1361 static rtx ix86_internal_arg_pointer (void);
1362 static void ix86_dwarf_handle_frame_unspec (const char *, rtx, int);
1363
1364 /* This function is only used on Solaris. */
1365 static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
1366 ATTRIBUTE_UNUSED;
1367
1368 /* Register class used for passing a given 64bit part of the argument.
1369    These represent classes as documented by the psABI, with the exception
1370    of the SSESF and SSEDF classes, which are basically the SSE class; GCC
1371    just uses SFmode or DFmode moves instead of DImode moves to avoid
1372    reformatting penalties.
1373 
1374    Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
1375    whenever possible (the upper half then contains only padding).  */
1376 enum x86_64_reg_class
1377 {
1378 X86_64_NO_CLASS,
1379 X86_64_INTEGER_CLASS,
1380 X86_64_INTEGERSI_CLASS,
1381 X86_64_SSE_CLASS,
1382 X86_64_SSESF_CLASS,
1383 X86_64_SSEDF_CLASS,
1384 X86_64_SSEUP_CLASS,
1385 X86_64_X87_CLASS,
1386 X86_64_X87UP_CLASS,
1387 X86_64_COMPLEX_X87_CLASS,
1388 X86_64_MEMORY_CLASS
1389 };
1390 static const char * const x86_64_reg_class_name[] = {
1391 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
1392 "sseup", "x87", "x87up", "cplx87", "no"
1393 };
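/* For illustration only (a hypothetical example, not consulted by the
   compiler): under the psABI classification rules a structure such as
   "struct pair { long i; double d; }" occupies two eightbytes; the first
   classifies as X86_64_INTEGER_CLASS and the second as SSE, which GCC
   refines to X86_64_SSEDF_CLASS, so I is passed in an integer register
   and D in an SSE register.  */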
1394
1395 #define MAX_CLASSES 4
1396
1397 /* Table of constants used by fldpi, fldln2, etc.... */
1398 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1399 static bool ext_80387_constants_init = 0;
1400 static void init_ext_80387_constants (void);
1401 static bool ix86_in_large_data_p (tree) ATTRIBUTE_UNUSED;
1402 static void ix86_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
1403 static void x86_64_elf_unique_section (tree decl, int reloc) ATTRIBUTE_UNUSED;
1404 static section *x86_64_elf_select_section (tree decl, int reloc,
1405 unsigned HOST_WIDE_INT align)
1406 ATTRIBUTE_UNUSED;
1407 \f
1408 /* Initialize the GCC target structure. */
1409 #undef TARGET_ATTRIBUTE_TABLE
1410 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
1411 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1412 # undef TARGET_MERGE_DECL_ATTRIBUTES
1413 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
1414 #endif
1415
1416 #undef TARGET_COMP_TYPE_ATTRIBUTES
1417 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
1418
1419 #undef TARGET_INIT_BUILTINS
1420 #define TARGET_INIT_BUILTINS ix86_init_builtins
1421 #undef TARGET_EXPAND_BUILTIN
1422 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
1423 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1424 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION ix86_builtin_vectorized_function
1425
1426 #undef TARGET_ASM_FUNCTION_EPILOGUE
1427 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
1428
1429 #undef TARGET_ENCODE_SECTION_INFO
1430 #ifndef SUBTARGET_ENCODE_SECTION_INFO
1431 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
1432 #else
1433 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
1434 #endif
1435
1436 #undef TARGET_ASM_OPEN_PAREN
1437 #define TARGET_ASM_OPEN_PAREN ""
1438 #undef TARGET_ASM_CLOSE_PAREN
1439 #define TARGET_ASM_CLOSE_PAREN ""
1440
1441 #undef TARGET_ASM_ALIGNED_HI_OP
1442 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
1443 #undef TARGET_ASM_ALIGNED_SI_OP
1444 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
1445 #ifdef ASM_QUAD
1446 #undef TARGET_ASM_ALIGNED_DI_OP
1447 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
1448 #endif
1449
1450 #undef TARGET_ASM_UNALIGNED_HI_OP
1451 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
1452 #undef TARGET_ASM_UNALIGNED_SI_OP
1453 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
1454 #undef TARGET_ASM_UNALIGNED_DI_OP
1455 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
1456
1457 #undef TARGET_SCHED_ADJUST_COST
1458 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
1459 #undef TARGET_SCHED_ISSUE_RATE
1460 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
1461 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1462 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1463 ia32_multipass_dfa_lookahead
1464
1465 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1466 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
1467
1468 #ifdef HAVE_AS_TLS
1469 #undef TARGET_HAVE_TLS
1470 #define TARGET_HAVE_TLS true
1471 #endif
1472 #undef TARGET_CANNOT_FORCE_CONST_MEM
1473 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1474 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1475 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_rtx_true
1476
1477 #undef TARGET_DELEGITIMIZE_ADDRESS
1478 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1479
1480 #undef TARGET_MS_BITFIELD_LAYOUT_P
1481 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1482
1483 #if TARGET_MACHO
1484 #undef TARGET_BINDS_LOCAL_P
1485 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1486 #endif
1487
1488 #undef TARGET_ASM_OUTPUT_MI_THUNK
1489 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1490 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1491 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1492
1493 #undef TARGET_ASM_FILE_START
1494 #define TARGET_ASM_FILE_START x86_file_start
1495
1496 #undef TARGET_DEFAULT_TARGET_FLAGS
1497 #define TARGET_DEFAULT_TARGET_FLAGS \
1498 (TARGET_DEFAULT \
1499 | TARGET_64BIT_DEFAULT \
1500 | TARGET_SUBTARGET_DEFAULT \
1501 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
1502
1503 #undef TARGET_HANDLE_OPTION
1504 #define TARGET_HANDLE_OPTION ix86_handle_option
1505
1506 #undef TARGET_RTX_COSTS
1507 #define TARGET_RTX_COSTS ix86_rtx_costs
1508 #undef TARGET_ADDRESS_COST
1509 #define TARGET_ADDRESS_COST ix86_address_cost
1510
1511 #undef TARGET_FIXED_CONDITION_CODE_REGS
1512 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1513 #undef TARGET_CC_MODES_COMPATIBLE
1514 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1515
1516 #undef TARGET_MACHINE_DEPENDENT_REORG
1517 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1518
1519 #undef TARGET_BUILD_BUILTIN_VA_LIST
1520 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1521
1522 #undef TARGET_MD_ASM_CLOBBERS
1523 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1524
1525 #undef TARGET_PROMOTE_PROTOTYPES
1526 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1527 #undef TARGET_STRUCT_VALUE_RTX
1528 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1529 #undef TARGET_SETUP_INCOMING_VARARGS
1530 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1531 #undef TARGET_MUST_PASS_IN_STACK
1532 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1533 #undef TARGET_PASS_BY_REFERENCE
1534 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1535 #undef TARGET_INTERNAL_ARG_POINTER
1536 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
1537 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
1538 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
1539
1540 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1541 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1542
1543 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1544 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
1545
1546 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1547 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1548
1549 #ifdef HAVE_AS_TLS
1550 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1551 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
1552 #endif
1553
1554 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1555 #undef TARGET_INSERT_ATTRIBUTES
1556 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1557 #endif
1558
1559 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
1560 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ix86_mangle_fundamental_type
1561
1562 #undef TARGET_STACK_PROTECT_FAIL
1563 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
1564
1565 #undef TARGET_FUNCTION_VALUE
1566 #define TARGET_FUNCTION_VALUE ix86_function_value
1567
1568 struct gcc_target targetm = TARGET_INITIALIZER;
1569
1570 \f
1571 /* The svr4 ABI for the i386 says that records and unions are returned
1572 in memory. */
1573 #ifndef DEFAULT_PCC_STRUCT_RETURN
1574 #define DEFAULT_PCC_STRUCT_RETURN 1
1575 #endif
1576
1577 /* Implement TARGET_HANDLE_OPTION. */
1578
1579 static bool
1580 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1581 {
1582 switch (code)
1583 {
1584 case OPT_m3dnow:
1585 if (!value)
1586 {
1587 target_flags &= ~MASK_3DNOW_A;
1588 target_flags_explicit |= MASK_3DNOW_A;
1589 }
1590 return true;
1591
1592 case OPT_mmmx:
1593 if (!value)
1594 {
1595 target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
1596 target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
1597 }
1598 return true;
1599
1600 case OPT_msse:
1601 if (!value)
1602 {
1603 target_flags &= ~(MASK_SSE2 | MASK_SSE3);
1604 target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
1605 }
1606 return true;
1607
1608 case OPT_msse2:
1609 if (!value)
1610 {
1611 target_flags &= ~MASK_SSE3;
1612 target_flags_explicit |= MASK_SSE3;
1613 }
1614 return true;
1615
1616 default:
1617 return true;
1618 }
1619 }
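/* A usage sketch (not part of the option machinery itself): with the
   handling above, "gcc -msse3 -mno-sse2" ends up with MASK_SSE3 cleared
   as well, because negating -msse2 also strips the dependent MASK_SSE3
   bit and records it in target_flags_explicit; likewise -mno-sse strips
   MASK_SSE2 | MASK_SSE3 and -mno-mmx strips the 3DNow! masks.  */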
1620
1621 /* Sometimes certain combinations of command options do not make
1622 sense on a particular target machine. You can define a macro
1623 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1624 defined, is executed once just after all the command options have
1625 been parsed.
1626
1627 Don't use this macro to turn on various extra optimizations for
1628 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1629
1630 void
1631 override_options (void)
1632 {
1633 int i;
1634 int ix86_tune_defaulted = 0;
1635
1636 /* Comes from final.c -- no real reason to change it. */
1637 #define MAX_CODE_ALIGN 16
1638
1639 static struct ptt
1640 {
1641 const struct processor_costs *cost; /* Processor costs */
1642 const int target_enable; /* Target flags to enable. */
1643 const int target_disable; /* Target flags to disable. */
1644 const int align_loop; /* Default alignments. */
1645 const int align_loop_max_skip;
1646 const int align_jump;
1647 const int align_jump_max_skip;
1648 const int align_func;
1649 }
1650 const processor_target_table[PROCESSOR_max] =
1651 {
1652 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1653 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1654 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1655 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1656 {&geode_cost, 0, 0, 0, 0, 0, 0, 0},
1657 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1658 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1659 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1660 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1661 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0},
1662 {&core2_cost, 0, 0, 16, 7, 16, 7, 16},
1663 {&generic32_cost, 0, 0, 16, 7, 16, 7, 16},
1664 {&generic64_cost, 0, 0, 16, 7, 16, 7, 16}
1665 };
1666
1667 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1668 static struct pta
1669 {
1670 const char *const name; /* processor name or nickname. */
1671 const enum processor_type processor;
1672 const enum pta_flags
1673 {
1674 PTA_SSE = 1,
1675 PTA_SSE2 = 2,
1676 PTA_SSE3 = 4,
1677 PTA_MMX = 8,
1678 PTA_PREFETCH_SSE = 16,
1679 PTA_3DNOW = 32,
1680 PTA_3DNOW_A = 64,
1681 PTA_64BIT = 128,
1682 PTA_SSSE3 = 256,
1683 PTA_CX16 = 512
1684 } flags;
1685 }
1686 const processor_alias_table[] =
1687 {
1688 {"i386", PROCESSOR_I386, 0},
1689 {"i486", PROCESSOR_I486, 0},
1690 {"i586", PROCESSOR_PENTIUM, 0},
1691 {"pentium", PROCESSOR_PENTIUM, 0},
1692 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1693 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1694 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1695 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1696 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1697 {"i686", PROCESSOR_PENTIUMPRO, 0},
1698 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1699 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1700 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1701 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1702 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1703 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1704 | PTA_MMX | PTA_PREFETCH_SSE},
1705 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1706 | PTA_MMX | PTA_PREFETCH_SSE},
1707 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1708 | PTA_MMX | PTA_PREFETCH_SSE},
1709 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1710 | PTA_MMX | PTA_PREFETCH_SSE | PTA_CX16},
1711 {"core2", PROCESSOR_CORE2, PTA_SSE | PTA_SSE2 | PTA_SSE3
1712 | PTA_64BIT | PTA_MMX
1713 | PTA_PREFETCH_SSE | PTA_CX16},
1714 {"geode", PROCESSOR_GEODE, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1715 | PTA_3DNOW_A},
1716 {"k6", PROCESSOR_K6, PTA_MMX},
1717 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1718 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1719 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1720 | PTA_3DNOW_A},
1721 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1722 | PTA_3DNOW | PTA_3DNOW_A},
1723 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1724 | PTA_3DNOW_A | PTA_SSE},
1725 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1726 | PTA_3DNOW_A | PTA_SSE},
1727 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1728 | PTA_3DNOW_A | PTA_SSE},
1729 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1730 | PTA_SSE | PTA_SSE2 },
1731 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1732 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1733 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1734 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1735 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1736 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1737 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1738 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1739 {"generic32", PROCESSOR_GENERIC32, 0 /* flags are only used for -march switch. */ },
1740 {"generic64", PROCESSOR_GENERIC64, PTA_64BIT /* flags are only used for -march switch. */ },
1741 };
1742
1743 int const pta_size = ARRAY_SIZE (processor_alias_table);
1744
1745 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1746 SUBTARGET_OVERRIDE_OPTIONS;
1747 #endif
1748
1749 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
1750 SUBSUBTARGET_OVERRIDE_OPTIONS;
1751 #endif
1752
1753 /* -fPIC is the default for x86_64. */
1754 if (TARGET_MACHO && TARGET_64BIT)
1755 flag_pic = 2;
1756
1757 /* Set the default values for switches whose default depends on TARGET_64BIT
1758 in case they weren't overwritten by command line options. */
1759 if (TARGET_64BIT)
1760 {
1761 /* Mach-O doesn't support omitting the frame pointer for now. */
1762 if (flag_omit_frame_pointer == 2)
1763 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
1764 if (flag_asynchronous_unwind_tables == 2)
1765 flag_asynchronous_unwind_tables = 1;
1766 if (flag_pcc_struct_return == 2)
1767 flag_pcc_struct_return = 0;
1768 }
1769 else
1770 {
1771 if (flag_omit_frame_pointer == 2)
1772 flag_omit_frame_pointer = 0;
1773 if (flag_asynchronous_unwind_tables == 2)
1774 flag_asynchronous_unwind_tables = 0;
1775 if (flag_pcc_struct_return == 2)
1776 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1777 }
1778
1779 /* Need to check -mtune=generic first. */
1780 if (ix86_tune_string)
1781 {
1782 if (!strcmp (ix86_tune_string, "generic")
1783 || !strcmp (ix86_tune_string, "i686")
1784 /* As special support for cross compilers we read -mtune=native
1785 as -mtune=generic. With native compilers we won't see
1786 -mtune=native here, as it is rewritten by the driver. */
1787 || !strcmp (ix86_tune_string, "native"))
1788 {
1789 if (TARGET_64BIT)
1790 ix86_tune_string = "generic64";
1791 else
1792 ix86_tune_string = "generic32";
1793 }
1794 else if (!strncmp (ix86_tune_string, "generic", 7))
1795 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1796 }
1797 else
1798 {
1799 if (ix86_arch_string)
1800 ix86_tune_string = ix86_arch_string;
1801 if (!ix86_tune_string)
1802 {
1803 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1804 ix86_tune_defaulted = 1;
1805 }
1806
1807 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
1808 need to use a sensible tune option. */
1809 if (!strcmp (ix86_tune_string, "generic")
1810 || !strcmp (ix86_tune_string, "x86-64")
1811 || !strcmp (ix86_tune_string, "i686"))
1812 {
1813 if (TARGET_64BIT)
1814 ix86_tune_string = "generic64";
1815 else
1816 ix86_tune_string = "generic32";
1817 }
1818 }
1819 if (ix86_stringop_string)
1820 {
1821 if (!strcmp (ix86_stringop_string, "rep_byte"))
1822 stringop_alg = rep_prefix_1_byte;
1823 else if (!strcmp (ix86_stringop_string, "libcall"))
1824 stringop_alg = libcall;
1825 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
1826 stringop_alg = rep_prefix_4_byte;
1827 else if (!strcmp (ix86_stringop_string, "rep_8byte"))
1828 stringop_alg = rep_prefix_8_byte;
1829 else if (!strcmp (ix86_stringop_string, "byte_loop"))
1830 stringop_alg = loop_1_byte;
1831 else if (!strcmp (ix86_stringop_string, "loop"))
1832 stringop_alg = loop;
1833 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
1834 stringop_alg = unrolled_loop;
1835 else
1836 error ("bad value (%s) for -mstringop-strategy= switch", ix86_stringop_string);
1837 }
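/* For example, with the spellings handled above,
   "gcc -mstringop-strategy=rep_8byte" selects rep_prefix_8_byte and
   "-mstringop-strategy=unrolled_loop" selects unrolled_loop as the
   algorithm used when expanding string operations.  */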
1838 if (!strcmp (ix86_tune_string, "x86-64"))
1839 warning (OPT_Wdeprecated, "-mtune=x86-64 is deprecated. Use -mtune=k8 or "
1840 "-mtune=generic instead as appropriate.");
1841
1842 if (!ix86_arch_string)
1843 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1844 if (!strcmp (ix86_arch_string, "generic"))
1845 error ("generic CPU can be used only for -mtune= switch");
1846 if (!strncmp (ix86_arch_string, "generic", 7))
1847 error ("bad value (%s) for -march= switch", ix86_arch_string);
1848
1849 if (ix86_cmodel_string != 0)
1850 {
1851 if (!strcmp (ix86_cmodel_string, "small"))
1852 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1853 else if (!strcmp (ix86_cmodel_string, "medium"))
1854 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
1855 else if (flag_pic)
1856 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1857 else if (!strcmp (ix86_cmodel_string, "32"))
1858 ix86_cmodel = CM_32;
1859 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1860 ix86_cmodel = CM_KERNEL;
1861 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1862 ix86_cmodel = CM_LARGE;
1863 else
1864 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1865 }
1866 else
1867 {
1868 ix86_cmodel = CM_32;
1869 if (TARGET_64BIT)
1870 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1871 }
1872 if (ix86_asm_string != 0)
1873 {
1874 if (! TARGET_MACHO
1875 && !strcmp (ix86_asm_string, "intel"))
1876 ix86_asm_dialect = ASM_INTEL;
1877 else if (!strcmp (ix86_asm_string, "att"))
1878 ix86_asm_dialect = ASM_ATT;
1879 else
1880 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1881 }
1882 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1883 error ("code model %qs not supported in the %s bit mode",
1884 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1885 if (ix86_cmodel == CM_LARGE)
1886 sorry ("code model %<large%> not supported yet");
1887 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1888 sorry ("%i-bit mode not compiled in",
1889 (target_flags & MASK_64BIT) ? 64 : 32);
1890
1891 for (i = 0; i < pta_size; i++)
1892 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1893 {
1894 ix86_arch = processor_alias_table[i].processor;
1895 /* Default cpu tuning to the architecture. */
1896 ix86_tune = ix86_arch;
1897 if (processor_alias_table[i].flags & PTA_MMX
1898 && !(target_flags_explicit & MASK_MMX))
1899 target_flags |= MASK_MMX;
1900 if (processor_alias_table[i].flags & PTA_3DNOW
1901 && !(target_flags_explicit & MASK_3DNOW))
1902 target_flags |= MASK_3DNOW;
1903 if (processor_alias_table[i].flags & PTA_3DNOW_A
1904 && !(target_flags_explicit & MASK_3DNOW_A))
1905 target_flags |= MASK_3DNOW_A;
1906 if (processor_alias_table[i].flags & PTA_SSE
1907 && !(target_flags_explicit & MASK_SSE))
1908 target_flags |= MASK_SSE;
1909 if (processor_alias_table[i].flags & PTA_SSE2
1910 && !(target_flags_explicit & MASK_SSE2))
1911 target_flags |= MASK_SSE2;
1912 if (processor_alias_table[i].flags & PTA_SSE3
1913 && !(target_flags_explicit & MASK_SSE3))
1914 target_flags |= MASK_SSE3;
1915 if (processor_alias_table[i].flags & PTA_SSSE3
1916 && !(target_flags_explicit & MASK_SSSE3))
1917 target_flags |= MASK_SSSE3;
1918 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1919 x86_prefetch_sse = true;
1920 if (processor_alias_table[i].flags & PTA_CX16)
1921 x86_cmpxchg16b = true;
1922 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1923 error ("CPU you selected does not support x86-64 "
1924 "instruction set");
1925 break;
1926 }
1927
1928 if (i == pta_size)
1929 error ("bad value (%s) for -march= switch", ix86_arch_string);
1930
1931 for (i = 0; i < pta_size; i++)
1932 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1933 {
1934 ix86_tune = processor_alias_table[i].processor;
1935 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1936 {
1937 if (ix86_tune_defaulted)
1938 {
1939 ix86_tune_string = "x86-64";
1940 for (i = 0; i < pta_size; i++)
1941 if (! strcmp (ix86_tune_string,
1942 processor_alias_table[i].name))
1943 break;
1944 ix86_tune = processor_alias_table[i].processor;
1945 }
1946 else
1947 error ("CPU you selected does not support x86-64 "
1948 "instruction set");
1949 }
1950 /* Intel CPUs have always interpreted SSE prefetch instructions as
1951 NOPs; so, we can enable SSE prefetch instructions even when
1952 -mtune (rather than -march) points us to a processor that has them.
1953 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1954 higher processors. */
1955 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1956 x86_prefetch_sse = true;
1957 break;
1958 }
1959 if (i == pta_size)
1960 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1961
1962 if (optimize_size)
1963 ix86_cost = &size_cost;
1964 else
1965 ix86_cost = processor_target_table[ix86_tune].cost;
1966 target_flags |= processor_target_table[ix86_tune].target_enable;
1967 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1968
1969 /* Arrange to set up i386_stack_locals for all functions. */
1970 init_machine_status = ix86_init_machine_status;
1971
1972 /* Validate -mregparm= value. */
1973 if (ix86_regparm_string)
1974 {
1975 i = atoi (ix86_regparm_string);
1976 if (i < 0 || i > REGPARM_MAX)
1977 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1978 else
1979 ix86_regparm = i;
1980 }
1981 else
1982 if (TARGET_64BIT)
1983 ix86_regparm = REGPARM_MAX;
1984
1985 /* If the user has provided any of the -malign-* options,
1986 warn and use that value only if -falign-* is not set.
1987 Remove this code in GCC 3.2 or later. */
1988 if (ix86_align_loops_string)
1989 {
1990 warning (0, "-malign-loops is obsolete, use -falign-loops");
1991 if (align_loops == 0)
1992 {
1993 i = atoi (ix86_align_loops_string);
1994 if (i < 0 || i > MAX_CODE_ALIGN)
1995 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1996 else
1997 align_loops = 1 << i;
1998 }
1999 }
2000
2001 if (ix86_align_jumps_string)
2002 {
2003 warning (0, "-malign-jumps is obsolete, use -falign-jumps");
2004 if (align_jumps == 0)
2005 {
2006 i = atoi (ix86_align_jumps_string);
2007 if (i < 0 || i > MAX_CODE_ALIGN)
2008 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2009 else
2010 align_jumps = 1 << i;
2011 }
2012 }
2013
2014 if (ix86_align_funcs_string)
2015 {
2016 warning (0, "-malign-functions is obsolete, use -falign-functions");
2017 if (align_functions == 0)
2018 {
2019 i = atoi (ix86_align_funcs_string);
2020 if (i < 0 || i > MAX_CODE_ALIGN)
2021 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2022 else
2023 align_functions = 1 << i;
2024 }
2025 }
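/* As a worked example of the mapping above, the obsolete option
   "-malign-loops=4" gives align_loops = 1 << 4 = 16, i.e. the same
   effect as "-falign-loops=16".  */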
2026
2027 /* Default align_* from the processor table. */
2028 if (align_loops == 0)
2029 {
2030 align_loops = processor_target_table[ix86_tune].align_loop;
2031 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
2032 }
2033 if (align_jumps == 0)
2034 {
2035 align_jumps = processor_target_table[ix86_tune].align_jump;
2036 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
2037 }
2038 if (align_functions == 0)
2039 {
2040 align_functions = processor_target_table[ix86_tune].align_func;
2041 }
2042
2043 /* Validate -mbranch-cost= value, or provide default. */
2044 ix86_branch_cost = ix86_cost->branch_cost;
2045 if (ix86_branch_cost_string)
2046 {
2047 i = atoi (ix86_branch_cost_string);
2048 if (i < 0 || i > 5)
2049 error ("-mbranch-cost=%d is not between 0 and 5", i);
2050 else
2051 ix86_branch_cost = i;
2052 }
2053 if (ix86_section_threshold_string)
2054 {
2055 i = atoi (ix86_section_threshold_string);
2056 if (i < 0)
2057 error ("-mlarge-data-threshold=%d is negative", i);
2058 else
2059 ix86_section_threshold = i;
2060 }
2061
2062 if (ix86_tls_dialect_string)
2063 {
2064 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
2065 ix86_tls_dialect = TLS_DIALECT_GNU;
2066 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
2067 ix86_tls_dialect = TLS_DIALECT_GNU2;
2068 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
2069 ix86_tls_dialect = TLS_DIALECT_SUN;
2070 else
2071 error ("bad value (%s) for -mtls-dialect= switch",
2072 ix86_tls_dialect_string);
2073 }
2074
2075 /* Keep nonleaf frame pointers. */
2076 if (flag_omit_frame_pointer)
2077 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
2078 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
2079 flag_omit_frame_pointer = 1;
2080
2081 /* If we're doing fast math, we don't care about comparison order
2082 wrt NaNs. This lets us use a shorter comparison sequence. */
2083 if (flag_finite_math_only)
2084 target_flags &= ~MASK_IEEE_FP;
2085
2086 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
2087 since the insns won't need emulation. */
2088 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
2089 target_flags &= ~MASK_NO_FANCY_MATH_387;
2090
2091 /* Likewise, if the target doesn't have a 387, or we've specified
2092 software floating point, don't use 387 inline intrinsics. */
2093 if (!TARGET_80387)
2094 target_flags |= MASK_NO_FANCY_MATH_387;
2095
2096 /* Turn on SSE3 builtins for -mssse3. */
2097 if (TARGET_SSSE3)
2098 target_flags |= MASK_SSE3;
2099
2100 /* Turn on SSE2 builtins for -msse3. */
2101 if (TARGET_SSE3)
2102 target_flags |= MASK_SSE2;
2103
2104 /* Turn on SSE builtins for -msse2. */
2105 if (TARGET_SSE2)
2106 target_flags |= MASK_SSE;
2107
2108 /* Turn on MMX builtins for -msse. */
2109 if (TARGET_SSE)
2110 {
2111 target_flags |= MASK_MMX & ~target_flags_explicit;
2112 x86_prefetch_sse = true;
2113 }
2114
2115 /* Turn on MMX builtins for 3Dnow. */
2116 if (TARGET_3DNOW)
2117 target_flags |= MASK_MMX;
2118
2119 if (TARGET_64BIT)
2120 {
2121 if (TARGET_ALIGN_DOUBLE)
2122 error ("-malign-double makes no sense in the 64bit mode");
2123 if (TARGET_RTD)
2124 error ("-mrtd calling convention not supported in the 64bit mode");
2125
2126 /* Enable by default the SSE and MMX builtins. Do allow the user to
2127 explicitly disable any of these. In particular, disabling SSE and
2128 MMX for kernel code is extremely useful. */
2129 target_flags
2130 |= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
2131 & ~target_flags_explicit);
2132 }
2133 else
2134 {
2135 /* The i386 ABI does not specify a red zone. It still makes sense to use
2136 one when the programmer takes care to keep the stack from being destroyed. */
2137 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
2138 target_flags |= MASK_NO_RED_ZONE;
2139 }
2140
2141 /* Validate -mpreferred-stack-boundary= value, or provide default.
2142 The default of 128 bits is for Pentium III's SSE __m128. We can't
2143 change it based on optimize_size; otherwise, we couldn't mix object
2144 files compiled with -Os and -On. */
2145 ix86_preferred_stack_boundary = 128;
2146 if (ix86_preferred_stack_boundary_string)
2147 {
2148 i = atoi (ix86_preferred_stack_boundary_string);
2149 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
2150 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
2151 TARGET_64BIT ? 4 : 2);
2152 else
2153 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
2154 }
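/* As a worked example: "-mpreferred-stack-boundary=4" yields
   (1 << 4) * BITS_PER_UNIT = 16 * 8 = 128 bits, i.e. 16-byte stack
   alignment, matching the 128-bit default set above.  */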
2155
2156 /* Accept -msseregparm only if at least SSE support is enabled. */
2157 if (TARGET_SSEREGPARM
2158 && ! TARGET_SSE)
2159 error ("-msseregparm used without SSE enabled");
2160
2161 ix86_fpmath = TARGET_FPMATH_DEFAULT;
2162
2163 if (ix86_fpmath_string != 0)
2164 {
2165 if (! strcmp (ix86_fpmath_string, "387"))
2166 ix86_fpmath = FPMATH_387;
2167 else if (! strcmp (ix86_fpmath_string, "sse"))
2168 {
2169 if (!TARGET_SSE)
2170 {
2171 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2172 ix86_fpmath = FPMATH_387;
2173 }
2174 else
2175 ix86_fpmath = FPMATH_SSE;
2176 }
2177 else if (! strcmp (ix86_fpmath_string, "387,sse")
2178 || ! strcmp (ix86_fpmath_string, "sse,387"))
2179 {
2180 if (!TARGET_SSE)
2181 {
2182 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2183 ix86_fpmath = FPMATH_387;
2184 }
2185 else if (!TARGET_80387)
2186 {
2187 warning (0, "387 instruction set disabled, using SSE arithmetics");
2188 ix86_fpmath = FPMATH_SSE;
2189 }
2190 else
2191 ix86_fpmath = FPMATH_SSE | FPMATH_387;
2192 }
2193 else
2194 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
2195 }
2196
2197 /* If the i387 is disabled, then do not return values in it. */
2198 if (!TARGET_80387)
2199 target_flags &= ~MASK_FLOAT_RETURNS;
2200
2201 if ((x86_accumulate_outgoing_args & TUNEMASK)
2202 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2203 && !optimize_size)
2204 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2205
2206 /* ??? Unwind info is not correct around the CFG unless either a frame
2207 pointer is present or M_A_O_A is set. Fixing this requires rewriting
2208 unwind info generation to be aware of the CFG and propagating states
2209 around edges. */
2210 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
2211 || flag_exceptions || flag_non_call_exceptions)
2212 && flag_omit_frame_pointer
2213 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
2214 {
2215 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2216 warning (0, "unwind tables currently require either a frame pointer "
2217 "or -maccumulate-outgoing-args for correctness");
2218 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2219 }
2220
2221 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
2222 {
2223 char *p;
2224 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
2225 p = strchr (internal_label_prefix, 'X');
2226 internal_label_prefix_len = p - internal_label_prefix;
2227 *p = '\0';
2228 }
2229
2230 /* When the scheduling description is not available, disable the scheduler
2231 pass so it won't slow down compilation and make x87 code slower. */
2232 if (!TARGET_SCHEDULE)
2233 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
2234
2235 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
2236 set_param_value ("simultaneous-prefetches",
2237 ix86_cost->simultaneous_prefetches);
2238 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
2239 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
2240 }
2241 \f
2242 /* Switch to the appropriate section for output of DECL.
2243 DECL is either a `VAR_DECL' node or a constant of some sort.
2244 RELOC indicates whether forming the initial value of DECL requires
2245 link-time relocations. */
2246
2247 static section *
2248 x86_64_elf_select_section (tree decl, int reloc,
2249 unsigned HOST_WIDE_INT align)
2250 {
2251 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2252 && ix86_in_large_data_p (decl))
2253 {
2254 const char *sname = NULL;
2255 unsigned int flags = SECTION_WRITE;
2256 switch (categorize_decl_for_section (decl, reloc, flag_pic))
2257 {
2258 case SECCAT_DATA:
2259 sname = ".ldata";
2260 break;
2261 case SECCAT_DATA_REL:
2262 sname = ".ldata.rel";
2263 break;
2264 case SECCAT_DATA_REL_LOCAL:
2265 sname = ".ldata.rel.local";
2266 break;
2267 case SECCAT_DATA_REL_RO:
2268 sname = ".ldata.rel.ro";
2269 break;
2270 case SECCAT_DATA_REL_RO_LOCAL:
2271 sname = ".ldata.rel.ro.local";
2272 break;
2273 case SECCAT_BSS:
2274 sname = ".lbss";
2275 flags |= SECTION_BSS;
2276 break;
2277 case SECCAT_RODATA:
2278 case SECCAT_RODATA_MERGE_STR:
2279 case SECCAT_RODATA_MERGE_STR_INIT:
2280 case SECCAT_RODATA_MERGE_CONST:
2281 sname = ".lrodata";
2282 flags = 0;
2283 break;
2284 case SECCAT_SRODATA:
2285 case SECCAT_SDATA:
2286 case SECCAT_SBSS:
2287 gcc_unreachable ();
2288 case SECCAT_TEXT:
2289 case SECCAT_TDATA:
2290 case SECCAT_TBSS:
2291 /* We don't split these for the medium model. Place them into
2292 default sections and hope for the best. */
2293 break;
2294 }
2295 if (sname)
2296 {
2297 /* We might get called with string constants, but get_named_section
2298 doesn't like them as they are not DECLs. Also, we need to set
2299 flags in that case. */
2300 if (!DECL_P (decl))
2301 return get_section (sname, flags, NULL);
2302 return get_named_section (decl, sname, reloc);
2303 }
2304 }
2305 return default_elf_select_section (decl, reloc, align);
2306 }
2307
2308 /* Build up a unique section name, expressed as a
2309 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
2310 RELOC indicates whether the initial value of EXP requires
2311 link-time relocations. */
2312
2313 static void
2314 x86_64_elf_unique_section (tree decl, int reloc)
2315 {
2316 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2317 && ix86_in_large_data_p (decl))
2318 {
2319 const char *prefix = NULL;
2320 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
2321 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
2322
2323 switch (categorize_decl_for_section (decl, reloc, flag_pic))
2324 {
2325 case SECCAT_DATA:
2326 case SECCAT_DATA_REL:
2327 case SECCAT_DATA_REL_LOCAL:
2328 case SECCAT_DATA_REL_RO:
2329 case SECCAT_DATA_REL_RO_LOCAL:
2330 prefix = one_only ? ".gnu.linkonce.ld." : ".ldata.";
2331 break;
2332 case SECCAT_BSS:
2333 prefix = one_only ? ".gnu.linkonce.lb." : ".lbss.";
2334 break;
2335 case SECCAT_RODATA:
2336 case SECCAT_RODATA_MERGE_STR:
2337 case SECCAT_RODATA_MERGE_STR_INIT:
2338 case SECCAT_RODATA_MERGE_CONST:
2339 prefix = one_only ? ".gnu.linkonce.lr." : ".lrodata.";
2340 break;
2341 case SECCAT_SRODATA:
2342 case SECCAT_SDATA:
2343 case SECCAT_SBSS:
2344 gcc_unreachable ();
2345 case SECCAT_TEXT:
2346 case SECCAT_TDATA:
2347 case SECCAT_TBSS:
2348 /* We don't split these for the medium model. Place them into
2349 default sections and hope for the best. */
2350 break;
2351 }
2352 if (prefix)
2353 {
2354 const char *name;
2355 size_t nlen, plen;
2356 char *string;
2357 plen = strlen (prefix);
2358
2359 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
2360 name = targetm.strip_name_encoding (name);
2361 nlen = strlen (name);
2362
2363 string = alloca (nlen + plen + 1);
2364 memcpy (string, prefix, plen);
2365 memcpy (string + plen, name, nlen + 1);
2366
2367 DECL_SECTION_NAME (decl) = build_string (nlen + plen, string);
2368 return;
2369 }
2370 }
2371 default_unique_section (decl, reloc);
2372 }
2373
2374 #ifdef COMMON_ASM_OP
2375 /* This says how to output assembler code to declare an
2376 uninitialized external linkage data object.
2377
2378 For medium model x86-64 we need to use the .largecomm directive for
2379 large objects. */
2380 void
2381 x86_elf_aligned_common (FILE *file,
2382 const char *name, unsigned HOST_WIDE_INT size,
2383 int align)
2384 {
2385 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2386 && size > (unsigned int)ix86_section_threshold)
2387 fprintf (file, ".largecomm\t");
2388 else
2389 fprintf (file, "%s", COMMON_ASM_OP);
2390 assemble_name (file, name);
2391 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
2392 size, align / BITS_PER_UNIT);
2393 }
2394 #endif
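/* For illustration (hypothetical symbol and sizes): with -mcmodel=medium
   and the default 65536-byte threshold, a 70000-byte common object "buf"
   with 256-bit alignment would be emitted roughly as
   ".largecomm buf,70000,32", while smaller objects fall back to the
   usual COMMON_ASM_OP form.  */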
2395 /* Utility function for targets to use in implementing
2396 ASM_OUTPUT_ALIGNED_BSS. */
2397
2398 void
2399 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
2400 const char *name, unsigned HOST_WIDE_INT size,
2401 int align)
2402 {
2403 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2404 && size > (unsigned int)ix86_section_threshold)
2405 switch_to_section (get_named_section (decl, ".lbss", 0));
2406 else
2407 switch_to_section (bss_section);
2408 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
2409 #ifdef ASM_DECLARE_OBJECT_NAME
2410 last_assemble_variable_decl = decl;
2411 ASM_DECLARE_OBJECT_NAME (file, name, decl);
2412 #else
2413 /* The standard thing is to just output a label for the object. */
2414 ASM_OUTPUT_LABEL (file, name);
2415 #endif /* ASM_DECLARE_OBJECT_NAME */
2416 ASM_OUTPUT_SKIP (file, size ? size : 1);
2417 }
2418 \f
2419 void
2420 optimization_options (int level, int size ATTRIBUTE_UNUSED)
2421 {
2422 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
2423 make the problem with not enough registers even worse. */
2424 #ifdef INSN_SCHEDULING
2425 if (level > 1)
2426 flag_schedule_insns = 0;
2427 #endif
2428
2429 if (TARGET_MACHO)
2430 /* The Darwin libraries never set errno, so we might as well
2431 avoid calling them when that's the only reason we would. */
2432 flag_errno_math = 0;
2433
2434 /* The default values of these switches depend on TARGET_64BIT, which is
2435 not known at this moment. Mark these values with 2 and let the user
2436 override them. In case there is no command line option specifying
2437 them, we will set the defaults in override_options. */
2438 if (optimize >= 1)
2439 flag_omit_frame_pointer = 2;
2440 flag_pcc_struct_return = 2;
2441 flag_asynchronous_unwind_tables = 2;
2442 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
2443 SUBTARGET_OPTIMIZATION_OPTIONS;
2444 #endif
2445 }
2446 \f
2447 /* Table of valid machine attributes. */
2448 const struct attribute_spec ix86_attribute_table[] =
2449 {
2450 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2451 /* Stdcall attribute says callee is responsible for popping arguments
2452 if they are not variable. */
2453 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2454 /* Fastcall attribute says callee is responsible for popping arguments
2455 if they are not variable. */
2456 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2457 /* Cdecl attribute says the callee is a normal C declaration */
2458 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2459 /* Regparm attribute specifies how many integer arguments are to be
2460 passed in registers. */
2461 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
2462 /* Sseregparm attribute says we are using x86_64 calling conventions
2463 for FP arguments. */
2464 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2465 /* force_align_arg_pointer says this function realigns the stack at entry. */
2466 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
2467 false, true, true, ix86_handle_cconv_attribute },
2468 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2469 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2470 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2471 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
2472 #endif
2473 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
2474 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
2475 #ifdef SUBTARGET_ATTRIBUTE_TABLE
2476 SUBTARGET_ATTRIBUTE_TABLE,
2477 #endif
2478 { NULL, 0, 0, false, false, false, NULL }
2479 };
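/* For illustration only -- typical source-level spellings of the calling
   convention attributes listed above (hypothetical declarations):

   int __attribute__((stdcall))    w32_callback (int, int);
   int __attribute__((fastcall))   hot_path (int, int);
   int __attribute__((regparm(3))) leaf_helper (int, int, int);

   "regparm" may be combined with "stdcall" or "cdecl" but not with
   "fastcall", while "sseregparm" combines with any of them;
   ix86_handle_cconv_attribute below enforces this.  */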
2480
2481 /* Decide whether we can make a sibling call to a function. DECL is the
2482 declaration of the function being targeted by the call and EXP is the
2483 CALL_EXPR representing the call. */
2484
2485 static bool
2486 ix86_function_ok_for_sibcall (tree decl, tree exp)
2487 {
2488 tree func;
2489 rtx a, b;
2490
2491 /* If we are generating position-independent code, we cannot sibcall
2492 optimize any indirect call, or a direct call to a global function,
2493 as the PLT requires %ebx be live. */
2494 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
2495 return false;
2496
2497 if (decl)
2498 func = decl;
2499 else
2500 {
2501 func = TREE_TYPE (TREE_OPERAND (exp, 0));
2502 if (POINTER_TYPE_P (func))
2503 func = TREE_TYPE (func);
2504 }
2505
2506 /* Check that the return value locations are the same. For instance,
2507 if we are returning floats on the 80387 register stack, we cannot
2508 make a sibcall from a function that doesn't return a float to a
2509 function that does or, conversely, from a function that does return
2510 a float to a function that doesn't; the necessary stack adjustment
2511 would not be executed. This is also the place we notice
2512 differences in the return value ABI. Note that it is ok for one
2513 of the functions to have void return type as long as the return
2514 value of the other is passed in a register. */
2515 a = ix86_function_value (TREE_TYPE (exp), func, false);
2516 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
2517 cfun->decl, false);
2518 if (STACK_REG_P (a) || STACK_REG_P (b))
2519 {
2520 if (!rtx_equal_p (a, b))
2521 return false;
2522 }
2523 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
2524 ;
2525 else if (!rtx_equal_p (a, b))
2526 return false;
2527
2528 /* If this call is indirect, we'll need to be able to use a call-clobbered
2529 register for the address of the target function. Make sure that all
2530 such registers are not used for passing parameters. */
2531 if (!decl && !TARGET_64BIT)
2532 {
2533 tree type;
2534
2535 /* We're looking at the CALL_EXPR, we need the type of the function. */
2536 type = TREE_OPERAND (exp, 0); /* pointer expression */
2537 type = TREE_TYPE (type); /* pointer type */
2538 type = TREE_TYPE (type); /* function type */
2539
2540 if (ix86_function_regparm (type, NULL) >= 3)
2541 {
2542 /* ??? Need to count the actual number of registers to be used,
2543 not the possible number of registers. Fix later. */
2544 return false;
2545 }
2546 }
2547
2548 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2549 /* Dllimport'd functions are also called indirectly. */
2550 if (decl && DECL_DLLIMPORT_P (decl)
2551 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
2552 return false;
2553 #endif
2554
2555 /* If we force-aligned the stack, then sibcalling would unalign the
2556 stack, which may break the called function. */
2557 if (cfun->machine->force_align_arg_pointer)
2558 return false;
2559
2560 /* Otherwise okay. That also includes certain types of indirect calls. */
2561 return true;
2562 }
2563
2564 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
2565 calling convention attributes;
2566 arguments as in struct attribute_spec.handler. */
2567
2568 static tree
2569 ix86_handle_cconv_attribute (tree *node, tree name,
2570 tree args,
2571 int flags ATTRIBUTE_UNUSED,
2572 bool *no_add_attrs)
2573 {
2574 if (TREE_CODE (*node) != FUNCTION_TYPE
2575 && TREE_CODE (*node) != METHOD_TYPE
2576 && TREE_CODE (*node) != FIELD_DECL
2577 && TREE_CODE (*node) != TYPE_DECL)
2578 {
2579 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2580 IDENTIFIER_POINTER (name));
2581 *no_add_attrs = true;
2582 return NULL_TREE;
2583 }
2584
2585 /* Can combine regparm with all attributes but fastcall. */
2586 if (is_attribute_p ("regparm", name))
2587 {
2588 tree cst;
2589
2590 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2591 {
2592 error ("fastcall and regparm attributes are not compatible");
2593 }
2594
2595 cst = TREE_VALUE (args);
2596 if (TREE_CODE (cst) != INTEGER_CST)
2597 {
2598 warning (OPT_Wattributes,
2599 "%qs attribute requires an integer constant argument",
2600 IDENTIFIER_POINTER (name));
2601 *no_add_attrs = true;
2602 }
2603 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
2604 {
2605 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
2606 IDENTIFIER_POINTER (name), REGPARM_MAX);
2607 *no_add_attrs = true;
2608 }
2609
2610 if (!TARGET_64BIT
2611 && lookup_attribute (ix86_force_align_arg_pointer_string,
2612 TYPE_ATTRIBUTES (*node))
2613 && compare_tree_int (cst, REGPARM_MAX-1))
2614 {
2615 error ("%s functions limited to %d register parameters",
2616 ix86_force_align_arg_pointer_string, REGPARM_MAX-1);
2617 }
2618
2619 return NULL_TREE;
2620 }
2621
2622 if (TARGET_64BIT)
2623 {
2624 warning (OPT_Wattributes, "%qs attribute ignored",
2625 IDENTIFIER_POINTER (name));
2626 *no_add_attrs = true;
2627 return NULL_TREE;
2628 }
2629
2630 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
2631 if (is_attribute_p ("fastcall", name))
2632 {
2633 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2634 {
2635 error ("fastcall and cdecl attributes are not compatible");
2636 }
2637 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2638 {
2639 error ("fastcall and stdcall attributes are not compatible");
2640 }
2641 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
2642 {
2643 error ("fastcall and regparm attributes are not compatible");
2644 }
2645 }
2646
2647 /* Can combine stdcall with fastcall (redundant), regparm and
2648 sseregparm. */
2649 else if (is_attribute_p ("stdcall", name))
2650 {
2651 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2652 {
2653 error ("stdcall and cdecl attributes are not compatible");
2654 }
2655 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2656 {
2657 error ("stdcall and fastcall attributes are not compatible");
2658 }
2659 }
2660
2661 /* Can combine cdecl with regparm and sseregparm. */
2662 else if (is_attribute_p ("cdecl", name))
2663 {
2664 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2665 {
2666 error ("stdcall and cdecl attributes are not compatible");
2667 }
2668 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2669 {
2670 error ("fastcall and cdecl attributes are not compatible");
2671 }
2672 }
2673
2674 /* Can combine sseregparm with all attributes. */
2675
2676 return NULL_TREE;
2677 }
2678
2679 /* Return 0 if the attributes for two types are incompatible, 1 if they
2680 are compatible, and 2 if they are nearly compatible (which causes a
2681 warning to be generated). */
2682
2683 static int
2684 ix86_comp_type_attributes (tree type1, tree type2)
2685 {
2686 /* Check for mismatch of non-default calling convention. */
2687 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
2688
2689 if (TREE_CODE (type1) != FUNCTION_TYPE)
2690 return 1;
2691
2692 /* Check for mismatched fastcall/regparm types. */
2693 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
2694 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
2695 || (ix86_function_regparm (type1, NULL)
2696 != ix86_function_regparm (type2, NULL)))
2697 return 0;
2698
2699 /* Check for mismatched sseregparm types. */
2700 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
2701 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
2702 return 0;
2703
2704 /* Check for mismatched return types (cdecl vs stdcall). */
2705 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
2706 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
2707 return 0;
2708
2709 return 1;
2710 }
2711 \f
2712 /* Return the regparm value for a function with the indicated TYPE and DECL.
2713 DECL may be NULL when calling function indirectly
2714 or considering a libcall. */
2715
2716 static int
2717 ix86_function_regparm (tree type, tree decl)
2718 {
2719 tree attr;
2720 int regparm = ix86_regparm;
2721 bool user_convention = false;
2722
2723 if (!TARGET_64BIT)
2724 {
2725 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
2726 if (attr)
2727 {
2728 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
2729 user_convention = true;
2730 }
2731
2732 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
2733 {
2734 regparm = 2;
2735 user_convention = true;
2736 }
2737
2738 /* Use register calling convention for local functions when possible. */
2739 if (!TARGET_64BIT && !user_convention && decl
2740 && flag_unit_at_a_time && !profile_flag)
2741 {
2742 struct cgraph_local_info *i = cgraph_local_info (decl);
2743 if (i && i->local)
2744 {
2745 int local_regparm, globals = 0, regno;
2746
2747 /* Make sure no regparm register is taken by a global register
2748 variable. */
2749 for (local_regparm = 0; local_regparm < 3; local_regparm++)
2750 if (global_regs[local_regparm])
2751 break;
2752 /* We can't use regparm(3) for nested functions as these use
2753 the static chain pointer in the third argument. */
2754 if (local_regparm == 3
2755 && decl_function_context (decl)
2756 && !DECL_NO_STATIC_CHAIN (decl))
2757 local_regparm = 2;
2758 /* If the function realigns its stack pointer, the
2759 prologue will clobber %ecx. If we've already
2760 generated code for the callee, the callee
2761 DECL_STRUCT_FUNCTION is gone, so we fall back to
2762 scanning the attributes for the self-realigning
2763 property. */
2764 if ((DECL_STRUCT_FUNCTION (decl)
2765 && DECL_STRUCT_FUNCTION (decl)->machine->force_align_arg_pointer)
2766 || (!DECL_STRUCT_FUNCTION (decl)
2767 && lookup_attribute (ix86_force_align_arg_pointer_string,
2768 TYPE_ATTRIBUTES (TREE_TYPE (decl)))))
2769 local_regparm = 2;
2770 /* Each global register variable increases register pressure, so the
2771 more global register variables there are, the less the regparm
2772 optimization is used, unless requested by the user explicitly. */
2773 for (regno = 0; regno < 6; regno++)
2774 if (global_regs[regno])
2775 globals++;
2776 local_regparm
2777 = globals < local_regparm ? local_regparm - globals : 0;
2778
2779 if (local_regparm > regparm)
2780 regparm = local_regparm;
2781 }
2782 }
2783 }
2784 return regparm;
2785 }
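/* A sketch of the effect above (hypothetical translation unit): with
   -funit-at-a-time and no profiling, a file-local function such as
   "static int add3 (int a, int b, int c) { return a + b + c; }" whose
   address does not escape may be promoted to the equivalent of
   regparm(3), unless global register variables or a static chain
   pointer reduce the count.  */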
2786
2787 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
2788 DFmode (2) arguments in SSE registers for a function with the
2789 indicated TYPE and DECL. DECL may be NULL when calling function
2790 indirectly or considering a libcall. Otherwise return 0. */
2791
2792 static int
2793 ix86_function_sseregparm (tree type, tree decl)
2794 {
2795 /* Use SSE registers to pass SFmode and DFmode arguments if requested
2796 by the sseregparm attribute. */
2797 if (TARGET_SSEREGPARM
2798 || (type
2799 && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
2800 {
2801 if (!TARGET_SSE)
2802 {
2803 if (decl)
2804 error ("Calling %qD with attribute sseregparm without "
2805 "SSE/SSE2 enabled", decl);
2806 else
2807 error ("Calling %qT with attribute sseregparm without "
2808 "SSE/SSE2 enabled", type);
2809 return 0;
2810 }
2811
2812 return 2;
2813 }
2814
2815 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
2816 (and DFmode for SSE2) arguments in SSE registers,
2817 even for 32-bit targets. */
2818 if (!TARGET_64BIT && decl
2819 && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
2820 {
2821 struct cgraph_local_info *i = cgraph_local_info (decl);
2822 if (i && i->local)
2823 return TARGET_SSE2 ? 2 : 1;
2824 }
2825
2826 return 0;
2827 }
2828
2829 /* Return true if EAX is live at the start of the function. Used by
2830 ix86_expand_prologue to determine if we need special help before
2831 calling allocate_stack_worker. */
2832
2833 static bool
2834 ix86_eax_live_at_start_p (void)
2835 {
2836 /* Cheat. Don't bother working forward from ix86_function_regparm
2837 to the function type to whether an actual argument is located in
2838 eax. Instead just look at cfg info, which is still close enough
2839 to correct at this point. This gives false positives for broken
2840 functions that might use uninitialized data that happens to be
2841 allocated in eax, but who cares? */
2842 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->il.rtl->global_live_at_end, 0);
2843 }
2844
2845 /* Value is the number of bytes of arguments automatically
2846 popped when returning from a subroutine call.
2847 FUNDECL is the declaration node of the function (as a tree),
2848 FUNTYPE is the data type of the function (as a tree),
2849 or for a library call it is an identifier node for the subroutine name.
2850 SIZE is the number of bytes of arguments passed on the stack.
2851
2852 On the 80386, the RTD insn may be used to pop them if the number
2853 of args is fixed, but if the number is variable then the caller
2854 must pop them all. RTD can't be used for library calls now
2855 because the library is compiled with the Unix compiler.
2856 Use of RTD is a selectable option, since it is incompatible with
2857 standard Unix calling sequences. If the option is not selected,
2858 the caller must always pop the args.
2859
2860 The attribute stdcall is equivalent to RTD on a per module basis. */
2861
2862 int
2863 ix86_return_pops_args (tree fundecl, tree funtype, int size)
2864 {
2865 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
2866
2867 /* Cdecl functions override -mrtd, and never pop the stack. */
2868 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
2869
2870 /* Stdcall and fastcall functions will pop the stack if not
2871 variable args. */
2872 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
2873 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
2874 rtd = 1;
2875
2876 if (rtd
2877 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
2878 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
2879 == void_type_node)))
2880 return size;
2881 }
2882
2883 /* Lose any fake structure return argument if it is passed on the stack. */
2884 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
2885 && !TARGET_64BIT
2886 && !KEEP_AGGREGATE_RETURN_POINTER)
2887 {
2888 int nregs = ix86_function_regparm (funtype, fundecl);
2889
2890 if (!nregs)
2891 return GET_MODE_SIZE (Pmode);
2892 }
2893
2894 return 0;
2895 }
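/* For instance (hypothetical prototype): a function declared
   "void __attribute__((stdcall)) f (int, int, int);" has 12 bytes of
   stack arguments, so this hook returns 12 and the callee pops them
   with "ret $12"; a plain cdecl function returns with a bare "ret" and
   the caller pops the arguments.  */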
2896 \f
2897 /* Argument support functions. */
2898
2899 /* Return true when a register may be used to pass function parameters. */
2900 bool
2901 ix86_function_arg_regno_p (int regno)
2902 {
2903 int i;
2904 if (!TARGET_64BIT)
2905 return (regno < REGPARM_MAX
2906 || (TARGET_MMX && MMX_REGNO_P (regno)
2907 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
2908 || (TARGET_SSE && SSE_REGNO_P (regno)
2909 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
2910
2911 if (TARGET_SSE && SSE_REGNO_P (regno)
2912 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
2913 return true;
2914 /* RAX is used as hidden argument to va_arg functions. */
2915 if (!regno)
2916 return true;
2917 for (i = 0; i < REGPARM_MAX; i++)
2918 if (regno == x86_64_int_parameter_registers[i])
2919 return true;
2920 return false;
2921 }
2922
2923 /* Return true if we do not know how to pass TYPE solely in registers.  */
2924
2925 static bool
2926 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
2927 {
2928 if (must_pass_in_stack_var_size_or_pad (mode, type))
2929 return true;
2930
2931 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
2932 The layout_type routine is crafty and tries to trick us into passing
2933 currently unsupported vector types on the stack by using TImode. */
2934 return (!TARGET_64BIT && mode == TImode
2935 && type && TREE_CODE (type) != VECTOR_TYPE);
2936 }
2937
2938 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2939 for a call to a function whose data type is FNTYPE.
2940 For a library call, FNTYPE is 0. */
2941
2942 void
2943 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
2944 tree fntype, /* tree ptr for function decl */
2945 rtx libname, /* SYMBOL_REF of library name or 0 */
2946 tree fndecl)
2947 {
2948 static CUMULATIVE_ARGS zero_cum;
2949 tree param, next_param;
2950
2951 if (TARGET_DEBUG_ARG)
2952 {
2953 fprintf (stderr, "\ninit_cumulative_args (");
2954 if (fntype)
2955 fprintf (stderr, "fntype code = %s, ret code = %s",
2956 tree_code_name[(int) TREE_CODE (fntype)],
2957 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
2958 else
2959 fprintf (stderr, "no fntype");
2960
2961 if (libname)
2962 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
2963 }
2964
2965 *cum = zero_cum;
2966
2967 /* Set up the number of registers to use for passing arguments. */
2968 cum->nregs = ix86_regparm;
2969 if (TARGET_SSE)
2970 cum->sse_nregs = SSE_REGPARM_MAX;
2971 if (TARGET_MMX)
2972 cum->mmx_nregs = MMX_REGPARM_MAX;
2973 cum->warn_sse = true;
2974 cum->warn_mmx = true;
2975 cum->maybe_vaarg = false;
2976
2977 /* Use ecx and edx registers if function has fastcall attribute,
2978 else look for regparm information. */
2979 if (fntype && !TARGET_64BIT)
2980 {
2981 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
2982 {
2983 cum->nregs = 2;
2984 cum->fastcall = 1;
2985 }
2986 else
2987 cum->nregs = ix86_function_regparm (fntype, fndecl);
2988 }
2989
2990 /* Set up the number of SSE registers used for passing SFmode
2991 and DFmode arguments. Warn for mismatching ABI. */
2992 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl);
2993
2994 /* Determine if this function has variable arguments. This is
2995      indicated by the last argument being 'void_type_node' if there
2996 are no variable arguments. If there are variable arguments, then
2997 we won't pass anything in registers in 32-bit mode. */
2998
2999 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
3000 {
3001 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
3002 param != 0; param = next_param)
3003 {
3004 next_param = TREE_CHAIN (param);
3005 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3006 {
3007 if (!TARGET_64BIT)
3008 {
3009 cum->nregs = 0;
3010 cum->sse_nregs = 0;
3011 cum->mmx_nregs = 0;
3012 cum->warn_sse = 0;
3013 cum->warn_mmx = 0;
3014 cum->fastcall = 0;
3015 cum->float_in_sse = 0;
3016 }
3017 cum->maybe_vaarg = true;
3018 }
3019 }
3020 }
3021 if ((!fntype && !libname)
3022 || (fntype && !TYPE_ARG_TYPES (fntype)))
3023 cum->maybe_vaarg = true;
3024
3025 if (TARGET_DEBUG_ARG)
3026 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
3027
3028 return;
3029 }
3030
3031 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
3032 But in the case of vector types, it is some vector mode.
3033
3034 When we have only some of our vector isa extensions enabled, then there
3035 are some modes for which vector_mode_supported_p is false. For these
3036 modes, the generic vector support in gcc will choose some non-vector mode
3037 in order to implement the type. By computing the natural mode, we'll
3038 select the proper ABI location for the operand and not depend on whatever
3039 the middle-end decides to do with these vector types. */
3040
3041 static enum machine_mode
3042 type_natural_mode (tree type)
3043 {
3044 enum machine_mode mode = TYPE_MODE (type);
3045
3046 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
3047 {
3048 HOST_WIDE_INT size = int_size_in_bytes (type);
3049 if ((size == 8 || size == 16)
3050 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
3051 && TYPE_VECTOR_SUBPARTS (type) > 1)
3052 {
3053 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
3054
3055 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
3056 mode = MIN_MODE_VECTOR_FLOAT;
3057 else
3058 mode = MIN_MODE_VECTOR_INT;
3059
3060 /* Get the mode which has this inner mode and number of units. */
3061 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3062 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
3063 && GET_MODE_INNER (mode) == innermode)
3064 return mode;
3065
3066 gcc_unreachable ();
3067 }
3068 }
3069
3070 return mode;
3071 }
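
/* Illustrative sketch, an assumed example rather than original text: a
   generic vector type such as

     typedef int v4si __attribute__ ((vector_size (16)));

   may be laid out with a non-vector mode when SSE is disabled, but
   type_natural_mode still maps it to V4SImode, so the ABI location is
   chosen consistently regardless of which ISA extensions are enabled.  */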
3072
3073 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
3074 this may not agree with the mode that the type system has chosen for the
3075 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
3076 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
3077
3078 static rtx
3079 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
3080 unsigned int regno)
3081 {
3082 rtx tmp;
3083
3084 if (orig_mode != BLKmode)
3085 tmp = gen_rtx_REG (orig_mode, regno);
3086 else
3087 {
3088 tmp = gen_rtx_REG (mode, regno);
3089 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
3090 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
3091 }
3092
3093 return tmp;
3094 }
3095
3096 /* x86-64 register passing implementation.  See the x86-64 ABI for details.
3097    The goal of this code is to classify each 8-byte chunk of an incoming
3098    argument by register class and assign registers accordingly.  */
3099
3100 /* Return the union class of CLASS1 and CLASS2.
3101 See the x86-64 PS ABI for details. */
3102
3103 static enum x86_64_reg_class
3104 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
3105 {
3106 /* Rule #1: If both classes are equal, this is the resulting class. */
3107 if (class1 == class2)
3108 return class1;
3109
3110 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
3111 the other class. */
3112 if (class1 == X86_64_NO_CLASS)
3113 return class2;
3114 if (class2 == X86_64_NO_CLASS)
3115 return class1;
3116
3117 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
3118 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
3119 return X86_64_MEMORY_CLASS;
3120
3121 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
3122 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
3123 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
3124 return X86_64_INTEGERSI_CLASS;
3125 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
3126 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
3127 return X86_64_INTEGER_CLASS;
3128
3129 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
3130 MEMORY is used. */
3131 if (class1 == X86_64_X87_CLASS
3132 || class1 == X86_64_X87UP_CLASS
3133 || class1 == X86_64_COMPLEX_X87_CLASS
3134 || class2 == X86_64_X87_CLASS
3135 || class2 == X86_64_X87UP_CLASS
3136 || class2 == X86_64_COMPLEX_X87_CLASS)
3137 return X86_64_MEMORY_CLASS;
3138
3139 /* Rule #6: Otherwise class SSE is used. */
3140 return X86_64_SSE_CLASS;
3141 }
3142
3143 /* Classify the argument of type TYPE and mode MODE.
3144 CLASSES will be filled by the register class used to pass each word
3145 of the operand. The number of words is returned. In case the parameter
3146 should be passed in memory, 0 is returned. As a special case for zero
3147 sized containers, classes[0] will be NO_CLASS and 1 is returned.
3148
3149    BIT_OFFSET is used internally for handling records and specifies the
3150    offset in bits modulo 256 to avoid overflow cases.
3151
3152 See the x86-64 PS ABI for details.
3153 */
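
/* Illustrative sketch, an assumed example rather than original text: for
   the 16-byte aggregate

     struct s { double d; long l; };

   the first eightbyte classifies as X86_64_SSEDF_CLASS and the second as
   X86_64_INTEGER_CLASS, so classify_argument returns 2 and the value
   travels in one SSE register and one integer register rather than in
   memory.  */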
3154
3155 static int
3156 classify_argument (enum machine_mode mode, tree type,
3157 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
3158 {
3159 HOST_WIDE_INT bytes =
3160 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3161 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3162
3163 /* Variable sized entities are always passed/returned in memory. */
3164 if (bytes < 0)
3165 return 0;
3166
3167 if (mode != VOIDmode
3168 && targetm.calls.must_pass_in_stack (mode, type))
3169 return 0;
3170
3171 if (type && AGGREGATE_TYPE_P (type))
3172 {
3173 int i;
3174 tree field;
3175 enum x86_64_reg_class subclasses[MAX_CLASSES];
3176
3177 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
3178 if (bytes > 16)
3179 return 0;
3180
3181 for (i = 0; i < words; i++)
3182 classes[i] = X86_64_NO_CLASS;
3183
3184 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
3185          signal the memory class, so handle it as a special case.  */
3186 if (!words)
3187 {
3188 classes[0] = X86_64_NO_CLASS;
3189 return 1;
3190 }
3191
3192 /* Classify each field of record and merge classes. */
3193 switch (TREE_CODE (type))
3194 {
3195 case RECORD_TYPE:
3196 /* And now merge the fields of structure. */
3197 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3198 {
3199 if (TREE_CODE (field) == FIELD_DECL)
3200 {
3201 int num;
3202
3203 if (TREE_TYPE (field) == error_mark_node)
3204 continue;
3205
3206 /* Bitfields are always classified as integer. Handle them
3207 early, since later code would consider them to be
3208 misaligned integers. */
3209 if (DECL_BIT_FIELD (field))
3210 {
3211 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3212 i < ((int_bit_position (field) + (bit_offset % 64))
3213 + tree_low_cst (DECL_SIZE (field), 0)
3214 + 63) / 8 / 8; i++)
3215 classes[i] =
3216 merge_classes (X86_64_INTEGER_CLASS,
3217 classes[i]);
3218 }
3219 else
3220 {
3221 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3222 TREE_TYPE (field), subclasses,
3223 (int_bit_position (field)
3224 + bit_offset) % 256);
3225 if (!num)
3226 return 0;
3227 for (i = 0; i < num; i++)
3228 {
3229 int pos =
3230 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3231 classes[i + pos] =
3232 merge_classes (subclasses[i], classes[i + pos]);
3233 }
3234 }
3235 }
3236 }
3237 break;
3238
3239 case ARRAY_TYPE:
3240 /* Arrays are handled as small records. */
3241 {
3242 int num;
3243 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
3244 TREE_TYPE (type), subclasses, bit_offset);
3245 if (!num)
3246 return 0;
3247
3248 /* The partial classes are now full classes. */
3249 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
3250 subclasses[0] = X86_64_SSE_CLASS;
3251 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
3252 subclasses[0] = X86_64_INTEGER_CLASS;
3253
3254 for (i = 0; i < words; i++)
3255 classes[i] = subclasses[i % num];
3256
3257 break;
3258 }
3259 case UNION_TYPE:
3260 case QUAL_UNION_TYPE:
3261 /* Unions are similar to RECORD_TYPE but offset is always 0.
3262 */
3263 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3264 {
3265 if (TREE_CODE (field) == FIELD_DECL)
3266 {
3267 int num;
3268
3269 if (TREE_TYPE (field) == error_mark_node)
3270 continue;
3271
3272 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3273 TREE_TYPE (field), subclasses,
3274 bit_offset);
3275 if (!num)
3276 return 0;
3277 for (i = 0; i < num; i++)
3278 classes[i] = merge_classes (subclasses[i], classes[i]);
3279 }
3280 }
3281 break;
3282
3283 default:
3284 gcc_unreachable ();
3285 }
3286
3287 /* Final merger cleanup. */
3288 for (i = 0; i < words; i++)
3289 {
3290 /* If one class is MEMORY, everything should be passed in
3291 memory. */
3292 if (classes[i] == X86_64_MEMORY_CLASS)
3293 return 0;
3294
3295 /* The X86_64_SSEUP_CLASS should be always preceded by
3296 X86_64_SSE_CLASS. */
3297 if (classes[i] == X86_64_SSEUP_CLASS
3298 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
3299 classes[i] = X86_64_SSE_CLASS;
3300
3301 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
3302 if (classes[i] == X86_64_X87UP_CLASS
3303 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
3304 classes[i] = X86_64_SSE_CLASS;
3305 }
3306 return words;
3307 }
3308
3309   /* Compute the alignment needed.  We align all types to natural boundaries,
3310      with the exception of XFmode, which is aligned to 64 bits.  */
3311 if (mode != VOIDmode && mode != BLKmode)
3312 {
3313 int mode_alignment = GET_MODE_BITSIZE (mode);
3314
3315 if (mode == XFmode)
3316 mode_alignment = 128;
3317 else if (mode == XCmode)
3318 mode_alignment = 256;
3319 if (COMPLEX_MODE_P (mode))
3320 mode_alignment /= 2;
3321 /* Misaligned fields are always returned in memory. */
3322 if (bit_offset % mode_alignment)
3323 return 0;
3324 }
3325
3326 /* for V1xx modes, just use the base mode */
3327 if (VECTOR_MODE_P (mode)
3328 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
3329 mode = GET_MODE_INNER (mode);
3330
3331 /* Classification of atomic types. */
3332 switch (mode)
3333 {
3334 case SDmode:
3335 case DDmode:
3336 classes[0] = X86_64_SSE_CLASS;
3337 return 1;
3338 case TDmode:
3339 classes[0] = X86_64_SSE_CLASS;
3340 classes[1] = X86_64_SSEUP_CLASS;
3341 return 2;
3342 case DImode:
3343 case SImode:
3344 case HImode:
3345 case QImode:
3346 case CSImode:
3347 case CHImode:
3348 case CQImode:
3349 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3350 classes[0] = X86_64_INTEGERSI_CLASS;
3351 else
3352 classes[0] = X86_64_INTEGER_CLASS;
3353 return 1;
3354 case CDImode:
3355 case TImode:
3356 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
3357 return 2;
3358 case CTImode:
3359 return 0;
3360 case SFmode:
3361 if (!(bit_offset % 64))
3362 classes[0] = X86_64_SSESF_CLASS;
3363 else
3364 classes[0] = X86_64_SSE_CLASS;
3365 return 1;
3366 case DFmode:
3367 classes[0] = X86_64_SSEDF_CLASS;
3368 return 1;
3369 case XFmode:
3370 classes[0] = X86_64_X87_CLASS;
3371 classes[1] = X86_64_X87UP_CLASS;
3372 return 2;
3373 case TFmode:
3374 classes[0] = X86_64_SSE_CLASS;
3375 classes[1] = X86_64_SSEUP_CLASS;
3376 return 2;
3377 case SCmode:
3378 classes[0] = X86_64_SSE_CLASS;
3379 return 1;
3380 case DCmode:
3381 classes[0] = X86_64_SSEDF_CLASS;
3382 classes[1] = X86_64_SSEDF_CLASS;
3383 return 2;
3384 case XCmode:
3385 classes[0] = X86_64_COMPLEX_X87_CLASS;
3386 return 1;
3387 case TCmode:
3388       /* This mode is larger than 16 bytes.  */
3389 return 0;
3390 case V4SFmode:
3391 case V4SImode:
3392 case V16QImode:
3393 case V8HImode:
3394 case V2DFmode:
3395 case V2DImode:
3396 classes[0] = X86_64_SSE_CLASS;
3397 classes[1] = X86_64_SSEUP_CLASS;
3398 return 2;
3399 case V2SFmode:
3400 case V2SImode:
3401 case V4HImode:
3402 case V8QImode:
3403 classes[0] = X86_64_SSE_CLASS;
3404 return 1;
3405 case BLKmode:
3406 case VOIDmode:
3407 return 0;
3408 default:
3409 gcc_assert (VECTOR_MODE_P (mode));
3410
3411 if (bytes > 16)
3412 return 0;
3413
3414 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
3415
3416 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3417 classes[0] = X86_64_INTEGERSI_CLASS;
3418 else
3419 classes[0] = X86_64_INTEGER_CLASS;
3420 classes[1] = X86_64_INTEGER_CLASS;
3421 return 1 + (bytes > 8);
3422 }
3423 }
3424
3425 /* Examine the argument and set the number of registers required in each
3426    class.  Return 0 iff the parameter should be passed in memory.  */
3427 static int
3428 examine_argument (enum machine_mode mode, tree type, int in_return,
3429 int *int_nregs, int *sse_nregs)
3430 {
3431 enum x86_64_reg_class class[MAX_CLASSES];
3432 int n = classify_argument (mode, type, class, 0);
3433
3434 *int_nregs = 0;
3435 *sse_nregs = 0;
3436 if (!n)
3437 return 0;
3438 for (n--; n >= 0; n--)
3439 switch (class[n])
3440 {
3441 case X86_64_INTEGER_CLASS:
3442 case X86_64_INTEGERSI_CLASS:
3443 (*int_nregs)++;
3444 break;
3445 case X86_64_SSE_CLASS:
3446 case X86_64_SSESF_CLASS:
3447 case X86_64_SSEDF_CLASS:
3448 (*sse_nregs)++;
3449 break;
3450 case X86_64_NO_CLASS:
3451 case X86_64_SSEUP_CLASS:
3452 break;
3453 case X86_64_X87_CLASS:
3454 case X86_64_X87UP_CLASS:
3455 if (!in_return)
3456 return 0;
3457 break;
3458 case X86_64_COMPLEX_X87_CLASS:
3459 return in_return ? 2 : 0;
3460 case X86_64_MEMORY_CLASS:
3461 gcc_unreachable ();
3462 }
3463 return 1;
3464 }
3465
3466 /* Construct container for the argument used by GCC interface. See
3467 FUNCTION_ARG for the detailed description. */
3468
3469 static rtx
3470 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
3471 tree type, int in_return, int nintregs, int nsseregs,
3472 const int *intreg, int sse_regno)
3473 {
3474 /* The following variables hold the static issued_error state. */
3475 static bool issued_sse_arg_error;
3476 static bool issued_sse_ret_error;
3477 static bool issued_x87_ret_error;
3478
3479 enum machine_mode tmpmode;
3480 int bytes =
3481 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3482 enum x86_64_reg_class class[MAX_CLASSES];
3483 int n;
3484 int i;
3485 int nexps = 0;
3486 int needed_sseregs, needed_intregs;
3487 rtx exp[MAX_CLASSES];
3488 rtx ret;
3489
3490 n = classify_argument (mode, type, class, 0);
3491 if (TARGET_DEBUG_ARG)
3492 {
3493 if (!n)
3494 fprintf (stderr, "Memory class\n");
3495 else
3496 {
3497 fprintf (stderr, "Classes:");
3498 for (i = 0; i < n; i++)
3499 {
3500 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
3501 }
3502 fprintf (stderr, "\n");
3503 }
3504 }
3505 if (!n)
3506 return NULL;
3507 if (!examine_argument (mode, type, in_return, &needed_intregs,
3508 &needed_sseregs))
3509 return NULL;
3510 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
3511 return NULL;
3512
3513 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
3514 some less clueful developer tries to use floating-point anyway. */
3515 if (needed_sseregs && !TARGET_SSE)
3516 {
3517 if (in_return)
3518 {
3519 if (!issued_sse_ret_error)
3520 {
3521 error ("SSE register return with SSE disabled");
3522 issued_sse_ret_error = true;
3523 }
3524 }
3525 else if (!issued_sse_arg_error)
3526 {
3527 error ("SSE register argument with SSE disabled");
3528 issued_sse_arg_error = true;
3529 }
3530 return NULL;
3531 }
3532
3533 /* Likewise, error if the ABI requires us to return values in the
3534 x87 registers and the user specified -mno-80387. */
3535 if (!TARGET_80387 && in_return)
3536 for (i = 0; i < n; i++)
3537 if (class[i] == X86_64_X87_CLASS
3538 || class[i] == X86_64_X87UP_CLASS
3539 || class[i] == X86_64_COMPLEX_X87_CLASS)
3540 {
3541 if (!issued_x87_ret_error)
3542 {
3543 error ("x87 register return with x87 disabled");
3544 issued_x87_ret_error = true;
3545 }
3546 return NULL;
3547 }
3548
3549 /* First construct simple cases. Avoid SCmode, since we want to use
3550      a single register to pass this type.  */
3551 if (n == 1 && mode != SCmode)
3552 switch (class[0])
3553 {
3554 case X86_64_INTEGER_CLASS:
3555 case X86_64_INTEGERSI_CLASS:
3556 return gen_rtx_REG (mode, intreg[0]);
3557 case X86_64_SSE_CLASS:
3558 case X86_64_SSESF_CLASS:
3559 case X86_64_SSEDF_CLASS:
3560 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
3561 case X86_64_X87_CLASS:
3562 case X86_64_COMPLEX_X87_CLASS:
3563 return gen_rtx_REG (mode, FIRST_STACK_REG);
3564 case X86_64_NO_CLASS:
3565 /* Zero sized array, struct or class. */
3566 return NULL;
3567 default:
3568 gcc_unreachable ();
3569 }
3570 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
3571 && mode != BLKmode)
3572 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
3573 if (n == 2
3574 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
3575 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
3576 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
3577 && class[1] == X86_64_INTEGER_CLASS
3578 && (mode == CDImode || mode == TImode || mode == TFmode)
3579 && intreg[0] + 1 == intreg[1])
3580 return gen_rtx_REG (mode, intreg[0]);
3581
3582 /* Otherwise figure out the entries of the PARALLEL. */
3583 for (i = 0; i < n; i++)
3584 {
3585 switch (class[i])
3586 {
3587 case X86_64_NO_CLASS:
3588 break;
3589 case X86_64_INTEGER_CLASS:
3590 case X86_64_INTEGERSI_CLASS:
3591 /* Merge TImodes on aligned occasions here too. */
3592 if (i * 8 + 8 > bytes)
3593 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
3594 else if (class[i] == X86_64_INTEGERSI_CLASS)
3595 tmpmode = SImode;
3596 else
3597 tmpmode = DImode;
3598 	  /* We've requested a size with no integer mode (e.g. 24 bits).  Use DImode.  */
3599 if (tmpmode == BLKmode)
3600 tmpmode = DImode;
3601 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3602 gen_rtx_REG (tmpmode, *intreg),
3603 GEN_INT (i*8));
3604 intreg++;
3605 break;
3606 case X86_64_SSESF_CLASS:
3607 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3608 gen_rtx_REG (SFmode,
3609 SSE_REGNO (sse_regno)),
3610 GEN_INT (i*8));
3611 sse_regno++;
3612 break;
3613 case X86_64_SSEDF_CLASS:
3614 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3615 gen_rtx_REG (DFmode,
3616 SSE_REGNO (sse_regno)),
3617 GEN_INT (i*8));
3618 sse_regno++;
3619 break;
3620 case X86_64_SSE_CLASS:
3621 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
3622 tmpmode = TImode;
3623 else
3624 tmpmode = DImode;
3625 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3626 gen_rtx_REG (tmpmode,
3627 SSE_REGNO (sse_regno)),
3628 GEN_INT (i*8));
3629 if (tmpmode == TImode)
3630 i++;
3631 sse_regno++;
3632 break;
3633 default:
3634 gcc_unreachable ();
3635 }
3636 }
3637
3638 /* Empty aligned struct, union or class. */
3639 if (nexps == 0)
3640 return NULL;
3641
3642 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
3643 for (i = 0; i < nexps; i++)
3644 XVECEXP (ret, 0, i) = exp [i];
3645 return ret;
3646 }
3647
3648 /* Update the data in CUM to advance over an argument
3649 of mode MODE and data type TYPE.
3650 (TYPE is null for libcalls where that information may not be available.) */
3651
3652 void
3653 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3654 tree type, int named)
3655 {
3656 int bytes =
3657 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3658 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3659
3660 if (type)
3661 mode = type_natural_mode (type);
3662
3663 if (TARGET_DEBUG_ARG)
3664 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
3665 "mode=%s, named=%d)\n\n",
3666 words, cum->words, cum->nregs, cum->sse_nregs,
3667 GET_MODE_NAME (mode), named);
3668
3669 if (TARGET_64BIT)
3670 {
3671 int int_nregs, sse_nregs;
3672 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
3673 cum->words += words;
3674 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
3675 {
3676 cum->nregs -= int_nregs;
3677 cum->sse_nregs -= sse_nregs;
3678 cum->regno += int_nregs;
3679 cum->sse_regno += sse_nregs;
3680 }
3681 else
3682 cum->words += words;
3683 }
3684 else
3685 {
3686 switch (mode)
3687 {
3688 default:
3689 break;
3690
3691 case BLKmode:
3692 if (bytes < 0)
3693 break;
3694 /* FALLTHRU */
3695
3696 case DImode:
3697 case SImode:
3698 case HImode:
3699 case QImode:
3700 cum->words += words;
3701 cum->nregs -= words;
3702 cum->regno += words;
3703
3704 if (cum->nregs <= 0)
3705 {
3706 cum->nregs = 0;
3707 cum->regno = 0;
3708 }
3709 break;
3710
3711 case DFmode:
3712 if (cum->float_in_sse < 2)
3713 break;
3714 case SFmode:
3715 if (cum->float_in_sse < 1)
3716 break;
3717 /* FALLTHRU */
3718
3719 case TImode:
3720 case V16QImode:
3721 case V8HImode:
3722 case V4SImode:
3723 case V2DImode:
3724 case V4SFmode:
3725 case V2DFmode:
3726 if (!type || !AGGREGATE_TYPE_P (type))
3727 {
3728 cum->sse_words += words;
3729 cum->sse_nregs -= 1;
3730 cum->sse_regno += 1;
3731 if (cum->sse_nregs <= 0)
3732 {
3733 cum->sse_nregs = 0;
3734 cum->sse_regno = 0;
3735 }
3736 }
3737 break;
3738
3739 case V8QImode:
3740 case V4HImode:
3741 case V2SImode:
3742 case V2SFmode:
3743 if (!type || !AGGREGATE_TYPE_P (type))
3744 {
3745 cum->mmx_words += words;
3746 cum->mmx_nregs -= 1;
3747 cum->mmx_regno += 1;
3748 if (cum->mmx_nregs <= 0)
3749 {
3750 cum->mmx_nregs = 0;
3751 cum->mmx_regno = 0;
3752 }
3753 }
3754 break;
3755 }
3756 }
3757 }
3758
3759 /* Define where to put the arguments to a function.
3760 Value is zero to push the argument on the stack,
3761 or a hard register in which to store the argument.
3762
3763 MODE is the argument's machine mode.
3764 TYPE is the data type of the argument (as a tree).
3765 This is null for libcalls where that information may
3766 not be available.
3767 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3768 the preceding args and about the function being called.
3769 NAMED is nonzero if this argument is a named parameter
3770 (otherwise it is an extra parameter matching an ellipsis). */
3771
3772 rtx
3773 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
3774 tree type, int named)
3775 {
3776 enum machine_mode mode = orig_mode;
3777 rtx ret = NULL_RTX;
3778 int bytes =
3779 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3780 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3781 static bool warnedsse, warnedmmx;
3782
3783 /* To simplify the code below, represent vector types with a vector mode
3784 even if MMX/SSE are not active. */
3785 if (type && TREE_CODE (type) == VECTOR_TYPE)
3786 mode = type_natural_mode (type);
3787
3788   /* Handle a hidden AL argument containing the number of registers for varargs
3789      x86-64 functions.  For the i386 ABI, just return constm1_rtx to avoid
3790 any AL settings. */
3791 if (mode == VOIDmode)
3792 {
3793 if (TARGET_64BIT)
3794 return GEN_INT (cum->maybe_vaarg
3795 ? (cum->sse_nregs < 0
3796 ? SSE_REGPARM_MAX
3797 : cum->sse_regno)
3798 : -1);
3799 else
3800 return constm1_rtx;
3801 }
3802 if (TARGET_64BIT)
3803 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
3804 cum->sse_nregs,
3805 &x86_64_int_parameter_registers [cum->regno],
3806 cum->sse_regno);
3807 else
3808 switch (mode)
3809 {
3810 /* For now, pass fp/complex values on the stack. */
3811 default:
3812 break;
3813
3814 case BLKmode:
3815 if (bytes < 0)
3816 break;
3817 /* FALLTHRU */
3818 case DImode:
3819 case SImode:
3820 case HImode:
3821 case QImode:
3822 if (words <= cum->nregs)
3823 {
3824 int regno = cum->regno;
3825
3826 /* Fastcall allocates the first two DWORD (SImode) or
3827 smaller arguments to ECX and EDX. */
3828 if (cum->fastcall)
3829 {
3830 if (mode == BLKmode || mode == DImode)
3831 break;
3832
3833 /* ECX not EAX is the first allocated register. */
3834 if (regno == 0)
3835 regno = 2;
3836 }
3837 ret = gen_rtx_REG (mode, regno);
3838 }
3839 break;
3840 case DFmode:
3841 if (cum->float_in_sse < 2)
3842 break;
3843 case SFmode:
3844 if (cum->float_in_sse < 1)
3845 break;
3846 /* FALLTHRU */
3847 case TImode:
3848 case V16QImode:
3849 case V8HImode:
3850 case V4SImode:
3851 case V2DImode:
3852 case V4SFmode:
3853 case V2DFmode:
3854 if (!type || !AGGREGATE_TYPE_P (type))
3855 {
3856 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
3857 {
3858 warnedsse = true;
3859 warning (0, "SSE vector argument without SSE enabled "
3860 "changes the ABI");
3861 }
3862 if (cum->sse_nregs)
3863 ret = gen_reg_or_parallel (mode, orig_mode,
3864 cum->sse_regno + FIRST_SSE_REG);
3865 }
3866 break;
3867 case V8QImode:
3868 case V4HImode:
3869 case V2SImode:
3870 case V2SFmode:
3871 if (!type || !AGGREGATE_TYPE_P (type))
3872 {
3873 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
3874 {
3875 warnedmmx = true;
3876 warning (0, "MMX vector argument without MMX enabled "
3877 "changes the ABI");
3878 }
3879 if (cum->mmx_nregs)
3880 ret = gen_reg_or_parallel (mode, orig_mode,
3881 cum->mmx_regno + FIRST_MMX_REG);
3882 }
3883 break;
3884 }
3885
3886 if (TARGET_DEBUG_ARG)
3887 {
3888 fprintf (stderr,
3889 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
3890 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
3891
3892 if (ret)
3893 print_simple_rtl (stderr, ret);
3894 else
3895 fprintf (stderr, ", stack");
3896
3897 fprintf (stderr, " )\n");
3898 }
3899
3900 return ret;
3901 }
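
/* Illustrative sketch, an assumed example rather than original text: for

     void __attribute__ ((fastcall)) g (int a, int b, int c);

   the code above hands the first two SImode arguments to %ecx and %edx
   (cum->nregs was set to 2 by init_cumulative_args), while c no longer
   fits in a register and is pushed on the stack.  */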
3902
3903 /* A C expression that indicates when an argument must be passed by
3904 reference. If nonzero for an argument, a copy of that argument is
3905 made in memory and a pointer to the argument is passed instead of
3906 the argument itself. The pointer is passed in whatever way is
3907 appropriate for passing a pointer to that type. */
3908
3909 static bool
3910 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3911 enum machine_mode mode ATTRIBUTE_UNUSED,
3912 tree type, bool named ATTRIBUTE_UNUSED)
3913 {
3914 if (!TARGET_64BIT)
3915 return 0;
3916
3917 if (type && int_size_in_bytes (type) == -1)
3918 {
3919 if (TARGET_DEBUG_ARG)
3920 fprintf (stderr, "function_arg_pass_by_reference\n");
3921 return 1;
3922 }
3923
3924 return 0;
3925 }
3926
3927 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
3928 ABI. Only called if TARGET_SSE. */
3929 static bool
3930 contains_128bit_aligned_vector_p (tree type)
3931 {
3932 enum machine_mode mode = TYPE_MODE (type);
3933 if (SSE_REG_MODE_P (mode)
3934 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
3935 return true;
3936 if (TYPE_ALIGN (type) < 128)
3937 return false;
3938
3939 if (AGGREGATE_TYPE_P (type))
3940 {
3941 /* Walk the aggregates recursively. */
3942 switch (TREE_CODE (type))
3943 {
3944 case RECORD_TYPE:
3945 case UNION_TYPE:
3946 case QUAL_UNION_TYPE:
3947 {
3948 tree field;
3949
3950 /* Walk all the structure fields. */
3951 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3952 {
3953 if (TREE_CODE (field) == FIELD_DECL
3954 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
3955 return true;
3956 }
3957 break;
3958 }
3959
3960 case ARRAY_TYPE:
3961 	  /* Just for use if some language passes arrays by value.  */
3962 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
3963 return true;
3964 break;
3965
3966 default:
3967 gcc_unreachable ();
3968 }
3969 }
3970 return false;
3971 }
3972
3973 /* Gives the alignment boundary, in bits, of an argument with the
3974 specified mode and type. */
3975
3976 int
3977 ix86_function_arg_boundary (enum machine_mode mode, tree type)
3978 {
3979 int align;
3980 if (type)
3981 align = TYPE_ALIGN (type);
3982 else
3983 align = GET_MODE_ALIGNMENT (mode);
3984 if (align < PARM_BOUNDARY)
3985 align = PARM_BOUNDARY;
3986 if (!TARGET_64BIT)
3987 {
3988 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
3989 make an exception for SSE modes since these require 128bit
3990 alignment.
3991
3992 The handling here differs from field_alignment. ICC aligns MMX
3993 arguments to 4 byte boundaries, while structure fields are aligned
3994 to 8 byte boundaries. */
3995 if (!TARGET_SSE)
3996 align = PARM_BOUNDARY;
3997 else if (!type)
3998 {
3999 if (!SSE_REG_MODE_P (mode))
4000 align = PARM_BOUNDARY;
4001 }
4002 else
4003 {
4004 if (!contains_128bit_aligned_vector_p (type))
4005 align = PARM_BOUNDARY;
4006 }
4007 }
4008 if (align > 128)
4009 align = 128;
4010 return align;
4011 }
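
/* Illustrative sketch, an assumed example rather than original text: with
   SSE enabled, an aggregate containing a 128-bit vector member, e.g.

     struct pkt { __m128 payload; int tag; };

   is reported with a 128-bit argument boundary by the function above,
   while a plain "struct { int a, b; }" stays at PARM_BOUNDARY.  */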
4012
4013 /* Return true if N is a possible register number of function value. */
4014 bool
4015 ix86_function_value_regno_p (int regno)
4016 {
4017 if (regno == 0
4018 || (regno == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
4019 || (regno == FIRST_SSE_REG && TARGET_SSE))
4020 return true;
4021
4022 if (!TARGET_64BIT
4023 && (regno == FIRST_MMX_REG && TARGET_MMX))
4024 return true;
4025
4026 return false;
4027 }
4028
4029 /* Define how to find the value returned by a function.
4030 VALTYPE is the data type of the value (as a tree).
4031 If the precise function being called is known, FUNC is its FUNCTION_DECL;
4032 otherwise, FUNC is 0. */
4033 rtx
4034 ix86_function_value (tree valtype, tree fntype_or_decl,
4035 bool outgoing ATTRIBUTE_UNUSED)
4036 {
4037 enum machine_mode natmode = type_natural_mode (valtype);
4038
4039 if (TARGET_64BIT)
4040 {
4041 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
4042 1, REGPARM_MAX, SSE_REGPARM_MAX,
4043 x86_64_int_return_registers, 0);
4044       /* For zero sized structures, construct_container returns NULL, but we
4045 	 need to keep the rest of the compiler happy by returning a meaningful value.  */
4046 if (!ret)
4047 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
4048 return ret;
4049 }
4050 else
4051 {
4052 tree fn = NULL_TREE, fntype;
4053 if (fntype_or_decl
4054 && DECL_P (fntype_or_decl))
4055 fn = fntype_or_decl;
4056 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
4057 return gen_rtx_REG (TYPE_MODE (valtype),
4058 ix86_value_regno (natmode, fn, fntype));
4059 }
4060 }
4061
4062 /* Return true iff type is returned in memory. */
4063 int
4064 ix86_return_in_memory (tree type)
4065 {
4066 int needed_intregs, needed_sseregs, size;
4067 enum machine_mode mode = type_natural_mode (type);
4068
4069 if (TARGET_64BIT)
4070 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
4071
4072 if (mode == BLKmode)
4073 return 1;
4074
4075 size = int_size_in_bytes (type);
4076
4077 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
4078 return 0;
4079
4080 if (VECTOR_MODE_P (mode) || mode == TImode)
4081 {
4082 /* User-created vectors small enough to fit in EAX. */
4083 if (size < 8)
4084 return 0;
4085
4086 /* MMX/3dNow values are returned in MM0,
4087 	 except when it doesn't exist.  */
4088 if (size == 8)
4089 return (TARGET_MMX ? 0 : 1);
4090
4091 /* SSE values are returned in XMM0, except when it doesn't exist. */
4092 if (size == 16)
4093 return (TARGET_SSE ? 0 : 1);
4094 }
4095
4096 if (mode == XFmode)
4097 return 0;
4098
4099 if (mode == TDmode)
4100 return 1;
4101
4102 if (size > 12)
4103 return 1;
4104 return 0;
4105 }
4106
4107 /* When returning SSE vector types, we have a choice of either
4108 (1) being abi incompatible with a -march switch, or
4109 (2) generating an error.
4110 Given no good solution, I think the safest thing is one warning.
4111 The user won't be able to use -Werror, but....
4112
4113 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
4114 called in response to actually generating a caller or callee that
4115 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
4116 via aggregate_value_p for general type probing from tree-ssa. */
4117
4118 static rtx
4119 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
4120 {
4121 static bool warnedsse, warnedmmx;
4122
4123 if (type)
4124 {
4125 /* Look at the return type of the function, not the function type. */
4126 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
4127
4128 if (!TARGET_SSE && !warnedsse)
4129 {
4130 if (mode == TImode
4131 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4132 {
4133 warnedsse = true;
4134 warning (0, "SSE vector return without SSE enabled "
4135 "changes the ABI");
4136 }
4137 }
4138
4139 if (!TARGET_MMX && !warnedmmx)
4140 {
4141 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4142 {
4143 warnedmmx = true;
4144 warning (0, "MMX vector return without MMX enabled "
4145 "changes the ABI");
4146 }
4147 }
4148 }
4149
4150 return NULL;
4151 }
4152
4153 /* Define how to find the value returned by a library function
4154 assuming the value has mode MODE. */
4155 rtx
4156 ix86_libcall_value (enum machine_mode mode)
4157 {
4158 if (TARGET_64BIT)
4159 {
4160 switch (mode)
4161 {
4162 case SFmode:
4163 case SCmode:
4164 case DFmode:
4165 case DCmode:
4166 case TFmode:
4167 case SDmode:
4168 case DDmode:
4169 case TDmode:
4170 return gen_rtx_REG (mode, FIRST_SSE_REG);
4171 case XFmode:
4172 case XCmode:
4173 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
4174 case TCmode:
4175 return NULL;
4176 default:
4177 return gen_rtx_REG (mode, 0);
4178 }
4179 }
4180 else
4181 return gen_rtx_REG (mode, ix86_value_regno (mode, NULL, NULL));
4182 }
4183
4184 /* Given a mode, return the register to use for a return value. */
4185
4186 static int
4187 ix86_value_regno (enum machine_mode mode, tree func, tree fntype)
4188 {
4189 gcc_assert (!TARGET_64BIT);
4190
4191 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
4192 we normally prevent this case when mmx is not available. However
4193 some ABIs may require the result to be returned like DImode. */
4194 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4195 return TARGET_MMX ? FIRST_MMX_REG : 0;
4196
4197 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
4198 we prevent this case when sse is not available. However some ABIs
4199 may require the result to be returned like integer TImode. */
4200 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4201 return TARGET_SSE ? FIRST_SSE_REG : 0;
4202
4203 /* Decimal floating point values can go in %eax, unlike other float modes. */
4204 if (DECIMAL_FLOAT_MODE_P (mode))
4205 return 0;
4206
4207 /* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
4208 if (!SCALAR_FLOAT_MODE_P (mode) || !TARGET_FLOAT_RETURNS_IN_80387)
4209 return 0;
4210
4211 /* Floating point return values in %st(0), except for local functions when
4212 SSE math is enabled or for functions with sseregparm attribute. */
4213 if ((func || fntype)
4214 && (mode == SFmode || mode == DFmode))
4215 {
4216 int sse_level = ix86_function_sseregparm (fntype, func);
4217 if ((sse_level >= 1 && mode == SFmode)
4218 || (sse_level == 2 && mode == DFmode))
4219 return FIRST_SSE_REG;
4220 }
4221
4222 return FIRST_FLOAT_REG;
4223 }
4224 \f
4225 /* Create the va_list data type. */
4226
4227 static tree
4228 ix86_build_builtin_va_list (void)
4229 {
4230 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
4231
4232 /* For i386 we use plain pointer to argument area. */
4233 if (!TARGET_64BIT)
4234 return build_pointer_type (char_type_node);
4235
4236 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4237 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
4238
4239 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
4240 unsigned_type_node);
4241 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
4242 unsigned_type_node);
4243 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
4244 ptr_type_node);
4245 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
4246 ptr_type_node);
4247
4248 va_list_gpr_counter_field = f_gpr;
4249 va_list_fpr_counter_field = f_fpr;
4250
4251 DECL_FIELD_CONTEXT (f_gpr) = record;
4252 DECL_FIELD_CONTEXT (f_fpr) = record;
4253 DECL_FIELD_CONTEXT (f_ovf) = record;
4254 DECL_FIELD_CONTEXT (f_sav) = record;
4255
4256 TREE_CHAIN (record) = type_decl;
4257 TYPE_NAME (record) = type_decl;
4258 TYPE_FIELDS (record) = f_gpr;
4259 TREE_CHAIN (f_gpr) = f_fpr;
4260 TREE_CHAIN (f_fpr) = f_ovf;
4261 TREE_CHAIN (f_ovf) = f_sav;
4262
4263 layout_type (record);
4264
4265 /* The correct type is an array type of one element. */
4266 return build_array_type (record, build_index_type (size_zero_node));
4267 }
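
/* Illustrative sketch, not part of the original sources: the record built
   above corresponds to the familiar x86-64 va_list layout, roughly

     typedef struct {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __va_list_tag[1];

   The one-element array type is what makes va_list decay to a pointer
   when it is passed to functions such as vfprintf.  */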
4268
4269 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
4270
4271 static void
4272 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4273 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4274 int no_rtl)
4275 {
4276 CUMULATIVE_ARGS next_cum;
4277 rtx save_area = NULL_RTX, mem;
4278 rtx label;
4279 rtx label_ref;
4280 rtx tmp_reg;
4281 rtx nsse_reg;
4282 int set;
4283 tree fntype;
4284 int stdarg_p;
4285 int i;
4286
4287 if (!TARGET_64BIT)
4288 return;
4289
4290 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
4291 return;
4292
4293 /* Indicate to allocate space on the stack for varargs save area. */
4294 ix86_save_varrargs_registers = 1;
4295
4296 cfun->stack_alignment_needed = 128;
4297
4298 fntype = TREE_TYPE (current_function_decl);
4299 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
4300 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
4301 != void_type_node));
4302
4303 /* For varargs, we do not want to skip the dummy va_dcl argument.
4304 For stdargs, we do want to skip the last named argument. */
4305 next_cum = *cum;
4306 if (stdarg_p)
4307 function_arg_advance (&next_cum, mode, type, 1);
4308
4309 if (!no_rtl)
4310 save_area = frame_pointer_rtx;
4311
4312 set = get_varargs_alias_set ();
4313
4314 for (i = next_cum.regno;
4315 i < ix86_regparm
4316 && i < next_cum.regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
4317 i++)
4318 {
4319 mem = gen_rtx_MEM (Pmode,
4320 plus_constant (save_area, i * UNITS_PER_WORD));
4321 MEM_NOTRAP_P (mem) = 1;
4322 set_mem_alias_set (mem, set);
4323 emit_move_insn (mem, gen_rtx_REG (Pmode,
4324 x86_64_int_parameter_registers[i]));
4325 }
4326
4327 if (next_cum.sse_nregs && cfun->va_list_fpr_size)
4328 {
4329       /* Now emit code to save SSE registers.  The AX parameter contains the
4330 	 number of SSE parameter registers used to call this function.  We use
4331 	 the sse_prologue_save insn template, which produces a computed jump
4332 	 across the SSE saves.  Some preparation work is needed to get this working.  */
4333
4334 label = gen_label_rtx ();
4335 label_ref = gen_rtx_LABEL_REF (Pmode, label);
4336
4337       /* Compute the address to jump to:
4338 	 label - eax*4 + nnamed_sse_arguments*4  */
4339 tmp_reg = gen_reg_rtx (Pmode);
4340 nsse_reg = gen_reg_rtx (Pmode);
4341 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
4342 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
4343 gen_rtx_MULT (Pmode, nsse_reg,
4344 GEN_INT (4))));
4345 if (next_cum.sse_regno)
4346 emit_move_insn
4347 (nsse_reg,
4348 gen_rtx_CONST (DImode,
4349 gen_rtx_PLUS (DImode,
4350 label_ref,
4351 GEN_INT (next_cum.sse_regno * 4))));
4352 else
4353 emit_move_insn (nsse_reg, label_ref);
4354 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
4355
4356       /* Compute the address of the memory block we save into.  We always use a
4357 	 pointer pointing 127 bytes after the first byte to store - this is needed
4358 	 to keep the instruction size limited to 4 bytes.  */
4359 tmp_reg = gen_reg_rtx (Pmode);
4360 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
4361 plus_constant (save_area,
4362 8 * REGPARM_MAX + 127)));
4363 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
4364 MEM_NOTRAP_P (mem) = 1;
4365 set_mem_alias_set (mem, set);
4366 set_mem_align (mem, BITS_PER_WORD);
4367
4368 /* And finally do the dirty job! */
4369 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
4370 GEN_INT (next_cum.sse_regno), label));
4371 }
4372
4373 }
4374
4375 /* Implement va_start. */
4376
4377 void
4378 ix86_va_start (tree valist, rtx nextarg)
4379 {
4380 HOST_WIDE_INT words, n_gpr, n_fpr;
4381 tree f_gpr, f_fpr, f_ovf, f_sav;
4382 tree gpr, fpr, ovf, sav, t;
4383 tree type;
4384
4385   /* Only the 64-bit target needs something special.  */
4386 if (!TARGET_64BIT)
4387 {
4388 std_expand_builtin_va_start (valist, nextarg);
4389 return;
4390 }
4391
4392 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
4393 f_fpr = TREE_CHAIN (f_gpr);
4394 f_ovf = TREE_CHAIN (f_fpr);
4395 f_sav = TREE_CHAIN (f_ovf);
4396
4397 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
4398 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
4399 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
4400 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
4401 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4402
4403 /* Count number of gp and fp argument registers used. */
4404 words = current_function_args_info.words;
4405 n_gpr = current_function_args_info.regno;
4406 n_fpr = current_function_args_info.sse_regno;
4407
4408 if (TARGET_DEBUG_ARG)
4409 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
4410 (int) words, (int) n_gpr, (int) n_fpr);
4411
4412 if (cfun->va_list_gpr_size)
4413 {
4414 type = TREE_TYPE (gpr);
4415 t = build2 (GIMPLE_MODIFY_STMT, type, gpr,
4416 build_int_cst (type, n_gpr * 8));
4417 TREE_SIDE_EFFECTS (t) = 1;
4418 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4419 }
4420
4421 if (cfun->va_list_fpr_size)
4422 {
4423 type = TREE_TYPE (fpr);
4424 t = build2 (GIMPLE_MODIFY_STMT, type, fpr,
4425 build_int_cst (type, n_fpr * 16 + 8*REGPARM_MAX));
4426 TREE_SIDE_EFFECTS (t) = 1;
4427 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4428 }
4429
4430 /* Find the overflow area. */
4431 type = TREE_TYPE (ovf);
4432 t = make_tree (type, virtual_incoming_args_rtx);
4433 if (words != 0)
4434 t = build2 (PLUS_EXPR, type, t,
4435 build_int_cst (type, words * UNITS_PER_WORD));
4436 t = build2 (GIMPLE_MODIFY_STMT, type, ovf, t);
4437 TREE_SIDE_EFFECTS (t) = 1;
4438 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4439
4440 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
4441 {
4442 /* Find the register save area.
4443 	 The function prologue saves it right above the stack frame.  */
4444 type = TREE_TYPE (sav);
4445 t = make_tree (type, frame_pointer_rtx);
4446 t = build2 (GIMPLE_MODIFY_STMT, type, sav, t);
4447 TREE_SIDE_EFFECTS (t) = 1;
4448 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4449 }
4450 }
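
/* Illustrative sketch, an assumed example rather than original text: for
   a variadic callee declared as

     void log_msg (int level, double scale, ...);

   one integer and one SSE register are consumed by the named arguments,
   so the code above initializes gp_offset to 8 and fp_offset to
   REGPARM_MAX*8 + 16 = 64; va_arg then indexes the register save area
   from those offsets until the six GPR and eight SSE slots run out.  */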
4451
4452 /* Implement va_arg. */
4453
4454 tree
4455 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
4456 {
4457 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
4458 tree f_gpr, f_fpr, f_ovf, f_sav;
4459 tree gpr, fpr, ovf, sav, t;
4460 int size, rsize;
4461 tree lab_false, lab_over = NULL_TREE;
4462 tree addr, t2;
4463 rtx container;
4464 int indirect_p = 0;
4465 tree ptrtype;
4466 enum machine_mode nat_mode;
4467
4468   /* Only the 64-bit target needs something special.  */
4469 if (!TARGET_64BIT)
4470 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4471
4472 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
4473 f_fpr = TREE_CHAIN (f_gpr);
4474 f_ovf = TREE_CHAIN (f_fpr);
4475 f_sav = TREE_CHAIN (f_ovf);
4476
4477 valist = build_va_arg_indirect_ref (valist);
4478 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
4479 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
4480 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
4481 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4482
4483 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
4484 if (indirect_p)
4485 type = build_pointer_type (type);
4486 size = int_size_in_bytes (type);
4487 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4488
4489 nat_mode = type_natural_mode (type);
4490 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
4491 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
4492
4493 /* Pull the value out of the saved registers. */
4494
4495 addr = create_tmp_var (ptr_type_node, "addr");
4496 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
4497
4498 if (container)
4499 {
4500 int needed_intregs, needed_sseregs;
4501 bool need_temp;
4502 tree int_addr, sse_addr;
4503
4504 lab_false = create_artificial_label ();
4505 lab_over = create_artificial_label ();
4506
4507 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
4508
4509 need_temp = (!REG_P (container)
4510 && ((needed_intregs && TYPE_ALIGN (type) > 64)
4511 || TYPE_ALIGN (type) > 128));
4512
4513       /* In case we are passing a structure, verify that it is a consecutive
4514 	 block in the register save area.  If not, we need to do moves.  */
4515 if (!need_temp && !REG_P (container))
4516 {
4517 /* Verify that all registers are strictly consecutive */
4518 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
4519 {
4520 int i;
4521
4522 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4523 {
4524 rtx slot = XVECEXP (container, 0, i);
4525 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
4526 || INTVAL (XEXP (slot, 1)) != i * 16)
4527 need_temp = 1;
4528 }
4529 }
4530 else
4531 {
4532 int i;
4533
4534 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4535 {
4536 rtx slot = XVECEXP (container, 0, i);
4537 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
4538 || INTVAL (XEXP (slot, 1)) != i * 8)
4539 need_temp = 1;
4540 }
4541 }
4542 }
4543 if (!need_temp)
4544 {
4545 int_addr = addr;
4546 sse_addr = addr;
4547 }
4548 else
4549 {
4550 int_addr = create_tmp_var (ptr_type_node, "int_addr");
4551 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
4552 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
4553 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
4554 }
4555
4556 /* First ensure that we fit completely in registers. */
4557 if (needed_intregs)
4558 {
4559 t = build_int_cst (TREE_TYPE (gpr),
4560 (REGPARM_MAX - needed_intregs + 1) * 8);
4561 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
4562 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4563 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4564 gimplify_and_add (t, pre_p);
4565 }
4566 if (needed_sseregs)
4567 {
4568 t = build_int_cst (TREE_TYPE (fpr),
4569 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
4570 + REGPARM_MAX * 8);
4571 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
4572 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4573 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4574 gimplify_and_add (t, pre_p);
4575 }
4576
4577 /* Compute index to start of area used for integer regs. */
4578 if (needed_intregs)
4579 {
4580 /* int_addr = gpr + sav; */
4581 t = fold_convert (ptr_type_node, gpr);
4582 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4583 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, int_addr, t);
4584 gimplify_and_add (t, pre_p);
4585 }
4586 if (needed_sseregs)
4587 {
4588 /* sse_addr = fpr + sav; */
4589 t = fold_convert (ptr_type_node, fpr);
4590 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4591 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, sse_addr, t);
4592 gimplify_and_add (t, pre_p);
4593 }
4594 if (need_temp)
4595 {
4596 int i;
4597 tree temp = create_tmp_var (type, "va_arg_tmp");
4598
4599 /* addr = &temp; */
4600 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
4601 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
4602 gimplify_and_add (t, pre_p);
4603
4604 for (i = 0; i < XVECLEN (container, 0); i++)
4605 {
4606 rtx slot = XVECEXP (container, 0, i);
4607 rtx reg = XEXP (slot, 0);
4608 enum machine_mode mode = GET_MODE (reg);
4609 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
4610 tree addr_type = build_pointer_type (piece_type);
4611 tree src_addr, src;
4612 int src_offset;
4613 tree dest_addr, dest;
4614
4615 if (SSE_REGNO_P (REGNO (reg)))
4616 {
4617 src_addr = sse_addr;
4618 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
4619 }
4620 else
4621 {
4622 src_addr = int_addr;
4623 src_offset = REGNO (reg) * 8;
4624 }
4625 src_addr = fold_convert (addr_type, src_addr);
4626 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
4627 size_int (src_offset)));
4628 src = build_va_arg_indirect_ref (src_addr);
4629
4630 dest_addr = fold_convert (addr_type, addr);
4631 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
4632 size_int (INTVAL (XEXP (slot, 1)))));
4633 dest = build_va_arg_indirect_ref (dest_addr);
4634
4635 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, dest, src);
4636 gimplify_and_add (t, pre_p);
4637 }
4638 }
4639
4640 if (needed_intregs)
4641 {
4642 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
4643 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
4644 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gpr), gpr, t);
4645 gimplify_and_add (t, pre_p);
4646 }
4647 if (needed_sseregs)
4648 {
4649 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
4650 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
4651 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (fpr), fpr, t);
4652 gimplify_and_add (t, pre_p);
4653 }
4654
4655 t = build1 (GOTO_EXPR, void_type_node, lab_over);
4656 gimplify_and_add (t, pre_p);
4657
4658 t = build1 (LABEL_EXPR, void_type_node, lab_false);
4659 append_to_statement_list (t, pre_p);
4660 }
4661
4662 /* ... otherwise out of the overflow area. */
4663
4664 /* Care for on-stack alignment if needed. */
4665 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64
4666 || integer_zerop (TYPE_SIZE (type)))
4667 t = ovf;
4668 else
4669 {
4670 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
4671 t = build2 (PLUS_EXPR, TREE_TYPE (ovf), ovf,
4672 build_int_cst (TREE_TYPE (ovf), align - 1));
4673 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4674 build_int_cst (TREE_TYPE (t), -align));
4675 }
4676 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
4677
4678 t2 = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
4679 gimplify_and_add (t2, pre_p);
4680
4681 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
4682 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
4683 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
4684 gimplify_and_add (t, pre_p);
4685
4686 if (container)
4687 {
4688 t = build1 (LABEL_EXPR, void_type_node, lab_over);
4689 append_to_statement_list (t, pre_p);
4690 }
4691
4692 ptrtype = build_pointer_type (type);
4693 addr = fold_convert (ptrtype, addr);
4694
4695 if (indirect_p)
4696 addr = build_va_arg_indirect_ref (addr);
4697 return build_va_arg_indirect_ref (addr);
4698 }
4699 \f
4700 /* Return nonzero if OPNUM's MEM should be matched
4701 in movabs* patterns. */
4702
4703 int
4704 ix86_check_movabs (rtx insn, int opnum)
4705 {
4706 rtx set, mem;
4707
4708 set = PATTERN (insn);
4709 if (GET_CODE (set) == PARALLEL)
4710 set = XVECEXP (set, 0, 0);
4711 gcc_assert (GET_CODE (set) == SET);
4712 mem = XEXP (set, opnum);
4713 while (GET_CODE (mem) == SUBREG)
4714 mem = SUBREG_REG (mem);
4715 gcc_assert (MEM_P (mem));
4716 return (volatile_ok || !MEM_VOLATILE_P (mem));
4717 }
4718 \f
4719 /* Initialize the table of extra 80387 mathematical constants. */
4720
4721 static void
4722 init_ext_80387_constants (void)
4723 {
4724 static const char * cst[5] =
4725 {
4726 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
4727 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
4728 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
4729 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
4730 "3.1415926535897932385128089594061862044", /* 4: fldpi */
4731 };
4732 int i;
4733
4734 for (i = 0; i < 5; i++)
4735 {
4736 real_from_string (&ext_80387_constants_table[i], cst[i]);
4737 /* Ensure each constant is rounded to XFmode precision. */
4738 real_convert (&ext_80387_constants_table[i],
4739 XFmode, &ext_80387_constants_table[i]);
4740 }
4741
4742 ext_80387_constants_init = 1;
4743 }
4744
4745 /* Return true if the constant is something that can be loaded with
4746 a special instruction. */
4747
4748 int
4749 standard_80387_constant_p (rtx x)
4750 {
4751 REAL_VALUE_TYPE r;
4752
4753 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
4754 return -1;
4755
4756 if (x == CONST0_RTX (GET_MODE (x)))
4757 return 1;
4758 if (x == CONST1_RTX (GET_MODE (x)))
4759 return 2;
4760
4761 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4762
4763 /* For XFmode constants, try to find a special 80387 instruction when
4764 optimizing for size or on those CPUs that benefit from them. */
4765 if (GET_MODE (x) == XFmode
4766 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
4767 {
4768 int i;
4769
4770 if (! ext_80387_constants_init)
4771 init_ext_80387_constants ();
4772
4773 for (i = 0; i < 5; i++)
4774 if (real_identical (&r, &ext_80387_constants_table[i]))
4775 return i + 3;
4776 }
4777
4778   /* A load of the constant -0.0 or -1.0 will be split into an
4779      fldz;fchs or fld1;fchs sequence.  */
4780 if (real_isnegzero (&r))
4781 return 8;
4782 if (real_identical (&r, &dconstm1))
4783 return 9;
4784
4785 return 0;
4786 }
4787
4788 /* Return the opcode of the special instruction to be used to load
4789 the constant X. */
4790
4791 const char *
4792 standard_80387_constant_opcode (rtx x)
4793 {
4794 switch (standard_80387_constant_p (x))
4795 {
4796 case 1:
4797 return "fldz";
4798 case 2:
4799 return "fld1";
4800 case 3:
4801 return "fldlg2";
4802 case 4:
4803 return "fldln2";
4804 case 5:
4805 return "fldl2e";
4806 case 6:
4807 return "fldl2t";
4808 case 7:
4809 return "fldpi";
4810 case 8:
4811 case 9:
4812 return "#";
4813 default:
4814 gcc_unreachable ();
4815 }
4816 }
4817
4818 /* Return the CONST_DOUBLE representing the 80387 constant that is
4819 loaded by the specified special instruction. The argument IDX
4820 matches the return value from standard_80387_constant_p. */
4821
4822 rtx
4823 standard_80387_constant_rtx (int idx)
4824 {
4825 int i;
4826
4827 if (! ext_80387_constants_init)
4828 init_ext_80387_constants ();
4829
4830 switch (idx)
4831 {
4832 case 3:
4833 case 4:
4834 case 5:
4835 case 6:
4836 case 7:
4837 i = idx - 3;
4838 break;
4839
4840 default:
4841 gcc_unreachable ();
4842 }
4843
4844 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
4845 XFmode);
4846 }
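
/* A worked example of the mapping implemented above: a CONST_DOUBLE
   holding pi yields 7 from standard_80387_constant_p,
   standard_80387_constant_opcode then returns "fldpi", and
   standard_80387_constant_rtx (7) rebuilds the XFmode CONST_DOUBLE from
   ext_80387_constants_table.  */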
4847
4848 /* Return 1 if MODE is a valid mode for SSE. */
4849 static int
4850 standard_sse_mode_p (enum machine_mode mode)
4851 {
4852 switch (mode)
4853 {
4854 case V16QImode:
4855 case V8HImode:
4856 case V4SImode:
4857 case V2DImode:
4858 case V4SFmode:
4859 case V2DFmode:
4860 return 1;
4861
4862 default:
4863 return 0;
4864 }
4865 }
4866
4867 /* Return 1 if X is an FP constant that we can load into an SSE register
4868 without using memory. */
4869 int
4870 standard_sse_constant_p (rtx x)
4871 {
4872 enum machine_mode mode = GET_MODE (x);
4873
4874 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
4875 return 1;
4876 if (vector_all_ones_operand (x, mode)
4877 && standard_sse_mode_p (mode))
4878 return TARGET_SSE2 ? 2 : -1;
4879
4880 return 0;
4881 }
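
/* For example, CONST0_RTX (V4SFmode) returns 1 (loadable with a self-xor),
   while an all-ones V4SImode vector returns 2 when TARGET_SSE2 is set,
   telling standard_sse_constant_opcode below to emit pcmpeqd.  This is
   only a sketch of the encoding shared by the two functions.  */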
4882
4883 /* Return the opcode of the special instruction to be used to load
4884 the constant X. */
4885
4886 const char *
4887 standard_sse_constant_opcode (rtx insn, rtx x)
4888 {
4889 switch (standard_sse_constant_p (x))
4890 {
4891 case 1:
4892 if (get_attr_mode (insn) == MODE_V4SF)
4893 return "xorps\t%0, %0";
4894 else if (get_attr_mode (insn) == MODE_V2DF)
4895 return "xorpd\t%0, %0";
4896 else
4897 return "pxor\t%0, %0";
4898 case 2:
4899 return "pcmpeqd\t%0, %0";
4900 }
4901 gcc_unreachable ();
4902 }
4903
4904 /* Return 1 if OP contains a symbol reference. */
4905
4906 int
4907 symbolic_reference_mentioned_p (rtx op)
4908 {
4909 const char *fmt;
4910 int i;
4911
4912 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
4913 return 1;
4914
4915 fmt = GET_RTX_FORMAT (GET_CODE (op));
4916 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
4917 {
4918 if (fmt[i] == 'E')
4919 {
4920 int j;
4921
4922 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
4923 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
4924 return 1;
4925 }
4926
4927 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
4928 return 1;
4929 }
4930
4931 return 0;
4932 }
4933
4934 /* Return 1 if it is appropriate to emit `ret' instructions in the
4935 body of a function. Do this only if the epilogue is simple, needing a
4936 couple of insns. Prior to reloading, we can't tell how many registers
4937 must be saved, so return 0 then. Return 0 if there is no frame
4938 marker to de-allocate. */
4939
4940 int
4941 ix86_can_use_return_insn_p (void)
4942 {
4943 struct ix86_frame frame;
4944
4945 if (! reload_completed || frame_pointer_needed)
4946 return 0;
4947
4948 /* Don't allow popping 32768 bytes or more of arguments, since that is
4949 more than we handle with a single return instruction. */
4950 if (current_function_pops_args
4951 && current_function_args_size >= 32768)
4952 return 0;
4953
4954 ix86_compute_frame_layout (&frame);
4955 return frame.to_allocate == 0 && frame.nregs == 0;
4956 }
4957 \f
4958 /* Value should be nonzero if functions must have frame pointers.
4959 Zero means the frame pointer need not be set up (and parms may
4960 be accessed via the stack pointer) in functions that seem suitable. */
4961
4962 int
4963 ix86_frame_pointer_required (void)
4964 {
4965 /* If we accessed previous frames, then the generated code expects
4966 to be able to access the saved ebp value in our frame. */
4967 if (cfun->machine->accesses_prev_frame)
4968 return 1;
4969
4970 /* Several x86 OSes need a frame pointer for other reasons,
4971 usually pertaining to setjmp. */
4972 if (SUBTARGET_FRAME_POINTER_REQUIRED)
4973 return 1;
4974
4975 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
4976 the frame pointer by default. Turn it back on now if we've not
4977 got a leaf function. */
4978 if (TARGET_OMIT_LEAF_FRAME_POINTER
4979 && (!current_function_is_leaf
4980 || ix86_current_function_calls_tls_descriptor))
4981 return 1;
4982
4983 if (current_function_profile)
4984 return 1;
4985
4986 return 0;
4987 }
4988
4989 /* Record that the current function accesses previous call frames. */
4990
4991 void
4992 ix86_setup_frame_addresses (void)
4993 {
4994 cfun->machine->accesses_prev_frame = 1;
4995 }
4996 \f
4997 #if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
4998 # define USE_HIDDEN_LINKONCE 1
4999 #else
5000 # define USE_HIDDEN_LINKONCE 0
5001 #endif
5002
5003 static int pic_labels_used;
5004
5005 /* Fills in the label name that should be used for a pc thunk for
5006 the given register. */
5007
5008 static void
5009 get_pc_thunk_name (char name[32], unsigned int regno)
5010 {
5011 gcc_assert (!TARGET_64BIT);
5012
5013 if (USE_HIDDEN_LINKONCE)
5014 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
5015 else
5016 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
5017 }
5018
5019
5020 /* At file end, emit the pc thunks used for -fpic: each loads its target
5021 register with the return address of the caller and then returns. */
5022
5023 void
5024 ix86_file_end (void)
5025 {
5026 rtx xops[2];
5027 int regno;
5028
5029 for (regno = 0; regno < 8; ++regno)
5030 {
5031 char name[32];
5032
5033 if (! ((pic_labels_used >> regno) & 1))
5034 continue;
5035
5036 get_pc_thunk_name (name, regno);
5037
5038 #if TARGET_MACHO
5039 if (TARGET_MACHO)
5040 {
5041 switch_to_section (darwin_sections[text_coal_section]);
5042 fputs ("\t.weak_definition\t", asm_out_file);
5043 assemble_name (asm_out_file, name);
5044 fputs ("\n\t.private_extern\t", asm_out_file);
5045 assemble_name (asm_out_file, name);
5046 fputs ("\n", asm_out_file);
5047 ASM_OUTPUT_LABEL (asm_out_file, name);
5048 }
5049 else
5050 #endif
5051 if (USE_HIDDEN_LINKONCE)
5052 {
5053 tree decl;
5054
5055 decl = build_decl (FUNCTION_DECL, get_identifier (name),
5056 error_mark_node);
5057 TREE_PUBLIC (decl) = 1;
5058 TREE_STATIC (decl) = 1;
5059 DECL_ONE_ONLY (decl) = 1;
5060
5061 (*targetm.asm_out.unique_section) (decl, 0);
5062 switch_to_section (get_named_section (decl, NULL, 0));
5063
5064 (*targetm.asm_out.globalize_label) (asm_out_file, name);
5065 fputs ("\t.hidden\t", asm_out_file);
5066 assemble_name (asm_out_file, name);
5067 fputc ('\n', asm_out_file);
5068 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
5069 }
5070 else
5071 {
5072 switch_to_section (text_section);
5073 ASM_OUTPUT_LABEL (asm_out_file, name);
5074 }
5075
5076 xops[0] = gen_rtx_REG (SImode, regno);
5077 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
5078 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
5079 output_asm_insn ("ret", xops);
5080 }
5081
5082 if (NEED_INDICATE_EXEC_STACK)
5083 file_end_indicate_exec_stack ();
5084 }
5085
5086 /* Emit code for the SET_GOT patterns. */
5087
5088 const char *
5089 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
5090 {
5091 rtx xops[3];
5092
5093 xops[0] = dest;
5094 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
5095
5096 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
5097 {
5098 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
5099
5100 if (!flag_pic)
5101 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
5102 else
5103 output_asm_insn ("call\t%a2", xops);
5104
5105 #if TARGET_MACHO
5106 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5107 is what will be referenced by the Mach-O PIC subsystem. */
5108 if (!label)
5109 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5110 #endif
5111
5112 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5113 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
5114
5115 if (flag_pic)
5116 output_asm_insn ("pop{l}\t%0", xops);
5117 }
5118 else
5119 {
5120 char name[32];
5121 get_pc_thunk_name (name, REGNO (dest));
5122 pic_labels_used |= 1 << REGNO (dest);
5123
5124 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
5125 xops[2] = gen_rtx_MEM (QImode, xops[2]);
5126 output_asm_insn ("call\t%X2", xops);
5127 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5128 is what will be referenced by the Mach-O PIC subsystem. */
5129 #if TARGET_MACHO
5130 if (!label)
5131 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5132 else
5133 targetm.asm_out.internal_label (asm_out_file, "L",
5134 CODE_LABEL_NUMBER (label));
5135 #endif
5136 }
5137
5138 if (TARGET_MACHO)
5139 return "";
5140
5141 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
5142 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
5143 else
5144 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
5145
5146 return "";
5147 }
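
/* A sketch of the code produced above for %ebx as the destination, with
   the usual ELF GOT symbol (illustrative, not verbatim output):

   thunk path (deep branch prediction):
	call	__i686.get_pc_thunk.bx
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx

   -fpic fallback path: a call to the immediately following label, a popl
   of %ebx, and an addl of $_GLOBAL_OFFSET_TABLE_ adjusted by the distance
   back to that label.  */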
5148
5149 /* Generate an "push" pattern for input ARG. */
5150
5151 static rtx
5152 gen_push (rtx arg)
5153 {
5154 return gen_rtx_SET (VOIDmode,
5155 gen_rtx_MEM (Pmode,
5156 gen_rtx_PRE_DEC (Pmode,
5157 stack_pointer_rtx)),
5158 arg);
5159 }
5160
5161 /* Return the number of an unused call-clobbered register available for
5162 the entire function, or INVALID_REGNUM if there is none. */
5163
5164 static unsigned int
5165 ix86_select_alt_pic_regnum (void)
5166 {
5167 if (current_function_is_leaf && !current_function_profile
5168 && !ix86_current_function_calls_tls_descriptor)
5169 {
5170 int i;
5171 for (i = 2; i >= 0; --i)
5172 if (!regs_ever_live[i])
5173 return i;
5174 }
5175
5176 return INVALID_REGNUM;
5177 }
5178
5179 /* Return 1 if we need to save REGNO. */
5180 static int
5181 ix86_save_reg (unsigned int regno, int maybe_eh_return)
5182 {
5183 if (pic_offset_table_rtx
5184 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
5185 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5186 || current_function_profile
5187 || current_function_calls_eh_return
5188 || current_function_uses_const_pool))
5189 {
5190 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
5191 return 0;
5192 return 1;
5193 }
5194
5195 if (current_function_calls_eh_return && maybe_eh_return)
5196 {
5197 unsigned i;
5198 for (i = 0; ; i++)
5199 {
5200 unsigned test = EH_RETURN_DATA_REGNO (i);
5201 if (test == INVALID_REGNUM)
5202 break;
5203 if (test == regno)
5204 return 1;
5205 }
5206 }
5207
5208 if (cfun->machine->force_align_arg_pointer
5209 && regno == REGNO (cfun->machine->force_align_arg_pointer))
5210 return 1;
5211
5212 return (regs_ever_live[regno]
5213 && !call_used_regs[regno]
5214 && !fixed_regs[regno]
5215 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
5216 }
5217
5218 /* Return number of registers to be saved on the stack. */
5219
5220 static int
5221 ix86_nsaved_regs (void)
5222 {
5223 int nregs = 0;
5224 int regno;
5225
5226 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
5227 if (ix86_save_reg (regno, true))
5228 nregs++;
5229 return nregs;
5230 }
5231
5232 /* Return the offset between two registers, one to be eliminated, and the other
5233 its replacement, at the start of a routine. */
5234
5235 HOST_WIDE_INT
5236 ix86_initial_elimination_offset (int from, int to)
5237 {
5238 struct ix86_frame frame;
5239 ix86_compute_frame_layout (&frame);
5240
5241 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
5242 return frame.hard_frame_pointer_offset;
5243 else if (from == FRAME_POINTER_REGNUM
5244 && to == HARD_FRAME_POINTER_REGNUM)
5245 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
5246 else
5247 {
5248 gcc_assert (to == STACK_POINTER_REGNUM);
5249
5250 if (from == ARG_POINTER_REGNUM)
5251 return frame.stack_pointer_offset;
5252
5253 gcc_assert (from == FRAME_POINTER_REGNUM);
5254 return frame.stack_pointer_offset - frame.frame_pointer_offset;
5255 }
5256 }
5257
5258 /* Fill the ix86_frame structure FRAME describing the frame of the current function. */
5259
5260 static void
5261 ix86_compute_frame_layout (struct ix86_frame *frame)
5262 {
5263 HOST_WIDE_INT total_size;
5264 unsigned int stack_alignment_needed;
5265 HOST_WIDE_INT offset;
5266 unsigned int preferred_alignment;
5267 HOST_WIDE_INT size = get_frame_size ();
5268
5269 frame->nregs = ix86_nsaved_regs ();
5270 total_size = size;
5271
5272 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
5273 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
5274
5275 /* During reload the number of registers saved can change.
5276 Recompute the value as needed. Do not recompute when the number of
5277 registers did not change, as reload calls this function multiple times
5278 and does not expect the decision to change within a single iteration. */
5279 if (!optimize_size
5280 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
5281 {
5282 int count = frame->nregs;
5283
5284 cfun->machine->use_fast_prologue_epilogue_nregs = count;
5285 /* The fast prologue uses move instead of push to save registers. This
5286 is significantly longer, but also executes faster as modern hardware
5287 can execute the moves in parallel, but can't do that for push/pop.
5288
5289 Be careful about choosing which prologue to emit: when the function
5290 takes many instructions to execute we may use the slow version, as we
5291 may when the function is known to be outside a hot spot (this is known
5292 with feedback only). Weight the size of the function by the number of
5293 registers to save, as it is cheap to use one or two push instructions
5294 but very slow to use many of them. */
5295 if (count)
5296 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
5297 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
5298 || (flag_branch_probabilities
5299 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
5300 cfun->machine->use_fast_prologue_epilogue = false;
5301 else
5302 cfun->machine->use_fast_prologue_epilogue
5303 = !expensive_function_p (count);
5304 }
5305 if (TARGET_PROLOGUE_USING_MOVE
5306 && cfun->machine->use_fast_prologue_epilogue)
5307 frame->save_regs_using_mov = true;
5308 else
5309 frame->save_regs_using_mov = false;
5310
5311
5312 /* Skip return address and saved base pointer. */
5313 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
5314
5315 frame->hard_frame_pointer_offset = offset;
5316
5317 /* Do some sanity checking of stack_alignment_needed and
5318 preferred_alignment, since the i386 port is the only one using these
5319 features, and they may break easily. */
5320
5321 gcc_assert (!size || stack_alignment_needed);
5322 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
5323 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
5324 gcc_assert (stack_alignment_needed
5325 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
5326
5327 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
5328 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
5329
5330 /* Register save area */
5331 offset += frame->nregs * UNITS_PER_WORD;
5332
5333 /* Va-arg area */
5334 if (ix86_save_varrargs_registers)
5335 {
5336 offset += X86_64_VARARGS_SIZE;
5337 frame->va_arg_size = X86_64_VARARGS_SIZE;
5338 }
5339 else
5340 frame->va_arg_size = 0;
5341
5342 /* Align start of frame for local function. */
5343 frame->padding1 = ((offset + stack_alignment_needed - 1)
5344 & -stack_alignment_needed) - offset;
5345
5346 offset += frame->padding1;
5347
5348 /* Frame pointer points here. */
5349 frame->frame_pointer_offset = offset;
5350
5351 offset += size;
5352
5353 /* Add the outgoing arguments area. It can be skipped if we eliminated
5354 all the function calls as dead code.
5355 Skipping is however impossible when the function calls alloca; the
5356 alloca expander assumes that the last current_function_outgoing_args_size
5357 bytes of the stack frame are unused. */
5358 if (ACCUMULATE_OUTGOING_ARGS
5359 && (!current_function_is_leaf || current_function_calls_alloca
5360 || ix86_current_function_calls_tls_descriptor))
5361 {
5362 offset += current_function_outgoing_args_size;
5363 frame->outgoing_arguments_size = current_function_outgoing_args_size;
5364 }
5365 else
5366 frame->outgoing_arguments_size = 0;
5367
5368 /* Align stack boundary. Only needed if we're calling another function
5369 or using alloca. */
5370 if (!current_function_is_leaf || current_function_calls_alloca
5371 || ix86_current_function_calls_tls_descriptor)
5372 frame->padding2 = ((offset + preferred_alignment - 1)
5373 & -preferred_alignment) - offset;
5374 else
5375 frame->padding2 = 0;
5376
5377 offset += frame->padding2;
5378
5379 /* We've reached end of stack frame. */
5380 frame->stack_pointer_offset = offset;
5381
5382 /* Size prologue needs to allocate. */
5383 frame->to_allocate =
5384 (size + frame->padding1 + frame->padding2
5385 + frame->outgoing_arguments_size + frame->va_arg_size);
5386
5387 if ((!frame->to_allocate && frame->nregs <= 1)
5388 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
5389 frame->save_regs_using_mov = false;
5390
5391 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
5392 && current_function_is_leaf
5393 && !ix86_current_function_calls_tls_descriptor)
5394 {
5395 frame->red_zone_size = frame->to_allocate;
5396 if (frame->save_regs_using_mov)
5397 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
5398 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
5399 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
5400 }
5401 else
5402 frame->red_zone_size = 0;
5403 frame->to_allocate -= frame->red_zone_size;
5404 frame->stack_pointer_offset -= frame->red_zone_size;
5405 #if 0
5406 fprintf (stderr, "\n");
5407 fprintf (stderr, "nregs: %ld\n", (long)frame->nregs);
5408 fprintf (stderr, "size: %ld\n", (long)size);
5409 fprintf (stderr, "alignment1: %ld\n", (long)stack_alignment_needed);
5410 fprintf (stderr, "padding1: %ld\n", (long)frame->padding1);
5411 fprintf (stderr, "va_arg: %ld\n", (long)frame->va_arg_size);
5412 fprintf (stderr, "padding2: %ld\n", (long)frame->padding2);
5413 fprintf (stderr, "to_allocate: %ld\n", (long)frame->to_allocate);
5414 fprintf (stderr, "red_zone_size: %ld\n", (long)frame->red_zone_size);
5415 fprintf (stderr, "frame_pointer_offset: %ld\n", (long)frame->frame_pointer_offset);
5416 fprintf (stderr, "hard_frame_pointer_offset: %ld\n",
5417 (long)frame->hard_frame_pointer_offset);
5418 fprintf (stderr, "stack_pointer_offset: %ld\n", (long)frame->stack_pointer_offset);
5419 fprintf (stderr, "current_function_is_leaf: %ld\n", (long)current_function_is_leaf);
5420 fprintf (stderr, "current_function_calls_alloca: %ld\n", (long)current_function_calls_alloca);
5421 fprintf (stderr, "x86_current_function_calls_tls_descriptor: %ld\n", (long)ix86_current_function_calls_tls_descriptor);
5422 #endif
5423 }
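
/* A sketch of the frame layout computed above, from higher to lower
   addresses (red zone adjustments omitted):

	return address
	saved frame pointer (if frame_pointer_needed)
				<- hard_frame_pointer_offset
	register save area	(nregs * UNITS_PER_WORD)
	va-arg save area	(va_arg_size)
	padding1		(align to stack_alignment_needed)
				<- frame_pointer_offset
	local variables		(get_frame_size ())
	outgoing arguments	(outgoing_arguments_size)
	padding2		(align to preferred_alignment)
				<- stack_pointer_offset

   to_allocate is the part below the register save area that the prologue
   must allocate explicitly.  */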
5424
5425 /* Emit code to save registers in the prologue. */
5426
5427 static void
5428 ix86_emit_save_regs (void)
5429 {
5430 unsigned int regno;
5431 rtx insn;
5432
5433 for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
5434 if (ix86_save_reg (regno, true))
5435 {
5436 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
5437 RTX_FRAME_RELATED_P (insn) = 1;
5438 }
5439 }
5440
5441 /* Emit code to save registers using MOV insns. The first register
5442 is saved at POINTER + OFFSET. */
5443 static void
5444 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
5445 {
5446 unsigned int regno;
5447 rtx insn;
5448
5449 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5450 if (ix86_save_reg (regno, true))
5451 {
5452 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
5453 Pmode, offset),
5454 gen_rtx_REG (Pmode, regno));
5455 RTX_FRAME_RELATED_P (insn) = 1;
5456 offset += UNITS_PER_WORD;
5457 }
5458 }
5459
5460 /* Expand a prologue or epilogue stack adjustment.
5461 The pattern exists to put a dependency on all ebp-based memory accesses.
5462 STYLE should be negative if instructions should be marked as frame related,
5463 zero if the %r11 register is live and cannot be freely used, and positive
5464 otherwise. */
5465
5466 static void
5467 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
5468 {
5469 rtx insn;
5470
5471 if (! TARGET_64BIT)
5472 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
5473 else if (x86_64_immediate_operand (offset, DImode))
5474 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
5475 else
5476 {
5477 rtx r11;
5478 /* r11 is used by indirect sibcall return as well, set before the
5479 epilogue and used after the epilogue. ATM indirect sibcall
5480 shouldn't be used together with huge frame sizes in one
5481 function because of the frame_size check in sibcall.c. */
5482 gcc_assert (style);
5483 r11 = gen_rtx_REG (DImode, R11_REG);
5484 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
5485 if (style < 0)
5486 RTX_FRAME_RELATED_P (insn) = 1;
5487 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
5488 offset));
5489 }
5490 if (style < 0)
5491 RTX_FRAME_RELATED_P (insn) = 1;
5492 }
5493
5494 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
5495
5496 static rtx
5497 ix86_internal_arg_pointer (void)
5498 {
5499 bool has_force_align_arg_pointer =
5500 (0 != lookup_attribute (ix86_force_align_arg_pointer_string,
5501 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))));
5502 if ((FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
5503 && DECL_NAME (current_function_decl)
5504 && MAIN_NAME_P (DECL_NAME (current_function_decl))
5505 && DECL_FILE_SCOPE_P (current_function_decl))
5506 || ix86_force_align_arg_pointer
5507 || has_force_align_arg_pointer)
5508 {
5509 /* Nested functions can't realign the stack due to a register
5510 conflict. */
5511 if (DECL_CONTEXT (current_function_decl)
5512 && TREE_CODE (DECL_CONTEXT (current_function_decl)) == FUNCTION_DECL)
5513 {
5514 if (ix86_force_align_arg_pointer)
5515 warning (0, "-mstackrealign ignored for nested functions");
5516 if (has_force_align_arg_pointer)
5517 error ("%s not supported for nested functions",
5518 ix86_force_align_arg_pointer_string);
5519 return virtual_incoming_args_rtx;
5520 }
5521 cfun->machine->force_align_arg_pointer = gen_rtx_REG (Pmode, 2);
5522 return copy_to_reg (cfun->machine->force_align_arg_pointer);
5523 }
5524 else
5525 return virtual_incoming_args_rtx;
5526 }
5527
5528 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
5529 This is called from dwarf2out.c to emit call frame instructions
5530 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
5531 static void
5532 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
5533 {
5534 rtx unspec = SET_SRC (pattern);
5535 gcc_assert (GET_CODE (unspec) == UNSPEC);
5536
5537 switch (index)
5538 {
5539 case UNSPEC_REG_SAVE:
5540 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
5541 SET_DEST (pattern));
5542 break;
5543 case UNSPEC_DEF_CFA:
5544 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
5545 INTVAL (XVECEXP (unspec, 0, 0)));
5546 break;
5547 default:
5548 gcc_unreachable ();
5549 }
5550 }
5551
5552 /* Expand the prologue into a bunch of separate insns. */
5553
5554 void
5555 ix86_expand_prologue (void)
5556 {
5557 rtx insn;
5558 bool pic_reg_used;
5559 struct ix86_frame frame;
5560 HOST_WIDE_INT allocate;
5561
5562 ix86_compute_frame_layout (&frame);
5563
5564 if (cfun->machine->force_align_arg_pointer)
5565 {
5566 rtx x, y;
5567
5568 /* Grab the argument pointer. */
5569 x = plus_constant (stack_pointer_rtx, 4);
5570 y = cfun->machine->force_align_arg_pointer;
5571 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
5572 RTX_FRAME_RELATED_P (insn) = 1;
5573
5574 /* The unwind info consists of two parts: install the fafp as the cfa,
5575 and record the fafp as the "save register" of the stack pointer.
5576 The latter is there so that the unwinder can see where it should
5577 restore the stack pointer across the "and" insn that aligns the stack. */
5578 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_DEF_CFA);
5579 x = gen_rtx_SET (VOIDmode, y, x);
5580 RTX_FRAME_RELATED_P (x) = 1;
5581 y = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, stack_pointer_rtx),
5582 UNSPEC_REG_SAVE);
5583 y = gen_rtx_SET (VOIDmode, cfun->machine->force_align_arg_pointer, y);
5584 RTX_FRAME_RELATED_P (y) = 1;
5585 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y));
5586 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5587 REG_NOTES (insn) = x;
5588
5589 /* Align the stack. */
5590 emit_insn (gen_andsi3 (stack_pointer_rtx, stack_pointer_rtx,
5591 GEN_INT (-16)));
5592
5593 /* And here we cheat like madmen with the unwind info. We force the
5594 cfa register back to sp+4, which is exactly what it was at the
5595 start of the function. Re-pushing the return address results in
5596 the return at the same spot relative to the cfa, and thus is
5597 correct wrt the unwind info. */
5598 x = cfun->machine->force_align_arg_pointer;
5599 x = gen_frame_mem (Pmode, plus_constant (x, -4));
5600 insn = emit_insn (gen_push (x));
5601 RTX_FRAME_RELATED_P (insn) = 1;
5602
5603 x = GEN_INT (4);
5604 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, x), UNSPEC_DEF_CFA);
5605 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
5606 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5607 REG_NOTES (insn) = x;
5608 }
5609
5610 /* Note: AT&T enter does NOT have reversed args. Enter is probably
5611 slower on all targets. Also sdb doesn't like it. */
5612
5613 if (frame_pointer_needed)
5614 {
5615 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
5616 RTX_FRAME_RELATED_P (insn) = 1;
5617
5618 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
5619 RTX_FRAME_RELATED_P (insn) = 1;
5620 }
5621
5622 allocate = frame.to_allocate;
5623
5624 if (!frame.save_regs_using_mov)
5625 ix86_emit_save_regs ();
5626 else
5627 allocate += frame.nregs * UNITS_PER_WORD;
5628
5629 /* When using the red zone we may start saving registers before allocating
5630 the stack frame, saving one cycle of the prologue. */
5631 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
5632 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
5633 : stack_pointer_rtx,
5634 -frame.nregs * UNITS_PER_WORD);
5635
5636 if (allocate == 0)
5637 ;
5638 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
5639 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5640 GEN_INT (-allocate), -1);
5641 else
5642 {
5643 /* Only valid for Win32. */
5644 rtx eax = gen_rtx_REG (SImode, 0);
5645 bool eax_live = ix86_eax_live_at_start_p ();
5646 rtx t;
5647
5648 gcc_assert (!TARGET_64BIT);
5649
5650 if (eax_live)
5651 {
5652 emit_insn (gen_push (eax));
5653 allocate -= 4;
5654 }
5655
5656 emit_move_insn (eax, GEN_INT (allocate));
5657
5658 insn = emit_insn (gen_allocate_stack_worker (eax));
5659 RTX_FRAME_RELATED_P (insn) = 1;
5660 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
5661 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
5662 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
5663 t, REG_NOTES (insn));
5664
5665 if (eax_live)
5666 {
5667 if (frame_pointer_needed)
5668 t = plus_constant (hard_frame_pointer_rtx,
5669 allocate
5670 - frame.to_allocate
5671 - frame.nregs * UNITS_PER_WORD);
5672 else
5673 t = plus_constant (stack_pointer_rtx, allocate);
5674 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
5675 }
5676 }
5677
5678 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
5679 {
5680 if (!frame_pointer_needed || !frame.to_allocate)
5681 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
5682 else
5683 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
5684 -frame.nregs * UNITS_PER_WORD);
5685 }
5686
5687 pic_reg_used = false;
5688 if (pic_offset_table_rtx
5689 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5690 || current_function_profile))
5691 {
5692 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
5693
5694 if (alt_pic_reg_used != INVALID_REGNUM)
5695 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
5696
5697 pic_reg_used = true;
5698 }
5699
5700 if (pic_reg_used)
5701 {
5702 if (TARGET_64BIT)
5703 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
5704 else
5705 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
5706
5707 /* Even with accurate pre-reload life analysis, we can wind up
5708 deleting all references to the pic register after reload.
5709 Consider if cross-jumping unifies two sides of a branch
5710 controlled by a comparison vs the only read from a global.
5711 In which case, allow the set_got to be deleted, though we're
5712 too late to do anything about the ebx save in the prologue. */
5713 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
5714 }
5715
5716 /* Prevent function calls from being scheduled before the call to mcount.
5717 In the pic_reg_used case, make sure that the got load isn't deleted. */
5718 if (current_function_profile)
5719 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
5720 }
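
/* A sketch of the common output of ix86_expand_prologue on ia32 (frame
   pointer needed, registers saved with pushes, small frame, no stack
   realignment); this is illustrative only:

	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx		(one push per register to save)
	subl	$N, %esp	(N = frame.to_allocate)

   With -mstackrealign the prologue instead copies the incoming argument
   pointer into a scratch register, ands %esp with -16 and re-pushes the
   return address, as described in the comments above.  */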
5721
5722 /* Emit code to restore saved registers using MOV insns. First register
5723 is restored from POINTER + OFFSET. */
5724 static void
5725 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
5726 int maybe_eh_return)
5727 {
5728 int regno;
5729 rtx base_address = gen_rtx_MEM (Pmode, pointer);
5730
5731 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5732 if (ix86_save_reg (regno, maybe_eh_return))
5733 {
5734 /* Ensure that adjust_address won't be forced to produce pointer
5735 out of range allowed by x86-64 instruction set. */
5736 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
5737 {
5738 rtx r11;
5739
5740 r11 = gen_rtx_REG (DImode, R11_REG);
5741 emit_move_insn (r11, GEN_INT (offset));
5742 emit_insn (gen_adddi3 (r11, r11, pointer));
5743 base_address = gen_rtx_MEM (Pmode, r11);
5744 offset = 0;
5745 }
5746 emit_move_insn (gen_rtx_REG (Pmode, regno),
5747 adjust_address (base_address, Pmode, offset));
5748 offset += UNITS_PER_WORD;
5749 }
5750 }
5751
5752 /* Restore function stack, frame, and registers. */
5753
5754 void
5755 ix86_expand_epilogue (int style)
5756 {
5757 int regno;
5758 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
5759 struct ix86_frame frame;
5760 HOST_WIDE_INT offset;
5761
5762 ix86_compute_frame_layout (&frame);
5763
5764 /* Calculate start of saved registers relative to ebp. Special care
5765 must be taken for the normal return case of a function using
5766 eh_return: the eax and edx registers are marked as saved, but not
5767 restored along this path. */
5768 offset = frame.nregs;
5769 if (current_function_calls_eh_return && style != 2)
5770 offset -= 2;
5771 offset *= -UNITS_PER_WORD;
5772
5773 /* If we're only restoring one register and sp is not valid, then
5774 use a move instruction to restore the register, since it's
5775 less work than reloading sp and popping the register.
5776
5777 The default code results in a stack adjustment using an add/lea
5778 instruction, while this code results in a LEAVE instruction (or discrete
5779 equivalent), so it is profitable in some other cases as well, especially
5780 when there are no registers to restore. We also use this code when
5781 TARGET_USE_LEAVE is set and there is exactly one register to pop. This
5782 heuristic may need some tuning in the future. */
5783 if ((!sp_valid && frame.nregs <= 1)
5784 || (TARGET_EPILOGUE_USING_MOVE
5785 && cfun->machine->use_fast_prologue_epilogue
5786 && (frame.nregs > 1 || frame.to_allocate))
5787 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
5788 || (frame_pointer_needed && TARGET_USE_LEAVE
5789 && cfun->machine->use_fast_prologue_epilogue
5790 && frame.nregs == 1)
5791 || current_function_calls_eh_return)
5792 {
5793 /* Restore registers. We can use ebp or esp to address the memory
5794 locations. If both are available, default to ebp, since offsets
5795 are known to be small. The only exception is esp pointing directly to
5796 the end of the block of saved registers, where we may simplify the
5797 addressing mode. */
5798
5799 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
5800 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
5801 frame.to_allocate, style == 2);
5802 else
5803 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
5804 offset, style == 2);
5805
5806 /* eh_return epilogues need %ecx added to the stack pointer. */
5807 if (style == 2)
5808 {
5809 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
5810
5811 if (frame_pointer_needed)
5812 {
5813 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
5814 tmp = plus_constant (tmp, UNITS_PER_WORD);
5815 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
5816
5817 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
5818 emit_move_insn (hard_frame_pointer_rtx, tmp);
5819
5820 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
5821 const0_rtx, style);
5822 }
5823 else
5824 {
5825 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
5826 tmp = plus_constant (tmp, (frame.to_allocate
5827 + frame.nregs * UNITS_PER_WORD));
5828 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
5829 }
5830 }
5831 else if (!frame_pointer_needed)
5832 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5833 GEN_INT (frame.to_allocate
5834 + frame.nregs * UNITS_PER_WORD),
5835 style);
5836 /* If not an i386, mov & pop is faster than "leave". */
5837 else if (TARGET_USE_LEAVE || optimize_size
5838 || !cfun->machine->use_fast_prologue_epilogue)
5839 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5840 else
5841 {
5842 pro_epilogue_adjust_stack (stack_pointer_rtx,
5843 hard_frame_pointer_rtx,
5844 const0_rtx, style);
5845 if (TARGET_64BIT)
5846 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5847 else
5848 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5849 }
5850 }
5851 else
5852 {
5853 /* First step is to deallocate the stack frame so that we can
5854 pop the registers. */
5855 if (!sp_valid)
5856 {
5857 gcc_assert (frame_pointer_needed);
5858 pro_epilogue_adjust_stack (stack_pointer_rtx,
5859 hard_frame_pointer_rtx,
5860 GEN_INT (offset), style);
5861 }
5862 else if (frame.to_allocate)
5863 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5864 GEN_INT (frame.to_allocate), style);
5865
5866 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5867 if (ix86_save_reg (regno, false))
5868 {
5869 if (TARGET_64BIT)
5870 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
5871 else
5872 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
5873 }
5874 if (frame_pointer_needed)
5875 {
5876 /* Leave results in shorter dependency chains on CPUs that are
5877 able to grok it fast. */
5878 if (TARGET_USE_LEAVE)
5879 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5880 else if (TARGET_64BIT)
5881 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5882 else
5883 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5884 }
5885 }
5886
5887 if (cfun->machine->force_align_arg_pointer)
5888 {
5889 emit_insn (gen_addsi3 (stack_pointer_rtx,
5890 cfun->machine->force_align_arg_pointer,
5891 GEN_INT (-4)));
5892 }
5893
5894 /* Sibcall epilogues don't want a return instruction. */
5895 if (style == 0)
5896 return;
5897
5898 if (current_function_pops_args && current_function_args_size)
5899 {
5900 rtx popc = GEN_INT (current_function_pops_args);
5901
5902 /* i386 can only pop 64K bytes with one instruction. If asked to pop
5903 more, pop the return address, do an explicit add, and jump indirectly
5904 to the caller. */
5905
5906 if (current_function_pops_args >= 65536)
5907 {
5908 rtx ecx = gen_rtx_REG (SImode, 2);
5909
5910 /* There is no "pascal" calling convention in 64bit ABI. */
5911 gcc_assert (!TARGET_64BIT);
5912
5913 emit_insn (gen_popsi1 (ecx));
5914 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
5915 emit_jump_insn (gen_return_indirect_internal (ecx));
5916 }
5917 else
5918 emit_jump_insn (gen_return_pop_internal (popc));
5919 }
5920 else
5921 emit_jump_insn (gen_return_internal ());
5922 }
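
/* A sketch of the two epilogue shapes produced above (illustrative only):
   with a frame pointer and TARGET_USE_LEAVE the usual exit is

	leave
	ret

   while the pop-based path deallocates the frame and pops each saved
   register explicitly:

	addl	$N, %esp
	popl	%ebx
	popl	%ebp
	ret
   */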
5923
5924 /* Reset state modified by the function's body (e.g. the PIC register number). */
5925
5926 static void
5927 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
5928 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5929 {
5930 if (pic_offset_table_rtx)
5931 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
5932 #if TARGET_MACHO
5933 /* Mach-O doesn't support labels at the end of objects, so if
5934 it looks like we might want one, insert a NOP. */
5935 {
5936 rtx insn = get_last_insn ();
5937 while (insn
5938 && NOTE_P (insn)
5939 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED_LABEL)
5940 insn = PREV_INSN (insn);
5941 if (insn
5942 && (LABEL_P (insn)
5943 || (NOTE_P (insn)
5944 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL)))
5945 fputs ("\tnop\n", file);
5946 }
5947 #endif
5948
5949 }
5950 \f
5951 /* Extract the parts of an RTL expression that is a valid memory address
5952 for an instruction. Return 0 if the structure of the address is
5953 grossly off. Return -1 if the address contains ASHIFT, so it is not
5954 strictly valid, but is still useful for computing the length of an lea insn. */
5955
5956 int
5957 ix86_decompose_address (rtx addr, struct ix86_address *out)
5958 {
5959 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
5960 rtx base_reg, index_reg;
5961 HOST_WIDE_INT scale = 1;
5962 rtx scale_rtx = NULL_RTX;
5963 int retval = 1;
5964 enum ix86_address_seg seg = SEG_DEFAULT;
5965
5966 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
5967 base = addr;
5968 else if (GET_CODE (addr) == PLUS)
5969 {
5970 rtx addends[4], op;
5971 int n = 0, i;
5972
5973 op = addr;
5974 do
5975 {
5976 if (n >= 4)
5977 return 0;
5978 addends[n++] = XEXP (op, 1);
5979 op = XEXP (op, 0);
5980 }
5981 while (GET_CODE (op) == PLUS);
5982 if (n >= 4)
5983 return 0;
5984 addends[n] = op;
5985
5986 for (i = n; i >= 0; --i)
5987 {
5988 op = addends[i];
5989 switch (GET_CODE (op))
5990 {
5991 case MULT:
5992 if (index)
5993 return 0;
5994 index = XEXP (op, 0);
5995 scale_rtx = XEXP (op, 1);
5996 break;
5997
5998 case UNSPEC:
5999 if (XINT (op, 1) == UNSPEC_TP
6000 && TARGET_TLS_DIRECT_SEG_REFS
6001 && seg == SEG_DEFAULT)
6002 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
6003 else
6004 return 0;
6005 break;
6006
6007 case REG:
6008 case SUBREG:
6009 if (!base)
6010 base = op;
6011 else if (!index)
6012 index = op;
6013 else
6014 return 0;
6015 break;
6016
6017 case CONST:
6018 case CONST_INT:
6019 case SYMBOL_REF:
6020 case LABEL_REF:
6021 if (disp)
6022 return 0;
6023 disp = op;
6024 break;
6025
6026 default:
6027 return 0;
6028 }
6029 }
6030 }
6031 else if (GET_CODE (addr) == MULT)
6032 {
6033 index = XEXP (addr, 0); /* index*scale */
6034 scale_rtx = XEXP (addr, 1);
6035 }
6036 else if (GET_CODE (addr) == ASHIFT)
6037 {
6038 rtx tmp;
6039
6040 /* We're called for lea too, which implements ashift on occasion. */
6041 index = XEXP (addr, 0);
6042 tmp = XEXP (addr, 1);
6043 if (!CONST_INT_P (tmp))
6044 return 0;
6045 scale = INTVAL (tmp);
6046 if ((unsigned HOST_WIDE_INT) scale > 3)
6047 return 0;
6048 scale = 1 << scale;
6049 retval = -1;
6050 }
6051 else
6052 disp = addr; /* displacement */
6053
6054 /* Extract the integral value of scale. */
6055 if (scale_rtx)
6056 {
6057 if (!CONST_INT_P (scale_rtx))
6058 return 0;
6059 scale = INTVAL (scale_rtx);
6060 }
6061
6062 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
6063 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
6064
6065 /* Allow the arg pointer and stack pointer as index if there is no scaling. */
6066 if (base_reg && index_reg && scale == 1
6067 && (index_reg == arg_pointer_rtx
6068 || index_reg == frame_pointer_rtx
6069 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
6070 {
6071 rtx tmp;
6072 tmp = base, base = index, index = tmp;
6073 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
6074 }
6075
6076 /* Special case: %ebp cannot be encoded as a base without a displacement. */
6077 if ((base_reg == hard_frame_pointer_rtx
6078 || base_reg == frame_pointer_rtx
6079 || base_reg == arg_pointer_rtx) && !disp)
6080 disp = const0_rtx;
6081
6082 /* Special case: on K6, [%esi] forces the instruction to be vector decoded.
6083 Avoid this by transforming it to [%esi+0]. */
6084 if (ix86_tune == PROCESSOR_K6 && !optimize_size
6085 && base_reg && !index_reg && !disp
6086 && REG_P (base_reg)
6087 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
6088 disp = const0_rtx;
6089
6090 /* Special case: encode reg+reg instead of reg*2. */
6091 if (!base && index && scale && scale == 2)
6092 base = index, base_reg = index_reg, scale = 1;
6093
6094 /* Special case: scaling cannot be encoded without base or displacement. */
6095 if (!base && !disp && index && scale != 1)
6096 disp = const0_rtx;
6097
6098 out->base = base;
6099 out->index = index;
6100 out->disp = disp;
6101 out->scale = scale;
6102 out->seg = seg;
6103
6104 return retval;
6105 }
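
/* For example, the address 12(%ebx,%eax,4), i.e.
   (plus (plus (mult (reg %eax) (const_int 4)) (reg %ebx)) (const_int 12)),
   decomposes into base = %ebx, index = %eax, scale = 4, disp = 12 and
   seg = SEG_DEFAULT, and the function returns 1.  */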
6106 \f
6107 /* Return the cost of the memory address X.
6108 For i386, it is better to use a complex address than to let gcc copy
6109 the address into a reg and make a new pseudo. But not if the address
6110 requires two regs - that would mean more pseudos with longer
6111 lifetimes. */
6112 static int
6113 ix86_address_cost (rtx x)
6114 {
6115 struct ix86_address parts;
6116 int cost = 1;
6117 int ok = ix86_decompose_address (x, &parts);
6118
6119 gcc_assert (ok);
6120
6121 if (parts.base && GET_CODE (parts.base) == SUBREG)
6122 parts.base = SUBREG_REG (parts.base);
6123 if (parts.index && GET_CODE (parts.index) == SUBREG)
6124 parts.index = SUBREG_REG (parts.index);
6125
6126 /* More complex memory references are better. */
6127 if (parts.disp && parts.disp != const0_rtx)
6128 cost--;
6129 if (parts.seg != SEG_DEFAULT)
6130 cost--;
6131
6132 /* Attempt to minimize number of registers in the address. */
6133 if ((parts.base
6134 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
6135 || (parts.index
6136 && (!REG_P (parts.index)
6137 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
6138 cost++;
6139
6140 if (parts.base
6141 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
6142 && parts.index
6143 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
6144 && parts.base != parts.index)
6145 cost++;
6146
6147 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
6148 since its predecode logic can't detect the length of instructions
6149 and it degenerates to vector decoding. Increase the cost of such
6150 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
6151 to split such addresses or even refuse such addresses at all.
6152
6153 The following addressing modes are affected:
6154 [base+scale*index]
6155 [scale*index+disp]
6156 [base+index]
6157
6158 The first and last cases may be avoidable by explicitly coding a zero
6159 displacement in the memory address, but I don't have an AMD-K6 machine
6160 handy to check this theory. */
6161
6162 if (TARGET_K6
6163 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
6164 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
6165 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
6166 cost += 10;
6167
6168 return cost;
6169 }
6170 \f
6171 /* If X is a machine specific address (i.e. a symbol or label being
6172 referenced as a displacement from the GOT implemented using an
6173 UNSPEC), then return the base term. Otherwise return X. */
6174
6175 rtx
6176 ix86_find_base_term (rtx x)
6177 {
6178 rtx term;
6179
6180 if (TARGET_64BIT)
6181 {
6182 if (GET_CODE (x) != CONST)
6183 return x;
6184 term = XEXP (x, 0);
6185 if (GET_CODE (term) == PLUS
6186 && (CONST_INT_P (XEXP (term, 1))
6187 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
6188 term = XEXP (term, 0);
6189 if (GET_CODE (term) != UNSPEC
6190 || XINT (term, 1) != UNSPEC_GOTPCREL)
6191 return x;
6192
6193 term = XVECEXP (term, 0, 0);
6194
6195 if (GET_CODE (term) != SYMBOL_REF
6196 && GET_CODE (term) != LABEL_REF)
6197 return x;
6198
6199 return term;
6200 }
6201
6202 term = ix86_delegitimize_address (x);
6203
6204 if (GET_CODE (term) != SYMBOL_REF
6205 && GET_CODE (term) != LABEL_REF)
6206 return x;
6207
6208 return term;
6209 }
6210
6211 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
6212 this is used to form addresses of local data when -fPIC is in
6213 use. */
6214
6215 static bool
6216 darwin_local_data_pic (rtx disp)
6217 {
6218 if (GET_CODE (disp) == MINUS)
6219 {
6220 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
6221 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
6222 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
6223 {
6224 const char *sym_name = XSTR (XEXP (disp, 1), 0);
6225 if (! strcmp (sym_name, "<pic base>"))
6226 return true;
6227 }
6228 }
6229
6230 return false;
6231 }
6232 \f
6233 /* Determine if a given RTX is a valid constant. We already know this
6234 satisfies CONSTANT_P. */
6235
6236 bool
6237 legitimate_constant_p (rtx x)
6238 {
6239 switch (GET_CODE (x))
6240 {
6241 case CONST:
6242 x = XEXP (x, 0);
6243
6244 if (GET_CODE (x) == PLUS)
6245 {
6246 if (!CONST_INT_P (XEXP (x, 1)))
6247 return false;
6248 x = XEXP (x, 0);
6249 }
6250
6251 if (TARGET_MACHO && darwin_local_data_pic (x))
6252 return true;
6253
6254 /* Only some unspecs are valid as "constants". */
6255 if (GET_CODE (x) == UNSPEC)
6256 switch (XINT (x, 1))
6257 {
6258 case UNSPEC_GOTOFF:
6259 return TARGET_64BIT;
6260 case UNSPEC_TPOFF:
6261 case UNSPEC_NTPOFF:
6262 x = XVECEXP (x, 0, 0);
6263 return (GET_CODE (x) == SYMBOL_REF
6264 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
6265 case UNSPEC_DTPOFF:
6266 x = XVECEXP (x, 0, 0);
6267 return (GET_CODE (x) == SYMBOL_REF
6268 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
6269 default:
6270 return false;
6271 }
6272
6273 /* We must have drilled down to a symbol. */
6274 if (GET_CODE (x) == LABEL_REF)
6275 return true;
6276 if (GET_CODE (x) != SYMBOL_REF)
6277 return false;
6278 /* FALLTHRU */
6279
6280 case SYMBOL_REF:
6281 /* TLS symbols are never valid. */
6282 if (SYMBOL_REF_TLS_MODEL (x))
6283 return false;
6284 break;
6285
6286 case CONST_DOUBLE:
6287 if (GET_MODE (x) == TImode
6288 && x != CONST0_RTX (TImode)
6289 && !TARGET_64BIT)
6290 return false;
6291 break;
6292
6293 case CONST_VECTOR:
6294 if (x == CONST0_RTX (GET_MODE (x)))
6295 return true;
6296 return false;
6297
6298 default:
6299 break;
6300 }
6301
6302 /* Otherwise we handle everything else in the move patterns. */
6303 return true;
6304 }
6305
6306 /* Determine if it's legal to put X into the constant pool. This
6307 is not possible for the address of thread-local symbols, which
6308 is checked above. */
6309
6310 static bool
6311 ix86_cannot_force_const_mem (rtx x)
6312 {
6313 /* We can always put integral constants and vectors in memory. */
6314 switch (GET_CODE (x))
6315 {
6316 case CONST_INT:
6317 case CONST_DOUBLE:
6318 case CONST_VECTOR:
6319 return false;
6320
6321 default:
6322 break;
6323 }
6324 return !legitimate_constant_p (x);
6325 }
6326
6327 /* Determine if a given RTX is a valid constant address. */
6328
6329 bool
6330 constant_address_p (rtx x)
6331 {
6332 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
6333 }
6334
6335 /* Nonzero if the constant value X is a legitimate general operand
6336 when generating PIC code. It is given that flag_pic is on and
6337 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
6338
6339 bool
6340 legitimate_pic_operand_p (rtx x)
6341 {
6342 rtx inner;
6343
6344 switch (GET_CODE (x))
6345 {
6346 case CONST:
6347 inner = XEXP (x, 0);
6348 if (GET_CODE (inner) == PLUS
6349 && CONST_INT_P (XEXP (inner, 1)))
6350 inner = XEXP (inner, 0);
6351
6352 /* Only some unspecs are valid as "constants". */
6353 if (GET_CODE (inner) == UNSPEC)
6354 switch (XINT (inner, 1))
6355 {
6356 case UNSPEC_GOTOFF:
6357 return TARGET_64BIT;
6358 case UNSPEC_TPOFF:
6359 x = XVECEXP (inner, 0, 0);
6360 return (GET_CODE (x) == SYMBOL_REF
6361 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
6362 default:
6363 return false;
6364 }
6365 /* FALLTHRU */
6366
6367 case SYMBOL_REF:
6368 case LABEL_REF:
6369 return legitimate_pic_address_disp_p (x);
6370
6371 default:
6372 return true;
6373 }
6374 }
6375
6376 /* Determine if a given CONST RTX is a valid memory displacement
6377 in PIC mode. */
6378
6379 int
6380 legitimate_pic_address_disp_p (rtx disp)
6381 {
6382 bool saw_plus;
6383
6384 /* In 64bit mode we can allow direct addresses of symbols and labels
6385 when they are not dynamic symbols. */
6386 if (TARGET_64BIT)
6387 {
6388 rtx op0 = disp, op1;
6389
6390 switch (GET_CODE (disp))
6391 {
6392 case LABEL_REF:
6393 return true;
6394
6395 case CONST:
6396 if (GET_CODE (XEXP (disp, 0)) != PLUS)
6397 break;
6398 op0 = XEXP (XEXP (disp, 0), 0);
6399 op1 = XEXP (XEXP (disp, 0), 1);
6400 if (!CONST_INT_P (op1)
6401 || INTVAL (op1) >= 16*1024*1024
6402 || INTVAL (op1) < -16*1024*1024)
6403 break;
6404 if (GET_CODE (op0) == LABEL_REF)
6405 return true;
6406 if (GET_CODE (op0) != SYMBOL_REF)
6407 break;
6408 /* FALLTHRU */
6409
6410 case SYMBOL_REF:
6411 /* TLS references should always be enclosed in UNSPEC. */
6412 if (SYMBOL_REF_TLS_MODEL (op0))
6413 return false;
6414 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0))
6415 return true;
6416 break;
6417
6418 default:
6419 break;
6420 }
6421 }
6422 if (GET_CODE (disp) != CONST)
6423 return 0;
6424 disp = XEXP (disp, 0);
6425
6426 if (TARGET_64BIT)
6427 {
6428 /* It is unsafe to allow PLUS expressions here; they can exceed the
6429 allowed distance of GOT references. We should not need them anyway. */
6430 if (GET_CODE (disp) != UNSPEC
6431 || (XINT (disp, 1) != UNSPEC_GOTPCREL
6432 && XINT (disp, 1) != UNSPEC_GOTOFF))
6433 return 0;
6434
6435 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
6436 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
6437 return 0;
6438 return 1;
6439 }
6440
6441 saw_plus = false;
6442 if (GET_CODE (disp) == PLUS)
6443 {
6444 if (!CONST_INT_P (XEXP (disp, 1)))
6445 return 0;
6446 disp = XEXP (disp, 0);
6447 saw_plus = true;
6448 }
6449
6450 if (TARGET_MACHO && darwin_local_data_pic (disp))
6451 return 1;
6452
6453 if (GET_CODE (disp) != UNSPEC)
6454 return 0;
6455
6456 switch (XINT (disp, 1))
6457 {
6458 case UNSPEC_GOT:
6459 if (saw_plus)
6460 return false;
6461 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
6462 case UNSPEC_GOTOFF:
6463 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
6464 While the ABI also specifies a 32bit relocation, we don't produce it
6465 in the small PIC model at all. */
6466 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
6467 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
6468 && !TARGET_64BIT)
6469 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
6470 return false;
6471 case UNSPEC_GOTTPOFF:
6472 case UNSPEC_GOTNTPOFF:
6473 case UNSPEC_INDNTPOFF:
6474 if (saw_plus)
6475 return false;
6476 disp = XVECEXP (disp, 0, 0);
6477 return (GET_CODE (disp) == SYMBOL_REF
6478 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
6479 case UNSPEC_NTPOFF:
6480 disp = XVECEXP (disp, 0, 0);
6481 return (GET_CODE (disp) == SYMBOL_REF
6482 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
6483 case UNSPEC_DTPOFF:
6484 disp = XVECEXP (disp, 0, 0);
6485 return (GET_CODE (disp) == SYMBOL_REF
6486 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
6487 }
6488
6489 return 0;
6490 }
6491
6492 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
6493 memory address for an instruction. The MODE argument is the machine mode
6494 for the MEM expression that wants to use this address.
6495
6496 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
6497 convert common non-canonical forms to canonical form so that they will
6498 be recognized. */
6499
6500 int
6501 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
6502 {
6503 struct ix86_address parts;
6504 rtx base, index, disp;
6505 HOST_WIDE_INT scale;
6506 const char *reason = NULL;
6507 rtx reason_rtx = NULL_RTX;
6508
6509 if (TARGET_DEBUG_ADDR)
6510 {
6511 fprintf (stderr,
6512 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
6513 GET_MODE_NAME (mode), strict);
6514 debug_rtx (addr);
6515 }
6516
6517 if (ix86_decompose_address (addr, &parts) <= 0)
6518 {
6519 reason = "decomposition failed";
6520 goto report_error;
6521 }
6522
6523 base = parts.base;
6524 index = parts.index;
6525 disp = parts.disp;
6526 scale = parts.scale;
6527
6528 /* Validate base register.
6529
6530 Don't allow SUBREG's that span more than a word here. It can lead to spill
6531 failures when the base is one word out of a two word structure, which is
6532 represented internally as a DImode int. */
6533
6534 if (base)
6535 {
6536 rtx reg;
6537 reason_rtx = base;
6538
6539 if (REG_P (base))
6540 reg = base;
6541 else if (GET_CODE (base) == SUBREG
6542 && REG_P (SUBREG_REG (base))
6543 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
6544 <= UNITS_PER_WORD)
6545 reg = SUBREG_REG (base);
6546 else
6547 {
6548 reason = "base is not a register";
6549 goto report_error;
6550 }
6551
6552 if (GET_MODE (base) != Pmode)
6553 {
6554 reason = "base is not in Pmode";
6555 goto report_error;
6556 }
6557
6558 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
6559 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
6560 {
6561 reason = "base is not valid";
6562 goto report_error;
6563 }
6564 }
6565
6566 /* Validate index register.
6567
6568 Don't allow SUBREG's that span more than a word here -- same as above. */
6569
6570 if (index)
6571 {
6572 rtx reg;
6573 reason_rtx = index;
6574
6575 if (REG_P (index))
6576 reg = index;
6577 else if (GET_CODE (index) == SUBREG
6578 && REG_P (SUBREG_REG (index))
6579 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
6580 <= UNITS_PER_WORD)
6581 reg = SUBREG_REG (index);
6582 else
6583 {
6584 reason = "index is not a register";
6585 goto report_error;
6586 }
6587
6588 if (GET_MODE (index) != Pmode)
6589 {
6590 reason = "index is not in Pmode";
6591 goto report_error;
6592 }
6593
6594 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
6595 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
6596 {
6597 reason = "index is not valid";
6598 goto report_error;
6599 }
6600 }
6601
6602 /* Validate scale factor. */
6603 if (scale != 1)
6604 {
6605 reason_rtx = GEN_INT (scale);
6606 if (!index)
6607 {
6608 reason = "scale without index";
6609 goto report_error;
6610 }
6611
6612 if (scale != 2 && scale != 4 && scale != 8)
6613 {
6614 reason = "scale is not a valid multiplier";
6615 goto report_error;
6616 }
6617 }
6618
6619 /* Validate displacement. */
6620 if (disp)
6621 {
6622 reason_rtx = disp;
6623
6624 if (GET_CODE (disp) == CONST
6625 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
6626 switch (XINT (XEXP (disp, 0), 1))
6627 {
6628 /* Refuse GOTOFF and GOT in 64bit mode since they are always 64bit when
6629 used. While the ABI also specifies 32bit relocations, we don't produce
6630 them at all and use IP relative addressing instead. */
6631 case UNSPEC_GOT:
6632 case UNSPEC_GOTOFF:
6633 gcc_assert (flag_pic);
6634 if (!TARGET_64BIT)
6635 goto is_legitimate_pic;
6636 reason = "64bit address unspec";
6637 goto report_error;
6638
6639 case UNSPEC_GOTPCREL:
6640 gcc_assert (flag_pic);
6641 goto is_legitimate_pic;
6642
6643 case UNSPEC_GOTTPOFF:
6644 case UNSPEC_GOTNTPOFF:
6645 case UNSPEC_INDNTPOFF:
6646 case UNSPEC_NTPOFF:
6647 case UNSPEC_DTPOFF:
6648 break;
6649
6650 default:
6651 reason = "invalid address unspec";
6652 goto report_error;
6653 }
6654
6655 else if (SYMBOLIC_CONST (disp)
6656 && (flag_pic
6657 || (TARGET_MACHO
6658 #if TARGET_MACHO
6659 && MACHOPIC_INDIRECT
6660 && !machopic_operand_p (disp)
6661 #endif
6662 )))
6663 {
6664
6665 is_legitimate_pic:
6666 if (TARGET_64BIT && (index || base))
6667 {
6668 /* foo@dtpoff(%rX) is ok. */
6669 if (GET_CODE (disp) != CONST
6670 || GET_CODE (XEXP (disp, 0)) != PLUS
6671 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
6672 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
6673 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
6674 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
6675 {
6676 reason = "non-constant pic memory reference";
6677 goto report_error;
6678 }
6679 }
6680 else if (! legitimate_pic_address_disp_p (disp))
6681 {
6682 reason = "displacement is an invalid pic construct";
6683 goto report_error;
6684 }
6685
6686 /* This code used to verify that a symbolic pic displacement
6687 includes the pic_offset_table_rtx register.
6688
6689 While this is a good idea, unfortunately these constructs may
6690 be created by the "adds using lea" optimization for incorrect
6691 code like:
6692
6693 int a;
6694 int foo(int i)
6695 {
6696 return *(&a+i);
6697 }
6698
6699 This code is nonsensical, but results in addressing the
6700 GOT table with pic_offset_table_rtx as the base. We can't
6701 just refuse it easily, since it gets matched by the
6702 "addsi3" pattern, which later gets split to lea when the
6703 output register differs from the input. While this
6704 could be handled by a separate addsi pattern for this case
6705 that never results in lea, disabling this test seems to be
6706 the easier and correct fix for the crash. */
6707 }
6708 else if (GET_CODE (disp) != LABEL_REF
6709 && !CONST_INT_P (disp)
6710 && (GET_CODE (disp) != CONST
6711 || !legitimate_constant_p (disp))
6712 && (GET_CODE (disp) != SYMBOL_REF
6713 || !legitimate_constant_p (disp)))
6714 {
6715 reason = "displacement is not constant";
6716 goto report_error;
6717 }
6718 else if (TARGET_64BIT
6719 && !x86_64_immediate_operand (disp, VOIDmode))
6720 {
6721 reason = "displacement is out of range";
6722 goto report_error;
6723 }
6724 }
6725
6726 /* Everything looks valid. */
6727 if (TARGET_DEBUG_ADDR)
6728 fprintf (stderr, "Success.\n");
6729 return TRUE;
6730
6731 report_error:
6732 if (TARGET_DEBUG_ADDR)
6733 {
6734 fprintf (stderr, "Error: %s\n", reason);
6735 debug_rtx (reason_rtx);
6736 }
6737 return FALSE;
6738 }
6739 \f
6740 /* Return a unique alias set for the GOT. */
6741
6742 static HOST_WIDE_INT
6743 ix86_GOT_alias_set (void)
6744 {
6745 static HOST_WIDE_INT set = -1;
6746 if (set == -1)
6747 set = new_alias_set ();
6748 return set;
6749 }
6750
6751 /* Return a legitimate reference for ORIG (an address) using the
6752 register REG. If REG is 0, a new pseudo is generated.
6753
6754 There are two types of references that must be handled:
6755
6756 1. Global data references must load the address from the GOT, via
6757 the PIC reg. An insn is emitted to do this load, and the reg is
6758 returned.
6759
6760 2. Static data references, constant pool addresses, and code labels
6761 compute the address as an offset from the GOT, whose base is in
6762 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
6763 differentiate them from global data objects. The returned
6764 address is the PIC reg + an unspec constant.
6765
6766 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
6767 reg also appears in the address. */
6768
6769 static rtx
6770 legitimize_pic_address (rtx orig, rtx reg)
6771 {
6772 rtx addr = orig;
6773 rtx new = orig;
6774 rtx base;
6775
6776 #if TARGET_MACHO
6777 if (TARGET_MACHO && !TARGET_64BIT)
6778 {
6779 if (reg == 0)
6780 reg = gen_reg_rtx (Pmode);
6781 /* Use the generic Mach-O PIC machinery. */
6782 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
6783 }
6784 #endif
6785
6786 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
6787 new = addr;
6788 else if (TARGET_64BIT
6789 && ix86_cmodel != CM_SMALL_PIC
6790 && local_symbolic_operand (addr, Pmode))
6791 {
6792 rtx tmpreg;
6793 /* This symbol may be referenced via a displacement from the PIC
6794 base address (@GOTOFF). */
6795
6796 if (reload_in_progress)
6797 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6798 if (GET_CODE (addr) == CONST)
6799 addr = XEXP (addr, 0);
6800 if (GET_CODE (addr) == PLUS)
6801 {
6802 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6803 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6804 }
6805 else
6806 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6807 new = gen_rtx_CONST (Pmode, new);
6808 if (!reg)
6809 tmpreg = gen_reg_rtx (Pmode);
6810 else
6811 tmpreg = reg;
6812 emit_move_insn (tmpreg, new);
6813
6814 if (reg != 0)
6815 {
6816 new = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
6817 tmpreg, 1, OPTAB_DIRECT);
6818 new = reg;
6819 }
6820 else new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
6821 }
6822 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
6823 {
6824 /* This symbol may be referenced via a displacement from the PIC
6825 base address (@GOTOFF). */
6826
6827 if (reload_in_progress)
6828 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6829 if (GET_CODE (addr) == CONST)
6830 addr = XEXP (addr, 0);
6831 if (GET_CODE (addr) == PLUS)
6832 {
6833 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6834 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6835 }
6836 else
6837 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6838 new = gen_rtx_CONST (Pmode, new);
6839 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6840
6841 if (reg != 0)
6842 {
6843 emit_move_insn (reg, new);
6844 new = reg;
6845 }
6846 }
6847 else if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
6848 {
6849 if (TARGET_64BIT)
6850 {
6851 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
6852 new = gen_rtx_CONST (Pmode, new);
6853 new = gen_const_mem (Pmode, new);
6854 set_mem_alias_set (new, ix86_GOT_alias_set ());
6855
6856 if (reg == 0)
6857 reg = gen_reg_rtx (Pmode);
6858 /* Use gen_movsi directly; otherwise the address is loaded into a
6859 register and becomes subject to CSE. We don't want to CSE these
6860 addresses; instead we CSE the addresses from the GOT table, so skip this. */
6861 emit_insn (gen_movsi (reg, new));
6862 new = reg;
6863 }
6864 else
6865 {
6866 /* This symbol must be referenced via a load from the
6867 Global Offset Table (@GOT). */
6868
6869 if (reload_in_progress)
6870 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6871 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
6872 new = gen_rtx_CONST (Pmode, new);
6873 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6874 new = gen_const_mem (Pmode, new);
6875 set_mem_alias_set (new, ix86_GOT_alias_set ());
6876
6877 if (reg == 0)
6878 reg = gen_reg_rtx (Pmode);
6879 emit_move_insn (reg, new);
6880 new = reg;
6881 }
6882 }
6883 else
6884 {
6885 if (CONST_INT_P (addr)
6886 && !x86_64_immediate_operand (addr, VOIDmode))
6887 {
6888 if (reg)
6889 {
6890 emit_move_insn (reg, addr);
6891 new = reg;
6892 }
6893 else
6894 new = force_reg (Pmode, addr);
6895 }
6896 else if (GET_CODE (addr) == CONST)
6897 {
6898 addr = XEXP (addr, 0);
6899
6900 /* We must match what we generated earlier. Assume the only
6901 unspecs that can get here are ours; not that we could do
6902 anything with them anyway.... */
6903 if (GET_CODE (addr) == UNSPEC
6904 || (GET_CODE (addr) == PLUS
6905 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
6906 return orig;
6907 gcc_assert (GET_CODE (addr) == PLUS);
6908 }
6909 if (GET_CODE (addr) == PLUS)
6910 {
6911 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
6912
6913 /* Check first to see if this is a constant offset from a @GOTOFF
6914 symbol reference. */
6915 if (local_symbolic_operand (op0, Pmode)
6916 && CONST_INT_P (op1))
6917 {
6918 if (!TARGET_64BIT)
6919 {
6920 if (reload_in_progress)
6921 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6922 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
6923 UNSPEC_GOTOFF);
6924 new = gen_rtx_PLUS (Pmode, new, op1);
6925 new = gen_rtx_CONST (Pmode, new);
6926 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6927
6928 if (reg != 0)
6929 {
6930 emit_move_insn (reg, new);
6931 new = reg;
6932 }
6933 }
6934 else
6935 {
6936 if (INTVAL (op1) < -16*1024*1024
6937 || INTVAL (op1) >= 16*1024*1024)
6938 {
6939 if (!x86_64_immediate_operand (op1, Pmode))
6940 op1 = force_reg (Pmode, op1);
6941 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
6942 }
6943 }
6944 }
6945 else
6946 {
6947 base = legitimize_pic_address (XEXP (addr, 0), reg);
6948 new = legitimize_pic_address (XEXP (addr, 1),
6949 base == reg ? NULL_RTX : reg);
6950
6951 if (CONST_INT_P (new))
6952 new = plus_constant (base, INTVAL (new));
6953 else
6954 {
6955 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
6956 {
6957 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
6958 new = XEXP (new, 1);
6959 }
6960 new = gen_rtx_PLUS (Pmode, base, new);
6961 }
6962 }
6963 }
6964 }
6965 return new;
6966 }
6967 \f
6968 /* Load the thread pointer. If TO_REG is true, force it into a register. */
6969
6970 static rtx
6971 get_thread_pointer (int to_reg)
6972 {
6973 rtx tp, reg, insn;
6974
6975 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
6976 if (!to_reg)
6977 return tp;
6978
6979 reg = gen_reg_rtx (Pmode);
6980 insn = gen_rtx_SET (VOIDmode, reg, tp);
6981 insn = emit_insn (insn);
6982
6983 return reg;
6984 }
6985
6986 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
6987 false if we expect this to be used for a memory address and true if
6988 we expect to load the address into a register. */
6989
6990 static rtx
6991 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
6992 {
6993 rtx dest, base, off, pic, tp;
6994 int type;
6995
6996 switch (model)
6997 {
6998 case TLS_MODEL_GLOBAL_DYNAMIC:
6999 dest = gen_reg_rtx (Pmode);
7000 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
7001
7002 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
7003 {
7004 rtx rax = gen_rtx_REG (Pmode, 0), insns;
7005
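/* Hard register 0 is %rax here; the global-dynamic TLS call sequence
   emitted below leaves its result there, and emit_libcall_block then
   copies it into DEST with an equivalence note for X.  */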
7006 start_sequence ();
7007 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
7008 insns = get_insns ();
7009 end_sequence ();
7010
7011 emit_libcall_block (insns, dest, rax, x);
7012 }
7013 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7014 emit_insn (gen_tls_global_dynamic_64 (dest, x));
7015 else
7016 emit_insn (gen_tls_global_dynamic_32 (dest, x));
7017
7018 if (TARGET_GNU2_TLS)
7019 {
7020 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
7021
7022 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7023 }
7024 break;
7025
7026 case TLS_MODEL_LOCAL_DYNAMIC:
7027 base = gen_reg_rtx (Pmode);
7028 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
7029
7030 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
7031 {
7032 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
7033
7034 start_sequence ();
7035 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
7036 insns = get_insns ();
7037 end_sequence ();
7038
7039 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
7040 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
7041 emit_libcall_block (insns, base, rax, note);
7042 }
7043 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7044 emit_insn (gen_tls_local_dynamic_base_64 (base));
7045 else
7046 emit_insn (gen_tls_local_dynamic_base_32 (base));
7047
7048 if (TARGET_GNU2_TLS)
7049 {
7050 rtx x = ix86_tls_module_base ();
7051
7052 set_unique_reg_note (get_last_insn (), REG_EQUIV,
7053 gen_rtx_MINUS (Pmode, x, tp));
7054 }
7055
7056 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
7057 off = gen_rtx_CONST (Pmode, off);
7058
7059 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
7060
7061 if (TARGET_GNU2_TLS)
7062 {
7063 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
7064
7065 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7066 }
7067
7068 break;
7069
7070 case TLS_MODEL_INITIAL_EXEC:
7071 if (TARGET_64BIT)
7072 {
7073 pic = NULL;
7074 type = UNSPEC_GOTNTPOFF;
7075 }
7076 else if (flag_pic)
7077 {
7078 if (reload_in_progress)
7079 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
7080 pic = pic_offset_table_rtx;
7081 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
7082 }
7083 else if (!TARGET_ANY_GNU_TLS)
7084 {
7085 pic = gen_reg_rtx (Pmode);
7086 emit_insn (gen_set_got (pic));
7087 type = UNSPEC_GOTTPOFF;
7088 }
7089 else
7090 {
7091 pic = NULL;
7092 type = UNSPEC_INDNTPOFF;
7093 }
7094
7095 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
7096 off = gen_rtx_CONST (Pmode, off);
7097 if (pic)
7098 off = gen_rtx_PLUS (Pmode, pic, off);
7099 off = gen_const_mem (Pmode, off);
7100 set_mem_alias_set (off, ix86_GOT_alias_set ());
7101
7102 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7103 {
7104 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7105 off = force_reg (Pmode, off);
7106 return gen_rtx_PLUS (Pmode, base, off);
7107 }
7108 else
7109 {
7110 base = get_thread_pointer (true);
7111 dest = gen_reg_rtx (Pmode);
7112 emit_insn (gen_subsi3 (dest, base, off));
7113 }
7114 break;
7115
7116 case TLS_MODEL_LOCAL_EXEC:
7117 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
7118 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7119 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
7120 off = gen_rtx_CONST (Pmode, off);
7121
7122 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7123 {
7124 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7125 return gen_rtx_PLUS (Pmode, base, off);
7126 }
7127 else
7128 {
7129 base = get_thread_pointer (true);
7130 dest = gen_reg_rtx (Pmode);
7131 emit_insn (gen_subsi3 (dest, base, off));
7132 }
7133 break;
7134
7135 default:
7136 gcc_unreachable ();
7137 }
7138
7139 return dest;
7140 }
7141
7142 /* Try machine-dependent ways of modifying an illegitimate address
7143 to be legitimate. If we find one, return the new, valid address.
7144 This macro is used in only one place: `memory_address' in explow.c.
7145
7146 OLDX is the address as it was before break_out_memory_refs was called.
7147 In some cases it is useful to look at this to decide what needs to be done.
7148
7149 MODE and WIN are passed so that this macro can use
7150 GO_IF_LEGITIMATE_ADDRESS.
7151
7152 It is always safe for this macro to do nothing. It exists to recognize
7153 opportunities to optimize the output.
7154
7155 For the 80386, we handle X+REG by loading X into a register R and
7156 using R+REG. R will go in a general reg and indexing will be used.
7157 However, if REG is a broken-out memory address or multiplication,
7158 nothing needs to be done because REG can certainly go in a general reg.
7159
7160 When -fpic is used, special handling is needed for symbolic references.
7161 See comments by legitimize_pic_address in i386.c for details. */
7162
7163 rtx
7164 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
7165 {
7166 int changed = 0;
7167 unsigned log;
7168
7169 if (TARGET_DEBUG_ADDR)
7170 {
7171 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
7172 GET_MODE_NAME (mode));
7173 debug_rtx (x);
7174 }
7175
7176 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
7177 if (log)
7178 return legitimize_tls_address (x, log, false);
7179 if (GET_CODE (x) == CONST
7180 && GET_CODE (XEXP (x, 0)) == PLUS
7181 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
7182 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
7183 {
7184 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
7185 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
7186 }
7187
7188 if (flag_pic && SYMBOLIC_CONST (x))
7189 return legitimize_pic_address (x, 0);
7190
7191 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
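/* For example, (ashift reg 3) becomes (mult reg 8), the form used for a
   scaled index inside an address.  */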
7192 if (GET_CODE (x) == ASHIFT
7193 && CONST_INT_P (XEXP (x, 1))
7194 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
7195 {
7196 changed = 1;
7197 log = INTVAL (XEXP (x, 1));
7198 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
7199 GEN_INT (1 << log));
7200 }
7201
7202 if (GET_CODE (x) == PLUS)
7203 {
7204 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
7205
7206 if (GET_CODE (XEXP (x, 0)) == ASHIFT
7207 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7208 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
7209 {
7210 changed = 1;
7211 log = INTVAL (XEXP (XEXP (x, 0), 1));
7212 XEXP (x, 0) = gen_rtx_MULT (Pmode,
7213 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
7214 GEN_INT (1 << log));
7215 }
7216
7217 if (GET_CODE (XEXP (x, 1)) == ASHIFT
7218 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
7219 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
7220 {
7221 changed = 1;
7222 log = INTVAL (XEXP (XEXP (x, 1), 1));
7223 XEXP (x, 1) = gen_rtx_MULT (Pmode,
7224 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
7225 GEN_INT (1 << log));
7226 }
7227
7228 /* Put multiply first if it isn't already. */
7229 if (GET_CODE (XEXP (x, 1)) == MULT)
7230 {
7231 rtx tmp = XEXP (x, 0);
7232 XEXP (x, 0) = XEXP (x, 1);
7233 XEXP (x, 1) = tmp;
7234 changed = 1;
7235 }
7236
7237 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
7238 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
7239 created by virtual register instantiation, register elimination, and
7240 similar optimizations. */
7241 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
7242 {
7243 changed = 1;
7244 x = gen_rtx_PLUS (Pmode,
7245 gen_rtx_PLUS (Pmode, XEXP (x, 0),
7246 XEXP (XEXP (x, 1), 0)),
7247 XEXP (XEXP (x, 1), 1));
7248 }
7249
7250 /* Canonicalize
7251 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
7252 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
7253 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
7254 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
7255 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
7256 && CONSTANT_P (XEXP (x, 1)))
7257 {
7258 rtx constant;
7259 rtx other = NULL_RTX;
7260
7261 if (CONST_INT_P (XEXP (x, 1)))
7262 {
7263 constant = XEXP (x, 1);
7264 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
7265 }
7266 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
7267 {
7268 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
7269 other = XEXP (x, 1);
7270 }
7271 else
7272 constant = 0;
7273
7274 if (constant)
7275 {
7276 changed = 1;
7277 x = gen_rtx_PLUS (Pmode,
7278 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
7279 XEXP (XEXP (XEXP (x, 0), 1), 0)),
7280 plus_constant (other, INTVAL (constant)));
7281 }
7282 }
7283
7284 if (changed && legitimate_address_p (mode, x, FALSE))
7285 return x;
7286
7287 if (GET_CODE (XEXP (x, 0)) == MULT)
7288 {
7289 changed = 1;
7290 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
7291 }
7292
7293 if (GET_CODE (XEXP (x, 1)) == MULT)
7294 {
7295 changed = 1;
7296 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
7297 }
7298
7299 if (changed
7300 && REG_P (XEXP (x, 1))
7301 && REG_P (XEXP (x, 0)))
7302 return x;
7303
7304 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
7305 {
7306 changed = 1;
7307 x = legitimize_pic_address (x, 0);
7308 }
7309
7310 if (changed && legitimate_address_p (mode, x, FALSE))
7311 return x;
7312
7313 if (REG_P (XEXP (x, 0)))
7314 {
7315 rtx temp = gen_reg_rtx (Pmode);
7316 rtx val = force_operand (XEXP (x, 1), temp);
7317 if (val != temp)
7318 emit_move_insn (temp, val);
7319
7320 XEXP (x, 1) = temp;
7321 return x;
7322 }
7323
7324 else if (REG_P (XEXP (x, 1)))
7325 {
7326 rtx temp = gen_reg_rtx (Pmode);
7327 rtx val = force_operand (XEXP (x, 0), temp);
7328 if (val != temp)
7329 emit_move_insn (temp, val);
7330
7331 XEXP (x, 0) = temp;
7332 return x;
7333 }
7334 }
7335
7336 return x;
7337 }
7338 \f
7339 /* Print an integer constant expression in assembler syntax. Addition
7340 and subtraction are the only arithmetic that may appear in these
7341 expressions. FILE is the stdio stream to write to, X is the rtx, and
7342 CODE is the operand print code from the output string. */
7343
7344 static void
7345 output_pic_addr_const (FILE *file, rtx x, int code)
7346 {
7347 char buf[256];
7348
7349 switch (GET_CODE (x))
7350 {
7351 case PC:
7352 gcc_assert (flag_pic);
7353 putc ('.', file);
7354 break;
7355
7356 case SYMBOL_REF:
7357 output_addr_const (file, x);
7358 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
7359 fputs ("@PLT", file);
7360 break;
7361
7362 case LABEL_REF:
7363 x = XEXP (x, 0);
7364 /* FALLTHRU */
7365 case CODE_LABEL:
7366 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
7367 assemble_name (asm_out_file, buf);
7368 break;
7369
7370 case CONST_INT:
7371 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
7372 break;
7373
7374 case CONST:
7375 /* This used to output parentheses around the expression,
7376 but that does not work on the 386 (either ATT or BSD assembler). */
7377 output_pic_addr_const (file, XEXP (x, 0), code);
7378 break;
7379
7380 case CONST_DOUBLE:
7381 if (GET_MODE (x) == VOIDmode)
7382 {
7383 /* We can use %d if the number is <32 bits and positive. */
7384 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
7385 fprintf (file, "0x%lx%08lx",
7386 (unsigned long) CONST_DOUBLE_HIGH (x),
7387 (unsigned long) CONST_DOUBLE_LOW (x));
7388 else
7389 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
7390 }
7391 else
7392 /* We can't handle floating point constants;
7393 PRINT_OPERAND must handle them. */
7394 output_operand_lossage ("floating constant misused");
7395 break;
7396
7397 case PLUS:
7398 /* Some assemblers need integer constants to appear first. */
7399 if (CONST_INT_P (XEXP (x, 0)))
7400 {
7401 output_pic_addr_const (file, XEXP (x, 0), code);
7402 putc ('+', file);
7403 output_pic_addr_const (file, XEXP (x, 1), code);
7404 }
7405 else
7406 {
7407 gcc_assert (CONST_INT_P (XEXP (x, 1)));
7408 output_pic_addr_const (file, XEXP (x, 1), code);
7409 putc ('+', file);
7410 output_pic_addr_const (file, XEXP (x, 0), code);
7411 }
7412 break;
7413
7414 case MINUS:
7415 if (!TARGET_MACHO)
7416 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
7417 output_pic_addr_const (file, XEXP (x, 0), code);
7418 putc ('-', file);
7419 output_pic_addr_const (file, XEXP (x, 1), code);
7420 if (!TARGET_MACHO)
7421 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
7422 break;
7423
7424 case UNSPEC:
7425 gcc_assert (XVECLEN (x, 0) == 1);
7426 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
7427 switch (XINT (x, 1))
7428 {
7429 case UNSPEC_GOT:
7430 fputs ("@GOT", file);
7431 break;
7432 case UNSPEC_GOTOFF:
7433 fputs ("@GOTOFF", file);
7434 break;
7435 case UNSPEC_GOTPCREL:
7436 fputs ("@GOTPCREL(%rip)", file);
7437 break;
7438 case UNSPEC_GOTTPOFF:
7439 /* FIXME: This might be @TPOFF in Sun ld too. */
7440 fputs ("@GOTTPOFF", file);
7441 break;
7442 case UNSPEC_TPOFF:
7443 fputs ("@TPOFF", file);
7444 break;
7445 case UNSPEC_NTPOFF:
7446 if (TARGET_64BIT)
7447 fputs ("@TPOFF", file);
7448 else
7449 fputs ("@NTPOFF", file);
7450 break;
7451 case UNSPEC_DTPOFF:
7452 fputs ("@DTPOFF", file);
7453 break;
7454 case UNSPEC_GOTNTPOFF:
7455 if (TARGET_64BIT)
7456 fputs ("@GOTTPOFF(%rip)", file);
7457 else
7458 fputs ("@GOTNTPOFF", file);
7459 break;
7460 case UNSPEC_INDNTPOFF:
7461 fputs ("@INDNTPOFF", file);
7462 break;
7463 default:
7464 output_operand_lossage ("invalid UNSPEC as operand");
7465 break;
7466 }
7467 break;
7468
7469 default:
7470 output_operand_lossage ("invalid expression as operand");
7471 }
7472 }
7473
7474 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
7475 We need to emit DTP-relative relocations. */
7476
7477 static void
7478 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
7479 {
7480 fputs (ASM_LONG, file);
7481 output_addr_const (file, x);
7482 fputs ("@DTPOFF", file);
7483 switch (size)
7484 {
7485 case 4:
7486 break;
7487 case 8:
7488 fputs (", 0", file);
7489 break;
7490 default:
7491 gcc_unreachable ();
7492 }
7493 }
7494
7495 /* In the name of slightly smaller debug output, and to cater to
7496 general assembler lossage, recognize PIC+GOTOFF and turn it back
7497 into a direct symbol reference.
7498
7499 On Darwin, this is necessary to avoid a crash, because Darwin
7500 has a different PIC label for each routine but the DWARF debugging
7501 information is not associated with any particular routine, so it's
7502 necessary to remove references to the PIC label from RTL stored by
7503 the DWARF output code. */
7504
7505 static rtx
7506 ix86_delegitimize_address (rtx orig_x)
7507 {
7508 rtx x = orig_x;
7509 /* reg_addend is NULL or a multiple of some register. */
7510 rtx reg_addend = NULL_RTX;
7511 /* const_addend is NULL or a const_int. */
7512 rtx const_addend = NULL_RTX;
7513 /* This is the result, or NULL. */
7514 rtx result = NULL_RTX;
7515
7516 if (MEM_P (x))
7517 x = XEXP (x, 0);
7518
7519 if (TARGET_64BIT)
7520 {
7521 if (GET_CODE (x) != CONST
7522 || GET_CODE (XEXP (x, 0)) != UNSPEC
7523 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
7524 || !MEM_P (orig_x))
7525 return orig_x;
7526 return XVECEXP (XEXP (x, 0), 0, 0);
7527 }
7528
7529 if (GET_CODE (x) != PLUS
7530 || GET_CODE (XEXP (x, 1)) != CONST)
7531 return orig_x;
7532
7533 if (REG_P (XEXP (x, 0))
7534 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
7535 /* %ebx + GOT/GOTOFF */
7536 ;
7537 else if (GET_CODE (XEXP (x, 0)) == PLUS)
7538 {
7539 /* %ebx + %reg * scale + GOT/GOTOFF */
7540 reg_addend = XEXP (x, 0);
7541 if (REG_P (XEXP (reg_addend, 0))
7542 && REGNO (XEXP (reg_addend, 0)) == PIC_OFFSET_TABLE_REGNUM)
7543 reg_addend = XEXP (reg_addend, 1);
7544 else if (REG_P (XEXP (reg_addend, 1))
7545 && REGNO (XEXP (reg_addend, 1)) == PIC_OFFSET_TABLE_REGNUM)
7546 reg_addend = XEXP (reg_addend, 0);
7547 else
7548 return orig_x;
7549 if (!REG_P (reg_addend)
7550 && GET_CODE (reg_addend) != MULT
7551 && GET_CODE (reg_addend) != ASHIFT)
7552 return orig_x;
7553 }
7554 else
7555 return orig_x;
7556
7557 x = XEXP (XEXP (x, 1), 0);
7558 if (GET_CODE (x) == PLUS
7559 && CONST_INT_P (XEXP (x, 1)))
7560 {
7561 const_addend = XEXP (x, 1);
7562 x = XEXP (x, 0);
7563 }
7564
7565 if (GET_CODE (x) == UNSPEC
7566 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x))
7567 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
7568 result = XVECEXP (x, 0, 0);
7569
7570 if (TARGET_MACHO && darwin_local_data_pic (x)
7571 && !MEM_P (orig_x))
7572 result = XEXP (x, 0);
7573
7574 if (! result)
7575 return orig_x;
7576
7577 if (const_addend)
7578 result = gen_rtx_PLUS (Pmode, result, const_addend);
7579 if (reg_addend)
7580 result = gen_rtx_PLUS (Pmode, reg_addend, result);
7581 return result;
7582 }
7583 \f
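/* Print to FILE the assembler condition suffix (e.g. "e", "ne", "g", "b")
   corresponding to comparison CODE in MODE.  If REVERSE is nonzero, print
   the suffix for the reversed condition.  FP nonzero selects the alternate
   spellings used with fcmov ("nbe", "nb", "u", "nu") where they differ.  */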
7584 static void
7585 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
7586 int fp, FILE *file)
7587 {
7588 const char *suffix;
7589
7590 if (mode == CCFPmode || mode == CCFPUmode)
7591 {
7592 enum rtx_code second_code, bypass_code;
7593 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
7594 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
7595 code = ix86_fp_compare_code_to_integer (code);
7596 mode = CCmode;
7597 }
7598 if (reverse)
7599 code = reverse_condition (code);
7600
7601 switch (code)
7602 {
7603 case EQ:
7604 suffix = "e";
7605 break;
7606 case NE:
7607 suffix = "ne";
7608 break;
7609 case GT:
7610 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
7611 suffix = "g";
7612 break;
7613 case GTU:
7614 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
7615 Those same assemblers have the same but opposite lossage on cmov. */
7616 gcc_assert (mode == CCmode);
7617 suffix = fp ? "nbe" : "a";
7618 break;
7619 case LT:
7620 switch (mode)
7621 {
7622 case CCNOmode:
7623 case CCGOCmode:
7624 suffix = "s";
7625 break;
7626
7627 case CCmode:
7628 case CCGCmode:
7629 suffix = "l";
7630 break;
7631
7632 default:
7633 gcc_unreachable ();
7634 }
7635 break;
7636 case LTU:
7637 gcc_assert (mode == CCmode);
7638 suffix = "b";
7639 break;
7640 case GE:
7641 switch (mode)
7642 {
7643 case CCNOmode:
7644 case CCGOCmode:
7645 suffix = "ns";
7646 break;
7647
7648 case CCmode:
7649 case CCGCmode:
7650 suffix = "ge";
7651 break;
7652
7653 default:
7654 gcc_unreachable ();
7655 }
7656 break;
7657 case GEU:
7658 /* ??? As above. */
7659 gcc_assert (mode == CCmode);
7660 suffix = fp ? "nb" : "ae";
7661 break;
7662 case LE:
7663 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
7664 suffix = "le";
7665 break;
7666 case LEU:
7667 gcc_assert (mode == CCmode);
7668 suffix = "be";
7669 break;
7670 case UNORDERED:
7671 suffix = fp ? "u" : "p";
7672 break;
7673 case ORDERED:
7674 suffix = fp ? "nu" : "np";
7675 break;
7676 default:
7677 gcc_unreachable ();
7678 }
7679 fputs (suffix, file);
7680 }
7681
7682 /* Print the name of register X to FILE based on its machine mode and number.
7683 If CODE is 'w', pretend the mode is HImode.
7684 If CODE is 'b', pretend the mode is QImode.
7685 If CODE is 'k', pretend the mode is SImode.
7686 If CODE is 'q', pretend the mode is DImode.
7687 If CODE is 'h', pretend the reg is the 'high' byte register.
7688 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
7689
7690 void
7691 print_reg (rtx x, int code, FILE *file)
7692 {
7693 gcc_assert (REGNO (x) != ARG_POINTER_REGNUM
7694 && REGNO (x) != FRAME_POINTER_REGNUM
7695 && REGNO (x) != FLAGS_REG
7696 && REGNO (x) != FPSR_REG
7697 && REGNO (x) != FPCR_REG);
7698
7699 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
7700 putc ('%', file);
7701
7702 if (code == 'w' || MMX_REG_P (x))
7703 code = 2;
7704 else if (code == 'b')
7705 code = 1;
7706 else if (code == 'k')
7707 code = 4;
7708 else if (code == 'q')
7709 code = 8;
7710 else if (code == 'y')
7711 code = 3;
7712 else if (code == 'h')
7713 code = 0;
7714 else
7715 code = GET_MODE_SIZE (GET_MODE (x));
7716
7717 /* Irritatingly, the AMD extended registers use a different naming
7718 convention from the normal registers. */
7719 if (REX_INT_REG_P (x))
7720 {
7721 gcc_assert (TARGET_64BIT);
7722 switch (code)
7723 {
7724 case 0:
7725 error ("extended registers have no high halves");
7726 break;
7727 case 1:
7728 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
7729 break;
7730 case 2:
7731 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
7732 break;
7733 case 4:
7734 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
7735 break;
7736 case 8:
7737 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
7738 break;
7739 default:
7740 error ("unsupported operand size for extended register");
7741 break;
7742 }
7743 return;
7744 }
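/* At this point CODE holds the operand width in bytes, or 0 for a 'high'
   byte register and 3 for the 'y' (st(0)) case, per the mapping above.  */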
7745 switch (code)
7746 {
7747 case 3:
7748 if (STACK_TOP_P (x))
7749 {
7750 fputs ("st(0)", file);
7751 break;
7752 }
7753 /* FALLTHRU */
7754 case 8:
7755 case 4:
7756 case 12:
7757 if (! ANY_FP_REG_P (x))
7758 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
7759 /* FALLTHRU */
7760 case 16:
7761 case 2:
7762 normal:
7763 fputs (hi_reg_name[REGNO (x)], file);
7764 break;
7765 case 1:
7766 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
7767 goto normal;
7768 fputs (qi_reg_name[REGNO (x)], file);
7769 break;
7770 case 0:
7771 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
7772 goto normal;
7773 fputs (qi_high_reg_name[REGNO (x)], file);
7774 break;
7775 default:
7776 gcc_unreachable ();
7777 }
7778 }
7779
7780 /* Locate some local-dynamic symbol still in use by this function
7781 so that we can print its name in some tls_local_dynamic_base
7782 pattern. */
7783
7784 static const char *
7785 get_some_local_dynamic_name (void)
7786 {
7787 rtx insn;
7788
7789 if (cfun->machine->some_ld_name)
7790 return cfun->machine->some_ld_name;
7791
7792 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
7793 if (INSN_P (insn)
7794 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
7795 return cfun->machine->some_ld_name;
7796
7797 gcc_unreachable ();
7798 }
7799
7800 static int
7801 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
7802 {
7803 rtx x = *px;
7804
7805 if (GET_CODE (x) == SYMBOL_REF
7806 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
7807 {
7808 cfun->machine->some_ld_name = XSTR (x, 0);
7809 return 1;
7810 }
7811
7812 return 0;
7813 }
7814
7815 /* Meaning of CODE:
7816 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
7817 C -- print opcode suffix for set/cmov insn.
7818 c -- like C, but print reversed condition
7819 F,f -- likewise, but for floating-point.
7820 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
7821 otherwise nothing
7822 R -- print the prefix for register names.
7823 z -- print the opcode suffix for the size of the current operand.
7824 * -- print a star (in certain assembler syntax)
7825 A -- print an absolute memory reference.
7826 w -- print the operand as if it's a "word" (HImode) even if it isn't.
7827 s -- print a shift double count, followed by the assembler's argument
7828 delimiter.
7829 b -- print the QImode name of the register for the indicated operand.
7830 %b0 would print %al if operands[0] is reg 0.
7831 w -- likewise, print the HImode name of the register.
7832 k -- likewise, print the SImode name of the register.
7833 q -- likewise, print the DImode name of the register.
7834 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
7835 y -- print "st(0)" instead of "st" as a register.
7836 D -- print condition for SSE cmp instruction.
7837 P -- if PIC, print an @PLT suffix.
7838 X -- don't print any sort of PIC '@' suffix for a symbol.
7839 & -- print some in-use local-dynamic symbol name.
7840 H -- print a memory address offset by 8; used for sse high-parts
7841 */
7842
7843 void
7844 print_operand (FILE *file, rtx x, int code)
7845 {
7846 if (code)
7847 {
7848 switch (code)
7849 {
7850 case '*':
7851 if (ASSEMBLER_DIALECT == ASM_ATT)
7852 putc ('*', file);
7853 return;
7854
7855 case '&':
7856 assemble_name (file, get_some_local_dynamic_name ());
7857 return;
7858
7859 case 'A':
7860 switch (ASSEMBLER_DIALECT)
7861 {
7862 case ASM_ATT:
7863 putc ('*', file);
7864 break;
7865
7866 case ASM_INTEL:
7867 /* Intel syntax. For absolute addresses, registers should not
7868 be surrounded by brackets. */
7869 if (!REG_P (x))
7870 {
7871 putc ('[', file);
7872 PRINT_OPERAND (file, x, 0);
7873 putc (']', file);
7874 return;
7875 }
7876 break;
7877
7878 default:
7879 gcc_unreachable ();
7880 }
7881
7882 PRINT_OPERAND (file, x, 0);
7883 return;
7884
7885
7886 case 'L':
7887 if (ASSEMBLER_DIALECT == ASM_ATT)
7888 putc ('l', file);
7889 return;
7890
7891 case 'W':
7892 if (ASSEMBLER_DIALECT == ASM_ATT)
7893 putc ('w', file);
7894 return;
7895
7896 case 'B':
7897 if (ASSEMBLER_DIALECT == ASM_ATT)
7898 putc ('b', file);
7899 return;
7900
7901 case 'Q':
7902 if (ASSEMBLER_DIALECT == ASM_ATT)
7903 putc ('l', file);
7904 return;
7905
7906 case 'S':
7907 if (ASSEMBLER_DIALECT == ASM_ATT)
7908 putc ('s', file);
7909 return;
7910
7911 case 'T':
7912 if (ASSEMBLER_DIALECT == ASM_ATT)
7913 putc ('t', file);
7914 return;
7915
7916 case 'z':
7917 /* 387 opcodes don't get size suffixes if the operands are
7918 registers. */
7919 if (STACK_REG_P (x))
7920 return;
7921
7922 /* Likewise if using Intel opcodes. */
7923 if (ASSEMBLER_DIALECT == ASM_INTEL)
7924 return;
7925
7926 /* Derive the opcode suffix from the size of the operand. */
7927 switch (GET_MODE_SIZE (GET_MODE (x)))
7928 {
7929 case 1:
7930 putc ('b', file);
7931 return;
7932
7933 case 2:
7934 #ifdef HAVE_GAS_FILDS_FISTS
7935 putc ('s', file);
7936 #endif
7937 return;
7938
7939 case 4:
7940 if (GET_MODE (x) == SFmode)
7941 {
7942 putc ('s', file);
7943 return;
7944 }
7945 else
7946 putc ('l', file);
7947 return;
7948
7949 case 12:
7950 case 16:
7951 putc ('t', file);
7952 return;
7953
7954 case 8:
7955 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
7956 {
7957 #ifdef GAS_MNEMONICS
7958 putc ('q', file);
7959 #else
7960 putc ('l', file);
7961 putc ('l', file);
7962 #endif
7963 }
7964 else
7965 putc ('l', file);
7966 return;
7967
7968 default:
7969 gcc_unreachable ();
7970 }
7971
7972 case 'b':
7973 case 'w':
7974 case 'k':
7975 case 'q':
7976 case 'h':
7977 case 'y':
7978 case 'X':
7979 case 'P':
7980 break;
7981
7982 case 's':
7983 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
7984 {
7985 PRINT_OPERAND (file, x, 0);
7986 putc (',', file);
7987 }
7988 return;
7989
7990 case 'D':
7991 /* A little bit of brain damage here: the SSE compare instructions
7992 use completely different names for the comparisons than the
7993 fp conditional moves do. */
7994 switch (GET_CODE (x))
7995 {
7996 case EQ:
7997 case UNEQ:
7998 fputs ("eq", file);
7999 break;
8000 case LT:
8001 case UNLT:
8002 fputs ("lt", file);
8003 break;
8004 case LE:
8005 case UNLE:
8006 fputs ("le", file);
8007 break;
8008 case UNORDERED:
8009 fputs ("unord", file);
8010 break;
8011 case NE:
8012 case LTGT:
8013 fputs ("neq", file);
8014 break;
8015 case UNGE:
8016 case GE:
8017 fputs ("nlt", file);
8018 break;
8019 case UNGT:
8020 case GT:
8021 fputs ("nle", file);
8022 break;
8023 case ORDERED:
8024 fputs ("ord", file);
8025 break;
8026 default:
8027 gcc_unreachable ();
8028 }
8029 return;
8030 case 'O':
8031 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8032 if (ASSEMBLER_DIALECT == ASM_ATT)
8033 {
8034 switch (GET_MODE (x))
8035 {
8036 case HImode: putc ('w', file); break;
8037 case SImode:
8038 case SFmode: putc ('l', file); break;
8039 case DImode:
8040 case DFmode: putc ('q', file); break;
8041 default: gcc_unreachable ();
8042 }
8043 putc ('.', file);
8044 }
8045 #endif
8046 return;
8047 case 'C':
8048 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
8049 return;
8050 case 'F':
8051 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8052 if (ASSEMBLER_DIALECT == ASM_ATT)
8053 putc ('.', file);
8054 #endif
8055 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
8056 return;
8057
8058 /* Like above, but reverse condition */
8059 case 'c':
8060 /* Check to see if argument to %c is really a constant
8061 and not a condition code which needs to be reversed. */
8062 if (!COMPARISON_P (x))
8063 {
8064 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
8065 return;
8066 }
8067 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
8068 return;
8069 case 'f':
8070 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8071 if (ASSEMBLER_DIALECT == ASM_ATT)
8072 putc ('.', file);
8073 #endif
8074 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
8075 return;
8076
8077 case 'H':
8078 /* It doesn't actually matter what mode we use here, as we're
8079 only going to use this for printing. */
8080 x = adjust_address_nv (x, DImode, 8);
8081 break;
8082
8083 case '+':
8084 {
8085 rtx x;
8086
8087 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
8088 return;
8089
8090 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
8091 if (x)
8092 {
8093 int pred_val = INTVAL (XEXP (x, 0));
8094
8095 if (pred_val < REG_BR_PROB_BASE * 45 / 100
8096 || pred_val > REG_BR_PROB_BASE * 55 / 100)
8097 {
8098 int taken = pred_val > REG_BR_PROB_BASE / 2;
8099 int cputaken = final_forward_branch_p (current_output_insn) == 0;
8100
8101 /* Emit hints only when the default branch prediction
8102 heuristics would fail. */
8103 if (taken != cputaken)
8104 {
8105 /* We use 3e (DS) prefix for taken branches and
8106 2e (CS) prefix for not taken branches. */
8107 if (taken)
8108 fputs ("ds ; ", file);
8109 else
8110 fputs ("cs ; ", file);
8111 }
8112 }
8113 }
8114 return;
8115 }
8116 default:
8117 output_operand_lossage ("invalid operand code '%c'", code);
8118 }
8119 }
8120
8121 if (REG_P (x))
8122 print_reg (x, code, file);
8123
8124 else if (MEM_P (x))
8125 {
8126 /* No `byte ptr' prefix for call instructions. */
8127 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
8128 {
8129 const char * size;
8130 switch (GET_MODE_SIZE (GET_MODE (x)))
8131 {
8132 case 1: size = "BYTE"; break;
8133 case 2: size = "WORD"; break;
8134 case 4: size = "DWORD"; break;
8135 case 8: size = "QWORD"; break;
8136 case 12: size = "XWORD"; break;
8137 case 16: size = "XMMWORD"; break;
8138 default:
8139 gcc_unreachable ();
8140 }
8141
8142 /* Check for explicit size override (codes 'b', 'w' and 'k') */
8143 if (code == 'b')
8144 size = "BYTE";
8145 else if (code == 'w')
8146 size = "WORD";
8147 else if (code == 'k')
8148 size = "DWORD";
8149
8150 fputs (size, file);
8151 fputs (" PTR ", file);
8152 }
8153
8154 x = XEXP (x, 0);
8155 /* Avoid (%rip) for call operands. */
8156 if (CONSTANT_ADDRESS_P (x) && code == 'P'
8157 && !CONST_INT_P (x))
8158 output_addr_const (file, x);
8159 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
8160 output_operand_lossage ("invalid constraints for operand");
8161 else
8162 output_address (x);
8163 }
8164
8165 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
8166 {
8167 REAL_VALUE_TYPE r;
8168 long l;
8169
8170 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8171 REAL_VALUE_TO_TARGET_SINGLE (r, l);
8172
8173 if (ASSEMBLER_DIALECT == ASM_ATT)
8174 putc ('$', file);
8175 fprintf (file, "0x%08lx", l);
8176 }
8177
8178 /* These float cases don't actually occur as immediate operands. */
8179 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
8180 {
8181 char dstr[30];
8182
8183 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
8184 fprintf (file, "%s", dstr);
8185 }
8186
8187 else if (GET_CODE (x) == CONST_DOUBLE
8188 && GET_MODE (x) == XFmode)
8189 {
8190 char dstr[30];
8191
8192 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
8193 fprintf (file, "%s", dstr);
8194 }
8195
8196 else
8197 {
8198 /* We have patterns that allow zero sets of memory, for instance.
8199 In 64-bit mode, we should probably support all 8-byte vectors,
8200 since we can in fact encode that into an immediate. */
8201 if (GET_CODE (x) == CONST_VECTOR)
8202 {
8203 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
8204 x = const0_rtx;
8205 }
8206
8207 if (code != 'P')
8208 {
8209 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
8210 {
8211 if (ASSEMBLER_DIALECT == ASM_ATT)
8212 putc ('$', file);
8213 }
8214 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
8215 || GET_CODE (x) == LABEL_REF)
8216 {
8217 if (ASSEMBLER_DIALECT == ASM_ATT)
8218 putc ('$', file);
8219 else
8220 fputs ("OFFSET FLAT:", file);
8221 }
8222 }
8223 if (CONST_INT_P (x))
8224 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
8225 else if (flag_pic)
8226 output_pic_addr_const (file, x, code);
8227 else
8228 output_addr_const (file, x);
8229 }
8230 }
8231 \f
8232 /* Print a memory operand whose address is ADDR. */
8233
8234 void
8235 print_operand_address (FILE *file, rtx addr)
8236 {
8237 struct ix86_address parts;
8238 rtx base, index, disp;
8239 int scale;
8240 int ok = ix86_decompose_address (addr, &parts);
8241
8242 gcc_assert (ok);
8243
8244 base = parts.base;
8245 index = parts.index;
8246 disp = parts.disp;
8247 scale = parts.scale;
8248
8249 switch (parts.seg)
8250 {
8251 case SEG_DEFAULT:
8252 break;
8253 case SEG_FS:
8254 case SEG_GS:
8255 if (USER_LABEL_PREFIX[0] == 0)
8256 putc ('%', file);
8257 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
8258 break;
8259 default:
8260 gcc_unreachable ();
8261 }
8262
8263 if (!base && !index)
8264 {
8265 /* Displacement only requires special attention. */
8266
8267 if (CONST_INT_P (disp))
8268 {
8269 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
8270 {
8271 if (USER_LABEL_PREFIX[0] == 0)
8272 putc ('%', file);
8273 fputs ("ds:", file);
8274 }
8275 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
8276 }
8277 else if (flag_pic)
8278 output_pic_addr_const (file, disp, 0);
8279 else
8280 output_addr_const (file, disp);
8281
8282 /* Use one byte shorter RIP relative addressing for 64bit mode. */
8283 if (TARGET_64BIT)
8284 {
8285 if (GET_CODE (disp) == CONST
8286 && GET_CODE (XEXP (disp, 0)) == PLUS
8287 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
8288 disp = XEXP (XEXP (disp, 0), 0);
8289 if (GET_CODE (disp) == LABEL_REF
8290 || (GET_CODE (disp) == SYMBOL_REF
8291 && SYMBOL_REF_TLS_MODEL (disp) == 0))
8292 fputs ("(%rip)", file);
8293 }
8294 }
8295 else
8296 {
8297 if (ASSEMBLER_DIALECT == ASM_ATT)
8298 {
8299 if (disp)
8300 {
8301 if (flag_pic)
8302 output_pic_addr_const (file, disp, 0);
8303 else if (GET_CODE (disp) == LABEL_REF)
8304 output_asm_label (disp);
8305 else
8306 output_addr_const (file, disp);
8307 }
8308
8309 putc ('(', file);
8310 if (base)
8311 print_reg (base, 0, file);
8312 if (index)
8313 {
8314 putc (',', file);
8315 print_reg (index, 0, file);
8316 if (scale != 1)
8317 fprintf (file, ",%d", scale);
8318 }
8319 putc (')', file);
8320 }
8321 else
8322 {
8323 rtx offset = NULL_RTX;
8324
8325 if (disp)
8326 {
8327 /* Pull out the offset of a symbol; print any symbol itself. */
8328 if (GET_CODE (disp) == CONST
8329 && GET_CODE (XEXP (disp, 0)) == PLUS
8330 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
8331 {
8332 offset = XEXP (XEXP (disp, 0), 1);
8333 disp = gen_rtx_CONST (VOIDmode,
8334 XEXP (XEXP (disp, 0), 0));
8335 }
8336
8337 if (flag_pic)
8338 output_pic_addr_const (file, disp, 0);
8339 else if (GET_CODE (disp) == LABEL_REF)
8340 output_asm_label (disp);
8341 else if (CONST_INT_P (disp))
8342 offset = disp;
8343 else
8344 output_addr_const (file, disp);
8345 }
8346
8347 putc ('[', file);
8348 if (base)
8349 {
8350 print_reg (base, 0, file);
8351 if (offset)
8352 {
8353 if (INTVAL (offset) >= 0)
8354 putc ('+', file);
8355 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
8356 }
8357 }
8358 else if (offset)
8359 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
8360 else
8361 putc ('0', file);
8362
8363 if (index)
8364 {
8365 putc ('+', file);
8366 print_reg (index, 0, file);
8367 if (scale != 1)
8368 fprintf (file, "*%d", scale);
8369 }
8370 putc (']', file);
8371 }
8372 }
8373 }
8374
8375 bool
8376 output_addr_const_extra (FILE *file, rtx x)
8377 {
8378 rtx op;
8379
8380 if (GET_CODE (x) != UNSPEC)
8381 return false;
8382
8383 op = XVECEXP (x, 0, 0);
8384 switch (XINT (x, 1))
8385 {
8386 case UNSPEC_GOTTPOFF:
8387 output_addr_const (file, op);
8388 /* FIXME: This might be @TPOFF in Sun ld. */
8389 fputs ("@GOTTPOFF", file);
8390 break;
8391 case UNSPEC_TPOFF:
8392 output_addr_const (file, op);
8393 fputs ("@TPOFF", file);
8394 break;
8395 case UNSPEC_NTPOFF:
8396 output_addr_const (file, op);
8397 if (TARGET_64BIT)
8398 fputs ("@TPOFF", file);
8399 else
8400 fputs ("@NTPOFF", file);
8401 break;
8402 case UNSPEC_DTPOFF:
8403 output_addr_const (file, op);
8404 fputs ("@DTPOFF", file);
8405 break;
8406 case UNSPEC_GOTNTPOFF:
8407 output_addr_const (file, op);
8408 if (TARGET_64BIT)
8409 fputs ("@GOTTPOFF(%rip)", file);
8410 else
8411 fputs ("@GOTNTPOFF", file);
8412 break;
8413 case UNSPEC_INDNTPOFF:
8414 output_addr_const (file, op);
8415 fputs ("@INDNTPOFF", file);
8416 break;
8417
8418 default:
8419 return false;
8420 }
8421
8422 return true;
8423 }
8424 \f
8425 /* Split one or more DImode RTL references into pairs of SImode
8426 references. The RTL can be REG, offsettable MEM, integer constant, or
8427 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
8428 split and "num" is its length. lo_half and hi_half are output arrays
8429 that parallel "operands". */
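/* For example, a DImode register yields SImode subregs at byte offsets
   0 and 4; MEMs are handled with adjust_address instead, because
   simplify_subreg will not split volatile references (see below).  */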
8430
8431 void
8432 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
8433 {
8434 while (num--)
8435 {
8436 rtx op = operands[num];
8437
8438 /* simplify_subreg refuses to split volatile memory addresses,
8439 but we still have to handle them. */
8440 if (MEM_P (op))
8441 {
8442 lo_half[num] = adjust_address (op, SImode, 0);
8443 hi_half[num] = adjust_address (op, SImode, 4);
8444 }
8445 else
8446 {
8447 lo_half[num] = simplify_gen_subreg (SImode, op,
8448 GET_MODE (op) == VOIDmode
8449 ? DImode : GET_MODE (op), 0);
8450 hi_half[num] = simplify_gen_subreg (SImode, op,
8451 GET_MODE (op) == VOIDmode
8452 ? DImode : GET_MODE (op), 4);
8453 }
8454 }
8455 }
8456 /* Split one or more TImode RTL references into pairs of DImode
8457 references. The RTL can be REG, offsettable MEM, integer constant, or
8458 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
8459 split and "num" is its length. lo_half and hi_half are output arrays
8460 that parallel "operands". */
8461
8462 void
8463 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
8464 {
8465 while (num--)
8466 {
8467 rtx op = operands[num];
8468
8469 /* simplify_subreg refuses to split volatile memory addresses, but we
8470 still have to handle them. */
8471 if (MEM_P (op))
8472 {
8473 lo_half[num] = adjust_address (op, DImode, 0);
8474 hi_half[num] = adjust_address (op, DImode, 8);
8475 }
8476 else
8477 {
8478 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
8479 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
8480 }
8481 }
8482 }
8483 \f
8484 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
8485 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
8486 is the expression of the binary operation. The output may either be
8487 emitted here, or returned to the caller, like all output_* functions.
8488
8489 There is no guarantee that the operands are the same mode, as they
8490 might be within FLOAT or FLOAT_EXTEND expressions. */
8491
8492 #ifndef SYSV386_COMPAT
8493 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
8494 wants to fix the assemblers because that causes incompatibility
8495 with gcc. No-one wants to fix gcc because that causes
8496 incompatibility with assemblers... You can use the option of
8497 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
8498 #define SYSV386_COMPAT 1
8499 #endif
8500
8501 const char *
8502 output_387_binary_op (rtx insn, rtx *operands)
8503 {
8504 static char buf[30];
8505 const char *p;
8506 const char *ssep;
8507 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
8508
8509 #ifdef ENABLE_CHECKING
8510 /* Even if we do not want to check the inputs, this documents input
8511 constraints, which helps in understanding the following code. */
8512 if (STACK_REG_P (operands[0])
8513 && ((REG_P (operands[1])
8514 && REGNO (operands[0]) == REGNO (operands[1])
8515 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
8516 || (REG_P (operands[2])
8517 && REGNO (operands[0]) == REGNO (operands[2])
8518 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
8519 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
8520 ; /* ok */
8521 else
8522 gcc_assert (is_sse);
8523 #endif
8524
8525 switch (GET_CODE (operands[3]))
8526 {
8527 case PLUS:
8528 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8529 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8530 p = "fiadd";
8531 else
8532 p = "fadd";
8533 ssep = "add";
8534 break;
8535
8536 case MINUS:
8537 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8538 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8539 p = "fisub";
8540 else
8541 p = "fsub";
8542 ssep = "sub";
8543 break;
8544
8545 case MULT:
8546 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8547 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8548 p = "fimul";
8549 else
8550 p = "fmul";
8551 ssep = "mul";
8552 break;
8553
8554 case DIV:
8555 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8556 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8557 p = "fidiv";
8558 else
8559 p = "fdiv";
8560 ssep = "div";
8561 break;
8562
8563 default:
8564 gcc_unreachable ();
8565 }
8566
8567 if (is_sse)
8568 {
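/* SSE scalar form: e.g. "addss" for SFmode and "addsd" for DFmode; the
   {...|...} template carries the AT&T and Intel operand orders.  */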
8569 strcpy (buf, ssep);
8570 if (GET_MODE (operands[0]) == SFmode)
8571 strcat (buf, "ss\t{%2, %0|%0, %2}");
8572 else
8573 strcat (buf, "sd\t{%2, %0|%0, %2}");
8574 return buf;
8575 }
8576 strcpy (buf, p);
8577
8578 switch (GET_CODE (operands[3]))
8579 {
8580 case MULT:
8581 case PLUS:
8582 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
8583 {
8584 rtx temp = operands[2];
8585 operands[2] = operands[1];
8586 operands[1] = temp;
8587 }
8588
8589 /* We know operands[0] == operands[1]. */
8590
8591 if (MEM_P (operands[2]))
8592 {
8593 p = "%z2\t%2";
8594 break;
8595 }
8596
8597 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
8598 {
8599 if (STACK_TOP_P (operands[0]))
8600 /* How is it that we are storing to a dead operand[2]?
8601 Well, presumably operands[1] is dead too. We can't
8602 store the result to st(0) as st(0) gets popped on this
8603 instruction. Instead store to operands[2] (which I
8604 think has to be st(1)). st(1) will be popped later.
8605 gcc <= 2.8.1 didn't have this check and generated
8606 assembly code that the Unixware assembler rejected. */
8607 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
8608 else
8609 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
8610 break;
8611 }
8612
8613 if (STACK_TOP_P (operands[0]))
8614 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
8615 else
8616 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
8617 break;
8618
8619 case MINUS:
8620 case DIV:
8621 if (MEM_P (operands[1]))
8622 {
8623 p = "r%z1\t%1";
8624 break;
8625 }
8626
8627 if (MEM_P (operands[2]))
8628 {
8629 p = "%z2\t%2";
8630 break;
8631 }
8632
8633 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
8634 {
8635 #if SYSV386_COMPAT
8636 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
8637 derived assemblers, confusingly reverse the direction of
8638 the operation for fsub{r} and fdiv{r} when the
8639 destination register is not st(0). The Intel assembler
8640 doesn't have this brain damage. Read !SYSV386_COMPAT to
8641 figure out what the hardware really does. */
8642 if (STACK_TOP_P (operands[0]))
8643 p = "{p\t%0, %2|rp\t%2, %0}";
8644 else
8645 p = "{rp\t%2, %0|p\t%0, %2}";
8646 #else
8647 if (STACK_TOP_P (operands[0]))
8648 /* As above for fmul/fadd, we can't store to st(0). */
8649 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
8650 else
8651 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
8652 #endif
8653 break;
8654 }
8655
8656 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
8657 {
8658 #if SYSV386_COMPAT
8659 if (STACK_TOP_P (operands[0]))
8660 p = "{rp\t%0, %1|p\t%1, %0}";
8661 else
8662 p = "{p\t%1, %0|rp\t%0, %1}";
8663 #else
8664 if (STACK_TOP_P (operands[0]))
8665 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
8666 else
8667 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
8668 #endif
8669 break;
8670 }
8671
8672 if (STACK_TOP_P (operands[0]))
8673 {
8674 if (STACK_TOP_P (operands[1]))
8675 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
8676 else
8677 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
8678 break;
8679 }
8680 else if (STACK_TOP_P (operands[1]))
8681 {
8682 #if SYSV386_COMPAT
8683 p = "{\t%1, %0|r\t%0, %1}";
8684 #else
8685 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
8686 #endif
8687 }
8688 else
8689 {
8690 #if SYSV386_COMPAT
8691 p = "{r\t%2, %0|\t%0, %2}";
8692 #else
8693 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
8694 #endif
8695 }
8696 break;
8697
8698 default:
8699 gcc_unreachable ();
8700 }
8701
8702 strcat (buf, p);
8703 return buf;
8704 }
8705
8706 /* Return needed mode for entity in optimize_mode_switching pass. */
8707
8708 int
8709 ix86_mode_needed (int entity, rtx insn)
8710 {
8711 enum attr_i387_cw mode;
8712
8713 /* The mode UNINITIALIZED is used to store the control word after a
8714 function call or ASM pattern. The mode ANY specifies that the function
8715 has no requirements on the control word and makes no changes to the
8716 bits we are interested in. */
8717
8718 if (CALL_P (insn)
8719 || (NONJUMP_INSN_P (insn)
8720 && (asm_noperands (PATTERN (insn)) >= 0
8721 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
8722 return I387_CW_UNINITIALIZED;
8723
8724 if (recog_memoized (insn) < 0)
8725 return I387_CW_ANY;
8726
8727 mode = get_attr_i387_cw (insn);
8728
8729 switch (entity)
8730 {
8731 case I387_TRUNC:
8732 if (mode == I387_CW_TRUNC)
8733 return mode;
8734 break;
8735
8736 case I387_FLOOR:
8737 if (mode == I387_CW_FLOOR)
8738 return mode;
8739 break;
8740
8741 case I387_CEIL:
8742 if (mode == I387_CW_CEIL)
8743 return mode;
8744 break;
8745
8746 case I387_MASK_PM:
8747 if (mode == I387_CW_MASK_PM)
8748 return mode;
8749 break;
8750
8751 default:
8752 gcc_unreachable ();
8753 }
8754
8755 return I387_CW_ANY;
8756 }
8757
8758 /* Output code to initialize control word copies used by trunc?f?i and
8759 rounding patterns. CURRENT_MODE is set to the current control word,
8760 while NEW_MODE is set to the new control word. */
8761
8762 void
8763 emit_i387_cw_initialization (int mode)
8764 {
8765 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
8766 rtx new_mode;
8767
8768 int slot;
8769
8770 rtx reg = gen_reg_rtx (HImode);
8771
8772 emit_insn (gen_x86_fnstcw_1 (stored_mode));
8773 emit_move_insn (reg, copy_rtx (stored_mode));
8774
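/* In the x87 control word the rounding-control field is bits 10-11
   (mask 0x0c00): 00 = round to nearest, 01 = round down, 10 = round up,
   11 = truncate.  Bit 5 (0x0020) is the precision-exception mask.  */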
8775 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
8776 {
8777 switch (mode)
8778 {
8779 case I387_CW_TRUNC:
8780 /* round toward zero (truncate) */
8781 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
8782 slot = SLOT_CW_TRUNC;
8783 break;
8784
8785 case I387_CW_FLOOR:
8786 /* round down toward -oo */
8787 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
8788 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
8789 slot = SLOT_CW_FLOOR;
8790 break;
8791
8792 case I387_CW_CEIL:
8793 /* round up toward +oo */
8794 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
8795 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
8796 slot = SLOT_CW_CEIL;
8797 break;
8798
8799 case I387_CW_MASK_PM:
8800 /* mask precision exception for nearbyint() */
8801 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8802 slot = SLOT_CW_MASK_PM;
8803 break;
8804
8805 default:
8806 gcc_unreachable ();
8807 }
8808 }
8809 else
8810 {
8811 switch (mode)
8812 {
8813 case I387_CW_TRUNC:
8814 /* round toward zero (truncate) */
8815 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
8816 slot = SLOT_CW_TRUNC;
8817 break;
8818
8819 case I387_CW_FLOOR:
8820 /* round down toward -oo */
8821 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
8822 slot = SLOT_CW_FLOOR;
8823 break;
8824
8825 case I387_CW_CEIL:
8826 /* round up toward +oo */
8827 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
8828 slot = SLOT_CW_CEIL;
8829 break;
8830
8831 case I387_CW_MASK_PM:
8832 /* mask precision exception for nearbyint() */
8833 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8834 slot = SLOT_CW_MASK_PM;
8835 break;
8836
8837 default:
8838 gcc_unreachable ();
8839 }
8840 }
8841
8842 gcc_assert (slot < MAX_386_STACK_LOCALS);
8843
8844 new_mode = assign_386_stack_local (HImode, slot);
8845 emit_move_insn (new_mode, reg);
8846 }
8847
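/* Illustrative sketch (editorial addition, not part of the original sources;
   the example_* helper names are hypothetical): the control-word values the
   routine above prepares, computed on a plain 16-bit value.  Bits 10-11 of
   the x87 control word select the rounding mode (00 = nearest, 01 = down,
   10 = up, 11 = truncate) and bit 5 masks the precision exception.  The
   insv-based switch above writes the 2-bit rounding field directly, which is
   why it uses the constants 0xc, 0x4 and 0x8 instead.  */

static unsigned short
example_cw_trunc (unsigned short cw)
{
  return cw | 0x0c00;                /* force both rounding-control bits */
}

static unsigned short
example_cw_floor (unsigned short cw)
{
  return (cw & ~0x0c00) | 0x0400;    /* clear the field, then "round down" */
}

static unsigned short
example_cw_ceil (unsigned short cw)
{
  return (cw & ~0x0c00) | 0x0800;    /* clear the field, then "round up" */
}

static unsigned short
example_cw_mask_pm (unsigned short cw)
{
  return cw | 0x0020;                /* mask the precision exception only */
}
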
8848 /* Output code for INSN to convert a float to a signed int. OPERANDS
8849 are the insn operands. The output may be [HSD]Imode and the input
8850 operand may be [SDX]Fmode. */
8851
8852 const char *
8853 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
8854 {
8855 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8856 int dimode_p = GET_MODE (operands[0]) == DImode;
8857 int round_mode = get_attr_i387_cw (insn);
8858
8859 /* Jump through a hoop or two for DImode, since the hardware has no
8860 non-popping instruction. We used to do this a different way, but
8861 that was somewhat fragile and broke with post-reload splitters. */
8862 if ((dimode_p || fisttp) && !stack_top_dies)
8863 output_asm_insn ("fld\t%y1", operands);
8864
8865 gcc_assert (STACK_TOP_P (operands[1]));
8866 gcc_assert (MEM_P (operands[0]));
8867
8868 if (fisttp)
8869 output_asm_insn ("fisttp%z0\t%0", operands);
8870 else
8871 {
8872 if (round_mode != I387_CW_ANY)
8873 output_asm_insn ("fldcw\t%3", operands);
8874 if (stack_top_dies || dimode_p)
8875 output_asm_insn ("fistp%z0\t%0", operands);
8876 else
8877 output_asm_insn ("fist%z0\t%0", operands);
8878 if (round_mode != I387_CW_ANY)
8879 output_asm_insn ("fldcw\t%2", operands);
8880 }
8881
8882 return "";
8883 }
8884
8885 /* Output code for x87 ffreep insn. The OPNO argument, which may only
8886 have the values zero or one, indicates the ffreep insn's operand
8887 from the OPERANDS array. */
8888
8889 static const char *
8890 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
8891 {
8892 if (TARGET_USE_FFREEP)
8893 #if HAVE_AS_IX86_FFREEP
8894 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
8895 #else
8896 {
8897 static char retval[] = ".word\t0xc_df";
8898 int regno = REGNO (operands[opno]);
8899
8900 gcc_assert (FP_REGNO_P (regno));
8901
8902 retval[9] = '0' + (regno - FIRST_STACK_REG);
8903 return retval;
8904 }
8905 #endif
8906
8907 return opno ? "fstp\t%y1" : "fstp\t%y0";
8908 }
8909
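/* Illustrative sketch (editorial addition, not part of the original sources;
   the example_* helper name is hypothetical): "ffreep %st(i)" encodes as the
   two bytes DF C0+i.  Because ".word" emits its operand little-endian, the
   directive built above becomes e.g. ".word 0xc1df" for %st(1), which
   assembles to exactly those bytes.  */

static void
example_ffreep_bytes (int i, unsigned char bytes[2])
{
  bytes[0] = 0xdf;                        /* escape opcode byte */
  bytes[1] = (unsigned char) (0xc0 + i);  /* ModRM byte selecting %st(i) */
}
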
8910
8911 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
8912 should be used. UNORDERED_P is true when fucom should be used. */
8913
8914 const char *
8915 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
8916 {
8917 int stack_top_dies;
8918 rtx cmp_op0, cmp_op1;
8919 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
8920
8921 if (eflags_p)
8922 {
8923 cmp_op0 = operands[0];
8924 cmp_op1 = operands[1];
8925 }
8926 else
8927 {
8928 cmp_op0 = operands[1];
8929 cmp_op1 = operands[2];
8930 }
8931
8932 if (is_sse)
8933 {
8934 if (GET_MODE (operands[0]) == SFmode)
8935 if (unordered_p)
8936 return "ucomiss\t{%1, %0|%0, %1}";
8937 else
8938 return "comiss\t{%1, %0|%0, %1}";
8939 else
8940 if (unordered_p)
8941 return "ucomisd\t{%1, %0|%0, %1}";
8942 else
8943 return "comisd\t{%1, %0|%0, %1}";
8944 }
8945
8946 gcc_assert (STACK_TOP_P (cmp_op0));
8947
8948 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8949
8950 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
8951 {
8952 if (stack_top_dies)
8953 {
8954 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
8955 return output_387_ffreep (operands, 1);
8956 }
8957 else
8958 return "ftst\n\tfnstsw\t%0";
8959 }
8960
8961 if (STACK_REG_P (cmp_op1)
8962 && stack_top_dies
8963 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
8964 && REGNO (cmp_op1) != FIRST_STACK_REG)
8965 {
8966 /* If both the top of the 387 stack dies, and the other operand
8967 is also a stack register that dies, then this must be a
8968 `fcompp' float compare */
8969
8970 if (eflags_p)
8971 {
8972 /* There is no double popping fcomi variant. Fortunately,
8973 eflags is immune from the fstp's cc clobbering. */
8974 if (unordered_p)
8975 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
8976 else
8977 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
8978 return output_387_ffreep (operands, 0);
8979 }
8980 else
8981 {
8982 if (unordered_p)
8983 return "fucompp\n\tfnstsw\t%0";
8984 else
8985 return "fcompp\n\tfnstsw\t%0";
8986 }
8987 }
8988 else
8989 {
8990 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
8991
8992 static const char * const alt[16] =
8993 {
8994 "fcom%z2\t%y2\n\tfnstsw\t%0",
8995 "fcomp%z2\t%y2\n\tfnstsw\t%0",
8996 "fucom%z2\t%y2\n\tfnstsw\t%0",
8997 "fucomp%z2\t%y2\n\tfnstsw\t%0",
8998
8999 "ficom%z2\t%y2\n\tfnstsw\t%0",
9000 "ficomp%z2\t%y2\n\tfnstsw\t%0",
9001 NULL,
9002 NULL,
9003
9004 "fcomi\t{%y1, %0|%0, %y1}",
9005 "fcomip\t{%y1, %0|%0, %y1}",
9006 "fucomi\t{%y1, %0|%0, %y1}",
9007 "fucomip\t{%y1, %0|%0, %y1}",
9008
9009 NULL,
9010 NULL,
9011 NULL,
9012 NULL
9013 };
9014
9015 int mask;
9016 const char *ret;
9017
9018 mask = eflags_p << 3;
9019 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
9020 mask |= unordered_p << 1;
9021 mask |= stack_top_dies;
9022
9023 gcc_assert (mask < 16);
9024 ret = alt[mask];
9025 gcc_assert (ret);
9026
9027 return ret;
9028 }
9029 }
9030
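/* Illustrative sketch (editorial addition, not part of the original sources;
   example_* is a hypothetical helper): how the 4-bit index into the alt[]
   table above is assembled.  For instance an fcomi-style ordered compare
   where the stack top dies gives eflags_p = 1, int_op = 0, unordered_p = 0,
   stack_top_dies = 1, i.e. index 9, which selects "fcomip".  */

static int
example_fp_compare_alt_index (int eflags_p, int int_op, int unordered_p,
                              int stack_top_dies)
{
  return (eflags_p << 3) | (int_op << 2) | (unordered_p << 1) | stack_top_dies;
}
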
9031 void
9032 ix86_output_addr_vec_elt (FILE *file, int value)
9033 {
9034 const char *directive = ASM_LONG;
9035
9036 #ifdef ASM_QUAD
9037 if (TARGET_64BIT)
9038 directive = ASM_QUAD;
9039 #else
9040 gcc_assert (!TARGET_64BIT);
9041 #endif
9042
9043 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
9044 }
9045
9046 void
9047 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
9048 {
9049 if (TARGET_64BIT)
9050 fprintf (file, "%s%s%d-%s%d\n",
9051 ASM_LONG, LPREFIX, value, LPREFIX, rel);
9052 else if (HAVE_AS_GOTOFF_IN_DATA)
9053 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
9054 #if TARGET_MACHO
9055 else if (TARGET_MACHO)
9056 {
9057 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
9058 machopic_output_function_base_name (file);
9059 fprintf(file, "\n");
9060 }
9061 #endif
9062 else
9063 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
9064 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
9065 }
9066 \f
9067 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
9068 for the target. */
9069
9070 void
9071 ix86_expand_clear (rtx dest)
9072 {
9073 rtx tmp;
9074
9075 /* We play register width games, which are only valid after reload. */
9076 gcc_assert (reload_completed);
9077
9078 /* Avoid HImode and its attendant prefix byte. */
9079 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
9080 dest = gen_rtx_REG (SImode, REGNO (dest));
9081
9082 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
9083
9084 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
9085 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
9086 {
9087 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
9088 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
9089 }
9090
9091 emit_insn (tmp);
9092 }
9093
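/* Illustrative note (editorial addition, not part of the original sources;
   example_* is a hypothetical helper): the size trade-off behind the xor
   form chosen above.  "xor %eax, %eax" is 2 bytes but clobbers the flags,
   which is why a CLOBBER of the flags register is attached to the SET;
   "mov $0, %eax" is 5 bytes and leaves the flags alone.  */

static int
example_clear_insn_size (int use_xor)
{
  return use_xor ? 2 /* 31 c0 */ : 5 /* b8 00 00 00 00 */;
}
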
9094 /* X is an unchanging MEM. If it is a constant pool reference, return
9095 the constant pool rtx, else NULL. */
9096
9097 rtx
9098 maybe_get_pool_constant (rtx x)
9099 {
9100 x = ix86_delegitimize_address (XEXP (x, 0));
9101
9102 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
9103 return get_pool_constant (x);
9104
9105 return NULL_RTX;
9106 }
9107
9108 void
9109 ix86_expand_move (enum machine_mode mode, rtx operands[])
9110 {
9111 int strict = (reload_in_progress || reload_completed);
9112 rtx op0, op1;
9113 enum tls_model model;
9114
9115 op0 = operands[0];
9116 op1 = operands[1];
9117
9118 if (GET_CODE (op1) == SYMBOL_REF)
9119 {
9120 model = SYMBOL_REF_TLS_MODEL (op1);
9121 if (model)
9122 {
9123 op1 = legitimize_tls_address (op1, model, true);
9124 op1 = force_operand (op1, op0);
9125 if (op1 == op0)
9126 return;
9127 }
9128 }
9129 else if (GET_CODE (op1) == CONST
9130 && GET_CODE (XEXP (op1, 0)) == PLUS
9131 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
9132 {
9133 model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
9134 if (model)
9135 {
9136 rtx addend = XEXP (XEXP (op1, 0), 1);
9137 op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
9138 op1 = force_operand (op1, NULL);
9139 op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
9140 op0, 1, OPTAB_DIRECT);
9141 if (op1 == op0)
9142 return;
9143 }
9144 }
9145
9146 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
9147 {
9148 if (TARGET_MACHO && !TARGET_64BIT)
9149 {
9150 #if TARGET_MACHO
9151 if (MACHOPIC_PURE)
9152 {
9153 rtx temp = ((reload_in_progress
9154 || ((op0 && REG_P (op0))
9155 && mode == Pmode))
9156 ? op0 : gen_reg_rtx (Pmode));
9157 op1 = machopic_indirect_data_reference (op1, temp);
9158 op1 = machopic_legitimize_pic_address (op1, mode,
9159 temp == op1 ? 0 : temp);
9160 }
9161 else if (MACHOPIC_INDIRECT)
9162 op1 = machopic_indirect_data_reference (op1, 0);
9163 if (op0 == op1)
9164 return;
9165 #endif
9166 }
9167 else
9168 {
9169 if (MEM_P (op0))
9170 op1 = force_reg (Pmode, op1);
9171 else
9172 op1 = legitimize_address (op1, op1, Pmode);
9173 }
9174 }
9175 else
9176 {
9177 if (MEM_P (op0)
9178 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
9179 || !push_operand (op0, mode))
9180 && MEM_P (op1))
9181 op1 = force_reg (mode, op1);
9182
9183 if (push_operand (op0, mode)
9184 && ! general_no_elim_operand (op1, mode))
9185 op1 = copy_to_mode_reg (mode, op1);
9186
9187 /* Force large constants in 64bit compilation into register
9188 to get them CSEed. */
9189 if (TARGET_64BIT && mode == DImode
9190 && immediate_operand (op1, mode)
9191 && !x86_64_zext_immediate_operand (op1, VOIDmode)
9192 && !register_operand (op0, mode)
9193 && optimize && !reload_completed && !reload_in_progress)
9194 op1 = copy_to_mode_reg (mode, op1);
9195
9196 if (FLOAT_MODE_P (mode))
9197 {
9198 /* If we are loading a floating point constant to a register,
9199 force the value to memory now, since we'll get better code
9200 out the back end. */
9201
9202 if (strict)
9203 ;
9204 else if (GET_CODE (op1) == CONST_DOUBLE)
9205 {
9206 op1 = validize_mem (force_const_mem (mode, op1));
9207 if (!register_operand (op0, mode))
9208 {
9209 rtx temp = gen_reg_rtx (mode);
9210 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
9211 emit_move_insn (op0, temp);
9212 return;
9213 }
9214 }
9215 }
9216 }
9217
9218 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
9219 }
9220
9221 void
9222 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
9223 {
9224 rtx op0 = operands[0], op1 = operands[1];
9225
9226 /* Force constants other than zero into memory. We do not know how
9227 the instructions used to build constants modify the upper 64 bits
9228 of the register; once we have that information we may be able
9229 to handle some of them more efficiently. */
9230 if ((reload_in_progress | reload_completed) == 0
9231 && register_operand (op0, mode)
9232 && CONSTANT_P (op1)
9233 && standard_sse_constant_p (op1) <= 0)
9234 op1 = validize_mem (force_const_mem (mode, op1));
9235
9236 /* Make operand1 a register if it isn't already. */
9237 if (!no_new_pseudos
9238 && !register_operand (op0, mode)
9239 && !register_operand (op1, mode))
9240 {
9241 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
9242 return;
9243 }
9244
9245 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
9246 }
9247
9248 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
9249 straight to ix86_expand_vector_move. */
9250
9251 void
9252 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
9253 {
9254 rtx op0, op1, m;
9255
9256 op0 = operands[0];
9257 op1 = operands[1];
9258
9259 if (MEM_P (op1))
9260 {
9261 /* If we're optimizing for size, movups is the smallest. */
9262 if (optimize_size)
9263 {
9264 op0 = gen_lowpart (V4SFmode, op0);
9265 op1 = gen_lowpart (V4SFmode, op1);
9266 emit_insn (gen_sse_movups (op0, op1));
9267 return;
9268 }
9269
9270 /* ??? If we have typed data, then it would appear that using
9271 movdqu is the only way to get unaligned data loaded with
9272 integer type. */
9273 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
9274 {
9275 op0 = gen_lowpart (V16QImode, op0);
9276 op1 = gen_lowpart (V16QImode, op1);
9277 emit_insn (gen_sse2_movdqu (op0, op1));
9278 return;
9279 }
9280
9281 if (TARGET_SSE2 && mode == V2DFmode)
9282 {
9283 rtx zero;
9284
9285 /* When SSE registers are split into halves, we can avoid
9286 writing to the top half twice. */
9287 if (TARGET_SSE_SPLIT_REGS)
9288 {
9289 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9290 zero = op0;
9291 }
9292 else
9293 {
9294 /* ??? Not sure about the best option for the Intel chips.
9295 The following would seem to satisfy; the register is
9296 entirely cleared, breaking the dependency chain. We
9297 then store to the upper half, with a dependency depth
9298 of one. A rumor has it that Intel recommends two movsd
9299 followed by an unpacklpd, but this is unconfirmed. And
9300 given that the dependency depth of the unpacklpd would
9301 still be one, I'm not sure why this would be better. */
9302 zero = CONST0_RTX (V2DFmode);
9303 }
9304
9305 m = adjust_address (op1, DFmode, 0);
9306 emit_insn (gen_sse2_loadlpd (op0, zero, m));
9307 m = adjust_address (op1, DFmode, 8);
9308 emit_insn (gen_sse2_loadhpd (op0, op0, m));
9309 }
9310 else
9311 {
9312 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
9313 emit_move_insn (op0, CONST0_RTX (mode));
9314 else
9315 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9316
9317 if (mode != V4SFmode)
9318 op0 = gen_lowpart (V4SFmode, op0);
9319 m = adjust_address (op1, V2SFmode, 0);
9320 emit_insn (gen_sse_loadlps (op0, op0, m));
9321 m = adjust_address (op1, V2SFmode, 8);
9322 emit_insn (gen_sse_loadhps (op0, op0, m));
9323 }
9324 }
9325 else if (MEM_P (op0))
9326 {
9327 /* If we're optimizing for size, movups is the smallest. */
9328 if (optimize_size)
9329 {
9330 op0 = gen_lowpart (V4SFmode, op0);
9331 op1 = gen_lowpart (V4SFmode, op1);
9332 emit_insn (gen_sse_movups (op0, op1));
9333 return;
9334 }
9335
9336 /* ??? Similar to above, only less clear because of quote
9337 typeless stores unquote. */
9338 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
9339 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
9340 {
9341 op0 = gen_lowpart (V16QImode, op0);
9342 op1 = gen_lowpart (V16QImode, op1);
9343 emit_insn (gen_sse2_movdqu (op0, op1));
9344 return;
9345 }
9346
9347 if (TARGET_SSE2 && mode == V2DFmode)
9348 {
9349 m = adjust_address (op0, DFmode, 0);
9350 emit_insn (gen_sse2_storelpd (m, op1));
9351 m = adjust_address (op0, DFmode, 8);
9352 emit_insn (gen_sse2_storehpd (m, op1));
9353 }
9354 else
9355 {
9356 if (mode != V4SFmode)
9357 op1 = gen_lowpart (V4SFmode, op1);
9358 m = adjust_address (op0, V2SFmode, 0);
9359 emit_insn (gen_sse_storelps (m, op1));
9360 m = adjust_address (op0, V2SFmode, 8);
9361 emit_insn (gen_sse_storehps (m, op1));
9362 }
9363 }
9364 else
9365 gcc_unreachable ();
9366 }
9367
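/* Illustrative sketch (editorial addition, not part of the original sources;
   the example_* names are hypothetical): the semantics of the split V2DFmode
   load emitted above - two half-width loads (loadlpd/loadhpd) that together
   fill the destination, avoiding a full-width unaligned movupd.  */

struct example_v2df { double lo, hi; };

static struct example_v2df
example_split_unaligned_load (const double *p /* possibly misaligned */)
{
  struct example_v2df r;
  r.lo = p[0];   /* corresponds to the loadlpd of bytes 0-7  */
  r.hi = p[1];   /* corresponds to the loadhpd of bytes 8-15 */
  return r;
}
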
9368 /* Expand a push in MODE. This is some mode for which we do not support
9369 proper push instructions, at least from the registers that we expect
9370 the value to live in. */
9371
9372 void
9373 ix86_expand_push (enum machine_mode mode, rtx x)
9374 {
9375 rtx tmp;
9376
9377 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
9378 GEN_INT (-GET_MODE_SIZE (mode)),
9379 stack_pointer_rtx, 1, OPTAB_DIRECT);
9380 if (tmp != stack_pointer_rtx)
9381 emit_move_insn (stack_pointer_rtx, tmp);
9382
9383 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
9384 emit_move_insn (tmp, x);
9385 }
9386
9387 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
9388 destination to use for the operation. If different from the true
9389 destination in operands[0], a copy operation will be required. */
9390
9391 rtx
9392 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
9393 rtx operands[])
9394 {
9395 int matching_memory;
9396 rtx src1, src2, dst;
9397
9398 dst = operands[0];
9399 src1 = operands[1];
9400 src2 = operands[2];
9401
9402 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
9403 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9404 && (rtx_equal_p (dst, src2)
9405 || immediate_operand (src1, mode)))
9406 {
9407 rtx temp = src1;
9408 src1 = src2;
9409 src2 = temp;
9410 }
9411
9412 /* If the destination is memory, and we do not have matching source
9413 operands, do things in registers. */
9414 matching_memory = 0;
9415 if (MEM_P (dst))
9416 {
9417 if (rtx_equal_p (dst, src1))
9418 matching_memory = 1;
9419 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9420 && rtx_equal_p (dst, src2))
9421 matching_memory = 2;
9422 else
9423 dst = gen_reg_rtx (mode);
9424 }
9425
9426 /* Both source operands cannot be in memory. */
9427 if (MEM_P (src1) && MEM_P (src2))
9428 {
9429 if (matching_memory != 2)
9430 src2 = force_reg (mode, src2);
9431 else
9432 src1 = force_reg (mode, src1);
9433 }
9434
9435 /* If the operation is not commutative, source 1 cannot be a constant
9436 or non-matching memory. */
9437 if ((CONSTANT_P (src1)
9438 || (!matching_memory && MEM_P (src1)))
9439 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
9440 src1 = force_reg (mode, src1);
9441
9442 src1 = operands[1] = src1;
9443 src2 = operands[2] = src2;
9444 return dst;
9445 }
9446
9447 /* Similarly, but assume that the destination has already been
9448 set up properly. */
9449
9450 void
9451 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
9452 enum machine_mode mode, rtx operands[])
9453 {
9454 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
9455 gcc_assert (dst == operands[0]);
9456 }
9457
9458 /* Attempt to expand a binary operator. Make the expansion closer to the
9459 actual machine than just general_operand, which would allow 3 separate
9460 memory references (one output, two inputs) in a single insn. */
9461
9462 void
9463 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
9464 rtx operands[])
9465 {
9466 rtx src1, src2, dst, op, clob;
9467
9468 dst = ix86_fixup_binary_operands (code, mode, operands);
9469 src1 = operands[1];
9470 src2 = operands[2];
9471
9472 /* Emit the instruction. */
9473
9474 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
9475 if (reload_in_progress)
9476 {
9477 /* Reload doesn't know about the flags register, and doesn't know that
9478 it doesn't want to clobber it. We can only do this with PLUS. */
9479 gcc_assert (code == PLUS);
9480 emit_insn (op);
9481 }
9482 else
9483 {
9484 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9485 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
9486 }
9487
9488 /* Fix up the destination if needed. */
9489 if (dst != operands[0])
9490 emit_move_insn (operands[0], dst);
9491 }
9492
9493 /* Return TRUE or FALSE depending on whether the binary operator meets the
9494 appropriate constraints. */
9495
9496 int
9497 ix86_binary_operator_ok (enum rtx_code code,
9498 enum machine_mode mode ATTRIBUTE_UNUSED,
9499 rtx operands[3])
9500 {
9501 /* Both source operands cannot be in memory. */
9502 if (MEM_P (operands[1]) && MEM_P (operands[2]))
9503 return 0;
9504 /* If the operation is not commutative, source 1 cannot be a constant. */
9505 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
9506 return 0;
9507 /* If the destination is memory, we must have a matching source operand. */
9508 if (MEM_P (operands[0])
9509 && ! (rtx_equal_p (operands[0], operands[1])
9510 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9511 && rtx_equal_p (operands[0], operands[2]))))
9512 return 0;
9513 /* If the operation is not commutative and source 1 is memory, we must
9514 have a matching destination. */
9515 if (MEM_P (operands[1])
9516 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
9517 && ! rtx_equal_p (operands[0], operands[1]))
9518 return 0;
9519 return 1;
9520 }
9521
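/* Illustrative examples (editorial addition, not part of the original
   sources; example_* is a hypothetical, simplified checker) of what the
   predicate above enforces for a two-address target:
     addl %ebx, (%ecx)    destination matches source 1, one memory operand: OK
     addl (%ecx), %ebx    memory source, register destination matching src1: OK
     addl (%ecx), (%edx)  two memory operands: rejected
     a non-commutative op with operands (mem, reg, reg) where the destination
       does not match source 1: rejected.  */

static int
example_two_address_ok (int src1_is_mem, int src2_is_mem, int dest_is_mem,
                        int dest_matches_src1, int dest_matches_src2,
                        int commutative)
{
  if (src1_is_mem && src2_is_mem)
    return 0;    /* at most one memory operand */
  if (dest_is_mem
      && !(dest_matches_src1 || (commutative && dest_matches_src2)))
    return 0;    /* a memory destination must match one of the sources */
  return 1;      /* simplified: the constant-src1 checks above are omitted */
}
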
9522 /* Attempt to expand a unary operator. Make the expansion closer to the
9523 actual machine than just general_operand, which would allow 2 separate
9524 memory references (one output, one input) in a single insn. */
9525
9526 void
9527 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
9528 rtx operands[])
9529 {
9530 int matching_memory;
9531 rtx src, dst, op, clob;
9532
9533 dst = operands[0];
9534 src = operands[1];
9535
9536 /* If the destination is memory, and we do not have matching source
9537 operands, do things in registers. */
9538 matching_memory = 0;
9539 if (MEM_P (dst))
9540 {
9541 if (rtx_equal_p (dst, src))
9542 matching_memory = 1;
9543 else
9544 dst = gen_reg_rtx (mode);
9545 }
9546
9547 /* When source operand is memory, destination must match. */
9548 if (MEM_P (src) && !matching_memory)
9549 src = force_reg (mode, src);
9550
9551 /* Emit the instruction. */
9552
9553 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
9554 if (reload_in_progress || code == NOT)
9555 {
9556 /* Reload doesn't know about the flags register, and doesn't know that
9557 it doesn't want to clobber it. */
9558 gcc_assert (code == NOT);
9559 emit_insn (op);
9560 }
9561 else
9562 {
9563 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9564 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
9565 }
9566
9567 /* Fix up the destination if needed. */
9568 if (dst != operands[0])
9569 emit_move_insn (operands[0], dst);
9570 }
9571
9572 /* Return TRUE or FALSE depending on whether the unary operator meets the
9573 appropriate constraints. */
9574
9575 int
9576 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
9577 enum machine_mode mode ATTRIBUTE_UNUSED,
9578 rtx operands[2] ATTRIBUTE_UNUSED)
9579 {
9580 /* If one of operands is memory, source and destination must match. */
9581 if ((MEM_P (operands[0])
9582 || MEM_P (operands[1]))
9583 && ! rtx_equal_p (operands[0], operands[1]))
9584 return FALSE;
9585 return TRUE;
9586 }
9587
9588 /* A subroutine of ix86_expand_fp_absneg_operator and copysign expanders.
9589 Create a mask for the sign bit in MODE for an SSE register. If VECT is
9590 true, then replicate the mask for all elements of the vector register.
9591 If INVERT is true, then create a mask excluding the sign bit. */
9592
9593 rtx
9594 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
9595 {
9596 enum machine_mode vec_mode;
9597 HOST_WIDE_INT hi, lo;
9598 int shift = 63;
9599 rtvec v;
9600 rtx mask;
9601
9602 /* Find the sign bit, sign extended to 2*HWI. */
9603 if (mode == SFmode)
9604 lo = 0x80000000, hi = lo < 0;
9605 else if (HOST_BITS_PER_WIDE_INT >= 64)
9606 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
9607 else
9608 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
9609
9610 if (invert)
9611 lo = ~lo, hi = ~hi;
9612
9613 /* Force this value into the low part of a fp vector constant. */
9614 mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
9615 mask = gen_lowpart (mode, mask);
9616
9617 if (mode == SFmode)
9618 {
9619 if (vect)
9620 v = gen_rtvec (4, mask, mask, mask, mask);
9621 else
9622 v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
9623 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
9624 vec_mode = V4SFmode;
9625 }
9626 else
9627 {
9628 if (vect)
9629 v = gen_rtvec (2, mask, mask);
9630 else
9631 v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
9632 vec_mode = V2DFmode;
9633 }
9634
9635 return force_reg (vec_mode, gen_rtx_CONST_VECTOR (vec_mode, v));
9636 }
9637
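/* Illustrative sketch (editorial addition, not part of the original sources;
   example_* names are hypothetical): the scalar bit patterns that the routine
   above broadcasts into an SSE register, and how they turn ABS and NEG into
   pure bit operations (ABS clears the sign bit, NEG flips it).  */

static unsigned int
example_abs_sf_bits (unsigned int bits)
{
  return bits & 0x7fffffffu;    /* AND with the inverted SFmode sign mask */
}

static unsigned int
example_neg_sf_bits (unsigned int bits)
{
  return bits ^ 0x80000000u;    /* XOR with the SFmode sign mask */
}
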
9638 /* Generate code for floating point ABS or NEG. */
9639
9640 void
9641 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
9642 rtx operands[])
9643 {
9644 rtx mask, set, use, clob, dst, src;
9645 bool matching_memory;
9646 bool use_sse = false;
9647 bool vector_mode = VECTOR_MODE_P (mode);
9648 enum machine_mode elt_mode = mode;
9649
9650 if (vector_mode)
9651 {
9652 elt_mode = GET_MODE_INNER (mode);
9653 use_sse = true;
9654 }
9655 else if (TARGET_SSE_MATH)
9656 use_sse = SSE_FLOAT_MODE_P (mode);
9657
9658 /* NEG and ABS performed with SSE use bitwise mask operations.
9659 Create the appropriate mask now. */
9660 if (use_sse)
9661 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
9662 else
9663 mask = NULL_RTX;
9664
9665 dst = operands[0];
9666 src = operands[1];
9667
9668 /* If the destination is memory, and we don't have matching source
9669 operands or we're using the x87, do things in registers. */
9670 matching_memory = false;
9671 if (MEM_P (dst))
9672 {
9673 if (use_sse && rtx_equal_p (dst, src))
9674 matching_memory = true;
9675 else
9676 dst = gen_reg_rtx (mode);
9677 }
9678 if (MEM_P (src) && !matching_memory)
9679 src = force_reg (mode, src);
9680
9681 if (vector_mode)
9682 {
9683 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
9684 set = gen_rtx_SET (VOIDmode, dst, set);
9685 emit_insn (set);
9686 }
9687 else
9688 {
9689 set = gen_rtx_fmt_e (code, mode, src);
9690 set = gen_rtx_SET (VOIDmode, dst, set);
9691 if (mask)
9692 {
9693 use = gen_rtx_USE (VOIDmode, mask);
9694 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9695 emit_insn (gen_rtx_PARALLEL (VOIDmode,
9696 gen_rtvec (3, set, use, clob)));
9697 }
9698 else
9699 emit_insn (set);
9700 }
9701
9702 if (dst != operands[0])
9703 emit_move_insn (operands[0], dst);
9704 }
9705
9706 /* Expand a copysign operation. Special case operand 0 being a constant. */
9707
9708 void
9709 ix86_expand_copysign (rtx operands[])
9710 {
9711 enum machine_mode mode, vmode;
9712 rtx dest, op0, op1, mask, nmask;
9713
9714 dest = operands[0];
9715 op0 = operands[1];
9716 op1 = operands[2];
9717
9718 mode = GET_MODE (dest);
9719 vmode = mode == SFmode ? V4SFmode : V2DFmode;
9720
9721 if (GET_CODE (op0) == CONST_DOUBLE)
9722 {
9723 rtvec v;
9724
9725 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
9726 op0 = simplify_unary_operation (ABS, mode, op0, mode);
9727
9728 if (op0 == CONST0_RTX (mode))
9729 op0 = CONST0_RTX (vmode);
9730 else
9731 {
9732 if (mode == SFmode)
9733 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
9734 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
9735 else
9736 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
9737 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
9738 }
9739
9740 mask = ix86_build_signbit_mask (mode, 0, 0);
9741
9742 if (mode == SFmode)
9743 emit_insn (gen_copysignsf3_const (dest, op0, op1, mask));
9744 else
9745 emit_insn (gen_copysigndf3_const (dest, op0, op1, mask));
9746 }
9747 else
9748 {
9749 nmask = ix86_build_signbit_mask (mode, 0, 1);
9750 mask = ix86_build_signbit_mask (mode, 0, 0);
9751
9752 if (mode == SFmode)
9753 emit_insn (gen_copysignsf3_var (dest, NULL, op0, op1, nmask, mask));
9754 else
9755 emit_insn (gen_copysigndf3_var (dest, NULL, op0, op1, nmask, mask));
9756 }
9757 }
9758
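/* Illustrative sketch (editorial addition, not part of the original sources;
   example_* is a hypothetical helper): the bit-level identity that the
   copysign expander above and the splitters below implement with AND/ANDNOT
   and IOR on SSE registers, shown here on raw SFmode bits.  */

static unsigned int
example_copysign_bits (unsigned int op0, unsigned int op1)
{
  const unsigned int sign = 0x80000000u;
  return (op0 & ~sign) | (op1 & sign);    /* magnitude of op0, sign of op1 */
}
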
9759 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
9760 be a constant, and so has already been expanded into a vector constant. */
9761
9762 void
9763 ix86_split_copysign_const (rtx operands[])
9764 {
9765 enum machine_mode mode, vmode;
9766 rtx dest, op0, op1, mask, x;
9767
9768 dest = operands[0];
9769 op0 = operands[1];
9770 op1 = operands[2];
9771 mask = operands[3];
9772
9773 mode = GET_MODE (dest);
9774 vmode = GET_MODE (mask);
9775
9776 dest = simplify_gen_subreg (vmode, dest, mode, 0);
9777 x = gen_rtx_AND (vmode, dest, mask);
9778 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9779
9780 if (op0 != CONST0_RTX (vmode))
9781 {
9782 x = gen_rtx_IOR (vmode, dest, op0);
9783 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9784 }
9785 }
9786
9787 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
9788 so we have to do two masks. */
9789
9790 void
9791 ix86_split_copysign_var (rtx operands[])
9792 {
9793 enum machine_mode mode, vmode;
9794 rtx dest, scratch, op0, op1, mask, nmask, x;
9795
9796 dest = operands[0];
9797 scratch = operands[1];
9798 op0 = operands[2];
9799 op1 = operands[3];
9800 nmask = operands[4];
9801 mask = operands[5];
9802
9803 mode = GET_MODE (dest);
9804 vmode = GET_MODE (mask);
9805
9806 if (rtx_equal_p (op0, op1))
9807 {
9808 /* Shouldn't happen often (it's useless, obviously), but when it does
9809 we'd generate incorrect code if we continue below. */
9810 emit_move_insn (dest, op0);
9811 return;
9812 }
9813
9814 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
9815 {
9816 gcc_assert (REGNO (op1) == REGNO (scratch));
9817
9818 x = gen_rtx_AND (vmode, scratch, mask);
9819 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9820
9821 dest = mask;
9822 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9823 x = gen_rtx_NOT (vmode, dest);
9824 x = gen_rtx_AND (vmode, x, op0);
9825 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9826 }
9827 else
9828 {
9829 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
9830 {
9831 x = gen_rtx_AND (vmode, scratch, mask);
9832 }
9833 else /* alternative 2,4 */
9834 {
9835 gcc_assert (REGNO (mask) == REGNO (scratch));
9836 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
9837 x = gen_rtx_AND (vmode, scratch, op1);
9838 }
9839 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9840
9841 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
9842 {
9843 dest = simplify_gen_subreg (vmode, op0, mode, 0);
9844 x = gen_rtx_AND (vmode, dest, nmask);
9845 }
9846 else /* alternative 3,4 */
9847 {
9848 gcc_assert (REGNO (nmask) == REGNO (dest));
9849 dest = nmask;
9850 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9851 x = gen_rtx_AND (vmode, dest, op0);
9852 }
9853 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9854 }
9855
9856 x = gen_rtx_IOR (vmode, dest, scratch);
9857 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9858 }
9859
9860 /* Return TRUE or FALSE depending on whether the first SET in INSN
9861 has source and destination with matching CC modes, and that the
9862 CC mode is at least as constrained as REQ_MODE. */
9863
9864 int
9865 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
9866 {
9867 rtx set;
9868 enum machine_mode set_mode;
9869
9870 set = PATTERN (insn);
9871 if (GET_CODE (set) == PARALLEL)
9872 set = XVECEXP (set, 0, 0);
9873 gcc_assert (GET_CODE (set) == SET);
9874 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
9875
9876 set_mode = GET_MODE (SET_DEST (set));
9877 switch (set_mode)
9878 {
9879 case CCNOmode:
9880 if (req_mode != CCNOmode
9881 && (req_mode != CCmode
9882 || XEXP (SET_SRC (set), 1) != const0_rtx))
9883 return 0;
9884 break;
9885 case CCmode:
9886 if (req_mode == CCGCmode)
9887 return 0;
9888 /* FALLTHRU */
9889 case CCGCmode:
9890 if (req_mode == CCGOCmode || req_mode == CCNOmode)
9891 return 0;
9892 /* FALLTHRU */
9893 case CCGOCmode:
9894 if (req_mode == CCZmode)
9895 return 0;
9896 /* FALLTHRU */
9897 case CCZmode:
9898 break;
9899
9900 default:
9901 gcc_unreachable ();
9902 }
9903
9904 return (GET_MODE (SET_SRC (set)) == set_mode);
9905 }
9906
9907 /* Generate insn patterns to do an integer compare of OPERANDS. */
9908
9909 static rtx
9910 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
9911 {
9912 enum machine_mode cmpmode;
9913 rtx tmp, flags;
9914
9915 cmpmode = SELECT_CC_MODE (code, op0, op1);
9916 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
9917
9918 /* This is very simple, but making the interface the same as in the
9919 FP case makes the rest of the code easier. */
9920 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
9921 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
9922
9923 /* Return the test that should be put into the flags user, i.e.
9924 the bcc, scc, or cmov instruction. */
9925 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
9926 }
9927
9928 /* Figure out whether to use ordered or unordered fp comparisons.
9929 Return the appropriate mode to use. */
9930
9931 enum machine_mode
9932 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
9933 {
9934 /* ??? In order to make all comparisons reversible, we do all comparisons
9935 non-trapping when compiling for IEEE. Once gcc is able to distinguish
9936 between all forms of trapping and nontrapping comparisons, we can make
9937 inequality comparisons trapping again, since that results in better code
9938 when using FCOM based compares. */
9939 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
9940 }
9941
9942 enum machine_mode
9943 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
9944 {
9945 if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
9946 return ix86_fp_compare_mode (code);
9947 switch (code)
9948 {
9949 /* Only zero flag is needed. */
9950 case EQ: /* ZF=0 */
9951 case NE: /* ZF!=0 */
9952 return CCZmode;
9953 /* Codes needing carry flag. */
9954 case GEU: /* CF=0 */
9955 case GTU: /* CF=0 & ZF=0 */
9956 case LTU: /* CF=1 */
9957 case LEU: /* CF=1 | ZF=1 */
9958 return CCmode;
9959 /* Codes possibly doable only with sign flag when
9960 comparing against zero. */
9961 case GE: /* SF=OF or SF=0 */
9962 case LT: /* SF<>OF or SF=1 */
9963 if (op1 == const0_rtx)
9964 return CCGOCmode;
9965 else
9966 /* For other cases Carry flag is not required. */
9967 return CCGCmode;
9968 /* Codes doable only with the sign flag when comparing
9969 against zero, but we miss the jump instruction for it,
9970 so we need to use relational tests against overflow,
9971 which thus needs to be zero. */
9972 case GT: /* ZF=0 & SF=OF */
9973 case LE: /* ZF=1 | SF<>OF */
9974 if (op1 == const0_rtx)
9975 return CCNOmode;
9976 else
9977 return CCGCmode;
9978 /* The strcmp pattern does (use flags), and combine may ask us for a
9979 proper mode. */
9980 case USE:
9981 return CCmode;
9982 default:
9983 gcc_unreachable ();
9984 }
9985 }
9986
9987 /* Return the fixed registers used for condition codes. */
9988
9989 static bool
9990 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9991 {
9992 *p1 = FLAGS_REG;
9993 *p2 = FPSR_REG;
9994 return true;
9995 }
9996
9997 /* If two condition code modes are compatible, return a condition code
9998 mode which is compatible with both. Otherwise, return
9999 VOIDmode. */
10000
10001 static enum machine_mode
10002 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
10003 {
10004 if (m1 == m2)
10005 return m1;
10006
10007 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
10008 return VOIDmode;
10009
10010 if ((m1 == CCGCmode && m2 == CCGOCmode)
10011 || (m1 == CCGOCmode && m2 == CCGCmode))
10012 return CCGCmode;
10013
10014 switch (m1)
10015 {
10016 default:
10017 gcc_unreachable ();
10018
10019 case CCmode:
10020 case CCGCmode:
10021 case CCGOCmode:
10022 case CCNOmode:
10023 case CCZmode:
10024 switch (m2)
10025 {
10026 default:
10027 return VOIDmode;
10028
10029 case CCmode:
10030 case CCGCmode:
10031 case CCGOCmode:
10032 case CCNOmode:
10033 case CCZmode:
10034 return CCmode;
10035 }
10036
10037 case CCFPmode:
10038 case CCFPUmode:
10039 /* These are only compatible with themselves, which we already
10040 checked above. */
10041 return VOIDmode;
10042 }
10043 }
10044
10045 /* Return true if we should use an FCOMI instruction for this fp comparison. */
10046
10047 int
10048 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
10049 {
10050 enum rtx_code swapped_code = swap_condition (code);
10051 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
10052 || (ix86_fp_comparison_cost (swapped_code)
10053 == ix86_fp_comparison_fcomi_cost (swapped_code)));
10054 }
10055
10056 /* Swap, force into registers, or otherwise massage the two operands
10057 to a fp comparison. The operands are updated in place; the new
10058 comparison code is returned. */
10059
10060 static enum rtx_code
10061 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
10062 {
10063 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
10064 rtx op0 = *pop0, op1 = *pop1;
10065 enum machine_mode op_mode = GET_MODE (op0);
10066 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
10067
10068 /* All of the unordered compare instructions only work on registers.
10069 The same is true of the fcomi compare instructions. The XFmode
10070 compare instructions require registers except when comparing
10071 against zero or when converting operand 1 from fixed point to
10072 floating point. */
10073
10074 if (!is_sse
10075 && (fpcmp_mode == CCFPUmode
10076 || (op_mode == XFmode
10077 && ! (standard_80387_constant_p (op0) == 1
10078 || standard_80387_constant_p (op1) == 1)
10079 && GET_CODE (op1) != FLOAT)
10080 || ix86_use_fcomi_compare (code)))
10081 {
10082 op0 = force_reg (op_mode, op0);
10083 op1 = force_reg (op_mode, op1);
10084 }
10085 else
10086 {
10087 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
10088 things around if they appear profitable, otherwise force op0
10089 into a register. */
10090
10091 if (standard_80387_constant_p (op0) == 0
10092 || (MEM_P (op0)
10093 && ! (standard_80387_constant_p (op1) == 0
10094 || MEM_P (op1))))
10095 {
10096 rtx tmp;
10097 tmp = op0, op0 = op1, op1 = tmp;
10098 code = swap_condition (code);
10099 }
10100
10101 if (!REG_P (op0))
10102 op0 = force_reg (op_mode, op0);
10103
10104 if (CONSTANT_P (op1))
10105 {
10106 int tmp = standard_80387_constant_p (op1);
10107 if (tmp == 0)
10108 op1 = validize_mem (force_const_mem (op_mode, op1));
10109 else if (tmp == 1)
10110 {
10111 if (TARGET_CMOVE)
10112 op1 = force_reg (op_mode, op1);
10113 }
10114 else
10115 op1 = force_reg (op_mode, op1);
10116 }
10117 }
10118
10119 /* Try to rearrange the comparison to make it cheaper. */
10120 if (ix86_fp_comparison_cost (code)
10121 > ix86_fp_comparison_cost (swap_condition (code))
10122 && (REG_P (op1) || !no_new_pseudos))
10123 {
10124 rtx tmp;
10125 tmp = op0, op0 = op1, op1 = tmp;
10126 code = swap_condition (code);
10127 if (!REG_P (op0))
10128 op0 = force_reg (op_mode, op0);
10129 }
10130
10131 *pop0 = op0;
10132 *pop1 = op1;
10133 return code;
10134 }
10135
10136 /* Convert comparison codes we use to represent FP comparison to integer
10137 code that will result in proper branch. Return UNKNOWN if no such code
10138 is available. */
10139
10140 enum rtx_code
10141 ix86_fp_compare_code_to_integer (enum rtx_code code)
10142 {
10143 switch (code)
10144 {
10145 case GT:
10146 return GTU;
10147 case GE:
10148 return GEU;
10149 case ORDERED:
10150 case UNORDERED:
10151 return code;
10152 break;
10153 case UNEQ:
10154 return EQ;
10155 break;
10156 case UNLT:
10157 return LTU;
10158 break;
10159 case UNLE:
10160 return LEU;
10161 break;
10162 case LTGT:
10163 return NE;
10164 break;
10165 default:
10166 return UNKNOWN;
10167 }
10168 }
10169
10170 /* Split comparison code CODE into comparisons we can do using branch
10171 instructions. BYPASS_CODE is the comparison code for the branch that
10172 will branch around FIRST_CODE and SECOND_CODE. If one of the branches
10173 is not required, it is set to UNKNOWN.
10174 We never require more than two branches. */
10175
10176 void
10177 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
10178 enum rtx_code *first_code,
10179 enum rtx_code *second_code)
10180 {
10181 *first_code = code;
10182 *bypass_code = UNKNOWN;
10183 *second_code = UNKNOWN;
10184
10185 /* The fcomi comparison sets flags as follows:
10186
10187 cmp ZF PF CF
10188 > 0 0 0
10189 < 0 0 1
10190 = 1 0 0
10191 un 1 1 1 */
10192
10193 switch (code)
10194 {
10195 case GT: /* GTU - CF=0 & ZF=0 */
10196 case GE: /* GEU - CF=0 */
10197 case ORDERED: /* PF=0 */
10198 case UNORDERED: /* PF=1 */
10199 case UNEQ: /* EQ - ZF=1 */
10200 case UNLT: /* LTU - CF=1 */
10201 case UNLE: /* LEU - CF=1 | ZF=1 */
10202 case LTGT: /* EQ - ZF=0 */
10203 break;
10204 case LT: /* LTU - CF=1 - fails on unordered */
10205 *first_code = UNLT;
10206 *bypass_code = UNORDERED;
10207 break;
10208 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
10209 *first_code = UNLE;
10210 *bypass_code = UNORDERED;
10211 break;
10212 case EQ: /* EQ - ZF=1 - fails on unordered */
10213 *first_code = UNEQ;
10214 *bypass_code = UNORDERED;
10215 break;
10216 case NE: /* NE - ZF=0 - fails on unordered */
10217 *first_code = LTGT;
10218 *second_code = UNORDERED;
10219 break;
10220 case UNGE: /* GEU - CF=0 - fails on unordered */
10221 *first_code = GE;
10222 *second_code = UNORDERED;
10223 break;
10224 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
10225 *first_code = GT;
10226 *second_code = UNORDERED;
10227 break;
10228 default:
10229 gcc_unreachable ();
10230 }
10231 if (!TARGET_IEEE_FP)
10232 {
10233 *second_code = UNKNOWN;
10234 *bypass_code = UNKNOWN;
10235 }
10236 }
10237
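/* Worked examples (editorial addition, not part of the original sources) of
   the splitting done above when TARGET_IEEE_FP, using the fcomi flag table
   in the comment:
     GT  needs CF=0 && ZF=0, which already excludes the unordered result,
         so it stays a single branch (ja);
     LT  would be CF=1, but unordered also sets CF, so it becomes
         first_code = UNLT (jb) guarded by bypass_code = UNORDERED (jp);
     NE  would be ZF=0, but unordered sets ZF, so it becomes
         first_code = LTGT (jne) plus second_code = UNORDERED (jp).
   When !TARGET_IEEE_FP both extra branches are dropped again.  */
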
10238 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
10239 All the following functions use the number of instructions as a cost metric.
10240 In the future this should be tweaked to compute bytes for optimize_size and
10241 take into account the performance of various instructions on various CPUs. */
10242 static int
10243 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
10244 {
10245 if (!TARGET_IEEE_FP)
10246 return 4;
10247 /* The cost of code output by ix86_expand_fp_compare. */
10248 switch (code)
10249 {
10250 case UNLE:
10251 case UNLT:
10252 case LTGT:
10253 case GT:
10254 case GE:
10255 case UNORDERED:
10256 case ORDERED:
10257 case UNEQ:
10258 return 4;
10259 break;
10260 case LT:
10261 case NE:
10262 case EQ:
10263 case UNGE:
10264 return 5;
10265 break;
10266 case LE:
10267 case UNGT:
10268 return 6;
10269 break;
10270 default:
10271 gcc_unreachable ();
10272 }
10273 }
10274
10275 /* Return cost of comparison done using fcomi operation.
10276 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10277 static int
10278 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
10279 {
10280 enum rtx_code bypass_code, first_code, second_code;
10281 /* Return an arbitrarily high cost when the instruction is not supported - this
10282 prevents gcc from using it. */
10283 if (!TARGET_CMOVE)
10284 return 1024;
10285 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10286 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
10287 }
10288
10289 /* Return cost of comparison done using sahf operation.
10290 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10291 static int
10292 ix86_fp_comparison_sahf_cost (enum rtx_code code)
10293 {
10294 enum rtx_code bypass_code, first_code, second_code;
10295 /* Return an arbitrarily high cost when the instruction is not preferred - this
10296 keeps gcc from using it. */
10297 if (!TARGET_USE_SAHF && !optimize_size)
10298 return 1024;
10299 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10300 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
10301 }
10302
10303 /* Compute cost of the comparison done using any method.
10304 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10305 static int
10306 ix86_fp_comparison_cost (enum rtx_code code)
10307 {
10308 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
10309 int min;
10310
10311 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
10312 sahf_cost = ix86_fp_comparison_sahf_cost (code);
10313
10314 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
10315 if (min > sahf_cost)
10316 min = sahf_cost;
10317 if (min > fcomi_cost)
10318 min = fcomi_cost;
10319 return min;
10320 }
10321
10322 /* Generate insn patterns to do a floating point compare of OPERANDS. */
10323
10324 static rtx
10325 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
10326 rtx *second_test, rtx *bypass_test)
10327 {
10328 enum machine_mode fpcmp_mode, intcmp_mode;
10329 rtx tmp, tmp2;
10330 int cost = ix86_fp_comparison_cost (code);
10331 enum rtx_code bypass_code, first_code, second_code;
10332
10333 fpcmp_mode = ix86_fp_compare_mode (code);
10334 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
10335
10336 if (second_test)
10337 *second_test = NULL_RTX;
10338 if (bypass_test)
10339 *bypass_test = NULL_RTX;
10340
10341 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10342
10343 /* Do fcomi/sahf based test when profitable. */
10344 if ((bypass_code == UNKNOWN || bypass_test)
10345 && (second_code == UNKNOWN || second_test)
10346 && ix86_fp_comparison_arithmetics_cost (code) > cost)
10347 {
10348 if (TARGET_CMOVE)
10349 {
10350 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10351 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
10352 tmp);
10353 emit_insn (tmp);
10354 }
10355 else
10356 {
10357 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10358 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
10359 if (!scratch)
10360 scratch = gen_reg_rtx (HImode);
10361 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
10362 emit_insn (gen_x86_sahf_1 (scratch));
10363 }
10364
10365 /* The FP codes work out to act like unsigned. */
10366 intcmp_mode = fpcmp_mode;
10367 code = first_code;
10368 if (bypass_code != UNKNOWN)
10369 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
10370 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10371 const0_rtx);
10372 if (second_code != UNKNOWN)
10373 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
10374 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10375 const0_rtx);
10376 }
10377 else
10378 {
10379 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
10380 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10381 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
10382 if (!scratch)
10383 scratch = gen_reg_rtx (HImode);
10384 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
10385
10386 /* In the unordered case, we have to check C2 for NaN's, which
10387 doesn't happen to work out to anything nice combination-wise.
10388 So do some bit twiddling on the value we've got in AH to come
10389 up with an appropriate set of condition codes. */
10390
10391 intcmp_mode = CCNOmode;
10392 switch (code)
10393 {
10394 case GT:
10395 case UNGT:
10396 if (code == GT || !TARGET_IEEE_FP)
10397 {
10398 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
10399 code = EQ;
10400 }
10401 else
10402 {
10403 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10404 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
10405 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
10406 intcmp_mode = CCmode;
10407 code = GEU;
10408 }
10409 break;
10410 case LT:
10411 case UNLT:
10412 if (code == LT && TARGET_IEEE_FP)
10413 {
10414 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10415 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
10416 intcmp_mode = CCmode;
10417 code = EQ;
10418 }
10419 else
10420 {
10421 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
10422 code = NE;
10423 }
10424 break;
10425 case GE:
10426 case UNGE:
10427 if (code == GE || !TARGET_IEEE_FP)
10428 {
10429 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
10430 code = EQ;
10431 }
10432 else
10433 {
10434 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10435 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
10436 GEN_INT (0x01)));
10437 code = NE;
10438 }
10439 break;
10440 case LE:
10441 case UNLE:
10442 if (code == LE && TARGET_IEEE_FP)
10443 {
10444 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10445 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
10446 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
10447 intcmp_mode = CCmode;
10448 code = LTU;
10449 }
10450 else
10451 {
10452 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
10453 code = NE;
10454 }
10455 break;
10456 case EQ:
10457 case UNEQ:
10458 if (code == EQ && TARGET_IEEE_FP)
10459 {
10460 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10461 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
10462 intcmp_mode = CCmode;
10463 code = EQ;
10464 }
10465 else
10466 {
10467 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
10468 code = NE;
10469 break;
10470 }
10471 break;
10472 case NE:
10473 case LTGT:
10474 if (code == NE && TARGET_IEEE_FP)
10475 {
10476 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10477 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
10478 GEN_INT (0x40)));
10479 code = NE;
10480 }
10481 else
10482 {
10483 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
10484 code = EQ;
10485 }
10486 break;
10487
10488 case UNORDERED:
10489 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
10490 code = NE;
10491 break;
10492 case ORDERED:
10493 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
10494 code = EQ;
10495 break;
10496
10497 default:
10498 gcc_unreachable ();
10499 }
10500 }
10501
10502 /* Return the test that should be put into the flags user, i.e.
10503 the bcc, scc, or cmov instruction. */
10504 return gen_rtx_fmt_ee (code, VOIDmode,
10505 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10506 const0_rtx);
10507 }
10508
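/* Illustrative sketch (editorial addition, not part of the original sources;
   example_* is a hypothetical helper): the meaning of the AH bits tested in
   the fnstsw path above.  After "fnstsw %ax" the copied status-word bits are
   C0 = 0x01 ("less"), C2 = 0x04 ("unordered") and C3 = 0x40 ("equal"), so
   the constant 0x45 selects all three.  The IEEE GE case, for instance,
   reduces to "(ah & 0x05) == 0", i.e. neither less nor unordered.  */

static int
example_fp_ge_from_ah (unsigned char ah)
{
  return (ah & 0x05) == 0;    /* matches the test $0x05 / je sequence above */
}
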
10509 rtx
10510 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
10511 {
10512 rtx op0, op1, ret;
10513 op0 = ix86_compare_op0;
10514 op1 = ix86_compare_op1;
10515
10516 if (second_test)
10517 *second_test = NULL_RTX;
10518 if (bypass_test)
10519 *bypass_test = NULL_RTX;
10520
10521 if (ix86_compare_emitted)
10522 {
10523 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
10524 ix86_compare_emitted = NULL_RTX;
10525 }
10526 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
10527 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
10528 second_test, bypass_test);
10529 else
10530 ret = ix86_expand_int_compare (code, op0, op1);
10531
10532 return ret;
10533 }
10534
10535 /* Return true if the CODE will result in nontrivial jump sequence. */
10536 bool
10537 ix86_fp_jump_nontrivial_p (enum rtx_code code)
10538 {
10539 enum rtx_code bypass_code, first_code, second_code;
10540 if (!TARGET_CMOVE)
10541 return true;
10542 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10543 return bypass_code != UNKNOWN || second_code != UNKNOWN;
10544 }
10545
10546 void
10547 ix86_expand_branch (enum rtx_code code, rtx label)
10548 {
10549 rtx tmp;
10550
10551 /* If we have emitted a compare insn, go straight to simple.
10552 ix86_expand_compare won't emit anything if ix86_compare_emitted
10553 is non NULL. */
10554 if (ix86_compare_emitted)
10555 goto simple;
10556
10557 switch (GET_MODE (ix86_compare_op0))
10558 {
10559 case QImode:
10560 case HImode:
10561 case SImode:
10562 simple:
10563 tmp = ix86_expand_compare (code, NULL, NULL);
10564 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
10565 gen_rtx_LABEL_REF (VOIDmode, label),
10566 pc_rtx);
10567 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
10568 return;
10569
10570 case SFmode:
10571 case DFmode:
10572 case XFmode:
10573 {
10574 rtvec vec;
10575 int use_fcomi;
10576 enum rtx_code bypass_code, first_code, second_code;
10577
10578 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
10579 &ix86_compare_op1);
10580
10581 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10582
10583 /* Check whether we will use the natural sequence with one jump. If
10584 so, we can expand the jump early. Otherwise delay expansion by
10585 creating a compound insn so as not to confuse the optimizers. */
10586 if (bypass_code == UNKNOWN && second_code == UNKNOWN
10587 && TARGET_CMOVE)
10588 {
10589 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
10590 gen_rtx_LABEL_REF (VOIDmode, label),
10591 pc_rtx, NULL_RTX, NULL_RTX);
10592 }
10593 else
10594 {
10595 tmp = gen_rtx_fmt_ee (code, VOIDmode,
10596 ix86_compare_op0, ix86_compare_op1);
10597 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
10598 gen_rtx_LABEL_REF (VOIDmode, label),
10599 pc_rtx);
10600 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
10601
10602 use_fcomi = ix86_use_fcomi_compare (code);
10603 vec = rtvec_alloc (3 + !use_fcomi);
10604 RTVEC_ELT (vec, 0) = tmp;
10605 RTVEC_ELT (vec, 1)
10606 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
10607 RTVEC_ELT (vec, 2)
10608 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
10609 if (! use_fcomi)
10610 RTVEC_ELT (vec, 3)
10611 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
10612
10613 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
10614 }
10615 return;
10616 }
10617
10618 case DImode:
10619 if (TARGET_64BIT)
10620 goto simple;
10621 case TImode:
10622 /* Expand DImode branch into multiple compare+branch. */
10623 {
10624 rtx lo[2], hi[2], label2;
10625 enum rtx_code code1, code2, code3;
10626 enum machine_mode submode;
10627
10628 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
10629 {
10630 tmp = ix86_compare_op0;
10631 ix86_compare_op0 = ix86_compare_op1;
10632 ix86_compare_op1 = tmp;
10633 code = swap_condition (code);
10634 }
10635 if (GET_MODE (ix86_compare_op0) == DImode)
10636 {
10637 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
10638 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
10639 submode = SImode;
10640 }
10641 else
10642 {
10643 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
10644 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
10645 submode = DImode;
10646 }
10647
10648 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
10649 avoid two branches. This costs one extra insn, so disable when
10650 optimizing for size. */
10651
10652 if ((code == EQ || code == NE)
10653 && (!optimize_size
10654 || hi[1] == const0_rtx || lo[1] == const0_rtx))
10655 {
10656 rtx xor0, xor1;
10657
10658 xor1 = hi[0];
10659 if (hi[1] != const0_rtx)
10660 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
10661 NULL_RTX, 0, OPTAB_WIDEN);
10662
10663 xor0 = lo[0];
10664 if (lo[1] != const0_rtx)
10665 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
10666 NULL_RTX, 0, OPTAB_WIDEN);
10667
10668 tmp = expand_binop (submode, ior_optab, xor1, xor0,
10669 NULL_RTX, 0, OPTAB_WIDEN);
10670
10671 ix86_compare_op0 = tmp;
10672 ix86_compare_op1 = const0_rtx;
10673 ix86_expand_branch (code, label);
10674 return;
10675 }
10676
10677 /* Otherwise, if we are doing a less-than or greater-or-equal comparison,
10678 op1 is a constant and the low word is zero, then we can just
10679 examine the high word. */
10680
10681 if (CONST_INT_P (hi[1]) && lo[1] == const0_rtx)
10682 switch (code)
10683 {
10684 case LT: case LTU: case GE: case GEU:
10685 ix86_compare_op0 = hi[0];
10686 ix86_compare_op1 = hi[1];
10687 ix86_expand_branch (code, label);
10688 return;
10689 default:
10690 break;
10691 }
10692
10693 /* Otherwise, we need two or three jumps. */
10694
10695 label2 = gen_label_rtx ();
10696
10697 code1 = code;
10698 code2 = swap_condition (code);
10699 code3 = unsigned_condition (code);
10700
10701 switch (code)
10702 {
10703 case LT: case GT: case LTU: case GTU:
10704 break;
10705
10706 case LE: code1 = LT; code2 = GT; break;
10707 case GE: code1 = GT; code2 = LT; break;
10708 case LEU: code1 = LTU; code2 = GTU; break;
10709 case GEU: code1 = GTU; code2 = LTU; break;
10710
10711 case EQ: code1 = UNKNOWN; code2 = NE; break;
10712 case NE: code2 = UNKNOWN; break;
10713
10714 default:
10715 gcc_unreachable ();
10716 }
10717
10718 /*
10719 * a < b =>
10720 * if (hi(a) < hi(b)) goto true;
10721 * if (hi(a) > hi(b)) goto false;
10722 * if (lo(a) < lo(b)) goto true;
10723 * false:
10724 */
10725
10726 ix86_compare_op0 = hi[0];
10727 ix86_compare_op1 = hi[1];
10728
10729 if (code1 != UNKNOWN)
10730 ix86_expand_branch (code1, label);
10731 if (code2 != UNKNOWN)
10732 ix86_expand_branch (code2, label2);
10733
10734 ix86_compare_op0 = lo[0];
10735 ix86_compare_op1 = lo[1];
10736 ix86_expand_branch (code3, label);
10737
10738 if (code2 != UNKNOWN)
10739 emit_label (label2);
10740 return;
10741 }
10742
10743 default:
10744 gcc_unreachable ();
10745 }
10746 }
10747
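/* Illustrative sketch (editorial addition, not part of the original sources;
   example_* is a hypothetical helper): the doubleword equality trick used
   above.  A 64-bit EQ/NE on a 32-bit target is reduced to a single branch by
   OR-ing together the XORs of the two halves; the result is zero exactly
   when both halves compare equal.  */

static int
example_di_equal (unsigned int lo0, unsigned int hi0,
                  unsigned int lo1, unsigned int hi1)
{
  return ((hi0 ^ hi1) | (lo0 ^ lo1)) == 0;
}
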
10748 /* Split branch based on floating point condition. */
10749 void
10750 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
10751 rtx target1, rtx target2, rtx tmp, rtx pushed)
10752 {
10753 rtx second, bypass;
10754 rtx label = NULL_RTX;
10755 rtx condition;
10756 int bypass_probability = -1, second_probability = -1, probability = -1;
10757 rtx i;
10758
10759 if (target2 != pc_rtx)
10760 {
10761 rtx tmp = target2;
10762 code = reverse_condition_maybe_unordered (code);
10763 target2 = target1;
10764 target1 = tmp;
10765 }
10766
10767 condition = ix86_expand_fp_compare (code, op1, op2,
10768 tmp, &second, &bypass);
10769
10770 /* Remove pushed operand from stack. */
10771 if (pushed)
10772 ix86_free_from_memory (GET_MODE (pushed));
10773
10774 if (split_branch_probability >= 0)
10775 {
10776 /* Distribute the probabilities across the jumps.
10777 Assume that BYPASS and SECOND always test
10778 for UNORDERED. */
10779 probability = split_branch_probability;
10780
10781 /* A value of 1 is low enough that the probability does not need
10782 to be updated. Later we may run some experiments and see
10783 if unordered values are more frequent in practice. */
10784 if (bypass)
10785 bypass_probability = 1;
10786 if (second)
10787 second_probability = 1;
10788 }
10789 if (bypass != NULL_RTX)
10790 {
10791 label = gen_label_rtx ();
10792 i = emit_jump_insn (gen_rtx_SET
10793 (VOIDmode, pc_rtx,
10794 gen_rtx_IF_THEN_ELSE (VOIDmode,
10795 bypass,
10796 gen_rtx_LABEL_REF (VOIDmode,
10797 label),
10798 pc_rtx)));
10799 if (bypass_probability >= 0)
10800 REG_NOTES (i)
10801 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10802 GEN_INT (bypass_probability),
10803 REG_NOTES (i));
10804 }
10805 i = emit_jump_insn (gen_rtx_SET
10806 (VOIDmode, pc_rtx,
10807 gen_rtx_IF_THEN_ELSE (VOIDmode,
10808 condition, target1, target2)));
10809 if (probability >= 0)
10810 REG_NOTES (i)
10811 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10812 GEN_INT (probability),
10813 REG_NOTES (i));
10814 if (second != NULL_RTX)
10815 {
10816 i = emit_jump_insn (gen_rtx_SET
10817 (VOIDmode, pc_rtx,
10818 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
10819 target2)));
10820 if (second_probability >= 0)
10821 REG_NOTES (i)
10822 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10823 GEN_INT (second_probability),
10824 REG_NOTES (i));
10825 }
10826 if (label != NULL_RTX)
10827 emit_label (label);
10828 }
10829
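/* Expand a setcc of the comparison held in ix86_compare_op0/ix86_compare_op1
   into DEST, which must be a QImode register.  Returns 0 (FAIL) for
   double-word compares, which are handled elsewhere; when the FP comparison
   needs a second or bypass flags test, the partial results are combined
   with an AND or IOR below.  */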
10830 int
10831 ix86_expand_setcc (enum rtx_code code, rtx dest)
10832 {
10833 rtx ret, tmp, tmpreg, equiv;
10834 rtx second_test, bypass_test;
10835
10836 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
10837 return 0; /* FAIL */
10838
10839 gcc_assert (GET_MODE (dest) == QImode);
10840
10841 ret = ix86_expand_compare (code, &second_test, &bypass_test);
10842 PUT_MODE (ret, QImode);
10843
10844 tmp = dest;
10845 tmpreg = dest;
10846
10847 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
10848 if (bypass_test || second_test)
10849 {
10850 rtx test = second_test;
10851 int bypass = 0;
10852 rtx tmp2 = gen_reg_rtx (QImode);
10853 if (bypass_test)
10854 {
10855 gcc_assert (!second_test);
10856 test = bypass_test;
10857 bypass = 1;
10858 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
10859 }
10860 PUT_MODE (test, QImode);
10861 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
10862
10863 if (bypass)
10864 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
10865 else
10866 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
10867 }
10868
10869 /* Attach a REG_EQUAL note describing the comparison result. */
10870 if (ix86_compare_op0 && ix86_compare_op1)
10871 {
10872 equiv = simplify_gen_relational (code, QImode,
10873 GET_MODE (ix86_compare_op0),
10874 ix86_compare_op0, ix86_compare_op1);
10875 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
10876 }
10877
10878 return 1; /* DONE */
10879 }
10880
10881 /* Expand comparison setting or clearing carry flag. Return true when
10882 successful and set pop for the operation. */
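/* Background for the conversions below: after a compare the carry flag is
   set exactly when the first operand is unsigned-below the second, so only
   LTU and GEU can be read straight from CF; the cases below rewrite the
   other comparison codes into one of those two forms where possible.  */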
10883 static bool
10884 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
10885 {
10886 enum machine_mode mode =
10887 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
10888
10889 /* Do not handle double-word compares, which go through a special path.
10890 FP compares are handled below. */
10891 if (mode == (TARGET_64BIT ? TImode : DImode))
10892 return false;
10893 if (FLOAT_MODE_P (mode))
10894 {
10895 rtx second_test = NULL, bypass_test = NULL;
10896 rtx compare_op, compare_seq;
10897
10898 /* Shortcut: the following common codes never translate into carry flag compares. */
10899 if (code == EQ || code == NE || code == UNEQ || code == LTGT
10900 || code == ORDERED || code == UNORDERED)
10901 return false;
10902
10903 /* These comparisons require the zero flag; swap operands so they don't. */
10904 if ((code == GT || code == UNLE || code == LE || code == UNGT)
10905 && !TARGET_IEEE_FP)
10906 {
10907 rtx tmp = op0;
10908 op0 = op1;
10909 op1 = tmp;
10910 code = swap_condition (code);
10911 }
10912
10913 /* Try to expand the comparison and verify that we end up with carry flag
10914 based comparison. This fails to be true only when we decide to expand the
10915 comparison using arithmetic, which is not a common scenario. */
10916 start_sequence ();
10917 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
10918 &second_test, &bypass_test);
10919 compare_seq = get_insns ();
10920 end_sequence ();
10921
10922 if (second_test || bypass_test)
10923 return false;
10924 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10925 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10926 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
10927 else
10928 code = GET_CODE (compare_op);
10929 if (code != LTU && code != GEU)
10930 return false;
10931 emit_insn (compare_seq);
10932 *pop = compare_op;
10933 return true;
10934 }
10935 if (!INTEGRAL_MODE_P (mode))
10936 return false;
10937 switch (code)
10938 {
10939 case LTU:
10940 case GEU:
10941 break;
10942
10943 /* Convert a==0 into (unsigned)a<1. */
10944 case EQ:
10945 case NE:
10946 if (op1 != const0_rtx)
10947 return false;
10948 op1 = const1_rtx;
10949 code = (code == EQ ? LTU : GEU);
10950 break;
10951
10952 /* Convert a>b into b<a or a>=b+1. */
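/* For instance, "a >u 5" becomes "a >=u 6" and "a <=u 5" becomes "a <u 6",
   so the result can be read from the carry flag of a compare against the
   adjusted constant.  */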
10953 case GTU:
10954 case LEU:
10955 if (CONST_INT_P (op1))
10956 {
10957 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
10958 /* Bail out on overflow. We could still swap the operands, but that
10959 would force loading of the constant into a register. */
10960 if (op1 == const0_rtx
10961 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
10962 return false;
10963 code = (code == GTU ? GEU : LTU);
10964 }
10965 else
10966 {
10967 rtx tmp = op1;
10968 op1 = op0;
10969 op0 = tmp;
10970 code = (code == GTU ? LTU : GEU);
10971 }
10972 break;
10973
10974 /* Convert a>=0 into (unsigned)a<0x80000000. */
10975 case LT:
10976 case GE:
10977 if (mode == DImode || op1 != const0_rtx)
10978 return false;
10979 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10980 code = (code == LT ? GEU : LTU);
10981 break;
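/* Convert a<=-1 into (unsigned)a>=0x80000000. */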
10982 case LE:
10983 case GT:
10984 if (mode == DImode || op1 != constm1_rtx)
10985 return false;
10986 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10987 code = (code == LE ? GEU : LTU);
10988 break;
10989
10990 default:
10991 return false;
10992 }
10993 /* Swapping operands may cause constant to appear as first operand. */
10994 if (!nonimmediate_operand (op0, VOIDmode))
10995 {
10996 if (no_new_pseudos)
10997 return false;
10998 op0 = force_reg (mode, op0);
10999 }
11000 ix86_compare_op0 = op0;
11001 ix86_compare_op1 = op1;
11002 *pop = ix86_expand_compare (code, NULL, NULL);
11003 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
11004 return true;
11005 }
11006
11007 int
11008 ix86_expand_int_movcc (rtx operands[])
11009 {
11010 enum rtx_code code = GET_CODE (operands[1]), compare_code;
11011 rtx compare_seq, compare_op;
11012 rtx second_test, bypass_test;
11013 enum machine_mode mode = GET_MODE (operands[0]);
11014 bool sign_bit_compare_p = false;
11015
11016 start_sequence ();
11017 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11018 compare_seq = get_insns ();
11019 end_sequence ();
11020
11021 compare_code = GET_CODE (compare_op);
11022
11023 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
11024 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
11025 sign_bit_compare_p = true;
11026
11027 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
11028 HImode insns, we'd be swallowed in word prefix ops. */
11029
11030 if ((mode != HImode || TARGET_FAST_PREFIX)
11031 && (mode != (TARGET_64BIT ? TImode : DImode))
11032 && CONST_INT_P (operands[2])
11033 && CONST_INT_P (operands[3]))
11034 {
11035 rtx out = operands[0];
11036 HOST_WIDE_INT ct = INTVAL (operands[2]);
11037 HOST_WIDE_INT cf = INTVAL (operands[3]);
11038 HOST_WIDE_INT diff;
11039
11040 diff = ct - cf;
11041 /* Sign bit compares are better done using shifts than by using
11042 sbb. */
11043 if (sign_bit_compare_p
11044 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11045 ix86_compare_op1, &compare_op))
11046 {
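/* Illustrative overview: once the comparison has been massaged into a
   carry-flag (LTU/GEU) or sign-bit form, the constant select is computed
   branch-free, roughly as
     cmp op0,op1
     sbb dest,dest    (dest = all ones if the carry was set, else 0)
   followed by the add/or/not/and fix-ups chosen below from diff = ct - cf.  */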
11047 /* Detect overlap between destination and compare sources. */
11048 rtx tmp = out;
11049
11050 if (!sign_bit_compare_p)
11051 {
11052 bool fpcmp = false;
11053
11054 compare_code = GET_CODE (compare_op);
11055
11056 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11057 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11058 {
11059 fpcmp = true;
11060 compare_code = ix86_fp_compare_code_to_integer (compare_code);
11061 }
11062
11063 /* To simplify the rest of the code, restrict to the GEU case. */
11064 if (compare_code == LTU)
11065 {
11066 HOST_WIDE_INT tmp = ct;
11067 ct = cf;
11068 cf = tmp;
11069 compare_code = reverse_condition (compare_code);
11070 code = reverse_condition (code);
11071 }
11072 else
11073 {
11074 if (fpcmp)
11075 PUT_CODE (compare_op,
11076 reverse_condition_maybe_unordered
11077 (GET_CODE (compare_op)));
11078 else
11079 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11080 }
11081 diff = ct - cf;
11082
11083 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
11084 || reg_overlap_mentioned_p (out, ix86_compare_op1))
11085 tmp = gen_reg_rtx (mode);
11086
11087 if (mode == DImode)
11088 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
11089 else
11090 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
11091 }
11092 else
11093 {
11094 if (code == GT || code == GE)
11095 code = reverse_condition (code);
11096 else
11097 {
11098 HOST_WIDE_INT tmp = ct;
11099 ct = cf;
11100 cf = tmp;
11101 diff = ct - cf;
11102 }
11103 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
11104 ix86_compare_op1, VOIDmode, 0, -1);
11105 }
11106
11107 if (diff == 1)
11108 {
11109 /*
11110 * cmpl op0,op1
11111 * sbbl dest,dest
11112 * [addl dest, ct]
11113 *
11114 * Size 5 - 8.
11115 */
11116 if (ct)
11117 tmp = expand_simple_binop (mode, PLUS,
11118 tmp, GEN_INT (ct),
11119 copy_rtx (tmp), 1, OPTAB_DIRECT);
11120 }
11121 else if (cf == -1)
11122 {
11123 /*
11124 * cmpl op0,op1
11125 * sbbl dest,dest
11126 * orl $ct, dest
11127 *
11128 * Size 8.
11129 */
11130 tmp = expand_simple_binop (mode, IOR,
11131 tmp, GEN_INT (ct),
11132 copy_rtx (tmp), 1, OPTAB_DIRECT);
11133 }
11134 else if (diff == -1 && ct)
11135 {
11136 /*
11137 * cmpl op0,op1
11138 * sbbl dest,dest
11139 * notl dest
11140 * [addl dest, cf]
11141 *
11142 * Size 8 - 11.
11143 */
11144 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
11145 if (cf)
11146 tmp = expand_simple_binop (mode, PLUS,
11147 copy_rtx (tmp), GEN_INT (cf),
11148 copy_rtx (tmp), 1, OPTAB_DIRECT);
11149 }
11150 else
11151 {
11152 /*
11153 * cmpl op0,op1
11154 * sbbl dest,dest
11155 * [notl dest]
11156 * andl cf - ct, dest
11157 * [addl dest, ct]
11158 *
11159 * Size 8 - 11.
11160 */
11161
11162 if (cf == 0)
11163 {
11164 cf = ct;
11165 ct = 0;
11166 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
11167 }
11168
11169 tmp = expand_simple_binop (mode, AND,
11170 copy_rtx (tmp),
11171 gen_int_mode (cf - ct, mode),
11172 copy_rtx (tmp), 1, OPTAB_DIRECT);
11173 if (ct)
11174 tmp = expand_simple_binop (mode, PLUS,
11175 copy_rtx (tmp), GEN_INT (ct),
11176 copy_rtx (tmp), 1, OPTAB_DIRECT);
11177 }
11178
11179 if (!rtx_equal_p (tmp, out))
11180 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
11181
11182 return 1; /* DONE */
11183 }
11184
11185 if (diff < 0)
11186 {
11187 HOST_WIDE_INT tmp;
11188 tmp = ct, ct = cf, cf = tmp;
11189 diff = -diff;
11190 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
11191 {
11192 /* We may be reversing an unordered compare to a normal compare, which
11193 is not valid in general (we may convert a non-trapping condition
11194 into a trapping one); however, on i386 we currently emit all
11195 comparisons unordered. */
11196 compare_code = reverse_condition_maybe_unordered (compare_code);
11197 code = reverse_condition_maybe_unordered (code);
11198 }
11199 else
11200 {
11201 compare_code = reverse_condition (compare_code);
11202 code = reverse_condition (code);
11203 }
11204 }
11205
11206 compare_code = UNKNOWN;
11207 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
11208 && CONST_INT_P (ix86_compare_op1))
11209 {
11210 if (ix86_compare_op1 == const0_rtx
11211 && (code == LT || code == GE))
11212 compare_code = code;
11213 else if (ix86_compare_op1 == constm1_rtx)
11214 {
11215 if (code == LE)
11216 compare_code = LT;
11217 else if (code == GT)
11218 compare_code = GE;
11219 }
11220 }
11221
11222 /* Optimize dest = (op0 < 0) ? -1 : cf. */
11223 if (compare_code != UNKNOWN
11224 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
11225 && (cf == -1 || ct == -1))
11226 {
11227 /* If lea code below could be used, only optimize
11228 if it results in a 2 insn sequence. */
11229
11230 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
11231 || diff == 3 || diff == 5 || diff == 9)
11232 || (compare_code == LT && ct == -1)
11233 || (compare_code == GE && cf == -1))
11234 {
11235 /*
11236 * notl op1 (if necessary)
11237 * sarl $31, op1
11238 * orl cf, op1
11239 */
11240 if (ct != -1)
11241 {
11242 cf = ct;
11243 ct = -1;
11244 code = reverse_condition (code);
11245 }
11246
11247 out = emit_store_flag (out, code, ix86_compare_op0,
11248 ix86_compare_op1, VOIDmode, 0, -1);
11249
11250 out = expand_simple_binop (mode, IOR,
11251 out, GEN_INT (cf),
11252 out, 1, OPTAB_DIRECT);
11253 if (out != operands[0])
11254 emit_move_insn (operands[0], out);
11255
11256 return 1; /* DONE */
11257 }
11258 }
11259
11260
11261 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
11262 || diff == 3 || diff == 5 || diff == 9)
11263 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
11264 && (mode != DImode
11265 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
11266 {
11267 /*
11268 * xorl dest,dest
11269 * cmpl op1,op2
11270 * setcc dest
11271 * lea cf(dest*(ct-cf)),dest
11272 *
11273 * Size 14.
11274 *
11275 * This also catches the degenerate setcc-only case.
11276 */
11277
11278 rtx tmp;
11279 int nops;
11280
11281 out = emit_store_flag (out, code, ix86_compare_op0,
11282 ix86_compare_op1, VOIDmode, 0, 1);
11283
11284 nops = 0;
11285 /* On x86_64 the lea instruction operates on Pmode, so we need
11286 to get the arithmetic done in the proper mode to match. */
11287 if (diff == 1)
11288 tmp = copy_rtx (out);
11289 else
11290 {
11291 rtx out1;
11292 out1 = copy_rtx (out);
11293 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
11294 nops++;
11295 if (diff & 1)
11296 {
11297 tmp = gen_rtx_PLUS (mode, tmp, out1);
11298 nops++;
11299 }
11300 }
11301 if (cf != 0)
11302 {
11303 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
11304 nops++;
11305 }
11306 if (!rtx_equal_p (tmp, out))
11307 {
11308 if (nops == 1)
11309 out = force_operand (tmp, copy_rtx (out));
11310 else
11311 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
11312 }
11313 if (!rtx_equal_p (out, operands[0]))
11314 emit_move_insn (operands[0], copy_rtx (out));
11315
11316 return 1; /* DONE */
11317 }
11318
11319 /*
11320 * General case: Jumpful:
11321 * xorl dest,dest cmpl op1, op2
11322 * cmpl op1, op2 movl ct, dest
11323 * setcc dest jcc 1f
11324 * decl dest movl cf, dest
11325 * andl (cf-ct),dest 1:
11326 * addl ct,dest
11327 *
11328 * Size 20. Size 14.
11329 *
11330 * This is reasonably steep, but branch mispredict costs are
11331 * high on modern cpus, so consider failing only if optimizing
11332 * for space.
11333 */
11334
11335 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
11336 && BRANCH_COST >= 2)
11337 {
11338 if (cf == 0)
11339 {
11340 cf = ct;
11341 ct = 0;
11342 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
11343 /* We may be reversing an unordered compare to a normal compare,
11344 which is not valid in general (we may convert a non-trapping
11345 condition into a trapping one); however, on i386 we currently
11346 emit all comparisons unordered. */
11347 code = reverse_condition_maybe_unordered (code);
11348 else
11349 {
11350 code = reverse_condition (code);
11351 if (compare_code != UNKNOWN)
11352 compare_code = reverse_condition (compare_code);
11353 }
11354 }
11355
11356 if (compare_code != UNKNOWN)
11357 {
11358 /* notl op1 (if needed)
11359 sarl $31, op1
11360 andl (cf-ct), op1
11361 addl ct, op1
11362
11363 For x < 0 (resp. x <= -1) there will be no notl,
11364 so if possible swap the constants to get rid of the
11365 complement.
11366 True/false will be -1/0 while code below (store flag
11367 followed by decrement) is 0/-1, so the constants need
11368 to be exchanged once more. */
11369
11370 if (compare_code == GE || !cf)
11371 {
11372 code = reverse_condition (code);
11373 compare_code = LT;
11374 }
11375 else
11376 {
11377 HOST_WIDE_INT tmp = cf;
11378 cf = ct;
11379 ct = tmp;
11380 }
11381
11382 out = emit_store_flag (out, code, ix86_compare_op0,
11383 ix86_compare_op1, VOIDmode, 0, -1);
11384 }
11385 else
11386 {
11387 out = emit_store_flag (out, code, ix86_compare_op0,
11388 ix86_compare_op1, VOIDmode, 0, 1);
11389
11390 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
11391 copy_rtx (out), 1, OPTAB_DIRECT);
11392 }
11393
11394 out = expand_simple_binop (mode, AND, copy_rtx (out),
11395 gen_int_mode (cf - ct, mode),
11396 copy_rtx (out), 1, OPTAB_DIRECT);
11397 if (ct)
11398 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
11399 copy_rtx (out), 1, OPTAB_DIRECT);
11400 if (!rtx_equal_p (out, operands[0]))
11401 emit_move_insn (operands[0], copy_rtx (out));
11402
11403 return 1; /* DONE */
11404 }
11405 }
11406
11407 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
11408 {
11409 /* Try a few things more with specific constants and a variable. */
11410
11411 optab op;
11412 rtx var, orig_out, out, tmp;
11413
11414 if (BRANCH_COST <= 2)
11415 return 0; /* FAIL */
11416
11417 /* If one of the two operands is an interesting constant, load a 0/-1
11418 value via the code above and mask in the variable with a logical operation. */
11419
11420 if (CONST_INT_P (operands[2]))
11421 {
11422 var = operands[3];
11423 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
11424 operands[3] = constm1_rtx, op = and_optab;
11425 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
11426 operands[3] = const0_rtx, op = ior_optab;
11427 else
11428 return 0; /* FAIL */
11429 }
11430 else if (CONST_INT_P (operands[3]))
11431 {
11432 var = operands[2];
11433 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
11434 operands[2] = constm1_rtx, op = and_optab;
11435 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
11436 operands[2] = const0_rtx, op = ior_optab;
11437 else
11438 return 0; /* FAIL */
11439 }
11440 else
11441 return 0; /* FAIL */
11442
11443 orig_out = operands[0];
11444 tmp = gen_reg_rtx (mode);
11445 operands[0] = tmp;
11446
11447 /* Recurse to get the constant loaded. */
11448 if (ix86_expand_int_movcc (operands) == 0)
11449 return 0; /* FAIL */
11450
11451 /* Mask in the interesting variable. */
11452 out = expand_binop (mode, op, var, tmp, orig_out, 0,
11453 OPTAB_WIDEN);
11454 if (!rtx_equal_p (out, orig_out))
11455 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
11456
11457 return 1; /* DONE */
11458 }
11459
11460 /*
11461 * For comparison with above,
11462 *
11463 * movl cf,dest
11464 * movl ct,tmp
11465 * cmpl op1,op2
11466 * cmovcc tmp,dest
11467 *
11468 * Size 15.
11469 */
11470
11471 if (! nonimmediate_operand (operands[2], mode))
11472 operands[2] = force_reg (mode, operands[2]);
11473 if (! nonimmediate_operand (operands[3], mode))
11474 operands[3] = force_reg (mode, operands[3]);
11475
11476 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
11477 {
11478 rtx tmp = gen_reg_rtx (mode);
11479 emit_move_insn (tmp, operands[3]);
11480 operands[3] = tmp;
11481 }
11482 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
11483 {
11484 rtx tmp = gen_reg_rtx (mode);
11485 emit_move_insn (tmp, operands[2]);
11486 operands[2] = tmp;
11487 }
11488
11489 if (! register_operand (operands[2], VOIDmode)
11490 && (mode == QImode
11491 || ! register_operand (operands[3], VOIDmode)))
11492 operands[2] = force_reg (mode, operands[2]);
11493
11494 if (mode == QImode
11495 && ! register_operand (operands[3], VOIDmode))
11496 operands[3] = force_reg (mode, operands[3]);
11497
11498 emit_insn (compare_seq);
11499 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11500 gen_rtx_IF_THEN_ELSE (mode,
11501 compare_op, operands[2],
11502 operands[3])));
11503 if (bypass_test)
11504 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
11505 gen_rtx_IF_THEN_ELSE (mode,
11506 bypass_test,
11507 copy_rtx (operands[3]),
11508 copy_rtx (operands[0]))));
11509 if (second_test)
11510 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
11511 gen_rtx_IF_THEN_ELSE (mode,
11512 second_test,
11513 copy_rtx (operands[2]),
11514 copy_rtx (operands[0]))));
11515
11516 return 1; /* DONE */
11517 }
11518
11519 /* Swap, force into registers, or otherwise massage the two operands
11520 to an sse comparison with a mask result. Thus we differ a bit from
11521 ix86_prepare_fp_compare_args which expects to produce a flags result.
11522
11523 The DEST operand exists to help determine whether to commute commutative
11524 operators. The POP0/POP1 operands are updated in place. The new
11525 comparison code is returned, or UNKNOWN if not implementable. */
11526
11527 static enum rtx_code
11528 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
11529 rtx *pop0, rtx *pop1)
11530 {
11531 rtx tmp;
11532
11533 switch (code)
11534 {
11535 case LTGT:
11536 case UNEQ:
11537 /* We have no LTGT as an operator. We could implement it with
11538 NE & ORDERED, but this requires an extra temporary. It's
11539 not clear that it's worth it. */
11540 return UNKNOWN;
11541
11542 case LT:
11543 case LE:
11544 case UNGT:
11545 case UNGE:
11546 /* These are supported directly. */
11547 break;
11548
11549 case EQ:
11550 case NE:
11551 case UNORDERED:
11552 case ORDERED:
11553 /* For commutative operators, try to canonicalize the destination
11554 operand to be first in the comparison - this helps reload to
11555 avoid extra moves. */
11556 if (!dest || !rtx_equal_p (dest, *pop1))
11557 break;
11558 /* FALLTHRU */
11559
11560 case GE:
11561 case GT:
11562 case UNLE:
11563 case UNLT:
11564 /* These are not supported directly. Swap the comparison operands
11565 to transform into something that is supported. */
11566 tmp = *pop0;
11567 *pop0 = *pop1;
11568 *pop1 = tmp;
11569 code = swap_condition (code);
11570 break;
11571
11572 default:
11573 gcc_unreachable ();
11574 }
11575
11576 return code;
11577 }
11578
11579 /* Detect conditional moves that exactly match min/max operational
11580 semantics. Note that this is IEEE safe, as long as we don't
11581 interchange the operands.
11582
11583 Returns FALSE if this conditional move doesn't match a MIN/MAX,
11584 and TRUE if the operation is successful and instructions are emitted. */
11585
11586 static bool
11587 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
11588 rtx cmp_op1, rtx if_true, rtx if_false)
11589 {
11590 enum machine_mode mode;
11591 bool is_min;
11592 rtx tmp;
11593
11594 if (code == LT)
11595 ;
11596 else if (code == UNGE)
11597 {
11598 tmp = if_true;
11599 if_true = if_false;
11600 if_false = tmp;
11601 }
11602 else
11603 return false;
11604
11605 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
11606 is_min = true;
11607 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
11608 is_min = false;
11609 else
11610 return false;
11611
11612 mode = GET_MODE (dest);
11613
11614 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
11615 but MODE may be a vector mode and thus not appropriate. */
11616 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
11617 {
11618 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
11619 rtvec v;
11620
11621 if_true = force_reg (mode, if_true);
11622 v = gen_rtvec (2, if_true, if_false);
11623 tmp = gen_rtx_UNSPEC (mode, v, u);
11624 }
11625 else
11626 {
11627 code = is_min ? SMIN : SMAX;
11628 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
11629 }
11630
11631 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
11632 return true;
11633 }
11634
11635 /* Expand an sse vector comparison. Return the register with the result. */
11636
11637 static rtx
11638 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
11639 rtx op_true, rtx op_false)
11640 {
11641 enum machine_mode mode = GET_MODE (dest);
11642 rtx x;
11643
11644 cmp_op0 = force_reg (mode, cmp_op0);
11645 if (!nonimmediate_operand (cmp_op1, mode))
11646 cmp_op1 = force_reg (mode, cmp_op1);
11647
11648 if (optimize
11649 || reg_overlap_mentioned_p (dest, op_true)
11650 || reg_overlap_mentioned_p (dest, op_false))
11651 dest = gen_reg_rtx (mode);
11652
11653 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
11654 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11655
11656 return dest;
11657 }
11658
11659 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
11660 operations. This is used for both scalar and vector conditional moves. */
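/* The general case below is the classic mask select
     dest = (cmp & op_true) | (~cmp & op_false)
   assuming CMP is an element-wise all-zeros/all-ones mask, as produced by
   ix86_expand_sse_cmp; the two special cases skip the IOR when one arm is
   zero.  */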
11661
11662 static void
11663 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
11664 {
11665 enum machine_mode mode = GET_MODE (dest);
11666 rtx t2, t3, x;
11667
11668 if (op_false == CONST0_RTX (mode))
11669 {
11670 op_true = force_reg (mode, op_true);
11671 x = gen_rtx_AND (mode, cmp, op_true);
11672 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11673 }
11674 else if (op_true == CONST0_RTX (mode))
11675 {
11676 op_false = force_reg (mode, op_false);
11677 x = gen_rtx_NOT (mode, cmp);
11678 x = gen_rtx_AND (mode, x, op_false);
11679 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11680 }
11681 else
11682 {
11683 op_true = force_reg (mode, op_true);
11684 op_false = force_reg (mode, op_false);
11685
11686 t2 = gen_reg_rtx (mode);
11687 if (optimize)
11688 t3 = gen_reg_rtx (mode);
11689 else
11690 t3 = dest;
11691
11692 x = gen_rtx_AND (mode, op_true, cmp);
11693 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
11694
11695 x = gen_rtx_NOT (mode, cmp);
11696 x = gen_rtx_AND (mode, x, op_false);
11697 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
11698
11699 x = gen_rtx_IOR (mode, t3, t2);
11700 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11701 }
11702 }
11703
11704 /* Expand a floating-point conditional move. Return true if successful. */
11705
11706 int
11707 ix86_expand_fp_movcc (rtx operands[])
11708 {
11709 enum machine_mode mode = GET_MODE (operands[0]);
11710 enum rtx_code code = GET_CODE (operands[1]);
11711 rtx tmp, compare_op, second_test, bypass_test;
11712
11713 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
11714 {
11715 enum machine_mode cmode;
11716
11717 /* Since we've no cmove for sse registers, don't force bad register
11718 allocation just to gain access to it. Deny movcc when the
11719 comparison mode doesn't match the move mode. */
11720 cmode = GET_MODE (ix86_compare_op0);
11721 if (cmode == VOIDmode)
11722 cmode = GET_MODE (ix86_compare_op1);
11723 if (cmode != mode)
11724 return 0;
11725
11726 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
11727 &ix86_compare_op0,
11728 &ix86_compare_op1);
11729 if (code == UNKNOWN)
11730 return 0;
11731
11732 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
11733 ix86_compare_op1, operands[2],
11734 operands[3]))
11735 return 1;
11736
11737 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
11738 ix86_compare_op1, operands[2], operands[3]);
11739 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
11740 return 1;
11741 }
11742
11743 /* The floating point conditional move instructions don't directly
11744 support conditions resulting from a signed integer comparison. */
11745
11746 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11747
11751 if (!fcmov_comparison_operator (compare_op, VOIDmode))
11752 {
11753 gcc_assert (!second_test && !bypass_test);
11754 tmp = gen_reg_rtx (QImode);
11755 ix86_expand_setcc (code, tmp);
11756 code = NE;
11757 ix86_compare_op0 = tmp;
11758 ix86_compare_op1 = const0_rtx;
11759 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11760 }
11761 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
11762 {
11763 tmp = gen_reg_rtx (mode);
11764 emit_move_insn (tmp, operands[3]);
11765 operands[3] = tmp;
11766 }
11767 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
11768 {
11769 tmp = gen_reg_rtx (mode);
11770 emit_move_insn (tmp, operands[2]);
11771 operands[2] = tmp;
11772 }
11773
11774 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11775 gen_rtx_IF_THEN_ELSE (mode, compare_op,
11776 operands[2], operands[3])));
11777 if (bypass_test)
11778 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11779 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
11780 operands[3], operands[0])));
11781 if (second_test)
11782 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11783 gen_rtx_IF_THEN_ELSE (mode, second_test,
11784 operands[2], operands[0])));
11785
11786 return 1;
11787 }
11788
11789 /* Expand a floating-point vector conditional move; a vcond operation
11790 rather than a movcc operation. */
11791
11792 bool
11793 ix86_expand_fp_vcond (rtx operands[])
11794 {
11795 enum rtx_code code = GET_CODE (operands[3]);
11796 rtx cmp;
11797
11798 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
11799 &operands[4], &operands[5]);
11800 if (code == UNKNOWN)
11801 return false;
11802
11803 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
11804 operands[5], operands[1], operands[2]))
11805 return true;
11806
11807 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
11808 operands[1], operands[2]);
11809 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
11810 return true;
11811 }
11812
11813 /* Expand a signed integral vector conditional move. */
11814
11815 bool
11816 ix86_expand_int_vcond (rtx operands[])
11817 {
11818 enum machine_mode mode = GET_MODE (operands[0]);
11819 enum rtx_code code = GET_CODE (operands[3]);
11820 bool negate = false;
11821 rtx x, cop0, cop1;
11822
11823 cop0 = operands[4];
11824 cop1 = operands[5];
11825
11826 /* Canonicalize the comparison to EQ, GT, GTU. */
11827 switch (code)
11828 {
11829 case EQ:
11830 case GT:
11831 case GTU:
11832 break;
11833
11834 case NE:
11835 case LE:
11836 case LEU:
11837 code = reverse_condition (code);
11838 negate = true;
11839 break;
11840
11841 case GE:
11842 case GEU:
11843 code = reverse_condition (code);
11844 negate = true;
11845 /* FALLTHRU */
11846
11847 case LT:
11848 case LTU:
11849 code = swap_condition (code);
11850 x = cop0, cop0 = cop1, cop1 = x;
11851 break;
11852
11853 default:
11854 gcc_unreachable ();
11855 }
11856
11857 /* Unsigned parallel compare is not supported by the hardware. Play some
11858 tricks to turn this into a signed comparison against 0. */
11859 if (code == GTU)
11860 {
11861 cop0 = force_reg (mode, cop0);
11862
11863 switch (mode)
11864 {
11865 case V4SImode:
11866 {
11867 rtx t1, t2, mask;
11868
11869 /* Perform a parallel modulo subtraction. */
11870 t1 = gen_reg_rtx (mode);
11871 emit_insn (gen_subv4si3 (t1, cop0, cop1));
11872
11873 /* Extract the original sign bit of op0. */
11874 mask = GEN_INT (-0x80000000);
11875 mask = gen_rtx_CONST_VECTOR (mode,
11876 gen_rtvec (4, mask, mask, mask, mask));
11877 mask = force_reg (mode, mask);
11878 t2 = gen_reg_rtx (mode);
11879 emit_insn (gen_andv4si3 (t2, cop0, mask));
11880
11881 /* XOR it back into the result of the subtraction. This results
11882 in the sign bit set iff we saw unsigned underflow. */
11883 x = gen_reg_rtx (mode);
11884 emit_insn (gen_xorv4si3 (x, t1, t2));
11885
11886 code = GT;
11887 }
11888 break;
11889
11890 case V16QImode:
11891 case V8HImode:
11892 /* Perform a parallel unsigned saturating subtraction. */
11893 x = gen_reg_rtx (mode);
11894 emit_insn (gen_rtx_SET (VOIDmode, x,
11895 gen_rtx_US_MINUS (mode, cop0, cop1)));
11896
11897 code = EQ;
11898 negate = !negate;
11899 break;
11900
11901 default:
11902 gcc_unreachable ();
11903 }
11904
11905 cop0 = x;
11906 cop1 = CONST0_RTX (mode);
11907 }
11908
11909 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
11910 operands[1+negate], operands[2-negate]);
11911
11912 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
11913 operands[2-negate]);
11914 return true;
11915 }
11916
11917 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
11918 true if we should do zero extension, else sign extension. HIGH_P is
11919 true if we want the N/2 high elements, else the low elements. */
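/* For the signed case the extension vector SE is built by comparing
   0 > OP[1] element-wise, which yields an all-ones lane exactly where the
   corresponding element of OP[1] is negative, so interleaving OP[1] with SE
   reproduces sign extension; for the unsigned case SE is simply zero.  */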
11920
11921 void
11922 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
11923 {
11924 enum machine_mode imode = GET_MODE (operands[1]);
11925 rtx (*unpack)(rtx, rtx, rtx);
11926 rtx se, dest;
11927
11928 switch (imode)
11929 {
11930 case V16QImode:
11931 if (high_p)
11932 unpack = gen_vec_interleave_highv16qi;
11933 else
11934 unpack = gen_vec_interleave_lowv16qi;
11935 break;
11936 case V8HImode:
11937 if (high_p)
11938 unpack = gen_vec_interleave_highv8hi;
11939 else
11940 unpack = gen_vec_interleave_lowv8hi;
11941 break;
11942 case V4SImode:
11943 if (high_p)
11944 unpack = gen_vec_interleave_highv4si;
11945 else
11946 unpack = gen_vec_interleave_lowv4si;
11947 break;
11948 default:
11949 gcc_unreachable ();
11950 }
11951
11952 dest = gen_lowpart (imode, operands[0]);
11953
11954 if (unsigned_p)
11955 se = force_reg (imode, CONST0_RTX (imode));
11956 else
11957 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
11958 operands[1], pc_rtx, pc_rtx);
11959
11960 emit_insn (unpack (dest, operands[1], se));
11961 }
11962
11963 /* Expand conditional increment or decrement using adc/sbb instructions.
11964 The default case using setcc followed by the conditional move can be
11965 done by generic code. */
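/* A minimal sketch of the idea: a conditional increment such as
     x = y + (a <u b ? 1 : 0)
   can be emitted as a compare (which leaves the carry flag equal to the
   unsigned less-than result) followed by "adc $0" into the destination,
   so no setcc or branch is needed; the sbb form covers the decrement and
   inverted-carry cases handled below.  */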
11966 int
11967 ix86_expand_int_addcc (rtx operands[])
11968 {
11969 enum rtx_code code = GET_CODE (operands[1]);
11970 rtx compare_op;
11971 rtx val = const0_rtx;
11972 bool fpcmp = false;
11973 enum machine_mode mode = GET_MODE (operands[0]);
11974
11975 if (operands[3] != const1_rtx
11976 && operands[3] != constm1_rtx)
11977 return 0;
11978 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11979 ix86_compare_op1, &compare_op))
11980 return 0;
11981 code = GET_CODE (compare_op);
11982
11983 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11984 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11985 {
11986 fpcmp = true;
11987 code = ix86_fp_compare_code_to_integer (code);
11988 }
11989
11990 if (code != LTU)
11991 {
11992 val = constm1_rtx;
11993 if (fpcmp)
11994 PUT_CODE (compare_op,
11995 reverse_condition_maybe_unordered
11996 (GET_CODE (compare_op)));
11997 else
11998 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11999 }
12000 PUT_MODE (compare_op, mode);
12001
12002 /* Construct either adc or sbb insn. */
12003 if ((code == LTU) == (operands[3] == constm1_rtx))
12004 {
12005 switch (GET_MODE (operands[0]))
12006 {
12007 case QImode:
12008 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
12009 break;
12010 case HImode:
12011 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
12012 break;
12013 case SImode:
12014 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
12015 break;
12016 case DImode:
12017 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
12018 break;
12019 default:
12020 gcc_unreachable ();
12021 }
12022 }
12023 else
12024 {
12025 switch (GET_MODE (operands[0]))
12026 {
12027 case QImode:
12028 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
12029 break;
12030 case HImode:
12031 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
12032 break;
12033 case SImode:
12034 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
12035 break;
12036 case DImode:
12037 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
12038 break;
12039 default:
12040 gcc_unreachable ();
12041 }
12042 }
12043 return 1; /* DONE */
12044 }
12045
12046
12047 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
12048 works for floating point parameters and non-offsettable memories.
12049 For pushes, it returns just stack offsets; the values will be saved
12050 in the right order. At most three parts are generated. */
12051
12052 static int
12053 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
12054 {
12055 int size;
12056
12057 if (!TARGET_64BIT)
12058 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
12059 else
12060 size = (GET_MODE_SIZE (mode) + 4) / 8;
12061
12062 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
12063 gcc_assert (size >= 2 && size <= 3);
12064
12065 /* Optimize constant pool references into immediates. This is used by fp
12066 moves, which force all constants to memory to allow combining. */
12067 if (MEM_P (operand) && MEM_READONLY_P (operand))
12068 {
12069 rtx tmp = maybe_get_pool_constant (operand);
12070 if (tmp)
12071 operand = tmp;
12072 }
12073
12074 if (MEM_P (operand) && !offsettable_memref_p (operand))
12075 {
12076 /* The only non-offsettable memories we handle are pushes. */
12077 int ok = push_operand (operand, VOIDmode);
12078
12079 gcc_assert (ok);
12080
12081 operand = copy_rtx (operand);
12082 PUT_MODE (operand, Pmode);
12083 parts[0] = parts[1] = parts[2] = operand;
12084 return size;
12085 }
12086
12087 if (GET_CODE (operand) == CONST_VECTOR)
12088 {
12089 enum machine_mode imode = int_mode_for_mode (mode);
12090 /* Caution: if we looked through a constant pool memory above,
12091 the operand may actually have a different mode now. That's
12092 ok, since we want to pun this all the way back to an integer. */
12093 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
12094 gcc_assert (operand != NULL);
12095 mode = imode;
12096 }
12097
12098 if (!TARGET_64BIT)
12099 {
12100 if (mode == DImode)
12101 split_di (&operand, 1, &parts[0], &parts[1]);
12102 else
12103 {
12104 if (REG_P (operand))
12105 {
12106 gcc_assert (reload_completed);
12107 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
12108 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
12109 if (size == 3)
12110 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
12111 }
12112 else if (offsettable_memref_p (operand))
12113 {
12114 operand = adjust_address (operand, SImode, 0);
12115 parts[0] = operand;
12116 parts[1] = adjust_address (operand, SImode, 4);
12117 if (size == 3)
12118 parts[2] = adjust_address (operand, SImode, 8);
12119 }
12120 else if (GET_CODE (operand) == CONST_DOUBLE)
12121 {
12122 REAL_VALUE_TYPE r;
12123 long l[4];
12124
12125 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
12126 switch (mode)
12127 {
12128 case XFmode:
12129 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
12130 parts[2] = gen_int_mode (l[2], SImode);
12131 break;
12132 case DFmode:
12133 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
12134 break;
12135 default:
12136 gcc_unreachable ();
12137 }
12138 parts[1] = gen_int_mode (l[1], SImode);
12139 parts[0] = gen_int_mode (l[0], SImode);
12140 }
12141 else
12142 gcc_unreachable ();
12143 }
12144 }
12145 else
12146 {
12147 if (mode == TImode)
12148 split_ti (&operand, 1, &parts[0], &parts[1]);
12149 if (mode == XFmode || mode == TFmode)
12150 {
12151 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
12152 if (REG_P (operand))
12153 {
12154 gcc_assert (reload_completed);
12155 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
12156 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
12157 }
12158 else if (offsettable_memref_p (operand))
12159 {
12160 operand = adjust_address (operand, DImode, 0);
12161 parts[0] = operand;
12162 parts[1] = adjust_address (operand, upper_mode, 8);
12163 }
12164 else if (GET_CODE (operand) == CONST_DOUBLE)
12165 {
12166 REAL_VALUE_TYPE r;
12167 long l[4];
12168
12169 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
12170 real_to_target (l, &r, mode);
12171
12172 /* Do not use shift by 32 to avoid warning on 32bit systems. */
12173 if (HOST_BITS_PER_WIDE_INT >= 64)
12174 parts[0]
12175 = gen_int_mode
12176 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
12177 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
12178 DImode);
12179 else
12180 parts[0] = immed_double_const (l[0], l[1], DImode);
12181
12182 if (upper_mode == SImode)
12183 parts[1] = gen_int_mode (l[2], SImode);
12184 else if (HOST_BITS_PER_WIDE_INT >= 64)
12185 parts[1]
12186 = gen_int_mode
12187 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
12188 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
12189 DImode);
12190 else
12191 parts[1] = immed_double_const (l[2], l[3], DImode);
12192 }
12193 else
12194 gcc_unreachable ();
12195 }
12196 }
12197
12198 return size;
12199 }
12200
12201 /* Emit insns to perform a move or push of DI, DF, and XF values.
12202 Operands 2-4 contain the destination parts in the correct order;
12203 operands 5-7 contain the source parts. */
12205
12206 void
12207 ix86_split_long_move (rtx operands[])
12208 {
12209 rtx part[2][3];
12210 int nparts;
12211 int push = 0;
12212 int collisions = 0;
12213 enum machine_mode mode = GET_MODE (operands[0]);
12214
12215 /* The DFmode expanders may ask us to move a double.
12216 For a 64-bit target this is a single move. By hiding the fact
12217 here we simplify the i386.md splitters. */
12218 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
12219 {
12220 /* Optimize constant pool references into immediates. This is used by
12221 fp moves, which force all constants to memory to allow combining. */
12222
12223 if (MEM_P (operands[1])
12224 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
12225 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
12226 operands[1] = get_pool_constant (XEXP (operands[1], 0));
12227 if (push_operand (operands[0], VOIDmode))
12228 {
12229 operands[0] = copy_rtx (operands[0]);
12230 PUT_MODE (operands[0], Pmode);
12231 }
12232 else
12233 operands[0] = gen_lowpart (DImode, operands[0]);
12234 operands[1] = gen_lowpart (DImode, operands[1]);
12235 emit_move_insn (operands[0], operands[1]);
12236 return;
12237 }
12238
12239 /* The only non-offsettable memory we handle is push. */
12240 if (push_operand (operands[0], VOIDmode))
12241 push = 1;
12242 else
12243 gcc_assert (!MEM_P (operands[0])
12244 || offsettable_memref_p (operands[0]));
12245
12246 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
12247 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
12248
12249 /* When emitting a push, watch for source operands on the stack. */
12250 if (push && MEM_P (operands[1])
12251 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
12252 {
12253 if (nparts == 3)
12254 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
12255 XEXP (part[1][2], 0));
12256 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
12257 XEXP (part[1][1], 0));
12258 }
12259
12260 /* We need to do the copy in the right order in case an address register
12261 of the source overlaps the destination. */
12262 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
12263 {
12264 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
12265 collisions++;
12266 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
12267 collisions++;
12268 if (nparts == 3
12269 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
12270 collisions++;
12271
12272 /* Collision in the middle part can be handled by reordering. */
12273 if (collisions == 1 && nparts == 3
12274 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
12275 {
12276 rtx tmp;
12277 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
12278 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
12279 }
12280
12281 /* If there are more collisions, we can't handle them by reordering.
12282 Do an lea to the last part and use only one colliding move. */
12283 else if (collisions > 1)
12284 {
12285 rtx base;
12286
12287 collisions = 1;
12288
12289 base = part[0][nparts - 1];
12290
12291 /* Handle the case when the last part isn't valid for lea.
12292 Happens in 64-bit mode storing the 12-byte XFmode. */
12293 if (GET_MODE (base) != Pmode)
12294 base = gen_rtx_REG (Pmode, REGNO (base));
12295
12296 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
12297 part[1][0] = replace_equiv_address (part[1][0], base);
12298 part[1][1] = replace_equiv_address (part[1][1],
12299 plus_constant (base, UNITS_PER_WORD));
12300 if (nparts == 3)
12301 part[1][2] = replace_equiv_address (part[1][2],
12302 plus_constant (base, 8));
12303 }
12304 }
12305
12306 if (push)
12307 {
12308 if (!TARGET_64BIT)
12309 {
12310 if (nparts == 3)
12311 {
12312 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
12313 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
12314 emit_move_insn (part[0][2], part[1][2]);
12315 }
12316 }
12317 else
12318 {
12319 /* In 64-bit mode we don't have a 32-bit push available. In case this is
12320 a register, it is OK - we will just use the larger counterpart. We also
12321 retype memory - this comes from an attempt to avoid a REX prefix on
12322 moving the second half of a TFmode value. */
12323 if (GET_MODE (part[1][1]) == SImode)
12324 {
12325 switch (GET_CODE (part[1][1]))
12326 {
12327 case MEM:
12328 part[1][1] = adjust_address (part[1][1], DImode, 0);
12329 break;
12330
12331 case REG:
12332 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
12333 break;
12334
12335 default:
12336 gcc_unreachable ();
12337 }
12338
12339 if (GET_MODE (part[1][0]) == SImode)
12340 part[1][0] = part[1][1];
12341 }
12342 }
12343 emit_move_insn (part[0][1], part[1][1]);
12344 emit_move_insn (part[0][0], part[1][0]);
12345 return;
12346 }
12347
12348 /* Choose correct order to not overwrite the source before it is copied. */
12349 if ((REG_P (part[0][0])
12350 && REG_P (part[1][1])
12351 && (REGNO (part[0][0]) == REGNO (part[1][1])
12352 || (nparts == 3
12353 && REGNO (part[0][0]) == REGNO (part[1][2]))))
12354 || (collisions > 0
12355 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
12356 {
12357 if (nparts == 3)
12358 {
12359 operands[2] = part[0][2];
12360 operands[3] = part[0][1];
12361 operands[4] = part[0][0];
12362 operands[5] = part[1][2];
12363 operands[6] = part[1][1];
12364 operands[7] = part[1][0];
12365 }
12366 else
12367 {
12368 operands[2] = part[0][1];
12369 operands[3] = part[0][0];
12370 operands[5] = part[1][1];
12371 operands[6] = part[1][0];
12372 }
12373 }
12374 else
12375 {
12376 if (nparts == 3)
12377 {
12378 operands[2] = part[0][0];
12379 operands[3] = part[0][1];
12380 operands[4] = part[0][2];
12381 operands[5] = part[1][0];
12382 operands[6] = part[1][1];
12383 operands[7] = part[1][2];
12384 }
12385 else
12386 {
12387 operands[2] = part[0][0];
12388 operands[3] = part[0][1];
12389 operands[5] = part[1][0];
12390 operands[6] = part[1][1];
12391 }
12392 }
12393
12394 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
12395 if (optimize_size)
12396 {
12397 if (CONST_INT_P (operands[5])
12398 && operands[5] != const0_rtx
12399 && REG_P (operands[2]))
12400 {
12401 if (CONST_INT_P (operands[6])
12402 && INTVAL (operands[6]) == INTVAL (operands[5]))
12403 operands[6] = operands[2];
12404
12405 if (nparts == 3
12406 && CONST_INT_P (operands[7])
12407 && INTVAL (operands[7]) == INTVAL (operands[5]))
12408 operands[7] = operands[2];
12409 }
12410
12411 if (nparts == 3
12412 && CONST_INT_P (operands[6])
12413 && operands[6] != const0_rtx
12414 && REG_P (operands[3])
12415 && CONST_INT_P (operands[7])
12416 && INTVAL (operands[7]) == INTVAL (operands[6]))
12417 operands[7] = operands[3];
12418 }
12419
12420 emit_move_insn (operands[2], operands[5]);
12421 emit_move_insn (operands[3], operands[6]);
12422 if (nparts == 3)
12423 emit_move_insn (operands[4], operands[7]);
12424
12425 return;
12426 }
12427
12428 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
12429 left shift by a constant, either using a single shift or
12430 a sequence of add instructions. */
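/* For instance, on a target where adds are cheap relative to a constant
   shift, a left shift by 2 may come out as two self-additions
   ("add reg,reg" twice) rather than a single shift; the cost test below
   makes that choice, and a shift by 1 is always a single add.  */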
12431
12432 static void
12433 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
12434 {
12435 if (count == 1)
12436 {
12437 emit_insn ((mode == DImode
12438 ? gen_addsi3
12439 : gen_adddi3) (operand, operand, operand));
12440 }
12441 else if (!optimize_size
12442 && count * ix86_cost->add <= ix86_cost->shift_const)
12443 {
12444 int i;
12445 for (i=0; i<count; i++)
12446 {
12447 emit_insn ((mode == DImode
12448 ? gen_addsi3
12449 : gen_adddi3) (operand, operand, operand));
12450 }
12451 }
12452 else
12453 emit_insn ((mode == DImode
12454 ? gen_ashlsi3
12455 : gen_ashldi3) (operand, operand, GEN_INT (count)));
12456 }
12457
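/* Split a double-word left shift (DImode on 32-bit targets, TImode on
   64-bit targets) into operations on the two word-sized halves, with fast
   paths for constant shift counts and for shifting the constants 1 and -1.  */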
12458 void
12459 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
12460 {
12461 rtx low[2], high[2];
12462 int count;
12463 const int single_width = mode == DImode ? 32 : 64;
12464
12465 if (CONST_INT_P (operands[2]))
12466 {
12467 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12468 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12469
12470 if (count >= single_width)
12471 {
12472 emit_move_insn (high[0], low[1]);
12473 emit_move_insn (low[0], const0_rtx);
12474
12475 if (count > single_width)
12476 ix86_expand_ashl_const (high[0], count - single_width, mode);
12477 }
12478 else
12479 {
12480 if (!rtx_equal_p (operands[0], operands[1]))
12481 emit_move_insn (operands[0], operands[1]);
12482 emit_insn ((mode == DImode
12483 ? gen_x86_shld_1
12484 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
12485 ix86_expand_ashl_const (low[0], count, mode);
12486 }
12487 return;
12488 }
12489
12490 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12491
12492 if (operands[1] == const1_rtx)
12493 {
12494 /* Assuming we've chosen QImode-capable registers, 1 << N
12495 can be done with two 32/64-bit shifts, no branches, no cmoves. */
12496 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
12497 {
12498 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
12499
12500 ix86_expand_clear (low[0]);
12501 ix86_expand_clear (high[0]);
12502 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
12503
12504 d = gen_lowpart (QImode, low[0]);
12505 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
12506 s = gen_rtx_EQ (QImode, flags, const0_rtx);
12507 emit_insn (gen_rtx_SET (VOIDmode, d, s));
12508
12509 d = gen_lowpart (QImode, high[0]);
12510 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
12511 s = gen_rtx_NE (QImode, flags, const0_rtx);
12512 emit_insn (gen_rtx_SET (VOIDmode, d, s));
12513 }
12514
12515 /* Otherwise, we can get the same results by manually performing
12516 a bit extract operation on bit 5/6, and then performing the two
12517 shifts. The two methods of getting 0/1 into low/high are exactly
12518 the same size. Avoiding the shift in the bit extract case helps
12519 pentium4 a bit; no one else seems to care much either way. */
12520 else
12521 {
12522 rtx x;
12523
12524 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
12525 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
12526 else
12527 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
12528 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
12529
12530 emit_insn ((mode == DImode
12531 ? gen_lshrsi3
12532 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
12533 emit_insn ((mode == DImode
12534 ? gen_andsi3
12535 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
12536 emit_move_insn (low[0], high[0]);
12537 emit_insn ((mode == DImode
12538 ? gen_xorsi3
12539 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
12540 }
12541
12542 emit_insn ((mode == DImode
12543 ? gen_ashlsi3
12544 : gen_ashldi3) (low[0], low[0], operands[2]));
12545 emit_insn ((mode == DImode
12546 ? gen_ashlsi3
12547 : gen_ashldi3) (high[0], high[0], operands[2]));
12548 return;
12549 }
12550
12551 if (operands[1] == constm1_rtx)
12552 {
12553 /* For -1 << N, we can avoid the shld instruction, because we
12554 know that we're shifting 0...31/63 ones into a -1. */
12555 emit_move_insn (low[0], constm1_rtx);
12556 if (optimize_size)
12557 emit_move_insn (high[0], low[0]);
12558 else
12559 emit_move_insn (high[0], constm1_rtx);
12560 }
12561 else
12562 {
12563 if (!rtx_equal_p (operands[0], operands[1]))
12564 emit_move_insn (operands[0], operands[1]);
12565
12566 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12567 emit_insn ((mode == DImode
12568 ? gen_x86_shld_1
12569 : gen_x86_64_shld) (high[0], low[0], operands[2]));
12570 }
12571
12572 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
12573
12574 if (TARGET_CMOVE && scratch)
12575 {
12576 ix86_expand_clear (scratch);
12577 emit_insn ((mode == DImode
12578 ? gen_x86_shift_adj_1
12579 : gen_x86_64_shift_adj) (high[0], low[0], operands[2], scratch));
12580 }
12581 else
12582 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
12583 }
12584
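/* Split a double-word arithmetic right shift into operations on the two
   word-sized halves; for counts of a word or more the high half is
   replicated from its sign bit, and variable counts use shrd plus an
   adjustment step (via SCRATCH and cmove when available).  */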
12585 void
12586 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
12587 {
12588 rtx low[2], high[2];
12589 int count;
12590 const int single_width = mode == DImode ? 32 : 64;
12591
12592 if (CONST_INT_P (operands[2]))
12593 {
12594 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12595 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12596
12597 if (count == single_width * 2 - 1)
12598 {
12599 emit_move_insn (high[0], high[1]);
12600 emit_insn ((mode == DImode
12601 ? gen_ashrsi3
12602 : gen_ashrdi3) (high[0], high[0],
12603 GEN_INT (single_width - 1)));
12604 emit_move_insn (low[0], high[0]);
12605
12606 }
12607 else if (count >= single_width)
12608 {
12609 emit_move_insn (low[0], high[1]);
12610 emit_move_insn (high[0], low[0]);
12611 emit_insn ((mode == DImode
12612 ? gen_ashrsi3
12613 : gen_ashrdi3) (high[0], high[0],
12614 GEN_INT (single_width - 1)));
12615 if (count > single_width)
12616 emit_insn ((mode == DImode
12617 ? gen_ashrsi3
12618 : gen_ashrdi3) (low[0], low[0],
12619 GEN_INT (count - single_width)));
12620 }
12621 else
12622 {
12623 if (!rtx_equal_p (operands[0], operands[1]))
12624 emit_move_insn (operands[0], operands[1]);
12625 emit_insn ((mode == DImode
12626 ? gen_x86_shrd_1
12627 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
12628 emit_insn ((mode == DImode
12629 ? gen_ashrsi3
12630 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
12631 }
12632 }
12633 else
12634 {
12635 if (!rtx_equal_p (operands[0], operands[1]))
12636 emit_move_insn (operands[0], operands[1]);
12637
12638 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12639
12640 emit_insn ((mode == DImode
12641 ? gen_x86_shrd_1
12642 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
12643 emit_insn ((mode == DImode
12644 ? gen_ashrsi3
12645 : gen_ashrdi3) (high[0], high[0], operands[2]));
12646
12647 if (TARGET_CMOVE && scratch)
12648 {
12649 emit_move_insn (scratch, high[0]);
12650 emit_insn ((mode == DImode
12651 ? gen_ashrsi3
12652 : gen_ashrdi3) (scratch, scratch,
12653 GEN_INT (single_width - 1)));
12654 emit_insn ((mode == DImode
12655 ? gen_x86_shift_adj_1
12656 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
12657 scratch));
12658 }
12659 else
12660 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
12661 }
12662 }
12663
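/* Split a double-word logical right shift into operations on the two
   word-sized halves; for counts of a word or more the high half is simply
   cleared, and variable counts use shrd plus an adjustment step.  */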
12664 void
12665 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
12666 {
12667 rtx low[2], high[2];
12668 int count;
12669 const int single_width = mode == DImode ? 32 : 64;
12670
12671 if (CONST_INT_P (operands[2]))
12672 {
12673 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12674 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12675
12676 if (count >= single_width)
12677 {
12678 emit_move_insn (low[0], high[1]);
12679 ix86_expand_clear (high[0]);
12680
12681 if (count > single_width)
12682 emit_insn ((mode == DImode
12683 ? gen_lshrsi3
12684 : gen_lshrdi3) (low[0], low[0],
12685 GEN_INT (count - single_width)));
12686 }
12687 else
12688 {
12689 if (!rtx_equal_p (operands[0], operands[1]))
12690 emit_move_insn (operands[0], operands[1]);
12691 emit_insn ((mode == DImode
12692 ? gen_x86_shrd_1
12693 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
12694 emit_insn ((mode == DImode
12695 ? gen_lshrsi3
12696 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
12697 }
12698 }
12699 else
12700 {
12701 if (!rtx_equal_p (operands[0], operands[1]))
12702 emit_move_insn (operands[0], operands[1]);
12703
12704 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12705
12706 emit_insn ((mode == DImode
12707 ? gen_x86_shrd_1
12708 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
12709 emit_insn ((mode == DImode
12710 ? gen_lshrsi3
12711 : gen_lshrdi3) (high[0], high[0], operands[2]));
12712
12713 /* Heh. By reversing the arguments, we can reuse this pattern. */
12714 if (TARGET_CMOVE && scratch)
12715 {
12716 ix86_expand_clear (scratch);
12717 emit_insn ((mode == DImode
12718 ? gen_x86_shift_adj_1
12719 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
12720 scratch));
12721 }
12722 else
12723 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
12724 }
12725 }
12726
12727 /* Predict just emitted jump instruction to be taken with probability PROB. */
12728 static void
12729 predict_jump (int prob)
12730 {
12731 rtx insn = get_last_insn ();
12732 gcc_assert (JUMP_P (insn));
12733 REG_NOTES (insn)
12734 = gen_rtx_EXPR_LIST (REG_BR_PROB,
12735 GEN_INT (prob),
12736 REG_NOTES (insn));
12737 }
12738
12739 /* Helper function for the string operations below.  Test whether VARIABLE & VALUE
12740 is zero (i.e. VARIABLE is aligned to VALUE bytes); if so, jump to the returned label. */
12741 static rtx
12742 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
12743 {
12744 rtx label = gen_label_rtx ();
12745 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
12746 if (GET_MODE (variable) == DImode)
12747 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
12748 else
12749 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
12750 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
12751 1, label);
12752 if (epilogue)
12753 predict_jump (REG_BR_PROB_BASE * 50 / 100);
12754 else
12755 predict_jump (REG_BR_PROB_BASE * 90 / 100);
12756 return label;
12757 }
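/* As an illustration (comment only), a call such as
   ix86_expand_aligntest (count, 4, true) emits code equivalent to

     tmp = count & 4;
     if (tmp == 0) goto label;

   and returns LABEL, so whatever the caller emits before emit_label (label)
   runs only when the tested bit is set.  */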
12758
12759 /* Adjust COUNTER by the VALUE. */
12760 static void
12761 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
12762 {
12763 if (GET_MODE (countreg) == DImode)
12764 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
12765 else
12766 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
12767 }
12768
12769 /* Zero-extend EXP, which may be SImode, to a Pmode register. */
12770 rtx
12771 ix86_zero_extend_to_Pmode (rtx exp)
12772 {
12773 rtx r;
12774 if (GET_MODE (exp) == VOIDmode)
12775 return force_reg (Pmode, exp);
12776 if (GET_MODE (exp) == Pmode)
12777 return copy_to_mode_reg (Pmode, exp);
12778 r = gen_reg_rtx (Pmode);
12779 emit_insn (gen_zero_extendsidi2 (r, exp));
12780 return r;
12781 }
12782
12783 /* Divide COUNTREG by SCALE. */
12784 static rtx
12785 scale_counter (rtx countreg, int scale)
12786 {
12787 rtx sc;
12788 rtx piece_size_mask;
12789
12790 if (scale == 1)
12791 return countreg;
12792 if (CONST_INT_P (countreg))
12793 return GEN_INT (INTVAL (countreg) / scale);
12794 gcc_assert (REG_P (countreg));
12795
12796 piece_size_mask = GEN_INT (scale - 1);
12797 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
12798 GEN_INT (exact_log2 (scale)),
12799 NULL, 1, OPTAB_DIRECT);
12800 return sc;
12801 }
12802
12803 /* When SRCPTR is non-NULL, output a simple loop to move memory pointed
12804 to by SRCPTR to DESTPTR via chunks of MODE, unrolled UNROLL times;
12805 the overall size is COUNT, specified in bytes. When SRCPTR is NULL, output the
12806 equivalent loop to set memory by VALUE (supposed to be in MODE).
12807 
12808 The size is rounded down to a whole number of chunks moved at once.
12809 SRCMEM and DESTMEM provide MEM rtx to feed proper aliasing info. */
12810
12811
12812 static void
12813 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
12814 rtx destptr, rtx srcptr, rtx value,
12815 rtx count, enum machine_mode mode, int unroll,
12816 int expected_size)
12817 {
12818 rtx out_label, top_label, iter, tmp;
12819 enum machine_mode iter_mode;
12820 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
12821 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
12822 rtx size;
12823 rtx x_addr;
12824 rtx y_addr;
12825 int i;
12826
12827 iter_mode = GET_MODE (count);
12828 if (iter_mode == VOIDmode)
12829 iter_mode = word_mode;
12830
12831 top_label = gen_label_rtx ();
12832 out_label = gen_label_rtx ();
12833 iter = gen_reg_rtx (iter_mode);
12834
12835 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
12836 NULL, 1, OPTAB_DIRECT);
12837 /* Those two should combine. */
12838 if (piece_size == const1_rtx)
12839 {
12840 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
12841 true, out_label);
12842 predict_jump (REG_BR_PROB_BASE * 10 / 100);
12843 }
12844 emit_move_insn (iter, const0_rtx);
12845
12846 emit_label (top_label);
12847
12848 tmp = convert_modes (Pmode, iter_mode, iter, true);
12849 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
12850 destmem = change_address (destmem, mode, x_addr);
12851
12852 if (srcmem)
12853 {
12854 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
12855 srcmem = change_address (srcmem, mode, y_addr);
12856
12857 /* When unrolling for chips that reorder memory reads and writes,
12858 we can save registers by using a single temporary.
12859 Also, using 4 temporaries is overkill in 32-bit mode. */
12860 if (!TARGET_64BIT && 0)
12861 {
12862 for (i = 0; i < unroll; i++)
12863 {
12864 if (i)
12865 {
12866 destmem =
12867 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12868 srcmem =
12869 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
12870 }
12871 emit_move_insn (destmem, srcmem);
12872 }
12873 }
12874 else
12875 {
12876 rtx tmpreg[4];
12877 gcc_assert (unroll <= 4);
12878 for (i = 0; i < unroll; i++)
12879 {
12880 tmpreg[i] = gen_reg_rtx (mode);
12881 if (i)
12882 {
12883 srcmem =
12884 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
12885 }
12886 emit_move_insn (tmpreg[i], srcmem);
12887 }
12888 for (i = 0; i < unroll; i++)
12889 {
12890 if (i)
12891 {
12892 destmem =
12893 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12894 }
12895 emit_move_insn (destmem, tmpreg[i]);
12896 }
12897 }
12898 }
12899 else
12900 for (i = 0; i < unroll; i++)
12901 {
12902 if (i)
12903 destmem =
12904 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12905 emit_move_insn (destmem, value);
12906 }
12907
12908 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
12909 true, OPTAB_LIB_WIDEN);
12910 if (tmp != iter)
12911 emit_move_insn (iter, tmp);
12912
12913 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
12914 true, top_label);
12915 if (expected_size != -1)
12916 {
12917 expected_size /= GET_MODE_SIZE (mode) * unroll;
12918 if (expected_size == 0)
12919 predict_jump (0);
12920 else if (expected_size > REG_BR_PROB_BASE)
12921 predict_jump (REG_BR_PROB_BASE - 1);
12922 else
12923 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
12924 }
12925 else
12926 predict_jump (REG_BR_PROB_BASE * 80 / 100);
12927 iter = ix86_zero_extend_to_Pmode (iter);
12928 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
12929 true, OPTAB_LIB_WIDEN);
12930 if (tmp != destptr)
12931 emit_move_insn (destptr, tmp);
12932 if (srcptr)
12933 {
12934 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
12935 true, OPTAB_LIB_WIDEN);
12936 if (tmp != srcptr)
12937 emit_move_insn (srcptr, tmp);
12938 }
12939 emit_label (out_label);
12940 }
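/* As an illustration (comment only), for a copy with MODE == SImode and
   UNROLL == 1 the emitted loop is roughly equivalent to

     size = count & ~3;
     iter = 0;
     top:
       *(int *) (dest + iter) = *(int *) (src + iter);
       iter += 4;
       if (iter < size) goto top;
     dest += iter;
     src += iter;

   leaving the tail (count modulo the piece size) to the epilogue code.  */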
12941
12942 /* Output "rep; mov" instruction.
12943 Arguments have the same meaning as for the previous function. */
12944 static void
12945 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
12946 rtx destptr, rtx srcptr,
12947 rtx count,
12948 enum machine_mode mode)
12949 {
12950 rtx destexp;
12951 rtx srcexp;
12952 rtx countreg;
12953
12954 /* If the size is known, it is shorter to use rep movs. */
12955 if (mode == QImode && CONST_INT_P (count)
12956 && !(INTVAL (count) & 3))
12957 mode = SImode;
12958
12959 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
12960 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
12961 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
12962 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
12963 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
12964 if (mode != QImode)
12965 {
12966 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12967 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
12968 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
12969 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
12970 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
12971 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
12972 }
12973 else
12974 {
12975 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
12976 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
12977 }
12978 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
12979 destexp, srcexp));
12980 }
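/* As an illustration (comment only), with MODE == SImode the sequence above
   behaves like

     ecx = count / 4;
     rep movsd

   i.e. the count register holds the number of 4-byte chunks and both
   pointers advance automatically; DESTEXP and SRCEXP merely describe the
   resulting pointer values for the RTL dataflow.  */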
12981
12982 /* Output "rep; stos" instruction.
12983 Arguments have the same meaning as for the previous function. */
12984 static void
12985 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
12986 rtx count,
12987 enum machine_mode mode)
12988 {
12989 rtx destexp;
12990 rtx countreg;
12991
12992 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
12993 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
12994 value = force_reg (mode, gen_lowpart (mode, value));
12995 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
12996 if (mode != QImode)
12997 {
12998 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12999 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
13000 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
13001 }
13002 else
13003 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
13004 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
13005 }
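/* As an illustration (comment only), the memset counterpart with
   MODE == SImode behaves like

     eax = value;
     ecx = count / 4;
     rep stosd

   filling 4-byte chunks at the destination; the remaining tail bytes are
   handled by the epilogue.  */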
13006
13007 static void
13008 emit_strmov (rtx destmem, rtx srcmem,
13009 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
13010 {
13011 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
13012 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
13013 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13014 }
13015
13016 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
13017 static void
13018 expand_movmem_epilogue (rtx destmem, rtx srcmem,
13019 rtx destptr, rtx srcptr, rtx count, int max_size)
13020 {
13021 rtx src, dest;
13022 if (CONST_INT_P (count))
13023 {
13024 HOST_WIDE_INT countval = INTVAL (count);
13025 int offset = 0;
13026
13027 if ((countval & 0x10) && max_size > 16)
13028 {
13029 if (TARGET_64BIT)
13030 {
13031 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13032 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
13033 }
13034 else
13035 gcc_unreachable ();
13036 offset += 16;
13037 }
13038 if ((countval & 0x08) && max_size > 8)
13039 {
13040 if (TARGET_64BIT)
13041 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13042 else
13043 {
13044 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13045 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 4);
13046 }
13047 offset += 8;
13048 }
13049 if ((countval & 0x04) && max_size > 4)
13050 {
13051 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
13052 offset += 4;
13053 }
13054 if ((countval & 0x02) && max_size > 2)
13055 {
13056 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
13057 offset += 2;
13058 }
13059 if ((countval & 0x01) && max_size > 1)
13060 {
13061 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
13062 offset += 1;
13063 }
13064 return;
13065 }
13066 if (max_size > 8)
13067 {
13068 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
13069 count, 1, OPTAB_DIRECT);
13070 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
13071 count, QImode, 1, 4);
13072 return;
13073 }
13074
13075 /* When single-instruction stringops are cheap, we can cheaply advance the dest
13076 and src pointers. Otherwise we save code size by maintaining an offset (zero is
13077 readily available from the preceding rep operation) and using x86 addressing modes.
13078 */
13079 if (TARGET_SINGLE_STRINGOP)
13080 {
13081 if (max_size > 4)
13082 {
13083 rtx label = ix86_expand_aligntest (count, 4, true);
13084 src = change_address (srcmem, SImode, srcptr);
13085 dest = change_address (destmem, SImode, destptr);
13086 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13087 emit_label (label);
13088 LABEL_NUSES (label) = 1;
13089 }
13090 if (max_size > 2)
13091 {
13092 rtx label = ix86_expand_aligntest (count, 2, true);
13093 src = change_address (srcmem, HImode, srcptr);
13094 dest = change_address (destmem, HImode, destptr);
13095 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13096 emit_label (label);
13097 LABEL_NUSES (label) = 1;
13098 }
13099 if (max_size > 1)
13100 {
13101 rtx label = ix86_expand_aligntest (count, 1, true);
13102 src = change_address (srcmem, QImode, srcptr);
13103 dest = change_address (destmem, QImode, destptr);
13104 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13105 emit_label (label);
13106 LABEL_NUSES (label) = 1;
13107 }
13108 }
13109 else
13110 {
13111 rtx offset = force_reg (Pmode, const0_rtx);
13112 rtx tmp;
13113
13114 if (max_size > 4)
13115 {
13116 rtx label = ix86_expand_aligntest (count, 4, true);
13117 src = change_address (srcmem, SImode, srcptr);
13118 dest = change_address (destmem, SImode, destptr);
13119 emit_move_insn (dest, src);
13120 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
13121 true, OPTAB_LIB_WIDEN);
13122 if (tmp != offset)
13123 emit_move_insn (offset, tmp);
13124 emit_label (label);
13125 LABEL_NUSES (label) = 1;
13126 }
13127 if (max_size > 2)
13128 {
13129 rtx label = ix86_expand_aligntest (count, 2, true);
13130 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
13131 src = change_address (srcmem, HImode, tmp);
13132 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
13133 dest = change_address (destmem, HImode, tmp);
13134 emit_move_insn (dest, src);
13135 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
13136 true, OPTAB_LIB_WIDEN);
13137 if (tmp != offset)
13138 emit_move_insn (offset, tmp);
13139 emit_label (label);
13140 LABEL_NUSES (label) = 1;
13141 }
13142 if (max_size > 1)
13143 {
13144 rtx label = ix86_expand_aligntest (count, 1, true);
13145 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
13146 src = change_address (srcmem, QImode, tmp);
13147 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
13148 dest = change_address (destmem, QImode, tmp);
13149 emit_move_insn (dest, src);
13150 emit_label (label);
13151 LABEL_NUSES (label) = 1;
13152 }
13153 }
13154 }
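/* Note that with a constant count the epilogue above simply peels chunks
   according to the set bits of the remainder: a tail of 13 = 0b1101 bytes,
   for example, is copied as one 8-byte, one 4-byte and one 1-byte move
   (subject to max_size and TARGET_64BIT).  */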
13155
13156 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
13157 static void
13158 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
13159 rtx count, int max_size)
13160 {
13161 count =
13162 expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
13163 count, 1, OPTAB_DIRECT);
13164 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
13165 gen_lowpart (QImode, value), count, QImode,
13166 1, max_size / 2);
13167 }
13168
13169 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
13170 static void
13171 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
13172 {
13173 rtx dest;
13174
13175 if (CONST_INT_P (count))
13176 {
13177 HOST_WIDE_INT countval = INTVAL (count);
13178 int offset = 0;
13179
13180 if ((countval & 0x10) && max_size > 16)
13181 {
13182 if (TARGET_64BIT)
13183 {
13184 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
13185 emit_insn (gen_strset (destptr, dest, value));
13186 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
13187 emit_insn (gen_strset (destptr, dest, value));
13188 }
13189 else
13190 gcc_unreachable ();
13191 offset += 16;
13192 }
13193 if ((countval & 0x08) && max_size > 8)
13194 {
13195 if (TARGET_64BIT)
13196 {
13197 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
13198 emit_insn (gen_strset (destptr, dest, value));
13199 }
13200 else
13201 {
13202 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
13203 emit_insn (gen_strset (destptr, dest, value));
13204 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
13205 emit_insn (gen_strset (destptr, dest, value));
13206 }
13207 offset += 8;
13208 }
13209 if ((countval & 0x04) && max_size > 4)
13210 {
13211 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
13212 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
13213 offset += 4;
13214 }
13215 if ((countval & 0x02) && max_size > 2)
13216 {
13217 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
13218 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
13219 offset += 2;
13220 }
13221 if ((countval & 0x01) && max_size > 1)
13222 {
13223 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
13224 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
13225 offset += 1;
13226 }
13227 return;
13228 }
13229 if (max_size > 32)
13230 {
13231 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
13232 return;
13233 }
13234 if (max_size > 16)
13235 {
13236 rtx label = ix86_expand_aligntest (count, 16, true);
13237 if (TARGET_64BIT)
13238 {
13239 dest = change_address (destmem, DImode, destptr);
13240 emit_insn (gen_strset (destptr, dest, value));
13241 emit_insn (gen_strset (destptr, dest, value));
13242 }
13243 else
13244 {
13245 dest = change_address (destmem, SImode, destptr);
13246 emit_insn (gen_strset (destptr, dest, value));
13247 emit_insn (gen_strset (destptr, dest, value));
13248 emit_insn (gen_strset (destptr, dest, value));
13249 emit_insn (gen_strset (destptr, dest, value));
13250 }
13251 emit_label (label);
13252 LABEL_NUSES (label) = 1;
13253 }
13254 if (max_size > 8)
13255 {
13256 rtx label = ix86_expand_aligntest (count, 8, true);
13257 if (TARGET_64BIT)
13258 {
13259 dest = change_address (destmem, DImode, destptr);
13260 emit_insn (gen_strset (destptr, dest, value));
13261 }
13262 else
13263 {
13264 dest = change_address (destmem, SImode, destptr);
13265 emit_insn (gen_strset (destptr, dest, value));
13266 emit_insn (gen_strset (destptr, dest, value));
13267 }
13268 emit_label (label);
13269 LABEL_NUSES (label) = 1;
13270 }
13271 if (max_size > 4)
13272 {
13273 rtx label = ix86_expand_aligntest (count, 4, true);
13274 dest = change_address (destmem, SImode, destptr);
13275 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
13276 emit_label (label);
13277 LABEL_NUSES (label) = 1;
13278 }
13279 if (max_size > 2)
13280 {
13281 rtx label = ix86_expand_aligntest (count, 2, true);
13282 dest = change_address (destmem, HImode, destptr);
13283 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
13284 emit_label (label);
13285 LABEL_NUSES (label) = 1;
13286 }
13287 if (max_size > 1)
13288 {
13289 rtx label = ix86_expand_aligntest (count, 1, true);
13290 dest = change_address (destmem, QImode, destptr);
13291 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
13292 emit_label (label);
13293 LABEL_NUSES (label) = 1;
13294 }
13295 }
13296
13297 /* Copy enough bytes from SRC to DEST to align DEST, known to be aligned by ALIGN,
13298 to DESIRED_ALIGNMENT. */
13299 static void
13300 expand_movmem_prologue (rtx destmem, rtx srcmem,
13301 rtx destptr, rtx srcptr, rtx count,
13302 int align, int desired_alignment)
13303 {
13304 if (align <= 1 && desired_alignment > 1)
13305 {
13306 rtx label = ix86_expand_aligntest (destptr, 1, false);
13307 srcmem = change_address (srcmem, QImode, srcptr);
13308 destmem = change_address (destmem, QImode, destptr);
13309 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13310 ix86_adjust_counter (count, 1);
13311 emit_label (label);
13312 LABEL_NUSES (label) = 1;
13313 }
13314 if (align <= 2 && desired_alignment > 2)
13315 {
13316 rtx label = ix86_expand_aligntest (destptr, 2, false);
13317 srcmem = change_address (srcmem, HImode, srcptr);
13318 destmem = change_address (destmem, HImode, destptr);
13319 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13320 ix86_adjust_counter (count, 2);
13321 emit_label (label);
13322 LABEL_NUSES (label) = 1;
13323 }
13324 if (align <= 4 && desired_alignment > 4)
13325 {
13326 rtx label = ix86_expand_aligntest (destptr, 4, false);
13327 srcmem = change_address (srcmem, SImode, srcptr);
13328 destmem = change_address (destmem, SImode, destptr);
13329 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13330 ix86_adjust_counter (count, 4);
13331 emit_label (label);
13332 LABEL_NUSES (label) = 1;
13333 }
13334 gcc_assert (desired_alignment <= 8);
13335 }
13336
13337 /* Store enough bytes at DEST to align DEST, known to be aligned by ALIGN,
13338 to DESIRED_ALIGNMENT. */
13339 static void
13340 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
13341 int align, int desired_alignment)
13342 {
13343 if (align <= 1 && desired_alignment > 1)
13344 {
13345 rtx label = ix86_expand_aligntest (destptr, 1, false);
13346 destmem = change_address (destmem, QImode, destptr);
13347 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
13348 ix86_adjust_counter (count, 1);
13349 emit_label (label);
13350 LABEL_NUSES (label) = 1;
13351 }
13352 if (align <= 2 && desired_alignment > 2)
13353 {
13354 rtx label = ix86_expand_aligntest (destptr, 2, false);
13355 destmem = change_address (destmem, HImode, destptr);
13356 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
13357 ix86_adjust_counter (count, 2);
13358 emit_label (label);
13359 LABEL_NUSES (label) = 1;
13360 }
13361 if (align <= 4 && desired_alignment > 4)
13362 {
13363 rtx label = ix86_expand_aligntest (destptr, 4, false);
13364 destmem = change_address (destmem, SImode, destptr);
13365 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
13366 ix86_adjust_counter (count, 4);
13367 emit_label (label);
13368 LABEL_NUSES (label) = 1;
13369 }
13370 gcc_assert (desired_alignment <= 8);
13371 }
13372
13373 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
13374 static enum stringop_alg
13375 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
13376 int *dynamic_check)
13377 {
13378 const struct stringop_algs * algs;
13379
13380 *dynamic_check = -1;
13381 if (memset)
13382 algs = &ix86_cost->memset[TARGET_64BIT != 0];
13383 else
13384 algs = &ix86_cost->memcpy[TARGET_64BIT != 0];
13385 if (stringop_alg != no_stringop)
13386 return stringop_alg;
13387 /* rep; movq or rep; movl is the smallest variant. */
13388 else if (optimize_size)
13389 {
13390 if (!count || (count & 3))
13391 return rep_prefix_1_byte;
13392 else
13393 return rep_prefix_4_byte;
13394 }
13395 /* Very tiny blocks are best handled via the loop; REP is expensive to set up.
13396 */
13397 else if (expected_size != -1 && expected_size < 4)
13398 return loop_1_byte;
13399 else if (expected_size != -1)
13400 {
13401 unsigned int i;
13402 enum stringop_alg alg = libcall;
13403 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
13404 {
13405 gcc_assert (algs->size[i].max);
13406 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
13407 {
13408 if (algs->size[i].alg != libcall)
13409 alg = algs->size[i].alg;
13410 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
13411 last non-libcall inline algorithm. */
13412 if (TARGET_INLINE_ALL_STRINGOPS)
13413 {
13414 /* When the current size is best copied by a libcall,
13415 but we are still forced to inline, run the heuristic below
13416 that picks code for medium-sized blocks. */
13417 if (alg != libcall)
13418 return alg;
13419 break;
13420 }
13421 else
13422 return algs->size[i].alg;
13423 }
13424 }
13425 gcc_assert (TARGET_INLINE_ALL_STRINGOPS);
13426 }
13427 /* When asked to inline the call anyway, try to pick a meaningful choice.
13428 We look for the maximal size of a block that is faster to copy by hand and
13429 take blocks of at most that size, guessing that the average size will
13430 be roughly half of that.
13431 
13432 If this turns out to be bad, we might simply specify the preferred
13433 choice in ix86_costs. */
13434 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
13435 && algs->unknown_size == libcall)
13436 {
13437 int max = -1;
13438 enum stringop_alg alg;
13439 int i;
13440
13441 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
13442 if (algs->size[i].alg != libcall && algs->size[i].alg)
13443 max = algs->size[i].max;
13444 if (max == -1)
13445 max = 4096;
13446 alg = decide_alg (count, max / 2, memset, dynamic_check);
13447 gcc_assert (*dynamic_check == -1);
13448 gcc_assert (alg != libcall);
13449 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
13450 *dynamic_check = max;
13451 return alg;
13452 }
13453 return algs->unknown_size;
13454 }
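/* As an illustration (comment only, with a made-up cost table), the size
   table is scanned in order, so given

     {{256, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}

   an expected_size of 100 selects loop, an expected_size of 1000 selects
   rep_prefix_4_byte, and an unknown size falls back to the unknown_size
   field unless the TARGET_INLINE_ALL_STRINGOPS or
   TARGET_INLINE_STRINGOPS_DYNAMICALLY handling above picks an inline
   algorithm instead.  */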
13455
13456 /* Decide on alignment. We know that the operand is already aligned to ALIGN
13457 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
13458 static int
13459 decide_alignment (int align,
13460 enum stringop_alg alg,
13461 int expected_size)
13462 {
13463 int desired_align = 0;
13464 switch (alg)
13465 {
13466 case no_stringop:
13467 gcc_unreachable ();
13468 case loop:
13469 case unrolled_loop:
13470 desired_align = GET_MODE_SIZE (Pmode);
13471 break;
13472 case rep_prefix_8_byte:
13473 desired_align = 8;
13474 break;
13475 case rep_prefix_4_byte:
13476 /* PentiumPro has special logic that triggers for 8-byte-aligned blocks,
13477 copying a whole cache line at once. */
13478 if (TARGET_PENTIUMPRO)
13479 desired_align = 8;
13480 else
13481 desired_align = 4;
13482 break;
13483 case rep_prefix_1_byte:
13484 /* PentiumPro has special logic that triggers for 8-byte-aligned blocks,
13485 copying a whole cache line at once. */
13486 if (TARGET_PENTIUMPRO)
13487 desired_align = 8;
13488 else
13489 desired_align = 1;
13490 break;
13491 case loop_1_byte:
13492 desired_align = 1;
13493 break;
13494 case libcall:
13495 return 0;
13496 }
13497
13498 if (optimize_size)
13499 desired_align = 1;
13500 if (desired_align < align)
13501 desired_align = align;
13502 if (expected_size != -1 && expected_size < 4)
13503 desired_align = align;
13504 return desired_align;
13505 }
13506
13507 /* Return the smallest power of 2 greater than VAL. */
13508 static int
13509 smallest_pow2_greater_than (int val)
13510 {
13511 int ret = 1;
13512 while (ret <= val)
13513 ret <<= 1;
13514 return ret;
13515 }
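/* "Greater than" is strict here: smallest_pow2_greater_than (4) is 8 and
   smallest_pow2_greater_than (7) is also 8.  The callers below use it to
   obtain a power-of-two epilogue size.  */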
13516
13517 /* Expand string move (memcpy) operation. Use i386 string operations when
13518 profitable. ix86_expand_setmem contains similar code. The code depends upon
13519 architecture, block size and alignment, but always has the same
13520 overall structure:
13521
13522 1) Prologue guard: Conditional that jumps up to the epilogue for small
13523 blocks that can be handled by the epilogue alone. This is faster, but
13524 also needed for correctness, since the prologue assumes the block is larger
13525 than the desired alignment.
13526
13527 Optional dynamic check for size and libcall for large
13528 blocks is emitted here too, with -minline-stringops-dynamically.
13529
13530 2) Prologue: copy first few bytes in order to get destination aligned
13531 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
13532 DESIRED_ALIGN, and up to DESIRED_ALIGN - ALIGN bytes can be copied.
13533 We emit either a jump tree on power of two sized blocks, or a byte loop.
13534
13535 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
13536 with specified algorithm.
13537
13538 4) Epilogue: code copying tail of the block that is too small to be
13539 handled by main body (or up to size guarded by prologue guard). */
13540
13541 int
13542 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
13543 rtx expected_align_exp, rtx expected_size_exp)
13544 {
13545 rtx destreg;
13546 rtx srcreg;
13547 rtx label = NULL;
13548 rtx tmp;
13549 rtx jump_around_label = NULL;
13550 HOST_WIDE_INT align = 1;
13551 unsigned HOST_WIDE_INT count = 0;
13552 HOST_WIDE_INT expected_size = -1;
13553 int size_needed = 0, epilogue_size_needed;
13554 int desired_align = 0;
13555 enum stringop_alg alg;
13556 int dynamic_check;
13557
13558 if (CONST_INT_P (align_exp))
13559 align = INTVAL (align_exp);
13560 /* i386 can do misaligned access at a reasonably increased cost. */
13561 if (CONST_INT_P (expected_align_exp)
13562 && INTVAL (expected_align_exp) > align)
13563 align = INTVAL (expected_align_exp);
13564 if (CONST_INT_P (count_exp))
13565 count = expected_size = INTVAL (count_exp);
13566 if (CONST_INT_P (expected_size_exp) && count == 0)
13567 expected_size = INTVAL (expected_size_exp);
13568
13569 /* Step 0: Decide on preferred algorithm, desired alignment and
13570 size of chunks to be copied by main loop. */
13571
13572 alg = decide_alg (count, expected_size, false, &dynamic_check);
13573 desired_align = decide_alignment (align, alg, expected_size);
13574
13575 if (!TARGET_ALIGN_STRINGOPS)
13576 align = desired_align;
13577
13578 if (alg == libcall)
13579 return 0;
13580 gcc_assert (alg != no_stringop);
13581 if (!count)
13582 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
13583 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
13584 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
13585 switch (alg)
13586 {
13587 case libcall:
13588 case no_stringop:
13589 gcc_unreachable ();
13590 case loop:
13591 size_needed = GET_MODE_SIZE (Pmode);
13592 break;
13593 case unrolled_loop:
13594 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
13595 break;
13596 case rep_prefix_8_byte:
13597 size_needed = 8;
13598 break;
13599 case rep_prefix_4_byte:
13600 size_needed = 4;
13601 break;
13602 case rep_prefix_1_byte:
13603 case loop_1_byte:
13604 size_needed = 1;
13605 break;
13606 }
13607
13608 epilogue_size_needed = size_needed;
13609
13610 /* Step 1: Prologue guard. */
13611
13612 /* Alignment code needs count to be in register. */
13613 if (CONST_INT_P (count_exp) && desired_align > align)
13614 {
13615 enum machine_mode mode = SImode;
13616 if (TARGET_64BIT && (count & ~0xffffffff))
13617 mode = DImode;
13618 count_exp = force_reg (mode, count_exp);
13619 }
13620 gcc_assert (desired_align >= 1 && align >= 1);
13621
13622 /* Ensure that alignment prologue won't copy past end of block. */
13623 if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
13624 && !count)
13625 {
13626 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
13627
13628 /* Epilogue always copies COUNT_EXP & EPILOGUE_SIZE_NEEDED bytes.
13629 Make sure it is power of 2. */
13630 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
13631
13632 label = gen_label_rtx ();
13633 emit_cmp_and_jump_insns (count_exp,
13634 GEN_INT (epilogue_size_needed),
13635 LTU, 0, GET_MODE (count_exp), 1, label);
13636 if (expected_size == -1 || expected_size < epilogue_size_needed)
13637 predict_jump (REG_BR_PROB_BASE * 60 / 100);
13638 else
13639 predict_jump (REG_BR_PROB_BASE * 20 / 100);
13640 }
13641 /* Emit code to decide at runtime whether a library call or inline code should be
13642 used. */
13643 if (dynamic_check != -1)
13644 {
13645 rtx hot_label = gen_label_rtx ();
13646 jump_around_label = gen_label_rtx ();
13647 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
13648 LEU, 0, GET_MODE (count_exp), 1, hot_label);
13649 predict_jump (REG_BR_PROB_BASE * 90 / 100);
13650 emit_block_move_via_libcall (dst, src, count_exp, false);
13651 emit_jump (jump_around_label);
13652 emit_label (hot_label);
13653 }
13654
13655 /* Step 2: Alignment prologue. */
13656
13657 if (desired_align > align)
13658 {
13659 /* Except for the first move in the epilogue, we no longer know
13660 the constant offset in the aliasing info. It doesn't seem worth
13661 the pain to maintain it for the first move, so throw away
13662 the info early. */
13663 src = change_address (src, BLKmode, srcreg);
13664 dst = change_address (dst, BLKmode, destreg);
13665 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
13666 desired_align);
13667 }
13668 if (label && size_needed == 1)
13669 {
13670 emit_label (label);
13671 LABEL_NUSES (label) = 1;
13672 label = NULL;
13673 }
13674
13675 /* Step 3: Main loop. */
13676
13677 switch (alg)
13678 {
13679 case libcall:
13680 case no_stringop:
13681 gcc_unreachable ();
13682 case loop_1_byte:
13683 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13684 count_exp, QImode, 1, expected_size);
13685 break;
13686 case loop:
13687 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13688 count_exp, Pmode, 1, expected_size);
13689 break;
13690 case unrolled_loop:
13691 /* Unroll only by a factor of 2 in 32-bit mode, since we don't have enough
13692 registers for 4 temporaries anyway. */
13693 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13694 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
13695 expected_size);
13696 break;
13697 case rep_prefix_8_byte:
13698 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13699 DImode);
13700 break;
13701 case rep_prefix_4_byte:
13702 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13703 SImode);
13704 break;
13705 case rep_prefix_1_byte:
13706 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13707 QImode);
13708 break;
13709 }
13710 /* Properly adjust the offsets of the src and dest memory for aliasing. */
13711 if (CONST_INT_P (count_exp))
13712 {
13713 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
13714 (count / size_needed) * size_needed);
13715 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
13716 (count / size_needed) * size_needed);
13717 }
13718 else
13719 {
13720 src = change_address (src, BLKmode, srcreg);
13721 dst = change_address (dst, BLKmode, destreg);
13722 }
13723
13724 /* Step 4: Epilogue to copy the remaining bytes. */
13725
13726 if (label)
13727 {
13728 /* When the main loop is done, COUNT_EXP might hold original count,
13729 while we want to copy only COUNT_EXP & SIZE_NEEDED bytes.
13730 Epilogue code will actually copy COUNT_EXP & EPILOGUE_SIZE_NEEDED
13731 bytes. Compensate if needed. */
13732
13733 if (size_needed < epilogue_size_needed)
13734 {
13735 tmp =
13736 expand_simple_binop (GET_MODE (count_exp), AND, count_exp,
13737 GEN_INT (size_needed - 1), count_exp, 1,
13738 OPTAB_DIRECT);
13739 if (tmp != count_exp)
13740 emit_move_insn (count_exp, tmp);
13741 }
13742 emit_label (label);
13743 LABEL_NUSES (label) = 1;
13744 }
13745
13746 if (count_exp != const0_rtx && epilogue_size_needed > 1)
13747 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
13748 epilogue_size_needed);
13749 if (jump_around_label)
13750 emit_label (jump_around_label);
13751 return 1;
13752 }
13753
13754 /* Helper function for memset. For a QImode value 0xXY produce
13755 0xXYXYXYXY of the width specified by MODE. This is essentially
13756 a * 0x01010101, but we can do slightly better than
13757 synth_mult by unwinding the sequence by hand on CPUs with
13758 slow multiply. */
13759 static rtx
13760 promote_duplicated_reg (enum machine_mode mode, rtx val)
13761 {
13762 enum machine_mode valmode = GET_MODE (val);
13763 rtx tmp;
13764 int nops = mode == DImode ? 3 : 2;
13765
13766 gcc_assert (mode == SImode || mode == DImode);
13767 if (val == const0_rtx)
13768 return copy_to_mode_reg (mode, const0_rtx);
13769 if (CONST_INT_P (val))
13770 {
13771 HOST_WIDE_INT v = INTVAL (val) & 255;
13772
13773 v |= v << 8;
13774 v |= v << 16;
13775 if (mode == DImode)
13776 v |= (v << 16) << 16;
13777 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
13778 }
13779
13780 if (valmode == VOIDmode)
13781 valmode = QImode;
13782 if (valmode != QImode)
13783 val = gen_lowpart (QImode, val);
13784 if (mode == QImode)
13785 return val;
13786 if (!TARGET_PARTIAL_REG_STALL)
13787 nops--;
13788 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
13789 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
13790 <= (ix86_cost->shift_const + ix86_cost->add) * nops
13791 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
13792 {
13793 rtx reg = convert_modes (mode, QImode, val, true);
13794 tmp = promote_duplicated_reg (mode, const1_rtx);
13795 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
13796 OPTAB_DIRECT);
13797 }
13798 else
13799 {
13800 rtx reg = convert_modes (mode, QImode, val, true);
13801
13802 if (!TARGET_PARTIAL_REG_STALL)
13803 if (mode == SImode)
13804 emit_insn (gen_movsi_insv_1 (reg, reg));
13805 else
13806 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
13807 else
13808 {
13809 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
13810 NULL, 1, OPTAB_DIRECT);
13811 reg =
13812 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13813 }
13814 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
13815 NULL, 1, OPTAB_DIRECT);
13816 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13817 if (mode == SImode)
13818 return reg;
13819 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
13820 NULL, 1, OPTAB_DIRECT);
13821 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13822 return reg;
13823 }
13824 }
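/* As an illustration (comment only), for a non-constant QImode value the
   shift-and-or variant above duplicates the byte roughly as

     reg = val;              (0x000000XY)
     reg |= reg << 8;        (0x0000XYXY)
     reg |= reg << 16;       (0xXYXYXYXY)
     reg |= reg << 32;       (0xXYXYXYXYXYXYXYXY, DImode only)

   where the first step uses an insv-style insertion when partial register
   stalls are not a concern, while constant values are simply folded above.  */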
13825
13826 /* Duplicate value VAL using promote_duplicated_reg into maximal size that will
13827 be needed by main loop copying SIZE_NEEDED chunks and prologue getting
13828 alignment from ALIGN to DESIRED_ALIGN. */
13829 static rtx
13830 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
13831 {
13832 rtx promoted_val;
13833
13834 if (TARGET_64BIT
13835 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
13836 promoted_val = promote_duplicated_reg (DImode, val);
13837 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
13838 promoted_val = promote_duplicated_reg (SImode, val);
13839 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
13840 promoted_val = promote_duplicated_reg (HImode, val);
13841 else
13842 promoted_val = val;
13843
13844 return promoted_val;
13845 }
13846
13847 /* Expand string set (memset) operation. Use i386 string operations when
13848 profitable. See the ix86_expand_movmem comment for an explanation of the
13849 individual steps performed. */
13850 int
13851 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
13852 rtx expected_align_exp, rtx expected_size_exp)
13853 {
13854 rtx destreg;
13855 rtx label = NULL;
13856 rtx tmp;
13857 rtx jump_around_label = NULL;
13858 HOST_WIDE_INT align = 1;
13859 unsigned HOST_WIDE_INT count = 0;
13860 HOST_WIDE_INT expected_size = -1;
13861 int size_needed = 0, epilogue_size_needed;
13862 int desired_align = 0;
13863 enum stringop_alg alg;
13864 rtx promoted_val = NULL;
13865 bool force_loopy_epilogue = false;
13866 int dynamic_check;
13867
13868 if (CONST_INT_P (align_exp))
13869 align = INTVAL (align_exp);
13870 /* i386 can do misaligned access at a reasonably increased cost. */
13871 if (CONST_INT_P (expected_align_exp)
13872 && INTVAL (expected_align_exp) > align)
13873 align = INTVAL (expected_align_exp);
13874 if (CONST_INT_P (count_exp))
13875 count = expected_size = INTVAL (count_exp);
13876 if (CONST_INT_P (expected_size_exp) && count == 0)
13877 expected_size = INTVAL (expected_size_exp);
13878
13879 /* Step 0: Decide on preferred algorithm, desired alignment and
13880 size of chunks to be copied by main loop. */
13881
13882 alg = decide_alg (count, expected_size, true, &dynamic_check);
13883 desired_align = decide_alignment (align, alg, expected_size);
13884
13885 if (!TARGET_ALIGN_STRINGOPS)
13886 align = desired_align;
13887
13888 if (alg == libcall)
13889 return 0;
13890 gcc_assert (alg != no_stringop);
13891 if (!count)
13892 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
13893 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
13894 switch (alg)
13895 {
13896 case libcall:
13897 case no_stringop:
13898 gcc_unreachable ();
13899 case loop:
13900 size_needed = GET_MODE_SIZE (Pmode);
13901 break;
13902 case unrolled_loop:
13903 size_needed = GET_MODE_SIZE (Pmode) * 4;
13904 break;
13905 case rep_prefix_8_byte:
13906 size_needed = 8;
13907 break;
13908 case rep_prefix_4_byte:
13909 size_needed = 4;
13910 break;
13911 case rep_prefix_1_byte:
13912 case loop_1_byte:
13913 size_needed = 1;
13914 break;
13915 }
13916 epilogue_size_needed = size_needed;
13917
13918 /* Step 1: Prologue guard. */
13919
13920 /* Alignment code needs count to be in register. */
13921 if (CONST_INT_P (count_exp) && desired_align > align)
13922 {
13923 enum machine_mode mode = SImode;
13924 if (TARGET_64BIT && (count & ~0xffffffff))
13925 mode = DImode;
13926 count_exp = force_reg (mode, count_exp);
13927 }
13928 /* Do the cheap promotion to allow better CSE across the
13929 main loop and epilogue (i.e. one load of the big constant in
13930 front of all the code). */
13931 if (CONST_INT_P (val_exp))
13932 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
13933 desired_align, align);
13934 /* Ensure that alignment prologue won't copy past end of block. */
13935 if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
13936 && !count)
13937 {
13938 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
13939
13940 /* Epilogue always copies COUNT_EXP & EPILOGUE_SIZE_NEEDED bytes.
13941 Make sure it is power of 2. */
13942 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
13943
13944 /* To improve performance of small blocks, we jump around the VAL-
13945 promoting code. This means that if the promoted VAL is not a constant,
13946 we might not use it in the epilogue and have to use the byte
13947 loop variant. */
13948 if (epilogue_size_needed > 2 && !promoted_val)
13949 force_loopy_epilogue = true;
13950 label = gen_label_rtx ();
13951 emit_cmp_and_jump_insns (count_exp,
13952 GEN_INT (epilogue_size_needed),
13953 LTU, 0, GET_MODE (count_exp), 1, label);
13954 if (expected_size == -1 || expected_size <= epilogue_size_needed)
13955 predict_jump (REG_BR_PROB_BASE * 60 / 100);
13956 else
13957 predict_jump (REG_BR_PROB_BASE * 20 / 100);
13958 }
13959 if (dynamic_check != -1)
13960 {
13961 rtx hot_label = gen_label_rtx ();
13962 jump_around_label = gen_label_rtx ();
13963 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
13964 LEU, 0, GET_MODE (count_exp), 1, hot_label);
13965 predict_jump (REG_BR_PROB_BASE * 90 / 100);
13966 set_storage_via_libcall (dst, count_exp, val_exp, false);
13967 emit_jump (jump_around_label);
13968 emit_label (hot_label);
13969 }
13970
13971 /* Step 2: Alignment prologue. */
13972
13973 /* Do the expensive promotion once we have branched off the small blocks. */
13974 if (!promoted_val)
13975 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
13976 desired_align, align);
13977 gcc_assert (desired_align >= 1 && align >= 1);
13978
13979 if (desired_align > align)
13980 {
13981 /* Except for the first move in the epilogue, we no longer know
13982 the constant offset in the aliasing info. It doesn't seem worth
13983 the pain to maintain it for the first move, so throw away
13984 the info early. */
13985 dst = change_address (dst, BLKmode, destreg);
13986 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
13987 desired_align);
13988 }
13989 if (label && size_needed == 1)
13990 {
13991 emit_label (label);
13992 LABEL_NUSES (label) = 1;
13993 label = NULL;
13994 }
13995
13996 /* Step 3: Main loop. */
13997
13998 switch (alg)
13999 {
14000 case libcall:
14001 case no_stringop:
14002 gcc_unreachable ();
14003 case loop_1_byte:
14004 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
14005 count_exp, QImode, 1, expected_size);
14006 break;
14007 case loop:
14008 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
14009 count_exp, Pmode, 1, expected_size);
14010 break;
14011 case unrolled_loop:
14012 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
14013 count_exp, Pmode, 4, expected_size);
14014 break;
14015 case rep_prefix_8_byte:
14016 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
14017 DImode);
14018 break;
14019 case rep_prefix_4_byte:
14020 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
14021 SImode);
14022 break;
14023 case rep_prefix_1_byte:
14024 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
14025 QImode);
14026 break;
14027 }
14028 /* Properly adjust the offset of the destination memory for aliasing. */
14029 if (CONST_INT_P (count_exp))
14030 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
14031 (count / size_needed) * size_needed);
14032 else
14033 dst = change_address (dst, BLKmode, destreg);
14034
14035 /* Step 4: Epilogue to copy the remaining bytes. */
14036
14037 if (label)
14038 {
14039 /* When the main loop is done, COUNT_EXP might hold original count,
14040 while we want to copy only COUNT_EXP & SIZE_NEEDED bytes.
14041 Epilogue code will actually copy COUNT_EXP & EPILOGUE_SIZE_NEEDED
14042 bytes. Compensate if needed. */
14043
14044 if (size_needed < desired_align - align)
14045 {
14046 tmp =
14047 expand_simple_binop (GET_MODE (count_exp), AND, count_exp,
14048 GEN_INT (size_needed - 1), count_exp, 1,
14049 OPTAB_DIRECT);
14050 size_needed = desired_align - align + 1;
14051 if (tmp != count_exp)
14052 emit_move_insn (count_exp, tmp);
14053 }
14054 emit_label (label);
14055 LABEL_NUSES (label) = 1;
14056 }
14057 if (count_exp != const0_rtx && epilogue_size_needed > 1)
14058 {
14059 if (force_loopy_epilogue)
14060 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
14061 size_needed);
14062 else
14063 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
14064 size_needed);
14065 }
14066 if (jump_around_label)
14067 emit_label (jump_around_label);
14068 return 1;
14069 }
14070
14071 /* Expand strlen. */
14072 int
14073 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
14074 {
14075 rtx addr, scratch1, scratch2, scratch3, scratch4;
14076
14077 /* The generic case of the strlen expander is long. Avoid expanding it
14078 unless TARGET_INLINE_ALL_STRINGOPS. */
14079
14080 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
14081 && !TARGET_INLINE_ALL_STRINGOPS
14082 && !optimize_size
14083 && (!CONST_INT_P (align) || INTVAL (align) < 4))
14084 return 0;
14085
14086 addr = force_reg (Pmode, XEXP (src, 0));
14087 scratch1 = gen_reg_rtx (Pmode);
14088
14089 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
14090 && !optimize_size)
14091 {
14092 /* Well, it seems that some optimizer does not combine a call like
14093 foo (strlen (bar), strlen (bar));
14094 when the move and the subtraction are done here. It does calculate
14095 the length just once when these instructions are done inside of
14096 output_strlen_unroll(). But I think that since &bar[strlen (bar)] is
14097 often used and I use one fewer register for the lifetime of
14098 output_strlen_unroll() this is better. */
14099
14100 emit_move_insn (out, addr);
14101
14102 ix86_expand_strlensi_unroll_1 (out, src, align);
14103
14104 /* strlensi_unroll_1 returns the address of the zero at the end of
14105 the string, like memchr(), so compute the length by subtracting
14106 the start address. */
14107 if (TARGET_64BIT)
14108 emit_insn (gen_subdi3 (out, out, addr));
14109 else
14110 emit_insn (gen_subsi3 (out, out, addr));
14111 }
14112 else
14113 {
14114 rtx unspec;
14115 scratch2 = gen_reg_rtx (Pmode);
14116 scratch3 = gen_reg_rtx (Pmode);
14117 scratch4 = force_reg (Pmode, constm1_rtx);
14118
14119 emit_move_insn (scratch3, addr);
14120 eoschar = force_reg (QImode, eoschar);
14121
14122 src = replace_equiv_address_nv (src, scratch3);
14123
14124 /* If .md starts supporting :P, this can be done in .md. */
14125 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
14126 scratch4), UNSPEC_SCAS);
14127 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
14128 if (TARGET_64BIT)
14129 {
14130 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
14131 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
14132 }
14133 else
14134 {
14135 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
14136 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
14137 }
14138 }
14139 return 1;
14140 }
14141
14142 /* Expand the appropriate insns for doing strlen if not just doing
14143 repnz; scasb
14144
14145 out = result, initialized with the start address
14146 align_rtx = alignment of the address.
14147 scratch = scratch register, initialized with the start address when
14148 not aligned, otherwise undefined
14149
14150 This is just the body. It needs the initializations mentioned above and
14151 some address computing at the end. These things are done in i386.md. */
14152
14153 static void
14154 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
14155 {
14156 int align;
14157 rtx tmp;
14158 rtx align_2_label = NULL_RTX;
14159 rtx align_3_label = NULL_RTX;
14160 rtx align_4_label = gen_label_rtx ();
14161 rtx end_0_label = gen_label_rtx ();
14162 rtx mem;
14163 rtx tmpreg = gen_reg_rtx (SImode);
14164 rtx scratch = gen_reg_rtx (SImode);
14165 rtx cmp;
14166
14167 align = 0;
14168 if (CONST_INT_P (align_rtx))
14169 align = INTVAL (align_rtx);
14170
14171 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
14172
14173 /* Is there a known alignment and is it less than 4? */
14174 if (align < 4)
14175 {
14176 rtx scratch1 = gen_reg_rtx (Pmode);
14177 emit_move_insn (scratch1, out);
14178 /* Is there a known alignment and is it not 2? */
14179 if (align != 2)
14180 {
14181 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
14182 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
14183
14184 /* Leave just the 3 lower bits. */
14185 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
14186 NULL_RTX, 0, OPTAB_WIDEN);
14187
14188 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
14189 Pmode, 1, align_4_label);
14190 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
14191 Pmode, 1, align_2_label);
14192 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
14193 Pmode, 1, align_3_label);
14194 }
14195 else
14196 {
14197 /* Since the alignment is 2, we have to check 2 or 0 bytes;
14198 check whether it is aligned to a 4-byte boundary. */
14199
14200 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
14201 NULL_RTX, 0, OPTAB_WIDEN);
14202
14203 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
14204 Pmode, 1, align_4_label);
14205 }
14206
14207 mem = change_address (src, QImode, out);
14208
14209 /* Now compare the bytes. */
14210
14211 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
14212 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
14213 QImode, 1, end_0_label);
14214
14215 /* Increment the address. */
14216 if (TARGET_64BIT)
14217 emit_insn (gen_adddi3 (out, out, const1_rtx));
14218 else
14219 emit_insn (gen_addsi3 (out, out, const1_rtx));
14220
14221 /* Not needed with an alignment of 2 */
14222 if (align != 2)
14223 {
14224 emit_label (align_2_label);
14225
14226 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
14227 end_0_label);
14228
14229 if (TARGET_64BIT)
14230 emit_insn (gen_adddi3 (out, out, const1_rtx));
14231 else
14232 emit_insn (gen_addsi3 (out, out, const1_rtx));
14233
14234 emit_label (align_3_label);
14235 }
14236
14237 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
14238 end_0_label);
14239
14240 if (TARGET_64BIT)
14241 emit_insn (gen_adddi3 (out, out, const1_rtx));
14242 else
14243 emit_insn (gen_addsi3 (out, out, const1_rtx));
14244 }
14245
14246 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
14247 align this loop; that only makes the program larger and does not help
14248 to speed it up. */
14249 emit_label (align_4_label);
14250
14251 mem = change_address (src, SImode, out);
14252 emit_move_insn (scratch, mem);
14253 if (TARGET_64BIT)
14254 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
14255 else
14256 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
14257
14258 /* This formula yields a nonzero result iff one of the bytes is zero.
14259 This saves three branches inside the loop and many cycles. */
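/* Concretely, the value computed below is

     (x - 0x01010101) & ~x & 0x80808080

   e.g. x = 0x61626364 (no zero byte) gives 0, while x = 0x61006261 (one
   zero byte) gives 0x00800000, the surviving 0x80 bit marking the zero
   byte.  */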
14260
14261 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
14262 emit_insn (gen_one_cmplsi2 (scratch, scratch));
14263 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
14264 emit_insn (gen_andsi3 (tmpreg, tmpreg,
14265 gen_int_mode (0x80808080, SImode)));
14266 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
14267 align_4_label);
14268
14269 if (TARGET_CMOVE)
14270 {
14271 rtx reg = gen_reg_rtx (SImode);
14272 rtx reg2 = gen_reg_rtx (Pmode);
14273 emit_move_insn (reg, tmpreg);
14274 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
14275
14276 /* If zero is not in the first two bytes, move two bytes forward. */
14277 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
14278 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14279 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
14280 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
14281 gen_rtx_IF_THEN_ELSE (SImode, tmp,
14282 reg,
14283 tmpreg)));
14284 /* Emit lea manually to avoid clobbering of flags. */
14285 emit_insn (gen_rtx_SET (SImode, reg2,
14286 gen_rtx_PLUS (Pmode, out, const2_rtx)));
14287
14288 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14289 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
14290 emit_insn (gen_rtx_SET (VOIDmode, out,
14291 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
14292 reg2,
14293 out)));
14294
14295 }
14296 else
14297 {
14298 rtx end_2_label = gen_label_rtx ();
14299 /* Is zero in the first two bytes? */
14300
14301 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
14302 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14303 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
14304 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
14305 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
14306 pc_rtx);
14307 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
14308 JUMP_LABEL (tmp) = end_2_label;
14309
14310 /* Not in the first two. Move two bytes forward. */
14311 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
14312 if (TARGET_64BIT)
14313 emit_insn (gen_adddi3 (out, out, const2_rtx));
14314 else
14315 emit_insn (gen_addsi3 (out, out, const2_rtx));
14316
14317 emit_label (end_2_label);
14318
14319 }
14320
14321 /* Avoid branch in fixing the byte. */
14322 tmpreg = gen_lowpart (QImode, tmpreg);
14323 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
14324 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
14325 if (TARGET_64BIT)
14326 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
14327 else
14328 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
14329
14330 emit_label (end_0_label);
14331 }
14332
14333 void
14334 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
14335 rtx callarg2 ATTRIBUTE_UNUSED,
14336 rtx pop, int sibcall)
14337 {
14338 rtx use = NULL, call;
14339
14340 if (pop == const0_rtx)
14341 pop = NULL;
14342 gcc_assert (!TARGET_64BIT || !pop);
14343
14344 if (TARGET_MACHO && !TARGET_64BIT)
14345 {
14346 #if TARGET_MACHO
14347 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
14348 fnaddr = machopic_indirect_call_target (fnaddr);
14349 #endif
14350 }
14351 else
14352 {
14353 /* Static functions and indirect calls don't need the pic register. */
14354 if (! TARGET_64BIT && flag_pic
14355 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
14356 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
14357 use_reg (&use, pic_offset_table_rtx);
14358 }
14359
14360 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
14361 {
14362 rtx al = gen_rtx_REG (QImode, 0);
14363 emit_move_insn (al, callarg2);
14364 use_reg (&use, al);
14365 }
14366
14367 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
14368 {
14369 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
14370 fnaddr = gen_rtx_MEM (QImode, fnaddr);
14371 }
14372 if (sibcall && TARGET_64BIT
14373 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
14374 {
14375 rtx addr;
14376 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
14377 fnaddr = gen_rtx_REG (Pmode, R11_REG);
14378 emit_move_insn (fnaddr, addr);
14379 fnaddr = gen_rtx_MEM (QImode, fnaddr);
14380 }
14381
14382 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
14383 if (retval)
14384 call = gen_rtx_SET (VOIDmode, retval, call);
14385 if (pop)
14386 {
14387 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
14388 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
14389 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
14390 }
14391
14392 call = emit_call_insn (call);
14393 if (use)
14394 CALL_INSN_FUNCTION_USAGE (call) = use;
14395 }
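/* Illustrative only: for a 32-bit callee-pop call such as a stdcall
   function returning int and taking 16 bytes of arguments, the RTX
   constructed above has roughly this shape (hard regs 0 and 7 being
   %eax and %esp):

     (parallel [(set (reg:SI 0)
                     (call (mem:QI (symbol_ref "foo")) (const_int 16)))
                (set (reg:SI 7)
                     (plus:SI (reg:SI 7) (const_int 16)))])

   The PARALLEL arm exists only when POP is non-null; otherwise just the
   SET (or the bare CALL when there is no return value) is emitted.  */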
14396
14397 \f
14398 /* Clear stack slot assignments remembered from previous functions.
14399 This is called from INIT_EXPANDERS once before RTL is emitted for each
14400 function. */
14401
14402 static struct machine_function *
14403 ix86_init_machine_status (void)
14404 {
14405 struct machine_function *f;
14406
14407 f = ggc_alloc_cleared (sizeof (struct machine_function));
14408 f->use_fast_prologue_epilogue_nregs = -1;
14409 f->tls_descriptor_call_expanded_p = 0;
14410
14411 return f;
14412 }
14413
14414 /* Return a MEM corresponding to a stack slot with mode MODE.
14415 Allocate a new slot if necessary.
14416
14417 The RTL for a function can have several slots available: N is
14418 which slot to use. */
14419
14420 rtx
14421 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
14422 {
14423 struct stack_local_entry *s;
14424
14425 gcc_assert (n < MAX_386_STACK_LOCALS);
14426
14427 for (s = ix86_stack_locals; s; s = s->next)
14428 if (s->mode == mode && s->n == n)
14429 return copy_rtx (s->rtl);
14430
14431 s = (struct stack_local_entry *)
14432 ggc_alloc (sizeof (struct stack_local_entry));
14433 s->n = n;
14434 s->mode = mode;
14435 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
14436
14437 s->next = ix86_stack_locals;
14438 ix86_stack_locals = s;
14439 return s->rtl;
14440 }
14441
14442 /* Construct the SYMBOL_REF for the tls_get_addr function. */
14443
14444 static GTY(()) rtx ix86_tls_symbol;
14445 rtx
14446 ix86_tls_get_addr (void)
14447 {
14448
14449 if (!ix86_tls_symbol)
14450 {
14451 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
14452 (TARGET_ANY_GNU_TLS
14453 && !TARGET_64BIT)
14454 ? "___tls_get_addr"
14455 : "__tls_get_addr");
14456 }
14457
14458 return ix86_tls_symbol;
14459 }
14460
14461 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
14462
14463 static GTY(()) rtx ix86_tls_module_base_symbol;
14464 rtx
14465 ix86_tls_module_base (void)
14466 {
14467
14468 if (!ix86_tls_module_base_symbol)
14469 {
14470 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
14471 "_TLS_MODULE_BASE_");
14472 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
14473 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
14474 }
14475
14476 return ix86_tls_module_base_symbol;
14477 }
14478 \f
14479 /* Calculate the length of the memory address in the instruction
14480 encoding. Does not include the one-byte modrm, opcode, or prefix. */
14481
14482 int
14483 memory_address_length (rtx addr)
14484 {
14485 struct ix86_address parts;
14486 rtx base, index, disp;
14487 int len;
14488 int ok;
14489
14490 if (GET_CODE (addr) == PRE_DEC
14491 || GET_CODE (addr) == POST_INC
14492 || GET_CODE (addr) == PRE_MODIFY
14493 || GET_CODE (addr) == POST_MODIFY)
14494 return 0;
14495
14496 ok = ix86_decompose_address (addr, &parts);
14497 gcc_assert (ok);
14498
14499 if (parts.base && GET_CODE (parts.base) == SUBREG)
14500 parts.base = SUBREG_REG (parts.base);
14501 if (parts.index && GET_CODE (parts.index) == SUBREG)
14502 parts.index = SUBREG_REG (parts.index);
14503
14504 base = parts.base;
14505 index = parts.index;
14506 disp = parts.disp;
14507 len = 0;
14508
14509 /* Rule of thumb:
14510 - esp as the base always wants an index,
14511 - ebp as the base always wants a displacement. */
14512
14513 /* Register Indirect. */
14514 if (base && !index && !disp)
14515 {
14516 /* esp (for its index) and ebp (for its displacement) need
14517 the two-byte modrm form. */
14518 if (addr == stack_pointer_rtx
14519 || addr == arg_pointer_rtx
14520 || addr == frame_pointer_rtx
14521 || addr == hard_frame_pointer_rtx)
14522 len = 1;
14523 }
14524
14525 /* Direct Addressing. */
14526 else if (disp && !base && !index)
14527 len = 4;
14528
14529 else
14530 {
14531 /* Find the length of the displacement constant. */
14532 if (disp)
14533 {
14534 if (base && satisfies_constraint_K (disp))
14535 len = 1;
14536 else
14537 len = 4;
14538 }
14539 /* ebp always wants a displacement. */
14540 else if (base == hard_frame_pointer_rtx)
14541 len = 1;
14542
14543 /* An index requires the two-byte modrm form.... */
14544 if (index
14545 /* ...like esp, which always wants an index. */
14546 || base == stack_pointer_rtx
14547 || base == arg_pointer_rtx
14548 || base == frame_pointer_rtx)
14549 len += 1;
14550 }
14551
14552 return len;
14553 }
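/* A few worked examples of the value computed above (bytes in addition
   to the mandatory modrm byte, opcode and prefixes), assuming 32-bit
   addressing:

     (%eax)              -> 0   plain register indirect
     (%esp)              -> 1   esp as a base forces the SIB byte
     8(%ebp)             -> 1   ebp as a base forces a disp8
     foo                 -> 4   direct addressing, disp32 only
     12(%eax,%ecx,2)     -> 2   disp8 plus the SIB byte
     foo(%eax,%ecx,2)    -> 5   disp32 plus the SIB byte  */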
14554
14555 /* Compute the default value for the "length_immediate" attribute. When SHORTFORM
14556 is set, expect that the insn has an 8-bit immediate alternative. */
14557 int
14558 ix86_attr_length_immediate_default (rtx insn, int shortform)
14559 {
14560 int len = 0;
14561 int i;
14562 extract_insn_cached (insn);
14563 for (i = recog_data.n_operands - 1; i >= 0; --i)
14564 if (CONSTANT_P (recog_data.operand[i]))
14565 {
14566 gcc_assert (!len);
14567 if (shortform && satisfies_constraint_K (recog_data.operand[i]))
14568 len = 1;
14569 else
14570 {
14571 switch (get_attr_mode (insn))
14572 {
14573 case MODE_QI:
14574 len += 1;
14575 break;
14576 case MODE_HI:
14577 len += 2;
14578 break;
14579 case MODE_SI:
14580 len += 4;
14581 break;
14582 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
14583 case MODE_DI:
14584 len += 4;
14585 break;
14586 default:
14587 fatal_insn ("unknown insn mode", insn);
14588 }
14589 }
14590 }
14591 return len;
14592 }
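/* For illustration: with SHORTFORM set, "addl $8, %eax" has an immediate
   that satisfies constraint K (signed 8-bit) and contributes 1 byte,
   while "addl $1000, %eax" contributes 4.  A DImode "addq $1000, %rax"
   also contributes 4, as such immediates are encoded as 32-bit
   sign-extended values.  */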
14593 /* Compute default value for "length_address" attribute. */
14594 int
14595 ix86_attr_length_address_default (rtx insn)
14596 {
14597 int i;
14598
14599 if (get_attr_type (insn) == TYPE_LEA)
14600 {
14601 rtx set = PATTERN (insn);
14602
14603 if (GET_CODE (set) == PARALLEL)
14604 set = XVECEXP (set, 0, 0);
14605
14606 gcc_assert (GET_CODE (set) == SET);
14607
14608 return memory_address_length (SET_SRC (set));
14609 }
14610
14611 extract_insn_cached (insn);
14612 for (i = recog_data.n_operands - 1; i >= 0; --i)
14613 if (MEM_P (recog_data.operand[i]))
14614 {
14615 return memory_address_length (XEXP (recog_data.operand[i], 0));
14617 }
14618 return 0;
14619 }
14620 \f
14621 /* Return the maximum number of instructions a cpu can issue. */
14622
14623 static int
14624 ix86_issue_rate (void)
14625 {
14626 switch (ix86_tune)
14627 {
14628 case PROCESSOR_PENTIUM:
14629 case PROCESSOR_K6:
14630 return 2;
14631
14632 case PROCESSOR_PENTIUMPRO:
14633 case PROCESSOR_PENTIUM4:
14634 case PROCESSOR_ATHLON:
14635 case PROCESSOR_K8:
14636 case PROCESSOR_NOCONA:
14637 case PROCESSOR_GENERIC32:
14638 case PROCESSOR_GENERIC64:
14639 return 3;
14640
14641 case PROCESSOR_CORE2:
14642 return 4;
14643
14644 default:
14645 return 1;
14646 }
14647 }
14648
14649 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads the flags set
14650 by DEP_INSN and no other register set by DEP_INSN. */
14651
14652 static int
14653 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
14654 {
14655 rtx set, set2;
14656
14657 /* Simplify the test for uninteresting insns. */
14658 if (insn_type != TYPE_SETCC
14659 && insn_type != TYPE_ICMOV
14660 && insn_type != TYPE_FCMOV
14661 && insn_type != TYPE_IBR)
14662 return 0;
14663
14664 if ((set = single_set (dep_insn)) != 0)
14665 {
14666 set = SET_DEST (set);
14667 set2 = NULL_RTX;
14668 }
14669 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
14670 && XVECLEN (PATTERN (dep_insn), 0) == 2
14671 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
14672 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
14673 {
14674 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
14675 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
14676 }
14677 else
14678 return 0;
14679
14680 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
14681 return 0;
14682
14683 /* This test is true if the dependent insn reads the flags but
14684 not any other potentially set register. */
14685 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
14686 return 0;
14687
14688 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
14689 return 0;
14690
14691 return 1;
14692 }
14693
14694 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
14695 address with operands set by DEP_INSN. */
14696
14697 static int
14698 ix86_agi_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
14699 {
14700 rtx addr;
14701
14702 if (insn_type == TYPE_LEA
14703 && TARGET_PENTIUM)
14704 {
14705 addr = PATTERN (insn);
14706
14707 if (GET_CODE (addr) == PARALLEL)
14708 addr = XVECEXP (addr, 0, 0);
14709
14710 gcc_assert (GET_CODE (addr) == SET);
14711
14712 addr = SET_SRC (addr);
14713 }
14714 else
14715 {
14716 int i;
14717 extract_insn_cached (insn);
14718 for (i = recog_data.n_operands - 1; i >= 0; --i)
14719 if (MEM_P (recog_data.operand[i]))
14720 {
14721 addr = XEXP (recog_data.operand[i], 0);
14722 goto found;
14723 }
14724 return 0;
14725 found:;
14726 }
14727
14728 return modified_in_p (addr, dep_insn);
14729 }
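/* For example, on the Pentium a "movl 4(%eax), %edx" that immediately
   follows an insn writing %eax suffers an address-generation interlock;
   the predicate above returns nonzero for exactly that situation, and
   ix86_adjust_cost below then adds a cycle of latency.  */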
14730
14731 static int
14732 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
14733 {
14734 enum attr_type insn_type, dep_insn_type;
14735 enum attr_memory memory;
14736 rtx set, set2;
14737 int dep_insn_code_number;
14738
14739 /* Anti and output dependencies have zero cost on all CPUs. */
14740 if (REG_NOTE_KIND (link) != 0)
14741 return 0;
14742
14743 dep_insn_code_number = recog_memoized (dep_insn);
14744
14745 /* If we can't recognize the insns, we can't really do anything. */
14746 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
14747 return cost;
14748
14749 insn_type = get_attr_type (insn);
14750 dep_insn_type = get_attr_type (dep_insn);
14751
14752 switch (ix86_tune)
14753 {
14754 case PROCESSOR_PENTIUM:
14755 /* Address Generation Interlock adds a cycle of latency. */
14756 if (ix86_agi_dependent (insn, dep_insn, insn_type))
14757 cost += 1;
14758
14759 /* ??? Compares pair with jump/setcc. */
14760 if (ix86_flags_dependent (insn, dep_insn, insn_type))
14761 cost = 0;
14762
14763 /* Floating point stores require the value to be ready one cycle earlier. */
14764 if (insn_type == TYPE_FMOV
14765 && get_attr_memory (insn) == MEMORY_STORE
14766 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14767 cost += 1;
14768 break;
14769
14770 case PROCESSOR_PENTIUMPRO:
14771 memory = get_attr_memory (insn);
14772
14773 /* INT->FP conversion is expensive. */
14774 if (get_attr_fp_int_src (dep_insn))
14775 cost += 5;
14776
14777 /* There is one cycle extra latency between an FP op and a store. */
14778 if (insn_type == TYPE_FMOV
14779 && (set = single_set (dep_insn)) != NULL_RTX
14780 && (set2 = single_set (insn)) != NULL_RTX
14781 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
14782 && MEM_P (SET_DEST (set2)))
14783 cost += 1;
14784
14785 /* Show the ability of the reorder buffer to hide the latency of a load by
14786 executing it in parallel with the previous instruction, when the previous
14787 instruction is not needed to compute the address. */
14788 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14789 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14790 {
14791 /* Claim that moves take one cycle, as the core can issue one load
14792 at a time and the next load can start a cycle later. */
14793 if (dep_insn_type == TYPE_IMOV
14794 || dep_insn_type == TYPE_FMOV)
14795 cost = 1;
14796 else if (cost > 1)
14797 cost--;
14798 }
14799 break;
14800
14801 case PROCESSOR_K6:
14802 memory = get_attr_memory (insn);
14803
14804 /* The esp dependency is resolved before the instruction is really
14805 finished. */
14806 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
14807 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
14808 return 1;
14809
14810 /* INT->FP conversion is expensive. */
14811 if (get_attr_fp_int_src (dep_insn))
14812 cost += 5;
14813
14814 /* Show the ability of the reorder buffer to hide the latency of a load by
14815 executing it in parallel with the previous instruction, when the previous
14816 instruction is not needed to compute the address. */
14817 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14818 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14819 {
14820 /* Claim that moves take one cycle, as the core can issue one load
14821 at a time and the next load can start a cycle later. */
14822 if (dep_insn_type == TYPE_IMOV
14823 || dep_insn_type == TYPE_FMOV)
14824 cost = 1;
14825 else if (cost > 2)
14826 cost -= 2;
14827 else
14828 cost = 1;
14829 }
14830 break;
14831
14832 case PROCESSOR_ATHLON:
14833 case PROCESSOR_K8:
14834 case PROCESSOR_GENERIC32:
14835 case PROCESSOR_GENERIC64:
14836 memory = get_attr_memory (insn);
14837
14838 /* Show the ability of the reorder buffer to hide the latency of a load by
14839 executing it in parallel with the previous instruction, when the previous
14840 instruction is not needed to compute the address. */
14841 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14842 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14843 {
14844 enum attr_unit unit = get_attr_unit (insn);
14845 int loadcost = 3;
14846
14847 /* Because of the difference between the length of integer and
14848 floating unit pipeline preparation stages, the memory operands
14849 for floating point are cheaper.
14850
14851 ??? For Athlon the difference is most probably 2. */
14852 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
14853 loadcost = 3;
14854 else
14855 loadcost = TARGET_ATHLON ? 2 : 0;
14856
14857 if (cost >= loadcost)
14858 cost -= loadcost;
14859 else
14860 cost = 0;
14861 }
14862
14863 default:
14864 break;
14865 }
14866
14867 return cost;
14868 }
14869
14870 /* How many alternative schedules to try. This should be as wide as the
14871 scheduling freedom in the DFA, but no wider. Making this value too
14872 large results in extra work for the scheduler. */
14873
14874 static int
14875 ia32_multipass_dfa_lookahead (void)
14876 {
14877 if (ix86_tune == PROCESSOR_PENTIUM)
14878 return 2;
14879
14880 if (ix86_tune == PROCESSOR_PENTIUMPRO
14881 || ix86_tune == PROCESSOR_K6)
14882 return 1;
14883
14884 else
14885 return 0;
14886 }
14887
14888 \f
14889 /* Compute the alignment given to a constant that is being placed in memory.
14890 EXP is the constant and ALIGN is the alignment that the object would
14891 ordinarily have.
14892 The value of this function is used instead of that alignment to align
14893 the object. */
14894
14895 int
14896 ix86_constant_alignment (tree exp, int align)
14897 {
14898 if (TREE_CODE (exp) == REAL_CST)
14899 {
14900 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
14901 return 64;
14902 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
14903 return 128;
14904 }
14905 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
14906 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
14907 return BITS_PER_WORD;
14908
14909 return align;
14910 }
14911
14912 /* Compute the alignment for a static variable.
14913 TYPE is the data type, and ALIGN is the alignment that
14914 the object would ordinarily have. The value of this function is used
14915 instead of that alignment to align the object. */
14916
14917 int
14918 ix86_data_alignment (tree type, int align)
14919 {
14920 int max_align = optimize_size ? BITS_PER_WORD : 256;
14921
14922 if (AGGREGATE_TYPE_P (type)
14923 && TYPE_SIZE (type)
14924 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
14925 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
14926 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
14927 && align < max_align)
14928 align = max_align;
14929
14930 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
14931 to a 16-byte boundary. */
14932 if (TARGET_64BIT)
14933 {
14934 if (AGGREGATE_TYPE_P (type)
14935 && TYPE_SIZE (type)
14936 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
14937 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
14938 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
14939 return 128;
14940 }
14941
14942 if (TREE_CODE (type) == ARRAY_TYPE)
14943 {
14944 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
14945 return 64;
14946 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
14947 return 128;
14948 }
14949 else if (TREE_CODE (type) == COMPLEX_TYPE)
14950 {
14951
14952 if (TYPE_MODE (type) == DCmode && align < 64)
14953 return 64;
14954 if (TYPE_MODE (type) == XCmode && align < 128)
14955 return 128;
14956 }
14957 else if ((TREE_CODE (type) == RECORD_TYPE
14958 || TREE_CODE (type) == UNION_TYPE
14959 || TREE_CODE (type) == QUAL_UNION_TYPE)
14960 && TYPE_FIELDS (type))
14961 {
14962 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
14963 return 64;
14964 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
14965 return 128;
14966 }
14967 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
14968 || TREE_CODE (type) == INTEGER_TYPE)
14969 {
14970 if (TYPE_MODE (type) == DFmode && align < 64)
14971 return 64;
14972 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
14973 return 128;
14974 }
14975
14976 return align;
14977 }
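/* Illustration of the rules above (alignments are in bits): a global
   "double d[8]" is a 512-bit aggregate, so when not optimizing for size
   its alignment is raised to 256; on x86-64 any aggregate of at least
   128 bits is raised to at least 128, as the psABI requires; and a lone
   "double" is raised to at least 64 so it can be loaded efficiently.  */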
14978
14979 /* Compute the alignment for a local variable.
14980 TYPE is the data type, and ALIGN is the alignment that
14981 the object would ordinarily have. The value of this macro is used
14982 instead of that alignment to align the object. */
14983
14984 int
14985 ix86_local_alignment (tree type, int align)
14986 {
14987 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
14988 to a 16-byte boundary. */
14989 if (TARGET_64BIT)
14990 {
14991 if (AGGREGATE_TYPE_P (type)
14992 && TYPE_SIZE (type)
14993 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
14994 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
14995 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
14996 return 128;
14997 }
14998 if (TREE_CODE (type) == ARRAY_TYPE)
14999 {
15000 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
15001 return 64;
15002 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
15003 return 128;
15004 }
15005 else if (TREE_CODE (type) == COMPLEX_TYPE)
15006 {
15007 if (TYPE_MODE (type) == DCmode && align < 64)
15008 return 64;
15009 if (TYPE_MODE (type) == XCmode && align < 128)
15010 return 128;
15011 }
15012 else if ((TREE_CODE (type) == RECORD_TYPE
15013 || TREE_CODE (type) == UNION_TYPE
15014 || TREE_CODE (type) == QUAL_UNION_TYPE)
15015 && TYPE_FIELDS (type))
15016 {
15017 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
15018 return 64;
15019 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
15020 return 128;
15021 }
15022 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
15023 || TREE_CODE (type) == INTEGER_TYPE)
15024 {
15025
15026 if (TYPE_MODE (type) == DFmode && align < 64)
15027 return 64;
15028 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
15029 return 128;
15030 }
15031 return align;
15032 }
15033 \f
15034 /* Emit RTL insns to initialize the variable parts of a trampoline.
15035 FNADDR is an RTX for the address of the function's pure code.
15036 CXT is an RTX for the static chain value for the function. */
15037 void
15038 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
15039 {
15040 if (!TARGET_64BIT)
15041 {
15042 /* Compute offset from the end of the jmp to the target function. */
15043 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
15044 plus_constant (tramp, 10),
15045 NULL_RTX, 1, OPTAB_DIRECT);
15046 emit_move_insn (gen_rtx_MEM (QImode, tramp),
15047 gen_int_mode (0xb9, QImode));
15048 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
15049 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
15050 gen_int_mode (0xe9, QImode));
15051 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
15052 }
15053 else
15054 {
15055 int offset = 0;
15056 /* Try to load the address using the shorter movl instead of movabs.
15057 We may want to support movq for kernel mode, but the kernel does not use
15058 trampolines at the moment. */
15059 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
15060 {
15061 fnaddr = copy_to_mode_reg (DImode, fnaddr);
15062 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15063 gen_int_mode (0xbb41, HImode));
15064 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
15065 gen_lowpart (SImode, fnaddr));
15066 offset += 6;
15067 }
15068 else
15069 {
15070 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15071 gen_int_mode (0xbb49, HImode));
15072 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
15073 fnaddr);
15074 offset += 10;
15075 }
15076 /* Load static chain using movabs to r10. */
15077 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15078 gen_int_mode (0xba49, HImode));
15079 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
15080 cxt);
15081 offset += 10;
15082 /* Jump to r11. */
15083 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15084 gen_int_mode (0xff49, HImode));
15085 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
15086 gen_int_mode (0xe3, QImode));
15087 offset += 3;
15088 gcc_assert (offset <= TRAMPOLINE_SIZE);
15089 }
15090
15091 #ifdef ENABLE_EXECUTE_STACK
15092 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
15093 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
15094 #endif
15095 }
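/* For reference, the trampoline emitted above disassembles roughly as
   follows.  ia32 (10 bytes; %ecx is the static chain register):

       b9 <cxt:4>          movl   $cxt, %ecx
       e9 <disp:4>         jmp    fnaddr          ; rel32 from tramp+10

   x86-64 (23 bytes, or 19 when the function address fits in 32 bits
   and the first insn shrinks to a movl):

       49 bb <fnaddr:8>    movabs $fnaddr, %r11
       49 ba <cxt:8>       movabs $cxt, %r10      ; static chain
       49 ff e3            jmp    *%r11  */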
15096 \f
15097 /* Codes for all the SSE/MMX builtins. */
15098 enum ix86_builtins
15099 {
15100 IX86_BUILTIN_ADDPS,
15101 IX86_BUILTIN_ADDSS,
15102 IX86_BUILTIN_DIVPS,
15103 IX86_BUILTIN_DIVSS,
15104 IX86_BUILTIN_MULPS,
15105 IX86_BUILTIN_MULSS,
15106 IX86_BUILTIN_SUBPS,
15107 IX86_BUILTIN_SUBSS,
15108
15109 IX86_BUILTIN_CMPEQPS,
15110 IX86_BUILTIN_CMPLTPS,
15111 IX86_BUILTIN_CMPLEPS,
15112 IX86_BUILTIN_CMPGTPS,
15113 IX86_BUILTIN_CMPGEPS,
15114 IX86_BUILTIN_CMPNEQPS,
15115 IX86_BUILTIN_CMPNLTPS,
15116 IX86_BUILTIN_CMPNLEPS,
15117 IX86_BUILTIN_CMPNGTPS,
15118 IX86_BUILTIN_CMPNGEPS,
15119 IX86_BUILTIN_CMPORDPS,
15120 IX86_BUILTIN_CMPUNORDPS,
15121 IX86_BUILTIN_CMPEQSS,
15122 IX86_BUILTIN_CMPLTSS,
15123 IX86_BUILTIN_CMPLESS,
15124 IX86_BUILTIN_CMPNEQSS,
15125 IX86_BUILTIN_CMPNLTSS,
15126 IX86_BUILTIN_CMPNLESS,
15127 IX86_BUILTIN_CMPNGTSS,
15128 IX86_BUILTIN_CMPNGESS,
15129 IX86_BUILTIN_CMPORDSS,
15130 IX86_BUILTIN_CMPUNORDSS,
15131
15132 IX86_BUILTIN_COMIEQSS,
15133 IX86_BUILTIN_COMILTSS,
15134 IX86_BUILTIN_COMILESS,
15135 IX86_BUILTIN_COMIGTSS,
15136 IX86_BUILTIN_COMIGESS,
15137 IX86_BUILTIN_COMINEQSS,
15138 IX86_BUILTIN_UCOMIEQSS,
15139 IX86_BUILTIN_UCOMILTSS,
15140 IX86_BUILTIN_UCOMILESS,
15141 IX86_BUILTIN_UCOMIGTSS,
15142 IX86_BUILTIN_UCOMIGESS,
15143 IX86_BUILTIN_UCOMINEQSS,
15144
15145 IX86_BUILTIN_CVTPI2PS,
15146 IX86_BUILTIN_CVTPS2PI,
15147 IX86_BUILTIN_CVTSI2SS,
15148 IX86_BUILTIN_CVTSI642SS,
15149 IX86_BUILTIN_CVTSS2SI,
15150 IX86_BUILTIN_CVTSS2SI64,
15151 IX86_BUILTIN_CVTTPS2PI,
15152 IX86_BUILTIN_CVTTSS2SI,
15153 IX86_BUILTIN_CVTTSS2SI64,
15154
15155 IX86_BUILTIN_MAXPS,
15156 IX86_BUILTIN_MAXSS,
15157 IX86_BUILTIN_MINPS,
15158 IX86_BUILTIN_MINSS,
15159
15160 IX86_BUILTIN_LOADUPS,
15161 IX86_BUILTIN_STOREUPS,
15162 IX86_BUILTIN_MOVSS,
15163
15164 IX86_BUILTIN_MOVHLPS,
15165 IX86_BUILTIN_MOVLHPS,
15166 IX86_BUILTIN_LOADHPS,
15167 IX86_BUILTIN_LOADLPS,
15168 IX86_BUILTIN_STOREHPS,
15169 IX86_BUILTIN_STORELPS,
15170
15171 IX86_BUILTIN_MASKMOVQ,
15172 IX86_BUILTIN_MOVMSKPS,
15173 IX86_BUILTIN_PMOVMSKB,
15174
15175 IX86_BUILTIN_MOVNTPS,
15176 IX86_BUILTIN_MOVNTQ,
15177
15178 IX86_BUILTIN_LOADDQU,
15179 IX86_BUILTIN_STOREDQU,
15180
15181 IX86_BUILTIN_PACKSSWB,
15182 IX86_BUILTIN_PACKSSDW,
15183 IX86_BUILTIN_PACKUSWB,
15184
15185 IX86_BUILTIN_PADDB,
15186 IX86_BUILTIN_PADDW,
15187 IX86_BUILTIN_PADDD,
15188 IX86_BUILTIN_PADDQ,
15189 IX86_BUILTIN_PADDSB,
15190 IX86_BUILTIN_PADDSW,
15191 IX86_BUILTIN_PADDUSB,
15192 IX86_BUILTIN_PADDUSW,
15193 IX86_BUILTIN_PSUBB,
15194 IX86_BUILTIN_PSUBW,
15195 IX86_BUILTIN_PSUBD,
15196 IX86_BUILTIN_PSUBQ,
15197 IX86_BUILTIN_PSUBSB,
15198 IX86_BUILTIN_PSUBSW,
15199 IX86_BUILTIN_PSUBUSB,
15200 IX86_BUILTIN_PSUBUSW,
15201
15202 IX86_BUILTIN_PAND,
15203 IX86_BUILTIN_PANDN,
15204 IX86_BUILTIN_POR,
15205 IX86_BUILTIN_PXOR,
15206
15207 IX86_BUILTIN_PAVGB,
15208 IX86_BUILTIN_PAVGW,
15209
15210 IX86_BUILTIN_PCMPEQB,
15211 IX86_BUILTIN_PCMPEQW,
15212 IX86_BUILTIN_PCMPEQD,
15213 IX86_BUILTIN_PCMPGTB,
15214 IX86_BUILTIN_PCMPGTW,
15215 IX86_BUILTIN_PCMPGTD,
15216
15217 IX86_BUILTIN_PMADDWD,
15218
15219 IX86_BUILTIN_PMAXSW,
15220 IX86_BUILTIN_PMAXUB,
15221 IX86_BUILTIN_PMINSW,
15222 IX86_BUILTIN_PMINUB,
15223
15224 IX86_BUILTIN_PMULHUW,
15225 IX86_BUILTIN_PMULHW,
15226 IX86_BUILTIN_PMULLW,
15227
15228 IX86_BUILTIN_PSADBW,
15229 IX86_BUILTIN_PSHUFW,
15230
15231 IX86_BUILTIN_PSLLW,
15232 IX86_BUILTIN_PSLLD,
15233 IX86_BUILTIN_PSLLQ,
15234 IX86_BUILTIN_PSRAW,
15235 IX86_BUILTIN_PSRAD,
15236 IX86_BUILTIN_PSRLW,
15237 IX86_BUILTIN_PSRLD,
15238 IX86_BUILTIN_PSRLQ,
15239 IX86_BUILTIN_PSLLWI,
15240 IX86_BUILTIN_PSLLDI,
15241 IX86_BUILTIN_PSLLQI,
15242 IX86_BUILTIN_PSRAWI,
15243 IX86_BUILTIN_PSRADI,
15244 IX86_BUILTIN_PSRLWI,
15245 IX86_BUILTIN_PSRLDI,
15246 IX86_BUILTIN_PSRLQI,
15247
15248 IX86_BUILTIN_PUNPCKHBW,
15249 IX86_BUILTIN_PUNPCKHWD,
15250 IX86_BUILTIN_PUNPCKHDQ,
15251 IX86_BUILTIN_PUNPCKLBW,
15252 IX86_BUILTIN_PUNPCKLWD,
15253 IX86_BUILTIN_PUNPCKLDQ,
15254
15255 IX86_BUILTIN_SHUFPS,
15256
15257 IX86_BUILTIN_RCPPS,
15258 IX86_BUILTIN_RCPSS,
15259 IX86_BUILTIN_RSQRTPS,
15260 IX86_BUILTIN_RSQRTSS,
15261 IX86_BUILTIN_SQRTPS,
15262 IX86_BUILTIN_SQRTSS,
15263
15264 IX86_BUILTIN_UNPCKHPS,
15265 IX86_BUILTIN_UNPCKLPS,
15266
15267 IX86_BUILTIN_ANDPS,
15268 IX86_BUILTIN_ANDNPS,
15269 IX86_BUILTIN_ORPS,
15270 IX86_BUILTIN_XORPS,
15271
15272 IX86_BUILTIN_EMMS,
15273 IX86_BUILTIN_LDMXCSR,
15274 IX86_BUILTIN_STMXCSR,
15275 IX86_BUILTIN_SFENCE,
15276
15277 /* 3DNow! Original */
15278 IX86_BUILTIN_FEMMS,
15279 IX86_BUILTIN_PAVGUSB,
15280 IX86_BUILTIN_PF2ID,
15281 IX86_BUILTIN_PFACC,
15282 IX86_BUILTIN_PFADD,
15283 IX86_BUILTIN_PFCMPEQ,
15284 IX86_BUILTIN_PFCMPGE,
15285 IX86_BUILTIN_PFCMPGT,
15286 IX86_BUILTIN_PFMAX,
15287 IX86_BUILTIN_PFMIN,
15288 IX86_BUILTIN_PFMUL,
15289 IX86_BUILTIN_PFRCP,
15290 IX86_BUILTIN_PFRCPIT1,
15291 IX86_BUILTIN_PFRCPIT2,
15292 IX86_BUILTIN_PFRSQIT1,
15293 IX86_BUILTIN_PFRSQRT,
15294 IX86_BUILTIN_PFSUB,
15295 IX86_BUILTIN_PFSUBR,
15296 IX86_BUILTIN_PI2FD,
15297 IX86_BUILTIN_PMULHRW,
15298
15299 /* 3DNow! Athlon Extensions */
15300 IX86_BUILTIN_PF2IW,
15301 IX86_BUILTIN_PFNACC,
15302 IX86_BUILTIN_PFPNACC,
15303 IX86_BUILTIN_PI2FW,
15304 IX86_BUILTIN_PSWAPDSI,
15305 IX86_BUILTIN_PSWAPDSF,
15306
15307 /* SSE2 */
15308 IX86_BUILTIN_ADDPD,
15309 IX86_BUILTIN_ADDSD,
15310 IX86_BUILTIN_DIVPD,
15311 IX86_BUILTIN_DIVSD,
15312 IX86_BUILTIN_MULPD,
15313 IX86_BUILTIN_MULSD,
15314 IX86_BUILTIN_SUBPD,
15315 IX86_BUILTIN_SUBSD,
15316
15317 IX86_BUILTIN_CMPEQPD,
15318 IX86_BUILTIN_CMPLTPD,
15319 IX86_BUILTIN_CMPLEPD,
15320 IX86_BUILTIN_CMPGTPD,
15321 IX86_BUILTIN_CMPGEPD,
15322 IX86_BUILTIN_CMPNEQPD,
15323 IX86_BUILTIN_CMPNLTPD,
15324 IX86_BUILTIN_CMPNLEPD,
15325 IX86_BUILTIN_CMPNGTPD,
15326 IX86_BUILTIN_CMPNGEPD,
15327 IX86_BUILTIN_CMPORDPD,
15328 IX86_BUILTIN_CMPUNORDPD,
15329 IX86_BUILTIN_CMPNEPD,
15330 IX86_BUILTIN_CMPEQSD,
15331 IX86_BUILTIN_CMPLTSD,
15332 IX86_BUILTIN_CMPLESD,
15333 IX86_BUILTIN_CMPNEQSD,
15334 IX86_BUILTIN_CMPNLTSD,
15335 IX86_BUILTIN_CMPNLESD,
15336 IX86_BUILTIN_CMPORDSD,
15337 IX86_BUILTIN_CMPUNORDSD,
15338 IX86_BUILTIN_CMPNESD,
15339
15340 IX86_BUILTIN_COMIEQSD,
15341 IX86_BUILTIN_COMILTSD,
15342 IX86_BUILTIN_COMILESD,
15343 IX86_BUILTIN_COMIGTSD,
15344 IX86_BUILTIN_COMIGESD,
15345 IX86_BUILTIN_COMINEQSD,
15346 IX86_BUILTIN_UCOMIEQSD,
15347 IX86_BUILTIN_UCOMILTSD,
15348 IX86_BUILTIN_UCOMILESD,
15349 IX86_BUILTIN_UCOMIGTSD,
15350 IX86_BUILTIN_UCOMIGESD,
15351 IX86_BUILTIN_UCOMINEQSD,
15352
15353 IX86_BUILTIN_MAXPD,
15354 IX86_BUILTIN_MAXSD,
15355 IX86_BUILTIN_MINPD,
15356 IX86_BUILTIN_MINSD,
15357
15358 IX86_BUILTIN_ANDPD,
15359 IX86_BUILTIN_ANDNPD,
15360 IX86_BUILTIN_ORPD,
15361 IX86_BUILTIN_XORPD,
15362
15363 IX86_BUILTIN_SQRTPD,
15364 IX86_BUILTIN_SQRTSD,
15365
15366 IX86_BUILTIN_UNPCKHPD,
15367 IX86_BUILTIN_UNPCKLPD,
15368
15369 IX86_BUILTIN_SHUFPD,
15370
15371 IX86_BUILTIN_LOADUPD,
15372 IX86_BUILTIN_STOREUPD,
15373 IX86_BUILTIN_MOVSD,
15374
15375 IX86_BUILTIN_LOADHPD,
15376 IX86_BUILTIN_LOADLPD,
15377
15378 IX86_BUILTIN_CVTDQ2PD,
15379 IX86_BUILTIN_CVTDQ2PS,
15380
15381 IX86_BUILTIN_CVTPD2DQ,
15382 IX86_BUILTIN_CVTPD2PI,
15383 IX86_BUILTIN_CVTPD2PS,
15384 IX86_BUILTIN_CVTTPD2DQ,
15385 IX86_BUILTIN_CVTTPD2PI,
15386
15387 IX86_BUILTIN_CVTPI2PD,
15388 IX86_BUILTIN_CVTSI2SD,
15389 IX86_BUILTIN_CVTSI642SD,
15390
15391 IX86_BUILTIN_CVTSD2SI,
15392 IX86_BUILTIN_CVTSD2SI64,
15393 IX86_BUILTIN_CVTSD2SS,
15394 IX86_BUILTIN_CVTSS2SD,
15395 IX86_BUILTIN_CVTTSD2SI,
15396 IX86_BUILTIN_CVTTSD2SI64,
15397
15398 IX86_BUILTIN_CVTPS2DQ,
15399 IX86_BUILTIN_CVTPS2PD,
15400 IX86_BUILTIN_CVTTPS2DQ,
15401
15402 IX86_BUILTIN_MOVNTI,
15403 IX86_BUILTIN_MOVNTPD,
15404 IX86_BUILTIN_MOVNTDQ,
15405
15406 /* SSE2 MMX */
15407 IX86_BUILTIN_MASKMOVDQU,
15408 IX86_BUILTIN_MOVMSKPD,
15409 IX86_BUILTIN_PMOVMSKB128,
15410
15411 IX86_BUILTIN_PACKSSWB128,
15412 IX86_BUILTIN_PACKSSDW128,
15413 IX86_BUILTIN_PACKUSWB128,
15414
15415 IX86_BUILTIN_PADDB128,
15416 IX86_BUILTIN_PADDW128,
15417 IX86_BUILTIN_PADDD128,
15418 IX86_BUILTIN_PADDQ128,
15419 IX86_BUILTIN_PADDSB128,
15420 IX86_BUILTIN_PADDSW128,
15421 IX86_BUILTIN_PADDUSB128,
15422 IX86_BUILTIN_PADDUSW128,
15423 IX86_BUILTIN_PSUBB128,
15424 IX86_BUILTIN_PSUBW128,
15425 IX86_BUILTIN_PSUBD128,
15426 IX86_BUILTIN_PSUBQ128,
15427 IX86_BUILTIN_PSUBSB128,
15428 IX86_BUILTIN_PSUBSW128,
15429 IX86_BUILTIN_PSUBUSB128,
15430 IX86_BUILTIN_PSUBUSW128,
15431
15432 IX86_BUILTIN_PAND128,
15433 IX86_BUILTIN_PANDN128,
15434 IX86_BUILTIN_POR128,
15435 IX86_BUILTIN_PXOR128,
15436
15437 IX86_BUILTIN_PAVGB128,
15438 IX86_BUILTIN_PAVGW128,
15439
15440 IX86_BUILTIN_PCMPEQB128,
15441 IX86_BUILTIN_PCMPEQW128,
15442 IX86_BUILTIN_PCMPEQD128,
15443 IX86_BUILTIN_PCMPGTB128,
15444 IX86_BUILTIN_PCMPGTW128,
15445 IX86_BUILTIN_PCMPGTD128,
15446
15447 IX86_BUILTIN_PMADDWD128,
15448
15449 IX86_BUILTIN_PMAXSW128,
15450 IX86_BUILTIN_PMAXUB128,
15451 IX86_BUILTIN_PMINSW128,
15452 IX86_BUILTIN_PMINUB128,
15453
15454 IX86_BUILTIN_PMULUDQ,
15455 IX86_BUILTIN_PMULUDQ128,
15456 IX86_BUILTIN_PMULHUW128,
15457 IX86_BUILTIN_PMULHW128,
15458 IX86_BUILTIN_PMULLW128,
15459
15460 IX86_BUILTIN_PSADBW128,
15461 IX86_BUILTIN_PSHUFHW,
15462 IX86_BUILTIN_PSHUFLW,
15463 IX86_BUILTIN_PSHUFD,
15464
15465 IX86_BUILTIN_PSLLW128,
15466 IX86_BUILTIN_PSLLD128,
15467 IX86_BUILTIN_PSLLQ128,
15468 IX86_BUILTIN_PSRAW128,
15469 IX86_BUILTIN_PSRAD128,
15470 IX86_BUILTIN_PSRLW128,
15471 IX86_BUILTIN_PSRLD128,
15472 IX86_BUILTIN_PSRLQ128,
15473 IX86_BUILTIN_PSLLDQI128,
15474 IX86_BUILTIN_PSLLWI128,
15475 IX86_BUILTIN_PSLLDI128,
15476 IX86_BUILTIN_PSLLQI128,
15477 IX86_BUILTIN_PSRAWI128,
15478 IX86_BUILTIN_PSRADI128,
15479 IX86_BUILTIN_PSRLDQI128,
15480 IX86_BUILTIN_PSRLWI128,
15481 IX86_BUILTIN_PSRLDI128,
15482 IX86_BUILTIN_PSRLQI128,
15483
15484 IX86_BUILTIN_PUNPCKHBW128,
15485 IX86_BUILTIN_PUNPCKHWD128,
15486 IX86_BUILTIN_PUNPCKHDQ128,
15487 IX86_BUILTIN_PUNPCKHQDQ128,
15488 IX86_BUILTIN_PUNPCKLBW128,
15489 IX86_BUILTIN_PUNPCKLWD128,
15490 IX86_BUILTIN_PUNPCKLDQ128,
15491 IX86_BUILTIN_PUNPCKLQDQ128,
15492
15493 IX86_BUILTIN_CLFLUSH,
15494 IX86_BUILTIN_MFENCE,
15495 IX86_BUILTIN_LFENCE,
15496
15497 /* Prescott New Instructions. */
15498 IX86_BUILTIN_ADDSUBPS,
15499 IX86_BUILTIN_HADDPS,
15500 IX86_BUILTIN_HSUBPS,
15501 IX86_BUILTIN_MOVSHDUP,
15502 IX86_BUILTIN_MOVSLDUP,
15503 IX86_BUILTIN_ADDSUBPD,
15504 IX86_BUILTIN_HADDPD,
15505 IX86_BUILTIN_HSUBPD,
15506 IX86_BUILTIN_LDDQU,
15507
15508 IX86_BUILTIN_MONITOR,
15509 IX86_BUILTIN_MWAIT,
15510
15511 /* SSSE3. */
15512 IX86_BUILTIN_PHADDW,
15513 IX86_BUILTIN_PHADDD,
15514 IX86_BUILTIN_PHADDSW,
15515 IX86_BUILTIN_PHSUBW,
15516 IX86_BUILTIN_PHSUBD,
15517 IX86_BUILTIN_PHSUBSW,
15518 IX86_BUILTIN_PMADDUBSW,
15519 IX86_BUILTIN_PMULHRSW,
15520 IX86_BUILTIN_PSHUFB,
15521 IX86_BUILTIN_PSIGNB,
15522 IX86_BUILTIN_PSIGNW,
15523 IX86_BUILTIN_PSIGND,
15524 IX86_BUILTIN_PALIGNR,
15525 IX86_BUILTIN_PABSB,
15526 IX86_BUILTIN_PABSW,
15527 IX86_BUILTIN_PABSD,
15528
15529 IX86_BUILTIN_PHADDW128,
15530 IX86_BUILTIN_PHADDD128,
15531 IX86_BUILTIN_PHADDSW128,
15532 IX86_BUILTIN_PHSUBW128,
15533 IX86_BUILTIN_PHSUBD128,
15534 IX86_BUILTIN_PHSUBSW128,
15535 IX86_BUILTIN_PMADDUBSW128,
15536 IX86_BUILTIN_PMULHRSW128,
15537 IX86_BUILTIN_PSHUFB128,
15538 IX86_BUILTIN_PSIGNB128,
15539 IX86_BUILTIN_PSIGNW128,
15540 IX86_BUILTIN_PSIGND128,
15541 IX86_BUILTIN_PALIGNR128,
15542 IX86_BUILTIN_PABSB128,
15543 IX86_BUILTIN_PABSW128,
15544 IX86_BUILTIN_PABSD128,
15545
15546 IX86_BUILTIN_VEC_INIT_V2SI,
15547 IX86_BUILTIN_VEC_INIT_V4HI,
15548 IX86_BUILTIN_VEC_INIT_V8QI,
15549 IX86_BUILTIN_VEC_EXT_V2DF,
15550 IX86_BUILTIN_VEC_EXT_V2DI,
15551 IX86_BUILTIN_VEC_EXT_V4SF,
15552 IX86_BUILTIN_VEC_EXT_V4SI,
15553 IX86_BUILTIN_VEC_EXT_V8HI,
15554 IX86_BUILTIN_VEC_EXT_V2SI,
15555 IX86_BUILTIN_VEC_EXT_V4HI,
15556 IX86_BUILTIN_VEC_SET_V8HI,
15557 IX86_BUILTIN_VEC_SET_V4HI,
15558
15559 IX86_BUILTIN_MAX
15560 };
15561
15562 /* Table for the ix86 builtin decls. */
15563 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
15564
15565 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Do so
15566 * only if target_flags includes one of the bits in MASK. Stores the
15567 * function decl in the ix86_builtins array.
15568 * Returns the function decl, or NULL_TREE if the builtin was not added. */
15569
15570 static inline tree
15571 def_builtin (int mask, const char *name, tree type, enum ix86_builtins code)
15572 {
15573 tree decl = NULL_TREE;
15574
15575 if (mask & target_flags
15576 && (!(mask & MASK_64BIT) || TARGET_64BIT))
15577 {
15578 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
15579 NULL, NULL_TREE);
15580 ix86_builtins[(int) code] = decl;
15581 }
15582
15583 return decl;
15584 }
15585
15586 /* Like def_builtin, but also marks the function decl "const". */
15587
15588 static inline tree
15589 def_builtin_const (int mask, const char *name, tree type,
15590 enum ix86_builtins code)
15591 {
15592 tree decl = def_builtin (mask, name, type, code);
15593 if (decl)
15594 TREE_READONLY (decl) = 1;
15595 return decl;
15596 }
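/* Typical use, as in the builtin initialization code further down in
   this file (the type name below is only illustrative; the real
   FUNCTION_TYPE nodes are constructed beforehand):

     def_builtin (MASK_SSE, "__builtin_ia32_loadups",
                  v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);

   registers the builtin only when -msse is enabled.  def_builtin_const
   is used instead for builtins without side effects, so that calls to
   them can be CSEd.  */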
15597
15598 /* Bits for builtin_description.flag. */
15599
15600 /* Set when we don't support the comparison natively, and should
15601 swap the comparison operands in order to support it. */
15602 #define BUILTIN_DESC_SWAP_OPERANDS 1
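
/* For example, the SSE CMPPS family has no "greater than" predicate, so
   __builtin_ia32_cmpgtps is described in the table below with comparison
   code LT plus BUILTIN_DESC_SWAP_OPERANDS: a > b is emitted as b < a.  */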
15603
15604 struct builtin_description
15605 {
15606 const unsigned int mask;
15607 const enum insn_code icode;
15608 const char *const name;
15609 const enum ix86_builtins code;
15610 const enum rtx_code comparison;
15611 const unsigned int flag;
15612 };
15613
15614 static const struct builtin_description bdesc_comi[] =
15615 {
15616 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
15617 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
15618 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
15619 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
15620 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
15621 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
15622 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
15623 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
15624 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
15625 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
15626 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
15627 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
15628 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
15629 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
15630 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
15631 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
15632 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
15633 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
15634 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
15635 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
15636 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
15637 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
15638 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
15639 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
15640 };
15641
15642 static const struct builtin_description bdesc_2arg[] =
15643 {
15644 /* SSE */
15645 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
15646 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
15647 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
15648 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
15649 { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
15650 { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
15651 { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
15652 { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
15653
15654 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
15655 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
15656 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
15657 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
15658 BUILTIN_DESC_SWAP_OPERANDS },
15659 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
15660 BUILTIN_DESC_SWAP_OPERANDS },
15661 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
15662 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
15663 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
15664 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
15665 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
15666 BUILTIN_DESC_SWAP_OPERANDS },
15667 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
15668 BUILTIN_DESC_SWAP_OPERANDS },
15669 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
15670 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
15671 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
15672 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
15673 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
15674 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
15675 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
15676 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
15677 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
15678 BUILTIN_DESC_SWAP_OPERANDS },
15679 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
15680 BUILTIN_DESC_SWAP_OPERANDS },
15681 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
15682
15683 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
15684 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
15685 { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
15686 { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
15687
15688 { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
15689 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
15690 { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
15691 { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
15692
15693 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
15694 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
15695 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
15696 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
15697 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
15698
15699 /* MMX */
15700 { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
15701 { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
15702 { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
15703 { MASK_SSE2, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
15704 { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
15705 { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
15706 { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
15707 { MASK_SSE2, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
15708
15709 { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
15710 { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
15711 { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
15712 { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
15713 { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
15714 { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
15715 { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
15716 { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
15717
15718 { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
15719 { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
15720 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
15721
15722 { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
15723 { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
15724 { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
15725 { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
15726
15727 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
15728 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
15729
15730 { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
15731 { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
15732 { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
15733 { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
15734 { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
15735 { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
15736
15737 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
15738 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
15739 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
15740 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
15741
15742 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
15743 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
15744 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
15745 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
15746 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
15747 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
15748
15749 /* Special. */
15750 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
15751 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
15752 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
15753
15754 { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
15755 { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
15756 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
15757
15758 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
15759 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
15760 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
15761 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
15762 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
15763 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
15764
15765 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
15766 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
15767 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
15768 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
15769 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
15770 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
15771
15772 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
15773 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
15774 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
15775 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
15776
15777 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
15778 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
15779
15780 /* SSE2 */
15781 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
15782 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
15783 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
15784 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
15785 { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
15786 { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
15787 { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
15788 { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
15789
15790 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
15791 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
15792 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
15793 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
15794 BUILTIN_DESC_SWAP_OPERANDS },
15795 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
15796 BUILTIN_DESC_SWAP_OPERANDS },
15797 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
15798 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
15799 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
15800 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
15801 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
15802 BUILTIN_DESC_SWAP_OPERANDS },
15803 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
15804 BUILTIN_DESC_SWAP_OPERANDS },
15805 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
15806 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
15807 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
15808 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
15809 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
15810 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
15811 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
15812 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
15813 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
15814
15815 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
15816 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
15817 { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
15818 { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
15819
15820 { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
15821 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
15822 { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
15823 { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
15824
15825 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
15826 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
15827 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
15828
15829 /* SSE2 MMX */
15830 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
15831 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
15832 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
15833 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
15834 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
15835 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
15836 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
15837 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
15838
15839 { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
15840 { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
15841 { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
15842 { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
15843 { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
15844 { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
15845 { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
15846 { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
15847
15848 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
15849 { MASK_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
15850
15851 { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
15852 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
15853 { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
15854 { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
15855
15856 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
15857 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
15858
15859 { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
15860 { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
15861 { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
15862 { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
15863 { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
15864 { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
15865
15866 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
15867 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
15868 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
15869 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
15870
15871 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
15872 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
15873 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
15874 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
15875 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
15876 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
15877 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
15878 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
15879
15880 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
15881 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
15882 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
15883
15884 { MASK_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
15885 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
15886
15887 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
15888 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
15889
15890 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
15891 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
15892 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
15893
15894 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
15895 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
15896 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
15897
15898 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
15899 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
15900
15901 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
15902
15903 { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
15904 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
15905 { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
15906 { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
15907
15908 /* SSE3 MMX */
15909 { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
15910 { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
15911 { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
15912 { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
15913 { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
15914 { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 },
15915
15916 /* SSSE3 */
15917 { MASK_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, 0, 0 },
15918 { MASK_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, 0, 0 },
15919 { MASK_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, 0, 0 },
15920 { MASK_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, 0, 0 },
15921 { MASK_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, 0, 0 },
15922 { MASK_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, 0, 0 },
15923 { MASK_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, 0, 0 },
15924 { MASK_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, 0, 0 },
15925 { MASK_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, 0, 0 },
15926 { MASK_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, 0, 0 },
15927 { MASK_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, 0, 0 },
15928 { MASK_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, 0, 0 },
15929 { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv8hi3, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, 0, 0 },
15930 { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv4hi3, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, 0, 0 },
15931 { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, 0, 0 },
15932 { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, 0, 0 },
15933 { MASK_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, 0, 0 },
15934 { MASK_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, 0, 0 },
15935 { MASK_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, 0, 0 },
15936 { MASK_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, 0, 0 },
15937 { MASK_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, 0, 0 },
15938 { MASK_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, 0, 0 },
15939 { MASK_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, 0, 0 },
15940 { MASK_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, 0, 0 }
15941 };
15942
15943 static const struct builtin_description bdesc_1arg[] =
15944 {
15945 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
15946 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
15947
15948 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
15949 { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
15950 { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
15951
15952 { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
15953 { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
15954 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
15955 { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
15956 { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
15957 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
15958
15959 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
15960 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
15961
15962 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
15963
15964 { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
15965 { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
15966
15967 { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
15968 { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
15969 { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
15970 { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
15971 { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
15972
15973 { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
15974
15975 { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
15976 { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
15977 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
15978 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
15979
15980 { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
15981 { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
15982 { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
15983
15984 /* SSE3 */
15985 { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
15986 { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
15987
15988 /* SSSE3 */
15989 { MASK_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, 0, 0 },
15990 { MASK_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, 0, 0 },
15991 { MASK_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, 0, 0 },
15992 { MASK_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, 0, 0 },
15993 { MASK_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, 0, 0 },
15994 { MASK_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, 0, 0 },
15995 };
15996
15997 static void
15998 ix86_init_builtins (void)
15999 {
16000 if (TARGET_MMX)
16001 ix86_init_mmx_sse_builtins ();
16002 }
16003
16004 /* Set up all the MMX/SSE builtins.  This is not called if TARGET_MMX
16005 is zero.  Otherwise, if TARGET_SSE is not set, only the MMX
16006 builtins are defined.  */
16007 static void
16008 ix86_init_mmx_sse_builtins (void)
16009 {
16010 const struct builtin_description * d;
16011 size_t i;
16012
16013 tree V16QI_type_node = build_vector_type_for_mode (char_type_node, V16QImode);
16014 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
16015 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
16016 tree V2DI_type_node
16017 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
16018 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
16019 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
16020 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
16021 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
16022 tree V8QI_type_node = build_vector_type_for_mode (char_type_node, V8QImode);
16023 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
16024
16025 tree pchar_type_node = build_pointer_type (char_type_node);
16026 tree pcchar_type_node = build_pointer_type (
16027 build_type_variant (char_type_node, 1, 0));
16028 tree pfloat_type_node = build_pointer_type (float_type_node);
16029 tree pcfloat_type_node = build_pointer_type (
16030 build_type_variant (float_type_node, 1, 0));
16031 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
16032 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
16033 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
16034
16035 /* Comparisons. */
16036 tree int_ftype_v4sf_v4sf
16037 = build_function_type_list (integer_type_node,
16038 V4SF_type_node, V4SF_type_node, NULL_TREE);
16039 tree v4si_ftype_v4sf_v4sf
16040 = build_function_type_list (V4SI_type_node,
16041 V4SF_type_node, V4SF_type_node, NULL_TREE);
16042 /* MMX/SSE/integer conversions. */
16043 tree int_ftype_v4sf
16044 = build_function_type_list (integer_type_node,
16045 V4SF_type_node, NULL_TREE);
16046 tree int64_ftype_v4sf
16047 = build_function_type_list (long_long_integer_type_node,
16048 V4SF_type_node, NULL_TREE);
16049 tree int_ftype_v8qi
16050 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
16051 tree v4sf_ftype_v4sf_int
16052 = build_function_type_list (V4SF_type_node,
16053 V4SF_type_node, integer_type_node, NULL_TREE);
16054 tree v4sf_ftype_v4sf_int64
16055 = build_function_type_list (V4SF_type_node,
16056 V4SF_type_node, long_long_integer_type_node,
16057 NULL_TREE);
16058 tree v4sf_ftype_v4sf_v2si
16059 = build_function_type_list (V4SF_type_node,
16060 V4SF_type_node, V2SI_type_node, NULL_TREE);
16061
16062 /* Miscellaneous. */
16063 tree v8qi_ftype_v4hi_v4hi
16064 = build_function_type_list (V8QI_type_node,
16065 V4HI_type_node, V4HI_type_node, NULL_TREE);
16066 tree v4hi_ftype_v2si_v2si
16067 = build_function_type_list (V4HI_type_node,
16068 V2SI_type_node, V2SI_type_node, NULL_TREE);
16069 tree v4sf_ftype_v4sf_v4sf_int
16070 = build_function_type_list (V4SF_type_node,
16071 V4SF_type_node, V4SF_type_node,
16072 integer_type_node, NULL_TREE);
16073 tree v2si_ftype_v4hi_v4hi
16074 = build_function_type_list (V2SI_type_node,
16075 V4HI_type_node, V4HI_type_node, NULL_TREE);
16076 tree v4hi_ftype_v4hi_int
16077 = build_function_type_list (V4HI_type_node,
16078 V4HI_type_node, integer_type_node, NULL_TREE);
16079 tree v4hi_ftype_v4hi_di
16080 = build_function_type_list (V4HI_type_node,
16081 V4HI_type_node, long_long_unsigned_type_node,
16082 NULL_TREE);
16083 tree v2si_ftype_v2si_di
16084 = build_function_type_list (V2SI_type_node,
16085 V2SI_type_node, long_long_unsigned_type_node,
16086 NULL_TREE);
16087 tree void_ftype_void
16088 = build_function_type (void_type_node, void_list_node);
16089 tree void_ftype_unsigned
16090 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
16091 tree void_ftype_unsigned_unsigned
16092 = build_function_type_list (void_type_node, unsigned_type_node,
16093 unsigned_type_node, NULL_TREE);
16094 tree void_ftype_pcvoid_unsigned_unsigned
16095 = build_function_type_list (void_type_node, const_ptr_type_node,
16096 unsigned_type_node, unsigned_type_node,
16097 NULL_TREE);
16098 tree unsigned_ftype_void
16099 = build_function_type (unsigned_type_node, void_list_node);
16100 tree v2si_ftype_v4sf
16101 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
16102 /* Loads/stores. */
16103 tree void_ftype_v8qi_v8qi_pchar
16104 = build_function_type_list (void_type_node,
16105 V8QI_type_node, V8QI_type_node,
16106 pchar_type_node, NULL_TREE);
16107 tree v4sf_ftype_pcfloat
16108 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
16109 /* @@@ the type is bogus */
16110 tree v4sf_ftype_v4sf_pv2si
16111 = build_function_type_list (V4SF_type_node,
16112 V4SF_type_node, pv2si_type_node, NULL_TREE);
16113 tree void_ftype_pv2si_v4sf
16114 = build_function_type_list (void_type_node,
16115 pv2si_type_node, V4SF_type_node, NULL_TREE);
16116 tree void_ftype_pfloat_v4sf
16117 = build_function_type_list (void_type_node,
16118 pfloat_type_node, V4SF_type_node, NULL_TREE);
16119 tree void_ftype_pdi_di
16120 = build_function_type_list (void_type_node,
16121 pdi_type_node, long_long_unsigned_type_node,
16122 NULL_TREE);
16123 tree void_ftype_pv2di_v2di
16124 = build_function_type_list (void_type_node,
16125 pv2di_type_node, V2DI_type_node, NULL_TREE);
16126 /* Normal vector unops. */
16127 tree v4sf_ftype_v4sf
16128 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16129 tree v16qi_ftype_v16qi
16130 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16131 tree v8hi_ftype_v8hi
16132 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16133 tree v4si_ftype_v4si
16134 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16135 tree v8qi_ftype_v8qi
16136 = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
16137 tree v4hi_ftype_v4hi
16138 = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
16139
16140 /* Normal vector binops. */
16141 tree v4sf_ftype_v4sf_v4sf
16142 = build_function_type_list (V4SF_type_node,
16143 V4SF_type_node, V4SF_type_node, NULL_TREE);
16144 tree v8qi_ftype_v8qi_v8qi
16145 = build_function_type_list (V8QI_type_node,
16146 V8QI_type_node, V8QI_type_node, NULL_TREE);
16147 tree v4hi_ftype_v4hi_v4hi
16148 = build_function_type_list (V4HI_type_node,
16149 V4HI_type_node, V4HI_type_node, NULL_TREE);
16150 tree v2si_ftype_v2si_v2si
16151 = build_function_type_list (V2SI_type_node,
16152 V2SI_type_node, V2SI_type_node, NULL_TREE);
16153 tree di_ftype_di_di
16154 = build_function_type_list (long_long_unsigned_type_node,
16155 long_long_unsigned_type_node,
16156 long_long_unsigned_type_node, NULL_TREE);
16157
16158 tree di_ftype_di_di_int
16159 = build_function_type_list (long_long_unsigned_type_node,
16160 long_long_unsigned_type_node,
16161 long_long_unsigned_type_node,
16162 integer_type_node, NULL_TREE);
16163
16164 tree v2si_ftype_v2sf
16165 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
16166 tree v2sf_ftype_v2si
16167 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
16168 tree v2si_ftype_v2si
16169 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
16170 tree v2sf_ftype_v2sf
16171 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
16172 tree v2sf_ftype_v2sf_v2sf
16173 = build_function_type_list (V2SF_type_node,
16174 V2SF_type_node, V2SF_type_node, NULL_TREE);
16175 tree v2si_ftype_v2sf_v2sf
16176 = build_function_type_list (V2SI_type_node,
16177 V2SF_type_node, V2SF_type_node, NULL_TREE);
16178 tree pint_type_node = build_pointer_type (integer_type_node);
16179 tree pdouble_type_node = build_pointer_type (double_type_node);
16180 tree pcdouble_type_node = build_pointer_type (
16181 build_type_variant (double_type_node, 1, 0));
16182 tree int_ftype_v2df_v2df
16183 = build_function_type_list (integer_type_node,
16184 V2DF_type_node, V2DF_type_node, NULL_TREE);
16185
16186 tree void_ftype_pcvoid
16187 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
16188 tree v4sf_ftype_v4si
16189 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
16190 tree v4si_ftype_v4sf
16191 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
16192 tree v2df_ftype_v4si
16193 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
16194 tree v4si_ftype_v2df
16195 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
16196 tree v2si_ftype_v2df
16197 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
16198 tree v4sf_ftype_v2df
16199 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
16200 tree v2df_ftype_v2si
16201 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
16202 tree v2df_ftype_v4sf
16203 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
16204 tree int_ftype_v2df
16205 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
16206 tree int64_ftype_v2df
16207 = build_function_type_list (long_long_integer_type_node,
16208 V2DF_type_node, NULL_TREE);
16209 tree v2df_ftype_v2df_int
16210 = build_function_type_list (V2DF_type_node,
16211 V2DF_type_node, integer_type_node, NULL_TREE);
16212 tree v2df_ftype_v2df_int64
16213 = build_function_type_list (V2DF_type_node,
16214 V2DF_type_node, long_long_integer_type_node,
16215 NULL_TREE);
16216 tree v4sf_ftype_v4sf_v2df
16217 = build_function_type_list (V4SF_type_node,
16218 V4SF_type_node, V2DF_type_node, NULL_TREE);
16219 tree v2df_ftype_v2df_v4sf
16220 = build_function_type_list (V2DF_type_node,
16221 V2DF_type_node, V4SF_type_node, NULL_TREE);
16222 tree v2df_ftype_v2df_v2df_int
16223 = build_function_type_list (V2DF_type_node,
16224 V2DF_type_node, V2DF_type_node,
16225 integer_type_node,
16226 NULL_TREE);
16227 tree v2df_ftype_v2df_pcdouble
16228 = build_function_type_list (V2DF_type_node,
16229 V2DF_type_node, pcdouble_type_node, NULL_TREE);
16230 tree void_ftype_pdouble_v2df
16231 = build_function_type_list (void_type_node,
16232 pdouble_type_node, V2DF_type_node, NULL_TREE);
16233 tree void_ftype_pint_int
16234 = build_function_type_list (void_type_node,
16235 pint_type_node, integer_type_node, NULL_TREE);
16236 tree void_ftype_v16qi_v16qi_pchar
16237 = build_function_type_list (void_type_node,
16238 V16QI_type_node, V16QI_type_node,
16239 pchar_type_node, NULL_TREE);
16240 tree v2df_ftype_pcdouble
16241 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
16242 tree v2df_ftype_v2df_v2df
16243 = build_function_type_list (V2DF_type_node,
16244 V2DF_type_node, V2DF_type_node, NULL_TREE);
16245 tree v16qi_ftype_v16qi_v16qi
16246 = build_function_type_list (V16QI_type_node,
16247 V16QI_type_node, V16QI_type_node, NULL_TREE);
16248 tree v8hi_ftype_v8hi_v8hi
16249 = build_function_type_list (V8HI_type_node,
16250 V8HI_type_node, V8HI_type_node, NULL_TREE);
16251 tree v4si_ftype_v4si_v4si
16252 = build_function_type_list (V4SI_type_node,
16253 V4SI_type_node, V4SI_type_node, NULL_TREE);
16254 tree v2di_ftype_v2di_v2di
16255 = build_function_type_list (V2DI_type_node,
16256 V2DI_type_node, V2DI_type_node, NULL_TREE);
16257 tree v2di_ftype_v2df_v2df
16258 = build_function_type_list (V2DI_type_node,
16259 V2DF_type_node, V2DF_type_node, NULL_TREE);
16260 tree v2df_ftype_v2df
16261 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16262 tree v2di_ftype_v2di_int
16263 = build_function_type_list (V2DI_type_node,
16264 V2DI_type_node, integer_type_node, NULL_TREE);
16265 tree v2di_ftype_v2di_v2di_int
16266 = build_function_type_list (V2DI_type_node, V2DI_type_node,
16267 V2DI_type_node, integer_type_node, NULL_TREE);
16268 tree v4si_ftype_v4si_int
16269 = build_function_type_list (V4SI_type_node,
16270 V4SI_type_node, integer_type_node, NULL_TREE);
16271 tree v8hi_ftype_v8hi_int
16272 = build_function_type_list (V8HI_type_node,
16273 V8HI_type_node, integer_type_node, NULL_TREE);
16274 tree v8hi_ftype_v8hi_v2di
16275 = build_function_type_list (V8HI_type_node,
16276 V8HI_type_node, V2DI_type_node, NULL_TREE);
16277 tree v4si_ftype_v4si_v2di
16278 = build_function_type_list (V4SI_type_node,
16279 V4SI_type_node, V2DI_type_node, NULL_TREE);
16280 tree v4si_ftype_v8hi_v8hi
16281 = build_function_type_list (V4SI_type_node,
16282 V8HI_type_node, V8HI_type_node, NULL_TREE);
16283 tree di_ftype_v8qi_v8qi
16284 = build_function_type_list (long_long_unsigned_type_node,
16285 V8QI_type_node, V8QI_type_node, NULL_TREE);
16286 tree di_ftype_v2si_v2si
16287 = build_function_type_list (long_long_unsigned_type_node,
16288 V2SI_type_node, V2SI_type_node, NULL_TREE);
16289 tree v2di_ftype_v16qi_v16qi
16290 = build_function_type_list (V2DI_type_node,
16291 V16QI_type_node, V16QI_type_node, NULL_TREE);
16292 tree v2di_ftype_v4si_v4si
16293 = build_function_type_list (V2DI_type_node,
16294 V4SI_type_node, V4SI_type_node, NULL_TREE);
16295 tree int_ftype_v16qi
16296 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
16297 tree v16qi_ftype_pcchar
16298 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
16299 tree void_ftype_pchar_v16qi
16300 = build_function_type_list (void_type_node,
16301 pchar_type_node, V16QI_type_node, NULL_TREE);
16302
16303 tree float80_type;
16304 tree float128_type;
16305 tree ftype;
16306
16307 /* The __float80 type. */
16308 if (TYPE_MODE (long_double_type_node) == XFmode)
16309 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
16310 "__float80");
16311 else
16312 {
16313 /* The __float80 type. */
16314 float80_type = make_node (REAL_TYPE);
16315 TYPE_PRECISION (float80_type) = 80;
16316 layout_type (float80_type);
16317 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
16318 }
16319
16320 if (TARGET_64BIT)
16321 {
16322 float128_type = make_node (REAL_TYPE);
16323 TYPE_PRECISION (float128_type) = 128;
16324 layout_type (float128_type);
16325 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
16326 }
16327
16328 /* Add all builtins that are more or less simple operations on two
16329 operands. */
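/* For instance, the bdesc_2arg entry above that uses CODE_FOR_mulv8hi3
   has V8HImode operands, so for it this loop effectively performs
     def_builtin (MASK_SSE2, "__builtin_ia32_pmullw128",
                  v8hi_ftype_v8hi_v8hi, IX86_BUILTIN_PMULLW128);  */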
16330 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16331 {
16332 /* Use one of the operands; the target can have a different mode for
16333 mask-generating compares. */
16334 enum machine_mode mode;
16335 tree type;
16336
16337 if (d->name == 0)
16338 continue;
16339 mode = insn_data[d->icode].operand[1].mode;
16340
16341 switch (mode)
16342 {
16343 case V16QImode:
16344 type = v16qi_ftype_v16qi_v16qi;
16345 break;
16346 case V8HImode:
16347 type = v8hi_ftype_v8hi_v8hi;
16348 break;
16349 case V4SImode:
16350 type = v4si_ftype_v4si_v4si;
16351 break;
16352 case V2DImode:
16353 type = v2di_ftype_v2di_v2di;
16354 break;
16355 case V2DFmode:
16356 type = v2df_ftype_v2df_v2df;
16357 break;
16358 case V4SFmode:
16359 type = v4sf_ftype_v4sf_v4sf;
16360 break;
16361 case V8QImode:
16362 type = v8qi_ftype_v8qi_v8qi;
16363 break;
16364 case V4HImode:
16365 type = v4hi_ftype_v4hi_v4hi;
16366 break;
16367 case V2SImode:
16368 type = v2si_ftype_v2si_v2si;
16369 break;
16370 case DImode:
16371 type = di_ftype_di_di;
16372 break;
16373
16374 default:
16375 gcc_unreachable ();
16376 }
16377
16378 /* Override for comparisons. */
16379 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
16380 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
16381 type = v4si_ftype_v4sf_v4sf;
16382
16383 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
16384 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
16385 type = v2di_ftype_v2df_v2df;
16386
16387 def_builtin (d->mask, d->name, type, d->code);
16388 }
16389
16390 /* Add all builtins that are more or less simple operations on 1 operand. */
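/* For instance, the bdesc_1arg entry above that uses CODE_FOR_absv16qi2
   has a V16QImode operand, so for it this loop effectively performs
     def_builtin (MASK_SSSE3, "__builtin_ia32_pabsb128",
                  v16qi_ftype_v16qi, IX86_BUILTIN_PABSB128);  */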
16391 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16392 {
16393 enum machine_mode mode;
16394 tree type;
16395
16396 if (d->name == 0)
16397 continue;
16398 mode = insn_data[d->icode].operand[1].mode;
16399
16400 switch (mode)
16401 {
16402 case V16QImode:
16403 type = v16qi_ftype_v16qi;
16404 break;
16405 case V8HImode:
16406 type = v8hi_ftype_v8hi;
16407 break;
16408 case V4SImode:
16409 type = v4si_ftype_v4si;
16410 break;
16411 case V2DFmode:
16412 type = v2df_ftype_v2df;
16413 break;
16414 case V4SFmode:
16415 type = v4sf_ftype_v4sf;
16416 break;
16417 case V8QImode:
16418 type = v8qi_ftype_v8qi;
16419 break;
16420 case V4HImode:
16421 type = v4hi_ftype_v4hi;
16422 break;
16423 case V2SImode:
16424 type = v2si_ftype_v2si;
16425 break;
16426
16427 default:
16428 gcc_unreachable ();
16429 }
16430
16431 def_builtin (d->mask, d->name, type, d->code);
16432 }
16433
16434 /* Add the remaining MMX insns with somewhat more complicated types. */
16435 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
16436 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
16437 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
16438 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
16439
16440 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
16441 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
16442 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
16443
16444 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
16445 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
16446
16447 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
16448 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
16449
16450 /* comi/ucomi insns. */
16451 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
16452 if (d->mask == MASK_SSE2)
16453 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
16454 else
16455 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
16456
16457 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
16458 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
16459 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
16460
16461 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
16462 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
16463 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
16464 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
16465 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
16466 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
16467 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
16468 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
16469 def_builtin_const (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
16470 def_builtin_const (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
16471 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
16472
16473 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
16474
16475 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
16476 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
16477
16478 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
16479 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
16480 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
16481 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
16482
16483 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
16484 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
16485 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
16486 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
16487
16488 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
16489
16490 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
16491
16492 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
16493 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
16494 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
16495 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
16496 def_builtin_const (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
16497 def_builtin_const (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
16498
16499 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
16500
16501 /* Original 3DNow! */
16502 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
16503 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
16504 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
16505 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
16506 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
16507 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
16508 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
16509 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
16510 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
16511 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
16512 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
16513 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
16514 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
16515 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
16516 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
16517 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
16518 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
16519 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
16520 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
16521 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
16522
16523 /* 3DNow! extension as used in the Athlon CPU. */
16524 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
16525 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
16526 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
16527 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
16528 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
16529 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
16530
16531 /* SSE2 */
16532 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
16533
16534 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
16535 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
16536
16537 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
16538 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
16539
16540 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
16541 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
16542 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
16543 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
16544 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
16545
16546 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
16547 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
16548 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
16549 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
16550
16551 def_builtin_const (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
16552 def_builtin_const (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
16553
16554 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
16555
16556 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
16557 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
16558
16559 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
16560 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
16561 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
16562 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
16563 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
16564
16565 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
16566
16567 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
16568 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
16569 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
16570 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
16571
16572 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
16573 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
16574 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
16575
16576 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
16577 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
16578 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
16579 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
16580
16581 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
16582 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
16583 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
16584
16585 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
16586 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
16587
16588 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
16589 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
16590
16591 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
16592 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
16593 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
16594
16595 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
16596 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
16597 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
16598
16599 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
16600 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
16601
16602 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
16603 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
16604 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
16605 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
16606
16607 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
16608 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
16609 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
16610 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
16611
16612 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
16613 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
16614
16615 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
16616
16617 /* Prescott New Instructions. */
16618 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
16619 void_ftype_pcvoid_unsigned_unsigned,
16620 IX86_BUILTIN_MONITOR);
16621 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
16622 void_ftype_unsigned_unsigned,
16623 IX86_BUILTIN_MWAIT);
16624 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
16625 v4sf_ftype_v4sf,
16626 IX86_BUILTIN_MOVSHDUP);
16627 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
16628 v4sf_ftype_v4sf,
16629 IX86_BUILTIN_MOVSLDUP);
16630 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
16631 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
16632
16633 /* SSSE3. */
16634 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr128",
16635 v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PALIGNR128);
16636 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr", di_ftype_di_di_int,
16637 IX86_BUILTIN_PALIGNR);
16638
16639 /* Access to the vec_init patterns. */
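/* (These are the builtins that mmintrin.h uses to construct MMX vectors;
   see the comment above ix86_expand_vec_init_builtin below.)  */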
16640 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
16641 integer_type_node, NULL_TREE);
16642 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
16643 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
16644
16645 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
16646 short_integer_type_node,
16647 short_integer_type_node,
16648 short_integer_type_node, NULL_TREE);
16649 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
16650 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
16651
16652 ftype = build_function_type_list (V8QI_type_node, char_type_node,
16653 char_type_node, char_type_node,
16654 char_type_node, char_type_node,
16655 char_type_node, char_type_node,
16656 char_type_node, NULL_TREE);
16657 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
16658 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
16659
16660 /* Access to the vec_extract patterns. */
16661 ftype = build_function_type_list (double_type_node, V2DF_type_node,
16662 integer_type_node, NULL_TREE);
16663 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
16664 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
16665
16666 ftype = build_function_type_list (long_long_integer_type_node,
16667 V2DI_type_node, integer_type_node,
16668 NULL_TREE);
16669 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
16670 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
16671
16672 ftype = build_function_type_list (float_type_node, V4SF_type_node,
16673 integer_type_node, NULL_TREE);
16674 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
16675 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
16676
16677 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
16678 integer_type_node, NULL_TREE);
16679 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
16680 ftype, IX86_BUILTIN_VEC_EXT_V4SI);
16681
16682 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
16683 integer_type_node, NULL_TREE);
16684 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
16685 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
16686
16687 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
16688 integer_type_node, NULL_TREE);
16689 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
16690 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
16691
16692 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
16693 integer_type_node, NULL_TREE);
16694 def_builtin (MASK_MMX, "__builtin_ia32_vec_ext_v2si",
16695 ftype, IX86_BUILTIN_VEC_EXT_V2SI);
16696
16697 /* Access to the vec_set patterns. */
16698 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
16699 intHI_type_node,
16700 integer_type_node, NULL_TREE);
16701 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
16702 ftype, IX86_BUILTIN_VEC_SET_V8HI);
16703
16704 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
16705 intHI_type_node,
16706 integer_type_node, NULL_TREE);
16707 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
16708 ftype, IX86_BUILTIN_VEC_SET_V4HI);
16709 }
16710
16711 /* Errors in the source file can cause expand_expr to return const0_rtx
16712 where we expect a vector. To avoid crashing, use one of the vector
16713 clear instructions. */
16714 static rtx
16715 safe_vector_operand (rtx x, enum machine_mode mode)
16716 {
16717 if (x == const0_rtx)
16718 x = CONST0_RTX (mode);
16719 return x;
16720 }
16721
16722 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
16723
16724 static rtx
16725 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
16726 {
16727 rtx pat, xops[3];
16728 tree arg0 = TREE_VALUE (arglist);
16729 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16730 rtx op0 = expand_normal (arg0);
16731 rtx op1 = expand_normal (arg1);
16732 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16733 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16734 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
16735
16736 if (VECTOR_MODE_P (mode0))
16737 op0 = safe_vector_operand (op0, mode0);
16738 if (VECTOR_MODE_P (mode1))
16739 op1 = safe_vector_operand (op1, mode1);
16740
16741 if (optimize || !target
16742 || GET_MODE (target) != tmode
16743 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16744 target = gen_reg_rtx (tmode);
16745
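/* Some patterns declare this operand in TImode although the builtin passes
   an SImode value; load it into the low element of an SSE register and
   view that register as TImode.  */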
16746 if (GET_MODE (op1) == SImode && mode1 == TImode)
16747 {
16748 rtx x = gen_reg_rtx (V4SImode);
16749 emit_insn (gen_sse2_loadd (x, op1));
16750 op1 = gen_lowpart (TImode, x);
16751 }
16752
16753 /* The operands' RTL modes must match the modes the insn pattern
16754 declares for them (a VOIDmode constant is also acceptable).  */
16755 gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
16756 && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));
16757
16758 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
16759 op0 = copy_to_mode_reg (mode0, op0);
16760 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
16761 op1 = copy_to_mode_reg (mode1, op1);
16762
16763 /* ??? Using ix86_fixup_binary_operands is problematic when
16764 we've got mismatched modes. Fake it. */
16765
16766 xops[0] = target;
16767 xops[1] = op0;
16768 xops[2] = op1;
16769
16770 if (tmode == mode0 && tmode == mode1)
16771 {
16772 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
16773 op0 = xops[1];
16774 op1 = xops[2];
16775 }
16776 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
16777 {
16778 op0 = force_reg (mode0, op0);
16779 op1 = force_reg (mode1, op1);
16780 target = gen_reg_rtx (tmode);
16781 }
16782
16783 pat = GEN_FCN (icode) (target, op0, op1);
16784 if (! pat)
16785 return 0;
16786 emit_insn (pat);
16787 return target;
16788 }
16789
16790 /* Subroutine of ix86_expand_builtin to take care of stores. */
16791
16792 static rtx
16793 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
16794 {
16795 rtx pat;
16796 tree arg0 = TREE_VALUE (arglist);
16797 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16798 rtx op0 = expand_normal (arg0);
16799 rtx op1 = expand_normal (arg1);
16800 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
16801 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
16802
16803 if (VECTOR_MODE_P (mode1))
16804 op1 = safe_vector_operand (op1, mode1);
16805
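/* Operand 0 is the destination address; wrap it in a MEM of the mode the
   insn expects for its first operand.  */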
16806 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
16807 op1 = copy_to_mode_reg (mode1, op1);
16808
16809 pat = GEN_FCN (icode) (op0, op1);
16810 if (pat)
16811 emit_insn (pat);
16812 return 0;
16813 }
16814
16815 /* Subroutine of ix86_expand_builtin to take care of unop insns.  If
   DO_LOAD is nonzero, the single argument is an address and the operand is
   loaded from memory.  */
16816
16817 static rtx
16818 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
16819 rtx target, int do_load)
16820 {
16821 rtx pat;
16822 tree arg0 = TREE_VALUE (arglist);
16823 rtx op0 = expand_normal (arg0);
16824 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16825 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16826
16827 if (optimize || !target
16828 || GET_MODE (target) != tmode
16829 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16830 target = gen_reg_rtx (tmode);
16831 if (do_load)
16832 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
16833 else
16834 {
16835 if (VECTOR_MODE_P (mode0))
16836 op0 = safe_vector_operand (op0, mode0);
16837
16838 if ((optimize && !register_operand (op0, mode0))
16839 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16840 op0 = copy_to_mode_reg (mode0, op0);
16841 }
16842
16843 pat = GEN_FCN (icode) (target, op0);
16844 if (! pat)
16845 return 0;
16846 emit_insn (pat);
16847 return target;
16848 }
16849
16850 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
16851 sqrtss, rsqrtss, rcpss. */
16852
16853 static rtx
16854 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
16855 {
16856 rtx pat;
16857 tree arg0 = TREE_VALUE (arglist);
16858 rtx op1, op0 = expand_normal (arg0);
16859 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16860 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16861
16862 if (optimize || !target
16863 || GET_MODE (target) != tmode
16864 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16865 target = gen_reg_rtx (tmode);
16866
16867 if (VECTOR_MODE_P (mode0))
16868 op0 = safe_vector_operand (op0, mode0);
16869
16870 if ((optimize && !register_operand (op0, mode0))
16871 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16872 op0 = copy_to_mode_reg (mode0, op0);
16873
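/* The vm (scalar) patterns take the source vector twice: operand 1 is the
   input of the operation and operand 2 supplies the remaining elements of
   the result, so the same register is used for both.  */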
16874 op1 = op0;
16875 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
16876 op1 = copy_to_mode_reg (mode0, op1);
16877
16878 pat = GEN_FCN (icode) (target, op0, op1);
16879 if (! pat)
16880 return 0;
16881 emit_insn (pat);
16882 return target;
16883 }
16884
16885 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
16886
16887 static rtx
16888 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
16889 rtx target)
16890 {
16891 rtx pat;
16892 tree arg0 = TREE_VALUE (arglist);
16893 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16894 rtx op0 = expand_normal (arg0);
16895 rtx op1 = expand_normal (arg1);
16896 rtx op2;
16897 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
16898 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
16899 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
16900 enum rtx_code comparison = d->comparison;
16901
16902 if (VECTOR_MODE_P (mode0))
16903 op0 = safe_vector_operand (op0, mode0);
16904 if (VECTOR_MODE_P (mode1))
16905 op1 = safe_vector_operand (op1, mode1);
16906
16907 /* Swap operands if we have a comparison that isn't available in
16908 hardware. */
16909 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
16910 {
16911 rtx tmp = gen_reg_rtx (mode1);
16912 emit_move_insn (tmp, op1);
16913 op1 = op0;
16914 op0 = tmp;
16915 }
16916
16917 if (optimize || !target
16918 || GET_MODE (target) != tmode
16919 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
16920 target = gen_reg_rtx (tmode);
16921
16922 if ((optimize && !register_operand (op0, mode0))
16923 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
16924 op0 = copy_to_mode_reg (mode0, op0);
16925 if ((optimize && !register_operand (op1, mode1))
16926 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
16927 op1 = copy_to_mode_reg (mode1, op1);
16928
16929 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
16930 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
16931 if (! pat)
16932 return 0;
16933 emit_insn (pat);
16934 return target;
16935 }
16936
16937 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
16938
16939 static rtx
16940 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
16941 rtx target)
16942 {
16943 rtx pat;
16944 tree arg0 = TREE_VALUE (arglist);
16945 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16946 rtx op0 = expand_normal (arg0);
16947 rtx op1 = expand_normal (arg1);
16948 rtx op2;
16949 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
16950 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
16951 enum rtx_code comparison = d->comparison;
16952
16953 if (VECTOR_MODE_P (mode0))
16954 op0 = safe_vector_operand (op0, mode0);
16955 if (VECTOR_MODE_P (mode1))
16956 op1 = safe_vector_operand (op1, mode1);
16957
16958 /* Swap operands if we have a comparison that isn't available in
16959 hardware. */
16960 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
16961 {
16962 rtx tmp = op1;
16963 op1 = op0;
16964 op0 = tmp;
16965 }
16966
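/* The comi/ucomi insn only sets the flags.  Materialize the result by
   clearing an SImode register and then setting its low byte from the
   requested comparison of those flags.  */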
16967 target = gen_reg_rtx (SImode);
16968 emit_move_insn (target, const0_rtx);
16969 target = gen_rtx_SUBREG (QImode, target, 0);
16970
16971 if ((optimize && !register_operand (op0, mode0))
16972 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
16973 op0 = copy_to_mode_reg (mode0, op0);
16974 if ((optimize && !register_operand (op1, mode1))
16975 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
16976 op1 = copy_to_mode_reg (mode1, op1);
16977
16978 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
16979 pat = GEN_FCN (d->icode) (op0, op1);
16980 if (! pat)
16981 return 0;
16982 emit_insn (pat);
16983 emit_insn (gen_rtx_SET (VOIDmode,
16984 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
16985 gen_rtx_fmt_ee (comparison, QImode,
16986 SET_DEST (pat),
16987 const0_rtx)));
16988
16989 return SUBREG_REG (target);
16990 }
16991
16992 /* Return the integer constant in ARG. Constrain it to be in the range
16993 of the subparts of VEC_TYPE; issue an error if not. */
16994
16995 static int
16996 get_element_number (tree vec_type, tree arg)
16997 {
16998 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
16999
17000 if (!host_integerp (arg, 1)
17001 || (elt = tree_low_cst (arg, 1), elt > max))
17002 {
17003 error ("selector must be an integer constant in the range 0..%wi", max);
17004 return 0;
17005 }
17006
17007 return elt;
17008 }
17009
17010 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17011 ix86_expand_vector_init. We DO have language-level syntax for this, in
17012 the form of (type){ init-list }. Except that since we can't place emms
17013 instructions from inside the compiler, we can't allow the use of MMX
17014 registers unless the user explicitly asks for it. So we do *not* define
17015 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
17016 we have builtins invoked by mmintrin.h that give us license to emit
17017 these sorts of instructions. */
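/* For example, a call such as __builtin_ia32_vec_init_v2si (1, 2) is routed
   here from ix86_expand_builtin: each argument is converted to the vector's
   inner mode and the resulting PARALLEL is passed on to
   ix86_expand_vector_init.  */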
17018
17019 static rtx
17020 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
17021 {
17022 enum machine_mode tmode = TYPE_MODE (type);
17023 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
17024 int i, n_elt = GET_MODE_NUNITS (tmode);
17025 rtvec v = rtvec_alloc (n_elt);
17026
17027 gcc_assert (VECTOR_MODE_P (tmode));
17028
17029 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
17030 {
17031 rtx x = expand_normal (TREE_VALUE (arglist));
17032 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
17033 }
17034
17035 gcc_assert (arglist == NULL);
17036
17037 if (!target || !register_operand (target, tmode))
17038 target = gen_reg_rtx (tmode);
17039
17040 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
17041 return target;
17042 }
17043
17044 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17045 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
17046 had a language-level syntax for referencing vector elements. */
17047
17048 static rtx
17049 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
17050 {
17051 enum machine_mode tmode, mode0;
17052 tree arg0, arg1;
17053 int elt;
17054 rtx op0;
17055
17056 arg0 = TREE_VALUE (arglist);
17057 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17058
17059 op0 = expand_normal (arg0);
17060 elt = get_element_number (TREE_TYPE (arg0), arg1);
17061
17062 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
17063 mode0 = TYPE_MODE (TREE_TYPE (arg0));
17064 gcc_assert (VECTOR_MODE_P (mode0));
17065
17066 op0 = force_reg (mode0, op0);
17067
17068 if (optimize || !target || !register_operand (target, tmode))
17069 target = gen_reg_rtx (tmode);
17070
17071 ix86_expand_vector_extract (true, target, op0, elt);
17072
17073 return target;
17074 }
17075
17076 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17077 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
17078 a language-level syntax for referencing vector elements. */
17079
17080 static rtx
17081 ix86_expand_vec_set_builtin (tree arglist)
17082 {
17083 enum machine_mode tmode, mode1;
17084 tree arg0, arg1, arg2;
17085 int elt;
17086 rtx op0, op1;
17087
17088 arg0 = TREE_VALUE (arglist);
17089 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17090 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17091
17092 tmode = TYPE_MODE (TREE_TYPE (arg0));
17093 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
17094 gcc_assert (VECTOR_MODE_P (tmode));
17095
17096 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
17097 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
17098 elt = get_element_number (TREE_TYPE (arg0), arg2);
17099
17100 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
17101 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
17102
17103 op0 = force_reg (tmode, op0);
17104 op1 = force_reg (mode1, op1);
17105
17106 ix86_expand_vector_set (true, op0, op1, elt);
17107
17108 return op0;
17109 }
17110
17111 /* Expand an expression EXP that calls a built-in function,
17112 with result going to TARGET if that's convenient
17113 (and in mode MODE if that's convenient).
17114 SUBTARGET may be used as the target for computing one of EXP's operands.
17115 IGNORE is nonzero if the value is to be ignored. */
17116
17117 static rtx
17118 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
17119 enum machine_mode mode ATTRIBUTE_UNUSED,
17120 int ignore ATTRIBUTE_UNUSED)
17121 {
17122 const struct builtin_description *d;
17123 size_t i;
17124 enum insn_code icode;
17125 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
17126 tree arglist = TREE_OPERAND (exp, 1);
17127 tree arg0, arg1, arg2;
17128 rtx op0, op1, op2, pat;
17129 enum machine_mode tmode, mode0, mode1, mode2, mode3;
17130 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
17131
17132 switch (fcode)
17133 {
17134 case IX86_BUILTIN_EMMS:
17135 emit_insn (gen_mmx_emms ());
17136 return 0;
17137
17138 case IX86_BUILTIN_SFENCE:
17139 emit_insn (gen_sse_sfence ());
17140 return 0;
17141
17142 case IX86_BUILTIN_MASKMOVQ:
17143 case IX86_BUILTIN_MASKMOVDQU:
17144 icode = (fcode == IX86_BUILTIN_MASKMOVQ
17145 ? CODE_FOR_mmx_maskmovq
17146 : CODE_FOR_sse2_maskmovdqu);
17147 /* Note the arg order is different from the operand order. */
17148 arg1 = TREE_VALUE (arglist);
17149 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
17150 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17151 op0 = expand_normal (arg0);
17152 op1 = expand_normal (arg1);
17153 op2 = expand_normal (arg2);
17154 mode0 = insn_data[icode].operand[0].mode;
17155 mode1 = insn_data[icode].operand[1].mode;
17156 mode2 = insn_data[icode].operand[2].mode;
17157
17158 op0 = force_reg (Pmode, op0);
17159 op0 = gen_rtx_MEM (mode1, op0);
17160
17161 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
17162 op0 = copy_to_mode_reg (mode0, op0);
17163 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
17164 op1 = copy_to_mode_reg (mode1, op1);
17165 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
17166 op2 = copy_to_mode_reg (mode2, op2);
17167 pat = GEN_FCN (icode) (op0, op1, op2);
17168 if (! pat)
17169 return 0;
17170 emit_insn (pat);
17171 return 0;
17172
17173 case IX86_BUILTIN_SQRTSS:
17174 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
17175 case IX86_BUILTIN_RSQRTSS:
17176 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
17177 case IX86_BUILTIN_RCPSS:
17178 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
17179
17180 case IX86_BUILTIN_LOADUPS:
17181 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
17182
17183 case IX86_BUILTIN_STOREUPS:
17184 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
17185
17186 case IX86_BUILTIN_LOADHPS:
17187 case IX86_BUILTIN_LOADLPS:
17188 case IX86_BUILTIN_LOADHPD:
17189 case IX86_BUILTIN_LOADLPD:
17190 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
17191 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
17192 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
17193 : CODE_FOR_sse2_loadlpd);
17194 arg0 = TREE_VALUE (arglist);
17195 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17196 op0 = expand_normal (arg0);
17197 op1 = expand_normal (arg1);
17198 tmode = insn_data[icode].operand[0].mode;
17199 mode0 = insn_data[icode].operand[1].mode;
17200 mode1 = insn_data[icode].operand[2].mode;
17201
17202 op0 = force_reg (mode0, op0);
17203 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
17204 if (optimize || target == 0
17205 || GET_MODE (target) != tmode
17206 || !register_operand (target, tmode))
17207 target = gen_reg_rtx (tmode);
17208 pat = GEN_FCN (icode) (target, op0, op1);
17209 if (! pat)
17210 return 0;
17211 emit_insn (pat);
17212 return target;
17213
17214 case IX86_BUILTIN_STOREHPS:
17215 case IX86_BUILTIN_STORELPS:
17216 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
17217 : CODE_FOR_sse_storelps);
17218 arg0 = TREE_VALUE (arglist);
17219 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17220 op0 = expand_normal (arg0);
17221 op1 = expand_normal (arg1);
17222 mode0 = insn_data[icode].operand[0].mode;
17223 mode1 = insn_data[icode].operand[1].mode;
17224
17225 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
17226 op1 = force_reg (mode1, op1);
17227
17228 pat = GEN_FCN (icode) (op0, op1);
17229 if (! pat)
17230 return 0;
17231 emit_insn (pat);
17232 return const0_rtx;
17233
17234 case IX86_BUILTIN_MOVNTPS:
17235 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
17236 case IX86_BUILTIN_MOVNTQ:
17237 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
17238
17239 case IX86_BUILTIN_LDMXCSR:
17240 op0 = expand_normal (TREE_VALUE (arglist));
17241 target = assign_386_stack_local (SImode, SLOT_TEMP);
17242 emit_move_insn (target, op0);
17243 emit_insn (gen_sse_ldmxcsr (target));
17244 return 0;
17245
17246 case IX86_BUILTIN_STMXCSR:
17247 target = assign_386_stack_local (SImode, SLOT_TEMP);
17248 emit_insn (gen_sse_stmxcsr (target));
17249 return copy_to_mode_reg (SImode, target);
17250
17251 case IX86_BUILTIN_SHUFPS:
17252 case IX86_BUILTIN_SHUFPD:
17253 icode = (fcode == IX86_BUILTIN_SHUFPS
17254 ? CODE_FOR_sse_shufps
17255 : CODE_FOR_sse2_shufpd);
17256 arg0 = TREE_VALUE (arglist);
17257 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17258 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17259 op0 = expand_normal (arg0);
17260 op1 = expand_normal (arg1);
17261 op2 = expand_normal (arg2);
17262 tmode = insn_data[icode].operand[0].mode;
17263 mode0 = insn_data[icode].operand[1].mode;
17264 mode1 = insn_data[icode].operand[2].mode;
17265 mode2 = insn_data[icode].operand[3].mode;
17266
17267 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
17268 op0 = copy_to_mode_reg (mode0, op0);
17269 if ((optimize && !register_operand (op1, mode1))
17270 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
17271 op1 = copy_to_mode_reg (mode1, op1);
17272 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
17273 {
17274 /* @@@ better error message */
17275 error ("mask must be an immediate");
17276 return gen_reg_rtx (tmode);
17277 }
17278 if (optimize || target == 0
17279 || GET_MODE (target) != tmode
17280 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17281 target = gen_reg_rtx (tmode);
17282 pat = GEN_FCN (icode) (target, op0, op1, op2);
17283 if (! pat)
17284 return 0;
17285 emit_insn (pat);
17286 return target;
17287
17288 case IX86_BUILTIN_PSHUFW:
17289 case IX86_BUILTIN_PSHUFD:
17290 case IX86_BUILTIN_PSHUFHW:
17291 case IX86_BUILTIN_PSHUFLW:
17292 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
17293 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
17294 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
17295 : CODE_FOR_mmx_pshufw);
17296 arg0 = TREE_VALUE (arglist);
17297 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17298 op0 = expand_normal (arg0);
17299 op1 = expand_normal (arg1);
17300 tmode = insn_data[icode].operand[0].mode;
17301 mode1 = insn_data[icode].operand[1].mode;
17302 mode2 = insn_data[icode].operand[2].mode;
17303
17304 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17305 op0 = copy_to_mode_reg (mode1, op0);
17306 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17307 {
17308 /* @@@ better error message */
17309 error ("mask must be an immediate");
17310 return const0_rtx;
17311 }
17312 if (target == 0
17313 || GET_MODE (target) != tmode
17314 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17315 target = gen_reg_rtx (tmode);
17316 pat = GEN_FCN (icode) (target, op0, op1);
17317 if (! pat)
17318 return 0;
17319 emit_insn (pat);
17320 return target;
17321
17322 case IX86_BUILTIN_PSLLDQI128:
17323 case IX86_BUILTIN_PSRLDQI128:
17324 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
17325 : CODE_FOR_sse2_lshrti3);
17326 arg0 = TREE_VALUE (arglist);
17327 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17328 op0 = expand_normal (arg0);
17329 op1 = expand_normal (arg1);
17330 tmode = insn_data[icode].operand[0].mode;
17331 mode1 = insn_data[icode].operand[1].mode;
17332 mode2 = insn_data[icode].operand[2].mode;
17333
17334 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17335 {
17336 op0 = copy_to_reg (op0);
17337 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
17338 }
17339 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17340 {
17341 error ("shift must be an immediate");
17342 return const0_rtx;
17343 }
17344 target = gen_reg_rtx (V2DImode);
17345 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
17346 if (! pat)
17347 return 0;
17348 emit_insn (pat);
17349 return target;
17350
17351 case IX86_BUILTIN_FEMMS:
17352 emit_insn (gen_mmx_femms ());
17353 return NULL_RTX;
17354
17355 case IX86_BUILTIN_PAVGUSB:
17356 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
17357
17358 case IX86_BUILTIN_PF2ID:
17359 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
17360
17361 case IX86_BUILTIN_PFACC:
17362 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
17363
17364 case IX86_BUILTIN_PFADD:
17365 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
17366
17367 case IX86_BUILTIN_PFCMPEQ:
17368 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
17369
17370 case IX86_BUILTIN_PFCMPGE:
17371 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
17372
17373 case IX86_BUILTIN_PFCMPGT:
17374 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
17375
17376 case IX86_BUILTIN_PFMAX:
17377 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
17378
17379 case IX86_BUILTIN_PFMIN:
17380 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
17381
17382 case IX86_BUILTIN_PFMUL:
17383 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
17384
17385 case IX86_BUILTIN_PFRCP:
17386 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
17387
17388 case IX86_BUILTIN_PFRCPIT1:
17389 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
17390
17391 case IX86_BUILTIN_PFRCPIT2:
17392 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
17393
17394 case IX86_BUILTIN_PFRSQIT1:
17395 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
17396
17397 case IX86_BUILTIN_PFRSQRT:
17398 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
17399
17400 case IX86_BUILTIN_PFSUB:
17401 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
17402
17403 case IX86_BUILTIN_PFSUBR:
17404 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
17405
17406 case IX86_BUILTIN_PI2FD:
17407 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
17408
17409 case IX86_BUILTIN_PMULHRW:
17410 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
17411
17412 case IX86_BUILTIN_PF2IW:
17413 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
17414
17415 case IX86_BUILTIN_PFNACC:
17416 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
17417
17418 case IX86_BUILTIN_PFPNACC:
17419 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
17420
17421 case IX86_BUILTIN_PI2FW:
17422 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
17423
17424 case IX86_BUILTIN_PSWAPDSI:
17425 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
17426
17427 case IX86_BUILTIN_PSWAPDSF:
17428 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
17429
17430 case IX86_BUILTIN_SQRTSD:
17431 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
17432 case IX86_BUILTIN_LOADUPD:
17433 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
17434 case IX86_BUILTIN_STOREUPD:
17435 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
17436
17437 case IX86_BUILTIN_MFENCE:
17438 emit_insn (gen_sse2_mfence ());
17439 return 0;
17440 case IX86_BUILTIN_LFENCE:
17441 emit_insn (gen_sse2_lfence ());
17442 return 0;
17443
17444 case IX86_BUILTIN_CLFLUSH:
17445 arg0 = TREE_VALUE (arglist);
17446 op0 = expand_normal (arg0);
17447 icode = CODE_FOR_sse2_clflush;
17448 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
17449 op0 = copy_to_mode_reg (Pmode, op0);
17450
17451 emit_insn (gen_sse2_clflush (op0));
17452 return 0;
17453
17454 case IX86_BUILTIN_MOVNTPD:
17455 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
17456 case IX86_BUILTIN_MOVNTDQ:
17457 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
17458 case IX86_BUILTIN_MOVNTI:
17459 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
17460
17461 case IX86_BUILTIN_LOADDQU:
17462 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
17463 case IX86_BUILTIN_STOREDQU:
17464 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
17465
17466 case IX86_BUILTIN_MONITOR:
17467 arg0 = TREE_VALUE (arglist);
17468 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17469 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17470 op0 = expand_normal (arg0);
17471 op1 = expand_normal (arg1);
17472 op2 = expand_normal (arg2);
17473 if (!REG_P (op0))
17474 op0 = copy_to_mode_reg (Pmode, op0);
17475 if (!REG_P (op1))
17476 op1 = copy_to_mode_reg (SImode, op1);
17477 if (!REG_P (op2))
17478 op2 = copy_to_mode_reg (SImode, op2);
17479 if (!TARGET_64BIT)
17480 emit_insn (gen_sse3_monitor (op0, op1, op2));
17481 else
17482 emit_insn (gen_sse3_monitor64 (op0, op1, op2));
17483 return 0;
17484
17485 case IX86_BUILTIN_MWAIT:
17486 arg0 = TREE_VALUE (arglist);
17487 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17488 op0 = expand_normal (arg0);
17489 op1 = expand_normal (arg1);
17490 if (!REG_P (op0))
17491 op0 = copy_to_mode_reg (SImode, op0);
17492 if (!REG_P (op1))
17493 op1 = copy_to_mode_reg (SImode, op1);
17494 emit_insn (gen_sse3_mwait (op0, op1));
17495 return 0;
17496
17497 case IX86_BUILTIN_LDDQU:
17498 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
17499 target, 1);
17500
17501 case IX86_BUILTIN_PALIGNR:
17502 case IX86_BUILTIN_PALIGNR128:
17503 if (fcode == IX86_BUILTIN_PALIGNR)
17504 {
17505 icode = CODE_FOR_ssse3_palignrdi;
17506 mode = DImode;
17507 }
17508 else
17509 {
17510 icode = CODE_FOR_ssse3_palignrti;
17511 mode = V2DImode;
17512 }
17513 arg0 = TREE_VALUE (arglist);
17514 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17515 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17516 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
17517 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
17518 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
17519 tmode = insn_data[icode].operand[0].mode;
17520 mode1 = insn_data[icode].operand[1].mode;
17521 mode2 = insn_data[icode].operand[2].mode;
17522 mode3 = insn_data[icode].operand[3].mode;
17523
17524 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17525 {
17526 op0 = copy_to_reg (op0);
17527 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
17528 }
17529 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17530 {
17531 op1 = copy_to_reg (op1);
17532 op1 = simplify_gen_subreg (mode2, op1, GET_MODE (op1), 0);
17533 }
17534 if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
17535 {
17536 error ("shift must be an immediate");
17537 return const0_rtx;
17538 }
17539 target = gen_reg_rtx (mode);
17540 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, mode, 0),
17541 op0, op1, op2);
17542 if (! pat)
17543 return 0;
17544 emit_insn (pat);
17545 return target;
17546
17547 case IX86_BUILTIN_VEC_INIT_V2SI:
17548 case IX86_BUILTIN_VEC_INIT_V4HI:
17549 case IX86_BUILTIN_VEC_INIT_V8QI:
17550 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
17551
17552 case IX86_BUILTIN_VEC_EXT_V2DF:
17553 case IX86_BUILTIN_VEC_EXT_V2DI:
17554 case IX86_BUILTIN_VEC_EXT_V4SF:
17555 case IX86_BUILTIN_VEC_EXT_V4SI:
17556 case IX86_BUILTIN_VEC_EXT_V8HI:
17557 case IX86_BUILTIN_VEC_EXT_V2SI:
17558 case IX86_BUILTIN_VEC_EXT_V4HI:
17559 return ix86_expand_vec_ext_builtin (arglist, target);
17560
17561 case IX86_BUILTIN_VEC_SET_V8HI:
17562 case IX86_BUILTIN_VEC_SET_V4HI:
17563 return ix86_expand_vec_set_builtin (arglist);
17564
17565 default:
17566 break;
17567 }
17568
17569 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17570 if (d->code == fcode)
17571 {
17572 /* Compares are treated specially. */
17573 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
17574 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
17575 || d->icode == CODE_FOR_sse2_maskcmpv2df3
17576 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
17577 return ix86_expand_sse_compare (d, arglist, target);
17578
17579 return ix86_expand_binop_builtin (d->icode, arglist, target);
17580 }
17581
17582 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17583 if (d->code == fcode)
17584 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
17585
17586 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
17587 if (d->code == fcode)
17588 return ix86_expand_sse_comi (d, arglist, target);
17589
17590 gcc_unreachable ();
17591 }
17592
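/* A note on the fallback path above: any builtin without an explicit case
   in the switch is looked up in the descriptor tables after it.  Two-operand
   builtins (for example the vector arithmetic ones such as
   __builtin_ia32_addps) are found in bdesc_2arg and expanded through
   ix86_expand_binop_builtin, with the four mask-compare icodes routed to
   ix86_expand_sse_compare; one-operand builtins go through bdesc_1arg and
   ix86_expand_unop_builtin, and the COMI/UCOMI comparisons through
   bdesc_comi and ix86_expand_sse_comi.  A code that reaches the end of the
   function is unrecognized, hence gcc_unreachable.  */
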
17593 /* Returns a function decl for a vectorized version of the builtin function
17594 with builtin function code FN and the result vector type TYPE, or NULL_TREE
17595 if it is not available. */
17596
17597 static tree
17598 ix86_builtin_vectorized_function (enum built_in_function fn, tree type)
17599 {
17600 enum machine_mode el_mode;
17601 int n;
17602
17603 if (TREE_CODE (type) != VECTOR_TYPE)
17604 return NULL_TREE;
17605
17606 el_mode = TYPE_MODE (TREE_TYPE (type));
17607 n = TYPE_VECTOR_SUBPARTS (type);
17608
17609 switch (fn)
17610 {
17611 case BUILT_IN_SQRT:
17612 if (el_mode == DFmode && n == 2)
17613 return ix86_builtins[IX86_BUILTIN_SQRTPD];
17614 return NULL_TREE;
17615
17616 case BUILT_IN_SQRTF:
17617 if (el_mode == SFmode && n == 4)
17618 return ix86_builtins[IX86_BUILTIN_SQRTPS];
17619 return NULL_TREE;
17620
17621 default:
17622 ;
17623 }
17624
17625 return NULL_TREE;
17626 }
17627
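/* Concretely: when the vectorizer considers a loop such as

     for (i = 0; i < n; i++)
       a[i] = sqrt (b[i]);

   with SSE2 enabled, it can ask this hook for BUILT_IN_SQRT with a V2DF
   result type and gets back the IX86_BUILTIN_SQRTPD decl, letting the
   scalar calls be replaced by sqrtpd; every other (fn, type) combination
   falls through to NULL_TREE and is not vectorized through this hook.  */
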
17628 /* Store OPERAND to the memory after reload is completed. This means
17629 that we can't easily use assign_stack_local. */
17630 rtx
17631 ix86_force_to_memory (enum machine_mode mode, rtx operand)
17632 {
17633 rtx result;
17634
17635 gcc_assert (reload_completed);
17636 if (TARGET_RED_ZONE)
17637 {
17638 result = gen_rtx_MEM (mode,
17639 gen_rtx_PLUS (Pmode,
17640 stack_pointer_rtx,
17641 GEN_INT (-RED_ZONE_SIZE)));
17642 emit_move_insn (result, operand);
17643 }
17644 else if (!TARGET_RED_ZONE && TARGET_64BIT)
17645 {
17646 switch (mode)
17647 {
17648 case HImode:
17649 case SImode:
17650 operand = gen_lowpart (DImode, operand);
17651 /* FALLTHRU */
17652 case DImode:
17653 emit_insn (
17654 gen_rtx_SET (VOIDmode,
17655 gen_rtx_MEM (DImode,
17656 gen_rtx_PRE_DEC (DImode,
17657 stack_pointer_rtx)),
17658 operand));
17659 break;
17660 default:
17661 gcc_unreachable ();
17662 }
17663 result = gen_rtx_MEM (mode, stack_pointer_rtx);
17664 }
17665 else
17666 {
17667 switch (mode)
17668 {
17669 case DImode:
17670 {
17671 rtx operands[2];
17672 split_di (&operand, 1, operands, operands + 1);
17673 emit_insn (
17674 gen_rtx_SET (VOIDmode,
17675 gen_rtx_MEM (SImode,
17676 gen_rtx_PRE_DEC (Pmode,
17677 stack_pointer_rtx)),
17678 operands[1]));
17679 emit_insn (
17680 gen_rtx_SET (VOIDmode,
17681 gen_rtx_MEM (SImode,
17682 gen_rtx_PRE_DEC (Pmode,
17683 stack_pointer_rtx)),
17684 operands[0]));
17685 }
17686 break;
17687 case HImode:
17688 /* Store HImodes as SImodes. */
17689 operand = gen_lowpart (SImode, operand);
17690 /* FALLTHRU */
17691 case SImode:
17692 emit_insn (
17693 gen_rtx_SET (VOIDmode,
17694 gen_rtx_MEM (GET_MODE (operand),
17695 gen_rtx_PRE_DEC (SImode,
17696 stack_pointer_rtx)),
17697 operand));
17698 break;
17699 default:
17700 gcc_unreachable ();
17701 }
17702 result = gen_rtx_MEM (mode, stack_pointer_rtx);
17703 }
17704 return result;
17705 }
17706
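/* The 64-bit path without a red zone is in effect an explicit push: for a
   DImode OPERAND it emits

     (set (mem:DI (pre_dec:DI (reg:DI sp))) (reg:DI operand))

   and returns (mem:DI (reg:DI sp)), the freshly allocated slot at the new
   top of stack.  The red-zone path instead writes into the 128 bytes below
   the stack pointer without adjusting it.  */
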
17707 /* Free operand from the memory. */
17708 void
17709 ix86_free_from_memory (enum machine_mode mode)
17710 {
17711 if (!TARGET_RED_ZONE)
17712 {
17713 int size;
17714
17715 if (mode == DImode || TARGET_64BIT)
17716 size = 8;
17717 else
17718 size = 4;
17719 /* Use LEA to deallocate stack space. In peephole2 it will be converted
17720 to a pop or add instruction if registers are available. */
17721 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
17722 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
17723 GEN_INT (size))));
17724 }
17725 }
17726
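/* The deallocation above is a single flags-free RTL set in Pmode, roughly

     (set (reg sp) (plus (reg sp) (const_int 8)))

   for an 8-byte slot; because it does not clobber the flags it can only be
   implemented as an lea, and peephole2 may later turn it into a pop or a
   plain add when suitable registers are available.  */
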
17727 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
17728 QImode must go into class Q_REGS.
17729 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
17730 movdf to do mem-to-mem moves through integer regs. */
17731 enum reg_class
17732 ix86_preferred_reload_class (rtx x, enum reg_class class)
17733 {
17734 enum machine_mode mode = GET_MODE (x);
17735
17736 /* We're only allowed to return a subclass of CLASS. Many of the
17737 following checks fail for NO_REGS, so eliminate that early. */
17738 if (class == NO_REGS)
17739 return NO_REGS;
17740
17741 /* All classes can load zeros. */
17742 if (x == CONST0_RTX (mode))
17743 return class;
17744
17745 /* Force constants into memory if we are loading a (nonzero) constant into
17746 an MMX or SSE register. This is because there are no MMX/SSE instructions
17747 to load from a constant. */
17748 if (CONSTANT_P (x)
17749 && (MAYBE_MMX_CLASS_P (class) || MAYBE_SSE_CLASS_P (class)))
17750 return NO_REGS;
17751
17752 /* Prefer SSE regs only, if we can use them for math. */
17753 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
17754 return SSE_CLASS_P (class) ? class : NO_REGS;
17755
17756 /* Floating-point constants need more complex checks. */
17757 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
17758 {
17759 /* General regs can load everything. */
17760 if (reg_class_subset_p (class, GENERAL_REGS))
17761 return class;
17762
17763 /* Floats can load 0 and 1 plus some others. Note that we eliminated
17764 zero above. We only want to wind up preferring 80387 registers if
17765 we plan on doing computation with them. */
17766 if (TARGET_80387
17767 && standard_80387_constant_p (x))
17768 {
17769 /* Limit class to non-sse. */
17770 if (class == FLOAT_SSE_REGS)
17771 return FLOAT_REGS;
17772 if (class == FP_TOP_SSE_REGS)
17773 return FP_TOP_REG;
17774 if (class == FP_SECOND_SSE_REGS)
17775 return FP_SECOND_REG;
17776 if (class == FLOAT_INT_REGS || class == FLOAT_REGS)
17777 return class;
17778 }
17779
17780 return NO_REGS;
17781 }
17782
17783 /* Generally when we see PLUS here, it's the function invariant
17784 (plus soft-fp const_int), which can only be computed into general
17785 regs. */
17786 if (GET_CODE (x) == PLUS)
17787 return reg_class_subset_p (class, GENERAL_REGS) ? class : NO_REGS;
17788
17789 /* QImode constants are easy to load, but non-constant QImode data
17790 must go into Q_REGS. */
17791 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
17792 {
17793 if (reg_class_subset_p (class, Q_REGS))
17794 return class;
17795 if (reg_class_subset_p (Q_REGS, class))
17796 return Q_REGS;
17797 return NO_REGS;
17798 }
17799
17800 return class;
17801 }
17802
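/* Example of the checks above: reloading (const_double:DF 3.14) into a
   pseudo whose class is SSE_REGS returns NO_REGS at the CONSTANT_P test,
   so the constant is forced into the constant pool and loaded from memory.
   With 387 math, the same constant headed for FLOAT_REGS is likewise
   rejected unless it is one of the special 80387 constants (0.0, 1.0 and,
   with the extended set, a few others) that fldz/fld1 and friends can
   materialize directly.  */
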
17803 /* Discourage putting floating-point values in SSE registers unless
17804 SSE math is being used, and likewise for the 387 registers. */
17805 enum reg_class
17806 ix86_preferred_output_reload_class (rtx x, enum reg_class class)
17807 {
17808 enum machine_mode mode = GET_MODE (x);
17809
17810 /* Restrict the output reload class to the register bank that we are doing
17811 math on. If we would like not to return a subset of CLASS, reject this
17812 alternative: if reload cannot do this, it will still use its choice. */
17813 mode = GET_MODE (x);
17814 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
17815 return MAYBE_SSE_CLASS_P (class) ? SSE_REGS : NO_REGS;
17816
17817 if (TARGET_80387 && SCALAR_FLOAT_MODE_P (mode))
17818 {
17819 if (class == FP_TOP_SSE_REGS)
17820 return FP_TOP_REG;
17821 else if (class == FP_SECOND_SSE_REGS)
17822 return FP_SECOND_REG;
17823 else
17824 return FLOAT_CLASS_P (class) ? class : NO_REGS;
17825 }
17826
17827 return class;
17828 }
17829
17830 /* If we are copying between general and FP registers, we need a memory
17831 location. The same is true for SSE and MMX registers.
17832
17833 The macro can't work reliably when one of the CLASSES is a class containing
17834 registers from multiple units (SSE, MMX, integer). We avoid this by never
17835 combining those units in a single alternative in the machine description.
17836 Ensure that this constraint holds to avoid unexpected surprises.
17837
17838 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
17839 enforce these sanity checks. */
17840
17841 int
17842 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
17843 enum machine_mode mode, int strict)
17844 {
17845 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
17846 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
17847 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
17848 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
17849 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
17850 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
17851 {
17852 gcc_assert (!strict);
17853 return true;
17854 }
17855
17856 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
17857 return true;
17858
17859 /* ??? This is a lie. We do have moves between mmx/general, and for
17860 mmx/sse2. But by saying we need secondary memory we discourage the
17861 register allocator from using the mmx registers unless needed. */
17862 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
17863 return true;
17864
17865 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
17866 {
17867 /* SSE1 doesn't have any direct moves from other classes. */
17868 if (!TARGET_SSE2)
17869 return true;
17870
17871 /* If the target says that inter-unit moves are more expensive
17872 than moving through memory, then don't generate them. */
17873 if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
17874 return true;
17875
17876 /* Between SSE and general, we have moves no larger than word size. */
17877 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
17878 return true;
17879
17880 /* ??? For the cost of one register reformat penalty, we could use
17881 the same instructions to move SFmode and DFmode data, but the
17882 relevant move patterns don't support those alternatives. */
17883 if (mode == SFmode || mode == DFmode)
17884 return true;
17885 }
17886
17887 return false;
17888 }
17889
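/* For instance, a DFmode copy between GENERAL_REGS and SSE_REGS answers
   true here: on ia32 the mode is wider than a word, and in any case the
   SFmode/DFmode exception above applies, so reload implements the copy as
   a store to a stack slot followed by a load.  */
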
17890 /* Return true if the registers in CLASS cannot represent the change from
17891 modes FROM to TO. */
17892
17893 bool
17894 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
17895 enum reg_class class)
17896 {
17897 if (from == to)
17898 return false;
17899
17900 /* x87 registers can't do subreg at all, as all values are reformatted
17901 to extended precision. */
17902 if (MAYBE_FLOAT_CLASS_P (class))
17903 return true;
17904
17905 if (MAYBE_SSE_CLASS_P (class) || MAYBE_MMX_CLASS_P (class))
17906 {
17907 /* Vector registers do not support QI or HImode loads. If we don't
17908 disallow a change to these modes, reload will assume it's ok to
17909 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
17910 the vec_dupv4hi pattern. */
17911 if (GET_MODE_SIZE (from) < 4)
17912 return true;
17913
17914 /* Vector registers do not support subreg with nonzero offsets, which
17915 are otherwise valid for integer registers. Since we can't see
17916 whether we have a nonzero offset from here, prohibit all
17917 nonparadoxical subregs changing size. */
17918 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
17919 return true;
17920 }
17921
17922 return false;
17923 }
17924
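/* Example: narrowing (reg:SI x) to HImode via a subreg is fine for the
   integer registers, but if the pseudo may end up in an SSE or MMX
   register the SImode -> HImode change is rejected here because the new
   size is smaller; and for the x87 stack any mode change at all is
   rejected, since those registers keep every value in extended
   precision.  */
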
17925 /* Return the cost of moving data from a register in class CLASS1 to
17926 one in class CLASS2.
17927
17928 It is not required that the cost always equal 2 when FROM is the same as TO;
17929 on some machines it is expensive to move between registers if they are not
17930 general registers. */
17931
17932 int
17933 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
17934 enum reg_class class2)
17935 {
17936 /* In case we require secondary memory, compute cost of the store followed
17937 by load. In order to avoid bad register allocation choices, we need
17938 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
17939
17940 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
17941 {
17942 int cost = 1;
17943
17944 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
17945 MEMORY_MOVE_COST (mode, class1, 1));
17946 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
17947 MEMORY_MOVE_COST (mode, class2, 1));
17948
17949 /* In case of copying from general_purpose_register we may emit multiple
17950 stores followed by single load causing memory size mismatch stall.
17951 Count this as arbitrarily high cost of 20. */
17952 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
17953 cost += 20;
17954
17955 /* In the case of FP/MMX moves, the registers actually overlap, and we
17956 have to switch modes in order to treat them differently. */
17957 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
17958 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
17959 cost += 20;
17960
17961 return cost;
17962 }
17963
17964 /* Moves between SSE/MMX and integer unit are expensive. */
17965 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
17966 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
17967 return ix86_cost->mmxsse_to_integer;
17968 if (MAYBE_FLOAT_CLASS_P (class1))
17969 return ix86_cost->fp_move;
17970 if (MAYBE_SSE_CLASS_P (class1))
17971 return ix86_cost->sse_move;
17972 if (MAYBE_MMX_CLASS_P (class1))
17973 return ix86_cost->mmx_move;
17974 return 2;
17975 }
17976
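/* A rough worked example of the secondary-memory branch (the concrete
   numbers come from the active cost table): a DImode move between
   GENERAL_REGS and SSE_REGS on ia32 needs memory, so its cost is
   1 + the larger of the integer store/load costs + the larger of the SSE
   store/load costs, plus 20 because the general-register side needs two
   hard registers while the SSE side needs one, modelling the store/load
   size-mismatch stall noted above; the result is comfortably larger than
   any direct register-register move cost.  */
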
17977 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
17978
17979 bool
17980 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
17981 {
17982 /* Flags and only flags can only hold CCmode values. */
17983 if (CC_REGNO_P (regno))
17984 return GET_MODE_CLASS (mode) == MODE_CC;
17985 if (GET_MODE_CLASS (mode) == MODE_CC
17986 || GET_MODE_CLASS (mode) == MODE_RANDOM
17987 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
17988 return 0;
17989 if (FP_REGNO_P (regno))
17990 return VALID_FP_MODE_P (mode);
17991 if (SSE_REGNO_P (regno))
17992 {
17993 /* We implement the move patterns for all vector modes into and
17994 out of SSE registers, even when no operation instructions
17995 are available. */
17996 return (VALID_SSE_REG_MODE (mode)
17997 || VALID_SSE2_REG_MODE (mode)
17998 || VALID_MMX_REG_MODE (mode)
17999 || VALID_MMX_REG_MODE_3DNOW (mode));
18000 }
18001 if (MMX_REGNO_P (regno))
18002 {
18003 /* We implement the move patterns for 3DNOW modes even in MMX mode,
18004 so if the register is available at all, then we can move data of
18005 the given mode into or out of it. */
18006 return (VALID_MMX_REG_MODE (mode)
18007 || VALID_MMX_REG_MODE_3DNOW (mode));
18008 }
18009
18010 if (mode == QImode)
18011 {
18012 /* Take care for QImode values - they can be in non-QI regs,
18013 but then they do cause partial register stalls. */
18014 if (regno < 4 || TARGET_64BIT)
18015 return 1;
18016 if (!TARGET_PARTIAL_REG_STALL)
18017 return 1;
18018 return reload_in_progress || reload_completed;
18019 }
18020 /* We handle both integers and floats in the general purpose registers. */
18021 else if (VALID_INT_MODE_P (mode))
18022 return 1;
18023 else if (VALID_FP_MODE_P (mode))
18024 return 1;
18025 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
18026 on to use that value in smaller contexts, this can easily force a
18027 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
18028 supporting DImode, allow it. */
18029 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
18030 return 1;
18031
18032 return 0;
18033 }
18034
18035 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
18036 tieable integer mode. */
18037
18038 static bool
18039 ix86_tieable_integer_mode_p (enum machine_mode mode)
18040 {
18041 switch (mode)
18042 {
18043 case HImode:
18044 case SImode:
18045 return true;
18046
18047 case QImode:
18048 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
18049
18050 case DImode:
18051 return TARGET_64BIT;
18052
18053 default:
18054 return false;
18055 }
18056 }
18057
18058 /* Return true if MODE1 is accessible in a register that can hold MODE2
18059 without copying. That is, all register classes that can hold MODE2
18060 can also hold MODE1. */
18061
18062 bool
18063 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
18064 {
18065 if (mode1 == mode2)
18066 return true;
18067
18068 if (ix86_tieable_integer_mode_p (mode1)
18069 && ix86_tieable_integer_mode_p (mode2))
18070 return true;
18071
18072 /* MODE2 being XFmode implies fp stack or general regs, which means we
18073 can tie any smaller floating point modes to it. Note that we do not
18074 tie this with TFmode. */
18075 if (mode2 == XFmode)
18076 return mode1 == SFmode || mode1 == DFmode;
18077
18078 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
18079 that we can tie it with SFmode. */
18080 if (mode2 == DFmode)
18081 return mode1 == SFmode;
18082
18083 /* If MODE2 is only appropriate for an SSE register, then tie with
18084 any other mode acceptable to SSE registers. */
18085 if (GET_MODE_SIZE (mode2) >= 8
18086 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
18087 return ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1);
18088
18089 /* If MODE2 is appropriate for an MMX (or SSE) register, then tie
18090 with any other mode acceptable to MMX registers. */
18091 if (GET_MODE_SIZE (mode2) == 8
18092 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
18093 return ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1);
18094
18095 return false;
18096 }
18097
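/* Examples of the rules above: HImode and SImode always tie; an SFmode
   value can be tied to a register holding DFmode or XFmode; but DImode
   does not tie with DFmode, because the x87 stack registers that may hold
   DFmode have no valid DImode representation.  */
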
18098 /* Return the cost of moving data of mode M between a
18099 register and memory. A value of 2 is the default; this cost is
18100 relative to those in `REGISTER_MOVE_COST'.
18101
18102 If moving between registers and memory is more expensive than
18103 between two registers, you should define this macro to express the
18104 relative cost.
18105
18106 Also model the increased cost of moving QImode values in non
18107 Q_REGS classes.
18108 */
18109 int
18110 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
18111 {
18112 if (FLOAT_CLASS_P (class))
18113 {
18114 int index;
18115 switch (mode)
18116 {
18117 case SFmode:
18118 index = 0;
18119 break;
18120 case DFmode:
18121 index = 1;
18122 break;
18123 case XFmode:
18124 index = 2;
18125 break;
18126 default:
18127 return 100;
18128 }
18129 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
18130 }
18131 if (SSE_CLASS_P (class))
18132 {
18133 int index;
18134 switch (GET_MODE_SIZE (mode))
18135 {
18136 case 4:
18137 index = 0;
18138 break;
18139 case 8:
18140 index = 1;
18141 break;
18142 case 16:
18143 index = 2;
18144 break;
18145 default:
18146 return 100;
18147 }
18148 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
18149 }
18150 if (MMX_CLASS_P (class))
18151 {
18152 int index;
18153 switch (GET_MODE_SIZE (mode))
18154 {
18155 case 4:
18156 index = 0;
18157 break;
18158 case 8:
18159 index = 1;
18160 break;
18161 default:
18162 return 100;
18163 }
18164 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
18165 }
18166 switch (GET_MODE_SIZE (mode))
18167 {
18168 case 1:
18169 if (in)
18170 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
18171 : ix86_cost->movzbl_load);
18172 else
18173 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
18174 : ix86_cost->int_store[0] + 4);
18175 break;
18176 case 2:
18177 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
18178 default:
18179 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
18180 if (mode == TFmode)
18181 mode = XFmode;
18182 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
18183 * (((int) GET_MODE_SIZE (mode)
18184 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
18185 }
18186 }
18187
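/* Example of the QImode special case above: loading a byte into a
   register outside Q_REGS is charged the movzbl_load cost, and storing
   from such a register costs int_store[0] + 4, reflecting that in 32-bit
   mode only %eax..%edx have byte sub-registers, so the value must be
   moved there first.  */
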
18188 /* Compute a (partial) cost for rtx X. Return true if the complete
18189 cost has been computed, and false if subexpressions should be
18190 scanned. In either case, *TOTAL contains the cost result. */
18191
18192 static bool
18193 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
18194 {
18195 enum machine_mode mode = GET_MODE (x);
18196
18197 switch (code)
18198 {
18199 case CONST_INT:
18200 case CONST:
18201 case LABEL_REF:
18202 case SYMBOL_REF:
18203 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
18204 *total = 3;
18205 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
18206 *total = 2;
18207 else if (flag_pic && SYMBOLIC_CONST (x)
18208 && (!TARGET_64BIT
18209 || (GET_CODE (x) != LABEL_REF
18210 && (GET_CODE (x) != SYMBOL_REF
18211 || !SYMBOL_REF_LOCAL_P (x)))))
18212 *total = 1;
18213 else
18214 *total = 0;
18215 return true;
18216
18217 case CONST_DOUBLE:
18218 if (mode == VOIDmode)
18219 *total = 0;
18220 else
18221 switch (standard_80387_constant_p (x))
18222 {
18223 case 1: /* 0.0 */
18224 *total = 1;
18225 break;
18226 default: /* Other constants */
18227 *total = 2;
18228 break;
18229 case 0:
18230 case -1:
18231 /* Start with (MEM (SYMBOL_REF)), since that's where
18232 it'll probably end up. Add a penalty for size. */
18233 *total = (COSTS_N_INSNS (1)
18234 + (flag_pic != 0 && !TARGET_64BIT)
18235 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
18236 break;
18237 }
18238 return true;
18239
18240 case ZERO_EXTEND:
18241 /* The zero extension is often completely free on x86_64, so make
18242 it as cheap as possible. */
18243 if (TARGET_64BIT && mode == DImode
18244 && GET_MODE (XEXP (x, 0)) == SImode)
18245 *total = 1;
18246 else if (TARGET_ZERO_EXTEND_WITH_AND)
18247 *total = ix86_cost->add;
18248 else
18249 *total = ix86_cost->movzx;
18250 return false;
18251
18252 case SIGN_EXTEND:
18253 *total = ix86_cost->movsx;
18254 return false;
18255
18256 case ASHIFT:
18257 if (CONST_INT_P (XEXP (x, 1))
18258 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
18259 {
18260 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
18261 if (value == 1)
18262 {
18263 *total = ix86_cost->add;
18264 return false;
18265 }
18266 if ((value == 2 || value == 3)
18267 && ix86_cost->lea <= ix86_cost->shift_const)
18268 {
18269 *total = ix86_cost->lea;
18270 return false;
18271 }
18272 }
18273 /* FALLTHRU */
18274
18275 case ROTATE:
18276 case ASHIFTRT:
18277 case LSHIFTRT:
18278 case ROTATERT:
18279 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
18280 {
18281 if (CONST_INT_P (XEXP (x, 1)))
18282 {
18283 if (INTVAL (XEXP (x, 1)) > 32)
18284 *total = ix86_cost->shift_const + COSTS_N_INSNS (2);
18285 else
18286 *total = ix86_cost->shift_const * 2;
18287 }
18288 else
18289 {
18290 if (GET_CODE (XEXP (x, 1)) == AND)
18291 *total = ix86_cost->shift_var * 2;
18292 else
18293 *total = ix86_cost->shift_var * 6 + COSTS_N_INSNS (2);
18294 }
18295 }
18296 else
18297 {
18298 if (CONST_INT_P (XEXP (x, 1)))
18299 *total = ix86_cost->shift_const;
18300 else
18301 *total = ix86_cost->shift_var;
18302 }
18303 return false;
18304
18305 case MULT:
18306 if (FLOAT_MODE_P (mode))
18307 {
18308 *total = ix86_cost->fmul;
18309 return false;
18310 }
18311 else
18312 {
18313 rtx op0 = XEXP (x, 0);
18314 rtx op1 = XEXP (x, 1);
18315 int nbits;
18316 if (CONST_INT_P (XEXP (x, 1)))
18317 {
18318 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
18319 for (nbits = 0; value != 0; value &= value - 1)
18320 nbits++;
18321 }
18322 else
18323 /* This is arbitrary. */
18324 nbits = 7;
18325
18326 /* Compute costs correctly for widening multiplication. */
18327 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
18328 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
18329 == GET_MODE_SIZE (mode))
18330 {
18331 int is_mulwiden = 0;
18332 enum machine_mode inner_mode = GET_MODE (op0);
18333
18334 if (GET_CODE (op0) == GET_CODE (op1))
18335 is_mulwiden = 1, op1 = XEXP (op1, 0);
18336 else if (CONST_INT_P (op1))
18337 {
18338 if (GET_CODE (op0) == SIGN_EXTEND)
18339 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
18340 == INTVAL (op1);
18341 else
18342 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
18343 }
18344
18345 if (is_mulwiden)
18346 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
18347 }
18348
18349 *total = (ix86_cost->mult_init[MODE_INDEX (mode)]
18350 + nbits * ix86_cost->mult_bit
18351 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code));
18352
18353 return true;
18354 }
18355
18356 case DIV:
18357 case UDIV:
18358 case MOD:
18359 case UMOD:
18360 if (FLOAT_MODE_P (mode))
18361 *total = ix86_cost->fdiv;
18362 else
18363 *total = ix86_cost->divide[MODE_INDEX (mode)];
18364 return false;
18365
18366 case PLUS:
18367 if (FLOAT_MODE_P (mode))
18368 *total = ix86_cost->fadd;
18369 else if (GET_MODE_CLASS (mode) == MODE_INT
18370 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
18371 {
18372 if (GET_CODE (XEXP (x, 0)) == PLUS
18373 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
18374 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
18375 && CONSTANT_P (XEXP (x, 1)))
18376 {
18377 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
18378 if (val == 2 || val == 4 || val == 8)
18379 {
18380 *total = ix86_cost->lea;
18381 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
18382 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
18383 outer_code);
18384 *total += rtx_cost (XEXP (x, 1), outer_code);
18385 return true;
18386 }
18387 }
18388 else if (GET_CODE (XEXP (x, 0)) == MULT
18389 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
18390 {
18391 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
18392 if (val == 2 || val == 4 || val == 8)
18393 {
18394 *total = ix86_cost->lea;
18395 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
18396 *total += rtx_cost (XEXP (x, 1), outer_code);
18397 return true;
18398 }
18399 }
18400 else if (GET_CODE (XEXP (x, 0)) == PLUS)
18401 {
18402 *total = ix86_cost->lea;
18403 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
18404 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
18405 *total += rtx_cost (XEXP (x, 1), outer_code);
18406 return true;
18407 }
18408 }
18409 /* FALLTHRU */
18410
18411 case MINUS:
18412 if (FLOAT_MODE_P (mode))
18413 {
18414 *total = ix86_cost->fadd;
18415 return false;
18416 }
18417 /* FALLTHRU */
18418
18419 case AND:
18420 case IOR:
18421 case XOR:
18422 if (!TARGET_64BIT && mode == DImode)
18423 {
18424 *total = (ix86_cost->add * 2
18425 + (rtx_cost (XEXP (x, 0), outer_code)
18426 << (GET_MODE (XEXP (x, 0)) != DImode))
18427 + (rtx_cost (XEXP (x, 1), outer_code)
18428 << (GET_MODE (XEXP (x, 1)) != DImode)));
18429 return true;
18430 }
18431 /* FALLTHRU */
18432
18433 case NEG:
18434 if (FLOAT_MODE_P (mode))
18435 {
18436 *total = ix86_cost->fchs;
18437 return false;
18438 }
18439 /* FALLTHRU */
18440
18441 case NOT:
18442 if (!TARGET_64BIT && mode == DImode)
18443 *total = ix86_cost->add * 2;
18444 else
18445 *total = ix86_cost->add;
18446 return false;
18447
18448 case COMPARE:
18449 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
18450 && XEXP (XEXP (x, 0), 1) == const1_rtx
18451 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
18452 && XEXP (x, 1) == const0_rtx)
18453 {
18454 /* This kind of construct is implemented using test[bwl].
18455 Treat it as if we had an AND. */
18456 *total = (ix86_cost->add
18457 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
18458 + rtx_cost (const1_rtx, outer_code));
18459 return true;
18460 }
18461 return false;
18462
18463 case FLOAT_EXTEND:
18464 if (!TARGET_SSE_MATH
18465 || mode == XFmode
18466 || (mode == DFmode && !TARGET_SSE2))
18467 *total = 0;
18468 return false;
18469
18470 case ABS:
18471 if (FLOAT_MODE_P (mode))
18472 *total = ix86_cost->fabs;
18473 return false;
18474
18475 case SQRT:
18476 if (FLOAT_MODE_P (mode))
18477 *total = ix86_cost->fsqrt;
18478 return false;
18479
18480 case UNSPEC:
18481 if (XINT (x, 1) == UNSPEC_TP)
18482 *total = 0;
18483 return false;
18484
18485 default:
18486 return false;
18487 }
18488 }
18489
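/* One concrete case from the PLUS handling above: an address-style
   expression such as

     (plus:SI (plus:SI (mult:SI (reg:SI a) (const_int 4)) (reg:SI b))
              (const_int 12))

   is costed as a single ix86_cost->lea plus the costs of its register
   operands (the constant is free), matching the fact that the whole
   computation fits in one lea instruction.  */
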
18490 #if TARGET_MACHO
18491
18492 static int current_machopic_label_num;
18493
18494 /* Given a symbol name and its associated stub, write out the
18495 definition of the stub. */
18496
18497 void
18498 machopic_output_stub (FILE *file, const char *symb, const char *stub)
18499 {
18500 unsigned int length;
18501 char *binder_name, *symbol_name, lazy_ptr_name[32];
18502 int label = ++current_machopic_label_num;
18503
18504 /* For 64-bit we shouldn't get here. */
18505 gcc_assert (!TARGET_64BIT);
18506
18507 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
18508 symb = (*targetm.strip_name_encoding) (symb);
18509
18510 length = strlen (stub);
18511 binder_name = alloca (length + 32);
18512 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
18513
18514 length = strlen (symb);
18515 symbol_name = alloca (length + 32);
18516 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
18517
18518 sprintf (lazy_ptr_name, "L%d$lz", label);
18519
18520 if (MACHOPIC_PURE)
18521 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
18522 else
18523 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
18524
18525 fprintf (file, "%s:\n", stub);
18526 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
18527
18528 if (MACHOPIC_PURE)
18529 {
18530 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
18531 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
18532 fprintf (file, "\tjmp\t*%%edx\n");
18533 }
18534 else
18535 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
18536
18537 fprintf (file, "%s:\n", binder_name);
18538
18539 if (MACHOPIC_PURE)
18540 {
18541 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
18542 fprintf (file, "\tpushl\t%%eax\n");
18543 }
18544 else
18545 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
18546
18547 fprintf (file, "\tjmp\tdyld_stub_binding_helper\n");
18548
18549 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
18550 fprintf (file, "%s:\n", lazy_ptr_name);
18551 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
18552 fprintf (file, "\t.long %s\n", binder_name);
18553 }
18554
18555 void
18556 darwin_x86_file_end (void)
18557 {
18558 darwin_file_end ();
18559 ix86_file_end ();
18560 }
18561 #endif /* TARGET_MACHO */
18562
18563 /* Order the registers for register allocator. */
18564
18565 void
18566 x86_order_regs_for_local_alloc (void)
18567 {
18568 int pos = 0;
18569 int i;
18570
18571 /* First allocate the local general purpose registers. */
18572 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
18573 if (GENERAL_REGNO_P (i) && call_used_regs[i])
18574 reg_alloc_order [pos++] = i;
18575
18576 /* Global general purpose registers. */
18577 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
18578 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
18579 reg_alloc_order [pos++] = i;
18580
18581 /* x87 registers come first in case we are doing FP math
18582 using them. */
18583 if (!TARGET_SSE_MATH)
18584 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
18585 reg_alloc_order [pos++] = i;
18586
18587 /* SSE registers. */
18588 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
18589 reg_alloc_order [pos++] = i;
18590 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
18591 reg_alloc_order [pos++] = i;
18592
18593 /* x87 registers. */
18594 if (TARGET_SSE_MATH)
18595 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
18596 reg_alloc_order [pos++] = i;
18597
18598 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
18599 reg_alloc_order [pos++] = i;
18600
18601 /* Initialize the rest of the array as we do not allocate some registers
18602 at all. */
18603 while (pos < FIRST_PSEUDO_REGISTER)
18604 reg_alloc_order [pos++] = 0;
18605 }
18606
18607 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
18608 struct attribute_spec.handler. */
18609 static tree
18610 ix86_handle_struct_attribute (tree *node, tree name,
18611 tree args ATTRIBUTE_UNUSED,
18612 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
18613 {
18614 tree *type = NULL;
18615 if (DECL_P (*node))
18616 {
18617 if (TREE_CODE (*node) == TYPE_DECL)
18618 type = &TREE_TYPE (*node);
18619 }
18620 else
18621 type = node;
18622
18623 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
18624 || TREE_CODE (*type) == UNION_TYPE)))
18625 {
18626 warning (OPT_Wattributes, "%qs attribute ignored",
18627 IDENTIFIER_POINTER (name));
18628 *no_add_attrs = true;
18629 }
18630
18631 else if ((is_attribute_p ("ms_struct", name)
18632 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
18633 || ((is_attribute_p ("gcc_struct", name)
18634 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
18635 {
18636 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
18637 IDENTIFIER_POINTER (name));
18638 *no_add_attrs = true;
18639 }
18640
18641 return NULL_TREE;
18642 }
18643
18644 static bool
18645 ix86_ms_bitfield_layout_p (tree record_type)
18646 {
18647 return (TARGET_MS_BITFIELD_LAYOUT &&
18648 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
18649 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
18650 }
18651
18652 /* Returns an expression indicating where the this parameter is
18653 located on entry to the FUNCTION. */
18654
18655 static rtx
18656 x86_this_parameter (tree function)
18657 {
18658 tree type = TREE_TYPE (function);
18659
18660 if (TARGET_64BIT)
18661 {
18662 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
18663 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
18664 }
18665
18666 if (ix86_function_regparm (type, function) > 0)
18667 {
18668 tree parm;
18669
18670 parm = TYPE_ARG_TYPES (type);
18671 /* Figure out whether or not the function has a variable number of
18672 arguments. */
18673 for (; parm; parm = TREE_CHAIN (parm))
18674 if (TREE_VALUE (parm) == void_type_node)
18675 break;
18676 /* If not, the this parameter is in the first argument. */
18677 if (parm)
18678 {
18679 int regno = 0;
18680 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
18681 regno = 2;
18682 return gen_rtx_REG (SImode, regno);
18683 }
18684 }
18685
18686 if (aggregate_value_p (TREE_TYPE (type), type))
18687 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
18688 else
18689 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
18690 }
18691
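/* Example: for a 32-bit method compiled without regparm that returns an
   aggregate in memory, the hidden return-slot pointer occupies the first
   stack slot, so THIS is found at (mem:SI (plus sp 8)) on entry; without
   an aggregate return value it sits at (mem:SI (plus sp 4)), directly
   above the return address.  */
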
18692 /* Determine whether x86_output_mi_thunk can succeed. */
18693
18694 static bool
18695 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
18696 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
18697 HOST_WIDE_INT vcall_offset, tree function)
18698 {
18699 /* 64-bit can handle anything. */
18700 if (TARGET_64BIT)
18701 return true;
18702
18703 /* For 32-bit, everything's fine if we have one free register. */
18704 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
18705 return true;
18706
18707 /* Need a free register for vcall_offset. */
18708 if (vcall_offset)
18709 return false;
18710
18711 /* Need a free register for GOT references. */
18712 if (flag_pic && !(*targetm.binds_local_p) (function))
18713 return false;
18714
18715 /* Otherwise ok. */
18716 return true;
18717 }
18718
18719 /* Output the assembler code for a thunk function. THUNK_DECL is the
18720 declaration for the thunk function itself, FUNCTION is the decl for
18721 the target function. DELTA is an immediate constant offset to be
18722 added to THIS. If VCALL_OFFSET is nonzero, the word at
18723 *(*this + vcall_offset) should be added to THIS. */
18724
18725 static void
18726 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
18727 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
18728 HOST_WIDE_INT vcall_offset, tree function)
18729 {
18730 rtx xops[3];
18731 rtx this = x86_this_parameter (function);
18732 rtx this_reg, tmp;
18733
18734 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
18735 pull it in now and let DELTA benefit. */
18736 if (REG_P (this))
18737 this_reg = this;
18738 else if (vcall_offset)
18739 {
18740 /* Put the this parameter into %eax. */
18741 xops[0] = this;
18742 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
18743 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18744 }
18745 else
18746 this_reg = NULL_RTX;
18747
18748 /* Adjust the this parameter by a fixed constant. */
18749 if (delta)
18750 {
18751 xops[0] = GEN_INT (delta);
18752 xops[1] = this_reg ? this_reg : this;
18753 if (TARGET_64BIT)
18754 {
18755 if (!x86_64_general_operand (xops[0], DImode))
18756 {
18757 tmp = gen_rtx_REG (DImode, R10_REG);
18758 xops[1] = tmp;
18759 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
18760 xops[0] = tmp;
18761 xops[1] = this;
18762 }
18763 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
18764 }
18765 else
18766 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
18767 }
18768
18769 /* Adjust the this parameter by a value stored in the vtable. */
18770 if (vcall_offset)
18771 {
18772 if (TARGET_64BIT)
18773 tmp = gen_rtx_REG (DImode, R10_REG);
18774 else
18775 {
18776 int tmp_regno = 2 /* ECX */;
18777 if (lookup_attribute ("fastcall",
18778 TYPE_ATTRIBUTES (TREE_TYPE (function))))
18779 tmp_regno = 0 /* EAX */;
18780 tmp = gen_rtx_REG (SImode, tmp_regno);
18781 }
18782
18783 xops[0] = gen_rtx_MEM (Pmode, this_reg);
18784 xops[1] = tmp;
18785 if (TARGET_64BIT)
18786 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
18787 else
18788 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18789
18790 /* Adjust the this parameter. */
18791 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
18792 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
18793 {
18794 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
18795 xops[0] = GEN_INT (vcall_offset);
18796 xops[1] = tmp2;
18797 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
18798 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
18799 }
18800 xops[1] = this_reg;
18801 if (TARGET_64BIT)
18802 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
18803 else
18804 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
18805 }
18806
18807 /* If necessary, drop THIS back to its stack slot. */
18808 if (this_reg && this_reg != this)
18809 {
18810 xops[0] = this_reg;
18811 xops[1] = this;
18812 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18813 }
18814
18815 xops[0] = XEXP (DECL_RTL (function), 0);
18816 if (TARGET_64BIT)
18817 {
18818 if (!flag_pic || (*targetm.binds_local_p) (function))
18819 output_asm_insn ("jmp\t%P0", xops);
18820 else
18821 {
18822 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
18823 tmp = gen_rtx_CONST (Pmode, tmp);
18824 tmp = gen_rtx_MEM (QImode, tmp);
18825 xops[0] = tmp;
18826 output_asm_insn ("jmp\t%A0", xops);
18827 }
18828 }
18829 else
18830 {
18831 if (!flag_pic || (*targetm.binds_local_p) (function))
18832 output_asm_insn ("jmp\t%P0", xops);
18833 else
18834 #if TARGET_MACHO
18835 if (TARGET_MACHO)
18836 {
18837 rtx sym_ref = XEXP (DECL_RTL (function), 0);
18838 tmp = (gen_rtx_SYMBOL_REF
18839 (Pmode,
18840 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
18841 tmp = gen_rtx_MEM (QImode, tmp);
18842 xops[0] = tmp;
18843 output_asm_insn ("jmp\t%0", xops);
18844 }
18845 else
18846 #endif /* TARGET_MACHO */
18847 {
18848 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
18849 output_set_got (tmp, NULL_RTX);
18850
18851 xops[1] = tmp;
18852 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
18853 output_asm_insn ("jmp\t{*}%1", xops);
18854 }
18855 }
18856 }
18857
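/* In the common 32-bit, non-PIC case without a vcall offset the thunk
   emitted above is just two instructions, roughly

     addl $delta, 4(%esp)
     jmp  target

   i.e. adjust the incoming THIS in its stack slot and tail-call the real
   method; the vcall_offset path additionally loads the vtable pointer
   into a scratch register to pick up the stored adjustment.  */
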
18858 static void
18859 x86_file_start (void)
18860 {
18861 default_file_start ();
18862 #if TARGET_MACHO
18863 darwin_file_start ();
18864 #endif
18865 if (X86_FILE_START_VERSION_DIRECTIVE)
18866 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
18867 if (X86_FILE_START_FLTUSED)
18868 fputs ("\t.global\t__fltused\n", asm_out_file);
18869 if (ix86_asm_dialect == ASM_INTEL)
18870 fputs ("\t.intel_syntax\n", asm_out_file);
18871 }
18872
18873 int
18874 x86_field_alignment (tree field, int computed)
18875 {
18876 enum machine_mode mode;
18877 tree type = TREE_TYPE (field);
18878
18879 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
18880 return computed;
18881 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
18882 ? get_inner_array_type (type) : type);
18883 if (mode == DFmode || mode == DCmode
18884 || GET_MODE_CLASS (mode) == MODE_INT
18885 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
18886 return MIN (32, computed);
18887 return computed;
18888 }
18889
18890 /* Output assembler code to FILE to increment profiler label # LABELNO
18891 for profiling a function entry. */
18892 void
18893 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
18894 {
18895 if (TARGET_64BIT)
18896 if (flag_pic)
18897 {
18898 #ifndef NO_PROFILE_COUNTERS
18899 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
18900 #endif
18901 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
18902 }
18903 else
18904 {
18905 #ifndef NO_PROFILE_COUNTERS
18906 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
18907 #endif
18908 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
18909 }
18910 else if (flag_pic)
18911 {
18912 #ifndef NO_PROFILE_COUNTERS
18913 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
18914 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
18915 #endif
18916 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
18917 }
18918 else
18919 {
18920 #ifndef NO_PROFILE_COUNTERS
18921 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
18922 PROFILE_COUNT_REGISTER);
18923 #endif
18924 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
18925 }
18926 }
18927
18928 /* We don't have exact information about the insn sizes, but we may assume
18929 quite safely that we are informed about all 1-byte insns and memory
18930 address sizes. This is enough to eliminate unnecessary padding in
18931 99% of cases. */
18932
18933 static int
18934 min_insn_size (rtx insn)
18935 {
18936 int l = 0;
18937
18938 if (!INSN_P (insn) || !active_insn_p (insn))
18939 return 0;
18940
18941 /* Discard alignments we've emitted and jump instructions. */
18942 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
18943 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
18944 return 0;
18945 if (JUMP_P (insn)
18946 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
18947 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
18948 return 0;
18949
18950 /* Important case - calls are always 5 bytes.
18951 It is common to have many calls in a row. */
18952 if (CALL_P (insn)
18953 && symbolic_reference_mentioned_p (PATTERN (insn))
18954 && !SIBLING_CALL_P (insn))
18955 return 5;
18956 if (get_attr_length (insn) <= 1)
18957 return 1;
18958
18959 /* For normal instructions we may rely on the sizes of addresses
18960 and the presence of a symbol to require 4 bytes of encoding.
18961 This is not the case for jumps, where references are PC-relative. */
18962 if (!JUMP_P (insn))
18963 {
18964 l = get_attr_length_address (insn);
18965 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
18966 l = 4;
18967 }
18968 if (l)
18969 return 1+l;
18970 else
18971 return 2;
18972 }
18973
18974 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16-byte
18975 window. */
18976
18977 static void
18978 ix86_avoid_jump_misspredicts (void)
18979 {
18980 rtx insn, start = get_insns ();
18981 int nbytes = 0, njumps = 0;
18982 int isjump = 0;
18983
18984 /* Look for all minimal intervals of instructions containing 4 jumps.
18985 The intervals are bounded by START and INSN. NBYTES is the total
18986 size of instructions in the interval including INSN and not including
18987 START. When NBYTES is smaller than 16 bytes, it is possible
18988 that the end of START and INSN end up in the same 16-byte window.
18989
18990 The smallest offset in the window at which INSN can start is the case where
18991 START ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
18992 We add a p2align to the 16-byte window with maxskip 17 - NBYTES + sizeof (INSN).
18993 */
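/* For example (illustrative numbers): if the interval including INSN totals
   NBYTES = 10 bytes and min_insn_size (INSN) is 2, the padding emitted below
   is padsize = 15 - 10 + 2 = 7 bytes, enough to push INSN into the next
   16-byte window.  */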
18994 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
18995 {
18996
18997 nbytes += min_insn_size (insn);
18998 if (dump_file)
18999 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
19000 INSN_UID (insn), min_insn_size (insn));
19001 if ((JUMP_P (insn)
19002 && GET_CODE (PATTERN (insn)) != ADDR_VEC
19003 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
19004 || CALL_P (insn))
19005 njumps++;
19006 else
19007 continue;
19008
19009 while (njumps > 3)
19010 {
19011 start = NEXT_INSN (start);
19012 if ((JUMP_P (start)
19013 && GET_CODE (PATTERN (start)) != ADDR_VEC
19014 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
19015 || CALL_P (start))
19016 njumps--, isjump = 1;
19017 else
19018 isjump = 0;
19019 nbytes -= min_insn_size (start);
19020 }
19021 gcc_assert (njumps >= 0);
19022 if (dump_file)
19023 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
19024 INSN_UID (start), INSN_UID (insn), nbytes);
19025
19026 if (njumps == 3 && isjump && nbytes < 16)
19027 {
19028 int padsize = 15 - nbytes + min_insn_size (insn);
19029
19030 if (dump_file)
19031 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
19032 INSN_UID (insn), padsize);
19033 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
19034 }
19035 }
19036 }
19037
19038 /* AMD Athlon works faster
19039 when RET is not the destination of a conditional jump or directly preceded
19040 by another jump instruction. We avoid the penalty by inserting a NOP just
19041 before the RET instruction in such cases. */
19042 static void
19043 ix86_pad_returns (void)
19044 {
19045 edge e;
19046 edge_iterator ei;
19047
19048 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
19049 {
19050 basic_block bb = e->src;
19051 rtx ret = BB_END (bb);
19052 rtx prev;
19053 bool replace = false;
19054
19055 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
19056 || !maybe_hot_bb_p (bb))
19057 continue;
19058 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
19059 if (active_insn_p (prev) || LABEL_P (prev))
19060 break;
19061 if (prev && LABEL_P (prev))
19062 {
19063 edge e;
19064 edge_iterator ei;
19065
19066 FOR_EACH_EDGE (e, ei, bb->preds)
19067 if (EDGE_FREQUENCY (e) && e->src->index >= 0
19068 && !(e->flags & EDGE_FALLTHRU))
19069 replace = true;
19070 }
19071 if (!replace)
19072 {
19073 prev = prev_active_insn (ret);
19074 if (prev
19075 && ((JUMP_P (prev) && any_condjump_p (prev))
19076 || CALL_P (prev)))
19077 replace = true;
19078 /* Empty functions get a branch mispredict even when the jump destination
19079 is not visible to us. */
19080 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
19081 replace = true;
19082 }
19083 if (replace)
19084 {
19085 emit_insn_before (gen_return_internal_long (), ret);
19086 delete_insn (ret);
19087 }
19088 }
19089 }
19090
19091 /* Implement machine-specific optimizations. We implement padding of returns
19092 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
19093 static void
19094 ix86_reorg (void)
19095 {
19096 if (TARGET_PAD_RETURNS && optimize && !optimize_size)
19097 ix86_pad_returns ();
19098 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
19099 ix86_avoid_jump_misspredicts ();
19100 }
19101
19102 /* Return nonzero when a QImode register that must be represented via a REX
19103 prefix is used. */
19104 bool
19105 x86_extended_QIreg_mentioned_p (rtx insn)
19106 {
19107 int i;
19108 extract_insn_cached (insn);
19109 for (i = 0; i < recog_data.n_operands; i++)
19110 if (REG_P (recog_data.operand[i])
19111 && REGNO (recog_data.operand[i]) >= 4)
19112 return true;
19113 return false;
19114 }
19115
19116 /* Return nonzero when P points to a register encoded via a REX prefix.
19117 Called via for_each_rtx. */
19118 static int
19119 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
19120 {
19121 unsigned int regno;
19122 if (!REG_P (*p))
19123 return 0;
19124 regno = REGNO (*p);
19125 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
19126 }
19127
19128 /* Return true when INSN mentions a register that must be encoded using a REX
19129 prefix. */
19130 bool
19131 x86_extended_reg_mentioned_p (rtx insn)
19132 {
19133 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
19134 }
19135
19136 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
19137 optabs would emit if we didn't have TFmode patterns. */
19138
19139 void
19140 x86_emit_floatuns (rtx operands[2])
19141 {
19142 rtx neglab, donelab, i0, i1, f0, in, out;
19143 enum machine_mode mode, inmode;
19144
19145 inmode = GET_MODE (operands[1]);
19146 gcc_assert (inmode == SImode || inmode == DImode);
19147
19148 out = operands[0];
19149 in = force_reg (inmode, operands[1]);
19150 mode = GET_MODE (out);
19151 neglab = gen_label_rtx ();
19152 donelab = gen_label_rtx ();
19153 i1 = gen_reg_rtx (Pmode);
19154 f0 = gen_reg_rtx (mode);
19155
19156 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
19157
19158 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
19159 emit_jump_insn (gen_jump (donelab));
19160 emit_barrier ();
19161
19162 emit_label (neglab);
19163
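  /* The input has its high (sign) bit set: compute (in >> 1) | (in & 1),
     convert that nonnegative value to FP, and double the result.  OR-ing in
     the low bit keeps the final rounding correct.  */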
19164 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
19165 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
19166 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
19167 expand_float (f0, i0, 0);
19168 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
19169
19170 emit_label (donelab);
19171 }
19172 \f
19173 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19174 with all elements equal to VAR. Return true if successful. */
19175
19176 static bool
19177 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
19178 rtx target, rtx val)
19179 {
19180 enum machine_mode smode, wsmode, wvmode;
19181 rtx x;
19182
19183 switch (mode)
19184 {
19185 case V2SImode:
19186 case V2SFmode:
19187 if (!mmx_ok)
19188 return false;
19189 /* FALLTHRU */
19190
19191 case V2DFmode:
19192 case V2DImode:
19193 case V4SFmode:
19194 case V4SImode:
19195 val = force_reg (GET_MODE_INNER (mode), val);
19196 x = gen_rtx_VEC_DUPLICATE (mode, val);
19197 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19198 return true;
19199
19200 case V4HImode:
19201 if (!mmx_ok)
19202 return false;
19203 if (TARGET_SSE || TARGET_3DNOW_A)
19204 {
19205 val = gen_lowpart (SImode, val);
19206 x = gen_rtx_TRUNCATE (HImode, val);
19207 x = gen_rtx_VEC_DUPLICATE (mode, x);
19208 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19209 return true;
19210 }
19211 else
19212 {
19213 smode = HImode;
19214 wsmode = SImode;
19215 wvmode = V2SImode;
19216 goto widen;
19217 }
19218
19219 case V8QImode:
19220 if (!mmx_ok)
19221 return false;
19222 smode = QImode;
19223 wsmode = HImode;
19224 wvmode = V4HImode;
19225 goto widen;
19226 case V8HImode:
19227 if (TARGET_SSE2)
19228 {
19229 rtx tmp1, tmp2;
19230 /* Extend HImode to SImode using a paradoxical SUBREG. */
19231 tmp1 = gen_reg_rtx (SImode);
19232 emit_move_insn (tmp1, gen_lowpart (SImode, val));
19233 /* Insert the SImode value as low element of V4SImode vector. */
19234 tmp2 = gen_reg_rtx (V4SImode);
19235 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
19236 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
19237 CONST0_RTX (V4SImode),
19238 const1_rtx);
19239 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
19240 /* Cast the V4SImode vector back to a V8HImode vector. */
19241 tmp1 = gen_reg_rtx (V8HImode);
19242 emit_move_insn (tmp1, gen_lowpart (V8HImode, tmp2));
19243 /* Duplicate the low short through the whole low SImode word. */
19244 emit_insn (gen_sse2_punpcklwd (tmp1, tmp1, tmp1));
19245 /* Cast the V8HImode vector back to a V4SImode vector. */
19246 tmp2 = gen_reg_rtx (V4SImode);
19247 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
19248 /* Replicate the low element of the V4SImode vector. */
19249 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
19250 /* Cast the V4SImode vector back to V8HImode, and store in target. */
19251 emit_move_insn (target, gen_lowpart (V8HImode, tmp2));
19252 return true;
19253 }
19254 smode = HImode;
19255 wsmode = SImode;
19256 wvmode = V4SImode;
19257 goto widen;
19258 case V16QImode:
19259 if (TARGET_SSE2)
19260 {
19261 rtx tmp1, tmp2;
19262 /* Extend QImode to SImode using a paradoxical SUBREG. */
19263 tmp1 = gen_reg_rtx (SImode);
19264 emit_move_insn (tmp1, gen_lowpart (SImode, val));
19265 /* Insert the SImode value as low element of V4SImode vector. */
19266 tmp2 = gen_reg_rtx (V4SImode);
19267 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
19268 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
19269 CONST0_RTX (V4SImode),
19270 const1_rtx);
19271 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
19272 /* Cast the V4SImode vector back to a V16QImode vector. */
19273 tmp1 = gen_reg_rtx (V16QImode);
19274 emit_move_insn (tmp1, gen_lowpart (V16QImode, tmp2));
19275 /* Duplicate the low byte through the whole low SImode word. */
19276 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
19277 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
19278 /* Cast the V16QImode vector back to a V4SImode vector. */
19279 tmp2 = gen_reg_rtx (V4SImode);
19280 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
19281 /* Replicate the low element of the V4SImode vector. */
19282 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
19283 /* Cast the V4SImode vector back to V16QImode, and store in target. */
19284 emit_move_insn (target, gen_lowpart (V16QImode, tmp2));
19285 return true;
19286 }
19287 smode = QImode;
19288 wsmode = HImode;
19289 wvmode = V8HImode;
19290 goto widen;
19291 widen:
19292 /* Replicate the value once into the next wider mode and recurse. */
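/* For example, a QImode value 0xab is widened to the HImode value 0xabab,
   which is then broadcast in the next wider vector mode and finally viewed
   in MODE via gen_lowpart.  */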
19293 val = convert_modes (wsmode, smode, val, true);
19294 x = expand_simple_binop (wsmode, ASHIFT, val,
19295 GEN_INT (GET_MODE_BITSIZE (smode)),
19296 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19297 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
19298
19299 x = gen_reg_rtx (wvmode);
19300 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
19301 gcc_unreachable ();
19302 emit_move_insn (target, gen_lowpart (mode, x));
19303 return true;
19304
19305 default:
19306 return false;
19307 }
19308 }
19309
19310 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19311 whose ONE_VAR element is VAR, and other elements are zero. Return true
19312 if successful. */
19313
19314 static bool
19315 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
19316 rtx target, rtx var, int one_var)
19317 {
19318 enum machine_mode vsimode;
19319 rtx new_target;
19320 rtx x, tmp;
19321
19322 switch (mode)
19323 {
19324 case V2SFmode:
19325 case V2SImode:
19326 if (!mmx_ok)
19327 return false;
19328 /* FALLTHRU */
19329
19330 case V2DFmode:
19331 case V2DImode:
19332 if (one_var != 0)
19333 return false;
19334 var = force_reg (GET_MODE_INNER (mode), var);
19335 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
19336 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19337 return true;
19338
19339 case V4SFmode:
19340 case V4SImode:
19341 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
19342 new_target = gen_reg_rtx (mode);
19343 else
19344 new_target = target;
19345 var = force_reg (GET_MODE_INNER (mode), var);
19346 x = gen_rtx_VEC_DUPLICATE (mode, var);
19347 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
19348 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
19349 if (one_var != 0)
19350 {
19351 /* We need to shuffle the value to the correct position, so
19352 create a new pseudo to store the intermediate result. */
19353
19354 /* With SSE2, we can use the integer shuffle insns. */
19355 if (mode != V4SFmode && TARGET_SSE2)
19356 {
19357 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
19358 GEN_INT (1),
19359 GEN_INT (one_var == 1 ? 0 : 1),
19360 GEN_INT (one_var == 2 ? 0 : 1),
19361 GEN_INT (one_var == 3 ? 0 : 1)));
19362 if (target != new_target)
19363 emit_move_insn (target, new_target);
19364 return true;
19365 }
19366
19367 /* Otherwise convert the intermediate result to V4SFmode and
19368 use the SSE1 shuffle instructions. */
19369 if (mode != V4SFmode)
19370 {
19371 tmp = gen_reg_rtx (V4SFmode);
19372 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
19373 }
19374 else
19375 tmp = new_target;
19376
19377 emit_insn (gen_sse_shufps_1 (tmp, tmp, tmp,
19378 GEN_INT (1),
19379 GEN_INT (one_var == 1 ? 0 : 1),
19380 GEN_INT (one_var == 2 ? 0+4 : 1+4),
19381 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
19382
19383 if (mode != V4SFmode)
19384 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
19385 else if (tmp != target)
19386 emit_move_insn (target, tmp);
19387 }
19388 else if (target != new_target)
19389 emit_move_insn (target, new_target);
19390 return true;
19391
19392 case V8HImode:
19393 case V16QImode:
19394 vsimode = V4SImode;
19395 goto widen;
19396 case V4HImode:
19397 case V8QImode:
19398 if (!mmx_ok)
19399 return false;
19400 vsimode = V2SImode;
19401 goto widen;
19402 widen:
19403 if (one_var != 0)
19404 return false;
19405
19406 /* Zero extend the variable element to SImode and recurse. */
19407 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
19408
19409 x = gen_reg_rtx (vsimode);
19410 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
19411 var, one_var))
19412 gcc_unreachable ();
19413
19414 emit_move_insn (target, gen_lowpart (mode, x));
19415 return true;
19416
19417 default:
19418 return false;
19419 }
19420 }
19421
19422 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19423 consisting of the values in VALS. It is known that all elements
19424 except ONE_VAR are constants. Return true if successful. */
19425
19426 static bool
19427 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
19428 rtx target, rtx vals, int one_var)
19429 {
19430 rtx var = XVECEXP (vals, 0, one_var);
19431 enum machine_mode wmode;
19432 rtx const_vec, x;
19433
19434 const_vec = copy_rtx (vals);
19435 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
19436 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
19437
19438 switch (mode)
19439 {
19440 case V2DFmode:
19441 case V2DImode:
19442 case V2SFmode:
19443 case V2SImode:
19444 /* For the two element vectors, it's just as easy to use
19445 the general case. */
19446 return false;
19447
19448 case V4SFmode:
19449 case V4SImode:
19450 case V8HImode:
19451 case V4HImode:
19452 break;
19453
19454 case V16QImode:
19455 wmode = V8HImode;
19456 goto widen;
19457 case V8QImode:
19458 wmode = V4HImode;
19459 goto widen;
19460 widen:
19461 /* There's no way to set one QImode entry easily. Combine
19462 the variable value with its adjacent constant value, and
19463 promote to an HImode set. */
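/* For example (illustrative indices), to set variable element 5 of a
   V16QImode vector, the variable is shifted left by 8 and OR-ed with the
   constant from element 4, and the combined HImode value is stored as
   element 2 (= 5 >> 1) of the corresponding V8HImode vector.  */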
19464 x = XVECEXP (vals, 0, one_var ^ 1);
19465 if (one_var & 1)
19466 {
19467 var = convert_modes (HImode, QImode, var, true);
19468 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
19469 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19470 x = GEN_INT (INTVAL (x) & 0xff);
19471 }
19472 else
19473 {
19474 var = convert_modes (HImode, QImode, var, true);
19475 x = gen_int_mode (INTVAL (x) << 8, HImode);
19476 }
19477 if (x != const0_rtx)
19478 var = expand_simple_binop (HImode, IOR, var, x, var,
19479 1, OPTAB_LIB_WIDEN);
19480
19481 x = gen_reg_rtx (wmode);
19482 emit_move_insn (x, gen_lowpart (wmode, const_vec));
19483 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
19484
19485 emit_move_insn (target, gen_lowpart (mode, x));
19486 return true;
19487
19488 default:
19489 return false;
19490 }
19491
19492 emit_move_insn (target, const_vec);
19493 ix86_expand_vector_set (mmx_ok, target, var, one_var);
19494 return true;
19495 }
19496
19497 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
19498 all values variable, and none identical. */
19499
19500 static void
19501 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
19502 rtx target, rtx vals)
19503 {
19504 enum machine_mode half_mode = GET_MODE_INNER (mode);
19505 rtx op0 = NULL, op1 = NULL;
19506 bool use_vec_concat = false;
19507
19508 switch (mode)
19509 {
19510 case V2SFmode:
19511 case V2SImode:
19512 if (!mmx_ok && !TARGET_SSE)
19513 break;
19514 /* FALLTHRU */
19515
19516 case V2DFmode:
19517 case V2DImode:
19518 /* For the two element vectors, we always implement VEC_CONCAT. */
19519 op0 = XVECEXP (vals, 0, 0);
19520 op1 = XVECEXP (vals, 0, 1);
19521 use_vec_concat = true;
19522 break;
19523
19524 case V4SFmode:
19525 half_mode = V2SFmode;
19526 goto half;
19527 case V4SImode:
19528 half_mode = V2SImode;
19529 goto half;
19530 half:
19531 {
19532 rtvec v;
19533
19534 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
19535 Recurse to load the two halves. */
19536
19537 op0 = gen_reg_rtx (half_mode);
19538 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
19539 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
19540
19541 op1 = gen_reg_rtx (half_mode);
19542 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
19543 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
19544
19545 use_vec_concat = true;
19546 }
19547 break;
19548
19549 case V8HImode:
19550 case V16QImode:
19551 case V4HImode:
19552 case V8QImode:
19553 break;
19554
19555 default:
19556 gcc_unreachable ();
19557 }
19558
19559 if (use_vec_concat)
19560 {
19561 if (!register_operand (op0, half_mode))
19562 op0 = force_reg (half_mode, op0);
19563 if (!register_operand (op1, half_mode))
19564 op1 = force_reg (half_mode, op1);
19565
19566 emit_insn (gen_rtx_SET (VOIDmode, target,
19567 gen_rtx_VEC_CONCAT (mode, op0, op1)));
19568 }
19569 else
19570 {
19571 int i, j, n_elts, n_words, n_elt_per_word;
19572 enum machine_mode inner_mode;
19573 rtx words[4], shift;
19574
19575 inner_mode = GET_MODE_INNER (mode);
19576 n_elts = GET_MODE_NUNITS (mode);
19577 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
19578 n_elt_per_word = n_elts / n_words;
19579 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
19580
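      /* For example, building a V8HImode vector on a 32-bit target gives
	 n_words = 4 and n_elt_per_word = 2; each SImode word below is
	 (elt[2*i + 1] << 16) | elt[2*i], and the four words are then
	 reassembled through the V4SImode path further down.  */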
19581 for (i = 0; i < n_words; ++i)
19582 {
19583 rtx word = NULL_RTX;
19584
19585 for (j = 0; j < n_elt_per_word; ++j)
19586 {
19587 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
19588 elt = convert_modes (word_mode, inner_mode, elt, true);
19589
19590 if (j == 0)
19591 word = elt;
19592 else
19593 {
19594 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
19595 word, 1, OPTAB_LIB_WIDEN);
19596 word = expand_simple_binop (word_mode, IOR, word, elt,
19597 word, 1, OPTAB_LIB_WIDEN);
19598 }
19599 }
19600
19601 words[i] = word;
19602 }
19603
19604 if (n_words == 1)
19605 emit_move_insn (target, gen_lowpart (mode, words[0]));
19606 else if (n_words == 2)
19607 {
19608 rtx tmp = gen_reg_rtx (mode);
19609 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
19610 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
19611 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
19612 emit_move_insn (target, tmp);
19613 }
19614 else if (n_words == 4)
19615 {
19616 rtx tmp = gen_reg_rtx (V4SImode);
19617 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
19618 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
19619 emit_move_insn (target, gen_lowpart (mode, tmp));
19620 }
19621 else
19622 gcc_unreachable ();
19623 }
19624 }
19625
19626 /* Initialize vector TARGET via VALS. Suppress the use of MMX
19627 instructions unless MMX_OK is true. */
19628
19629 void
19630 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
19631 {
19632 enum machine_mode mode = GET_MODE (target);
19633 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19634 int n_elts = GET_MODE_NUNITS (mode);
19635 int n_var = 0, one_var = -1;
19636 bool all_same = true, all_const_zero = true;
19637 int i;
19638 rtx x;
19639
19640 for (i = 0; i < n_elts; ++i)
19641 {
19642 x = XVECEXP (vals, 0, i);
19643 if (!CONSTANT_P (x))
19644 n_var++, one_var = i;
19645 else if (x != CONST0_RTX (inner_mode))
19646 all_const_zero = false;
19647 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
19648 all_same = false;
19649 }
19650
19651 /* Constants are best loaded from the constant pool. */
19652 if (n_var == 0)
19653 {
19654 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
19655 return;
19656 }
19657
19658 /* If all values are identical, broadcast the value. */
19659 if (all_same
19660 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
19661 XVECEXP (vals, 0, 0)))
19662 return;
19663
19664 /* Values where only one field is non-constant are best loaded from
19665 the pool and overwritten via move later. */
19666 if (n_var == 1)
19667 {
19668 if (all_const_zero
19669 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
19670 XVECEXP (vals, 0, one_var),
19671 one_var))
19672 return;
19673
19674 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
19675 return;
19676 }
19677
19678 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
19679 }
19680
19681 void
19682 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
19683 {
19684 enum machine_mode mode = GET_MODE (target);
19685 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19686 bool use_vec_merge = false;
19687 rtx tmp;
19688
19689 switch (mode)
19690 {
19691 case V2SFmode:
19692 case V2SImode:
19693 if (mmx_ok)
19694 {
19695 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
19696 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
19697 if (elt == 0)
19698 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
19699 else
19700 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
19701 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19702 return;
19703 }
19704 break;
19705
19706 case V2DFmode:
19707 case V2DImode:
19708 {
19709 rtx op0, op1;
19710
19711 /* For the two element vectors, we implement a VEC_CONCAT with
19712 the extraction of the other element. */
19713
19714 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
19715 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
19716
19717 if (elt == 0)
19718 op0 = val, op1 = tmp;
19719 else
19720 op0 = tmp, op1 = val;
19721
19722 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
19723 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19724 }
19725 return;
19726
19727 case V4SFmode:
19728 switch (elt)
19729 {
19730 case 0:
19731 use_vec_merge = true;
19732 break;
19733
19734 case 1:
19735 /* tmp = target = A B C D */
19736 tmp = copy_to_reg (target);
19737 /* target = A A B B */
19738 emit_insn (gen_sse_unpcklps (target, target, target));
19739 /* target = X A B B */
19740 ix86_expand_vector_set (false, target, val, 0);
19741 /* target = A X C D */
19742 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19743 GEN_INT (1), GEN_INT (0),
19744 GEN_INT (2+4), GEN_INT (3+4)));
19745 return;
19746
19747 case 2:
19748 /* tmp = target = A B C D */
19749 tmp = copy_to_reg (target);
19750 /* tmp = X B C D */
19751 ix86_expand_vector_set (false, tmp, val, 0);
19752 /* target = A B X D */
19753 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19754 GEN_INT (0), GEN_INT (1),
19755 GEN_INT (0+4), GEN_INT (3+4)));
19756 return;
19757
19758 case 3:
19759 /* tmp = target = A B C D */
19760 tmp = copy_to_reg (target);
19761 /* tmp = X B C D */
19762 ix86_expand_vector_set (false, tmp, val, 0);
19763 /* target = A B C X */
19764 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19765 GEN_INT (0), GEN_INT (1),
19766 GEN_INT (2+4), GEN_INT (0+4)));
19767 return;
19768
19769 default:
19770 gcc_unreachable ();
19771 }
19772 break;
19773
19774 case V4SImode:
19775 /* Element 0 handled by vec_merge below. */
19776 if (elt == 0)
19777 {
19778 use_vec_merge = true;
19779 break;
19780 }
19781
19782 if (TARGET_SSE2)
19783 {
19784 /* With SSE2, use integer shuffles to swap element 0 and ELT,
19785 store into element 0, then shuffle them back. */
19786
19787 rtx order[4];
19788
19789 order[0] = GEN_INT (elt);
19790 order[1] = const1_rtx;
19791 order[2] = const2_rtx;
19792 order[3] = GEN_INT (3);
19793 order[elt] = const0_rtx;
19794
19795 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
19796 order[1], order[2], order[3]));
19797
19798 ix86_expand_vector_set (false, target, val, 0);
19799
19800 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
19801 order[1], order[2], order[3]));
19802 }
19803 else
19804 {
19805 /* For SSE1, we have to reuse the V4SF code. */
19806 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
19807 gen_lowpart (SFmode, val), elt);
19808 }
19809 return;
19810
19811 case V8HImode:
19812 use_vec_merge = TARGET_SSE2;
19813 break;
19814 case V4HImode:
19815 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
19816 break;
19817
19818 case V16QImode:
19819 case V8QImode:
19820 default:
19821 break;
19822 }
19823
19824 if (use_vec_merge)
19825 {
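      /* Build (vec_merge (vec_duplicate VAL) TARGET (1 << ELT)): the
	 duplicated VAL supplies element ELT and TARGET supplies all the
	 other elements.  */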
19826 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
19827 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
19828 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19829 }
19830 else
19831 {
19832 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
19833
19834 emit_move_insn (mem, target);
19835
19836 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
19837 emit_move_insn (tmp, val);
19838
19839 emit_move_insn (target, mem);
19840 }
19841 }
19842
19843 void
19844 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
19845 {
19846 enum machine_mode mode = GET_MODE (vec);
19847 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19848 bool use_vec_extr = false;
19849 rtx tmp;
19850
19851 switch (mode)
19852 {
19853 case V2SImode:
19854 case V2SFmode:
19855 if (!mmx_ok)
19856 break;
19857 /* FALLTHRU */
19858
19859 case V2DFmode:
19860 case V2DImode:
19861 use_vec_extr = true;
19862 break;
19863
19864 case V4SFmode:
19865 switch (elt)
19866 {
19867 case 0:
19868 tmp = vec;
19869 break;
19870
19871 case 1:
19872 case 3:
19873 tmp = gen_reg_rtx (mode);
19874 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
19875 GEN_INT (elt), GEN_INT (elt),
19876 GEN_INT (elt+4), GEN_INT (elt+4)));
19877 break;
19878
19879 case 2:
19880 tmp = gen_reg_rtx (mode);
19881 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
19882 break;
19883
19884 default:
19885 gcc_unreachable ();
19886 }
19887 vec = tmp;
19888 use_vec_extr = true;
19889 elt = 0;
19890 break;
19891
19892 case V4SImode:
19893 if (TARGET_SSE2)
19894 {
19895 switch (elt)
19896 {
19897 case 0:
19898 tmp = vec;
19899 break;
19900
19901 case 1:
19902 case 3:
19903 tmp = gen_reg_rtx (mode);
19904 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
19905 GEN_INT (elt), GEN_INT (elt),
19906 GEN_INT (elt), GEN_INT (elt)));
19907 break;
19908
19909 case 2:
19910 tmp = gen_reg_rtx (mode);
19911 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
19912 break;
19913
19914 default:
19915 gcc_unreachable ();
19916 }
19917 vec = tmp;
19918 use_vec_extr = true;
19919 elt = 0;
19920 }
19921 else
19922 {
19923 /* For SSE1, we have to reuse the V4SF code. */
19924 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
19925 gen_lowpart (V4SFmode, vec), elt);
19926 return;
19927 }
19928 break;
19929
19930 case V8HImode:
19931 use_vec_extr = TARGET_SSE2;
19932 break;
19933 case V4HImode:
19934 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
19935 break;
19936
19937 case V16QImode:
19938 case V8QImode:
19939 /* ??? Could extract the appropriate HImode element and shift. */
19940 default:
19941 break;
19942 }
19943
19944 if (use_vec_extr)
19945 {
19946 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
19947 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
19948
19949 /* Let the rtl optimizers know about the zero extension performed. */
19950 if (inner_mode == HImode)
19951 {
19952 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
19953 target = gen_lowpart (SImode, target);
19954 }
19955
19956 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19957 }
19958 else
19959 {
19960 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
19961
19962 emit_move_insn (mem, vec);
19963
19964 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
19965 emit_move_insn (target, tmp);
19966 }
19967 }
19968
19969 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
19970 pattern to reduce; DEST is the destination; IN is the input vector. */
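/* For example (assuming FN is a commutative operation such as addition),
   for IN = {a, b, c, d} the sequence below leaves FN applied across all four
   elements in element 0 of DEST; the remaining elements are not useful.  */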
19971
19972 void
19973 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
19974 {
19975 rtx tmp1, tmp2, tmp3;
19976
19977 tmp1 = gen_reg_rtx (V4SFmode);
19978 tmp2 = gen_reg_rtx (V4SFmode);
19979 tmp3 = gen_reg_rtx (V4SFmode);
19980
19981 emit_insn (gen_sse_movhlps (tmp1, in, in));
19982 emit_insn (fn (tmp2, tmp1, in));
19983
19984 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
19985 GEN_INT (1), GEN_INT (1),
19986 GEN_INT (1+4), GEN_INT (1+4)));
19987 emit_insn (fn (dest, tmp2, tmp3));
19988 }
19989 \f
19990 /* Target hook for scalar_mode_supported_p. */
19991 static bool
19992 ix86_scalar_mode_supported_p (enum machine_mode mode)
19993 {
19994 if (DECIMAL_FLOAT_MODE_P (mode))
19995 return true;
19996 else
19997 return default_scalar_mode_supported_p (mode);
19998 }
19999
20000 /* Implements target hook vector_mode_supported_p. */
20001 static bool
20002 ix86_vector_mode_supported_p (enum machine_mode mode)
20003 {
20004 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
20005 return true;
20006 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
20007 return true;
20008 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
20009 return true;
20010 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
20011 return true;
20012 return false;
20013 }
20014
20015 /* Worker function for TARGET_MD_ASM_CLOBBERS.
20016
20017 We do this in the new i386 backend to maintain source compatibility
20018 with the old cc0-based compiler. */
20019
20020 static tree
20021 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
20022 tree inputs ATTRIBUTE_UNUSED,
20023 tree clobbers)
20024 {
20025 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
20026 clobbers);
20027 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
20028 clobbers);
20029 return clobbers;
20030 }
20031
20032 /* Return true if this goes in large data/bss. */
20033
20034 static bool
20035 ix86_in_large_data_p (tree exp)
20036 {
20037 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
20038 return false;
20039
20040 /* Functions are never large data. */
20041 if (TREE_CODE (exp) == FUNCTION_DECL)
20042 return false;
20043
20044 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
20045 {
20046 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
20047 if (strcmp (section, ".ldata") == 0
20048 || strcmp (section, ".lbss") == 0)
20049 return true;
20050 return false;
20051 }
20052 else
20053 {
20054 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
20055
20056 /* If this is an incomplete type with size 0, then we can't put it
20057 in data because it might be too big when completed. */
20058 if (!size || size > ix86_section_threshold)
20059 return true;
20060 }
20061
20062 return false;
20063 }
20064 static void
20065 ix86_encode_section_info (tree decl, rtx rtl, int first)
20066 {
20067 default_encode_section_info (decl, rtl, first);
20068
20069 if (TREE_CODE (decl) == VAR_DECL
20070 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
20071 && ix86_in_large_data_p (decl))
20072 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
20073 }
20074
20075 /* Worker function for REVERSE_CONDITION. */
20076
20077 enum rtx_code
20078 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
20079 {
20080 return (mode != CCFPmode && mode != CCFPUmode
20081 ? reverse_condition (code)
20082 : reverse_condition_maybe_unordered (code));
20083 }
20084
20085 /* Output code to perform an x87 FP register move, from OPERANDS[1]
20086 to OPERANDS[0]. */
20087
20088 const char *
20089 output_387_reg_move (rtx insn, rtx *operands)
20090 {
20091 if (REG_P (operands[1])
20092 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
20093 {
20094 if (REGNO (operands[0]) == FIRST_STACK_REG)
20095 return output_387_ffreep (operands, 0);
20096 return "fstp\t%y0";
20097 }
20098 if (STACK_TOP_P (operands[0]))
20099 return "fld%z1\t%y1";
20100 return "fst\t%y0";
20101 }
20102
20103 /* Output code to perform a conditional jump to LABEL, if the C2 flag in
20104 the FP status register is set. */
20105
20106 void
20107 ix86_emit_fp_unordered_jump (rtx label)
20108 {
20109 rtx reg = gen_reg_rtx (HImode);
20110 rtx temp;
20111
20112 emit_insn (gen_x86_fnstsw_1 (reg));
20113
20114 if (TARGET_USE_SAHF)
20115 {
20116 emit_insn (gen_x86_sahf_1 (reg));
20117
20118 temp = gen_rtx_REG (CCmode, FLAGS_REG);
20119 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
20120 }
20121 else
20122 {
20123 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
20124
20125 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
20126 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
20127 }
20128
20129 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
20130 gen_rtx_LABEL_REF (VOIDmode, label),
20131 pc_rtx);
20132 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
20133 emit_jump_insn (temp);
20134 }
20135
20136 /* Output code to perform a log1p XFmode calculation. */
20137
20138 void ix86_emit_i387_log1p (rtx op0, rtx op1)
20139 {
20140 rtx label1 = gen_label_rtx ();
20141 rtx label2 = gen_label_rtx ();
20142
20143 rtx tmp = gen_reg_rtx (XFmode);
20144 rtx tmp2 = gen_reg_rtx (XFmode);
20145
20146 emit_insn (gen_absxf2 (tmp, op1));
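  /* 0.29289321881... is 1 - sqrt(2)/2, the bound on |op1| for which the
     fyl2xp1 instruction is specified; for larger arguments we fall back to
     fyl2x applied to 1 + op1 below.  */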
20147 emit_insn (gen_cmpxf (tmp,
20148 CONST_DOUBLE_FROM_REAL_VALUE (
20149 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
20150 XFmode)));
20151 emit_jump_insn (gen_bge (label1));
20152
20153 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
20154 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
20155 emit_jump (label2);
20156
20157 emit_label (label1);
20158 emit_move_insn (tmp, CONST1_RTX (XFmode));
20159 emit_insn (gen_addxf3 (tmp, op1, tmp));
20160 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
20161 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
20162
20163 emit_label (label2);
20164 }
20165
20166 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
20167
20168 static void
20169 i386_solaris_elf_named_section (const char *name, unsigned int flags,
20170 tree decl)
20171 {
20172 /* With Binutils 2.15, the "@unwind" marker must be specified on
20173 every occurrence of the ".eh_frame" section, not just the first
20174 one. */
20175 if (TARGET_64BIT
20176 && strcmp (name, ".eh_frame") == 0)
20177 {
20178 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
20179 flags & SECTION_WRITE ? "aw" : "a");
20180 return;
20181 }
20182 default_elf_asm_named_section (name, flags, decl);
20183 }
20184
20185 /* Return the mangling of TYPE if it is an extended fundamental type. */
20186
20187 static const char *
20188 ix86_mangle_fundamental_type (tree type)
20189 {
20190 switch (TYPE_MODE (type))
20191 {
20192 case TFmode:
20193 /* __float128 is "g". */
20194 return "g";
20195 case XFmode:
20196 /* "long double" or __float80 is "e". */
20197 return "e";
20198 default:
20199 return NULL;
20200 }
20201 }
20202
20203 /* For 32-bit code we can save PIC register setup by using
20204 __stack_chk_fail_local hidden function instead of calling
20205 __stack_chk_fail directly. 64-bit code doesn't need to setup any PIC
20206 register, so it is better to call __stack_chk_fail directly. */
20207
20208 static tree
20209 ix86_stack_protect_fail (void)
20210 {
20211 return TARGET_64BIT
20212 ? default_external_stack_protect_fail ()
20213 : default_hidden_stack_protect_fail ();
20214 }
20215
20216 /* Select a format to encode pointers in exception handling data. CODE
20217 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
20218 true if the symbol may be affected by dynamic relocations.
20219
20220 ??? All x86 object file formats are capable of representing this.
20221 After all, the relocation needed is the same as for the call insn.
20222 Whether or not a particular assembler allows us to enter such, I
20223 guess we'll have to see. */
20224 int
20225 asm_preferred_eh_data_format (int code, int global)
20226 {
20227 if (flag_pic)
20228 {
20229 int type = DW_EH_PE_sdata8;
20230 if (!TARGET_64BIT
20231 || ix86_cmodel == CM_SMALL_PIC
20232 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
20233 type = DW_EH_PE_sdata4;
20234 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
20235 }
20236 if (ix86_cmodel == CM_SMALL
20237 || (ix86_cmodel == CM_MEDIUM && code))
20238 return DW_EH_PE_udata4;
20239 return DW_EH_PE_absptr;
20240 }
20241 \f
20242 /* Expand copysign: combine the sign of SIGN with the positive value ABS_VALUE,
20243 storing the result in RESULT. If MASK is non-null, it is a mask used to mask
20244 out the sign bit. */
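/* In effect this computes RESULT = ABS_VALUE | (SIGN & SIGNBIT), where
   SIGNBIT is a mask with only the sign bit set (built here when MASK is
   null, otherwise derived from MASK by inversion).  */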
20245 static void
20246 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
20247 {
20248 enum machine_mode mode = GET_MODE (sign);
20249 rtx sgn = gen_reg_rtx (mode);
20250 if (mask == NULL_RTX)
20251 {
20252 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
20253 if (!VECTOR_MODE_P (mode))
20254 {
20255 /* We need to generate a scalar mode mask in this case. */
20256 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
20257 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
20258 mask = gen_reg_rtx (mode);
20259 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
20260 }
20261 }
20262 else
20263 mask = gen_rtx_NOT (mode, mask);
20264 emit_insn (gen_rtx_SET (VOIDmode, sgn,
20265 gen_rtx_AND (mode, mask, sign)));
20266 emit_insn (gen_rtx_SET (VOIDmode, result,
20267 gen_rtx_IOR (mode, abs_value, sgn)));
20268 }
20269
20270 /* Expand fabs (OP0) and return a new rtx that holds the result. The
20271 mask for masking out the sign-bit is stored in *SMASK, if that is
20272 non-null. */
20273 static rtx
20274 ix86_expand_sse_fabs (rtx op0, rtx *smask)
20275 {
20276 enum machine_mode mode = GET_MODE (op0);
20277 rtx xa, mask;
20278
20279 xa = gen_reg_rtx (mode);
20280 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
20281 if (!VECTOR_MODE_P (mode))
20282 {
20283 /* We need to generate a scalar mode mask in this case. */
20284 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
20285 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
20286 mask = gen_reg_rtx (mode);
20287 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
20288 }
20289 emit_insn (gen_rtx_SET (VOIDmode, xa,
20290 gen_rtx_AND (mode, op0, mask)));
20291
20292 if (smask)
20293 *smask = mask;
20294
20295 return xa;
20296 }
20297
20298 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
20299 swapping the operands if SWAP_OPERANDS is true. The expanded
20300 code is a forward jump to a newly created label in case the
20301 comparison is true. The generated label rtx is returned. */
20302 static rtx
20303 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
20304 bool swap_operands)
20305 {
20306 rtx label, tmp;
20307
20308 if (swap_operands)
20309 {
20310 tmp = op0;
20311 op0 = op1;
20312 op1 = tmp;
20313 }
20314
20315 label = gen_label_rtx ();
20316 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
20317 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20318 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
20319 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
20320 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
20321 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
20322 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
20323 JUMP_LABEL (tmp) = label;
20324
20325 return label;
20326 }
20327
20328 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
20329 using comparison code CODE. Operands are swapped for the comparison if
20330 SWAP_OPERANDS is true. Returns an rtx for the generated mask. */
20331 static rtx
20332 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
20333 bool swap_operands)
20334 {
20335 enum machine_mode mode = GET_MODE (op0);
20336 rtx mask = gen_reg_rtx (mode);
20337
20338 if (swap_operands)
20339 {
20340 rtx tmp = op0;
20341 op0 = op1;
20342 op1 = tmp;
20343 }
20344
20345 if (mode == DFmode)
20346 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
20347 gen_rtx_fmt_ee (code, mode, op0, op1)));
20348 else
20349 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
20350 gen_rtx_fmt_ee (code, mode, op0, op1)));
20351
20352 return mask;
20353 }
20354
20355 /* Generate and return an rtx of mode MODE for 2**n, where n is the number
20356 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
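/* For DFmode this is 2**52 = 4503599627370496.0 (2**23 for SFmode); adding
   and then subtracting it rounds any smaller-magnitude value to an integer
   in the current rounding mode, which the expanders below rely on.  */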
20357 static rtx
20358 ix86_gen_TWO52 (enum machine_mode mode)
20359 {
20360 REAL_VALUE_TYPE TWO52r;
20361 rtx TWO52;
20362
20363 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
20364 TWO52 = const_double_from_real_value (TWO52r, mode);
20365 TWO52 = force_reg (mode, TWO52);
20366
20367 return TWO52;
20368 }
20369
20370 /* Expand SSE sequence for computing lround from OP1 storing
20371 into OP0. */
20372 void
20373 ix86_expand_lround (rtx op0, rtx op1)
20374 {
20375 /* C code for the stuff we're doing below:
20376 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
20377 return (long)tmp;
20378 */
20379 enum machine_mode mode = GET_MODE (op1);
20380 const struct real_format *fmt;
20381 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
20382 rtx adj;
20383
20384 /* load nextafter (0.5, 0.0) */
20385 fmt = REAL_MODE_FORMAT (mode);
20386 real_2expN (&half_minus_pred_half, -(fmt->p) - 1);
20387 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
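  /* Using the largest representable value below 0.5 instead of 0.5 itself
     avoids rounding inputs such as nextafter (0.5, 0.0) the wrong way:
     adding exactly 0.5 to that input would round up to 1.0, whereas adding
     pred_half keeps the sum strictly below 1.0.  */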
20388
20389 /* adj = copysign (0.5, op1) */
20390 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
20391 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
20392
20393 /* adj = op1 + adj */
20394 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
20395
20396 /* op0 = (imode)adj */
20397 expand_fix (op0, adj, 0);
20398 }
20399
20400 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1,
20401 storing into OPERAND0. */
20402 void
20403 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
20404 {
20405 /* C code for the stuff we're doing below (for do_floor):
20406 xi = (long)op1;
20407 xi -= (double)xi > op1 ? 1 : 0;
20408 return xi;
20409 */
20410 enum machine_mode fmode = GET_MODE (op1);
20411 enum machine_mode imode = GET_MODE (op0);
20412 rtx ireg, freg, label, tmp;
20413
20414 /* reg = (long)op1 */
20415 ireg = gen_reg_rtx (imode);
20416 expand_fix (ireg, op1, 0);
20417
20418 /* freg = (double)reg */
20419 freg = gen_reg_rtx (fmode);
20420 expand_float (freg, ireg, 0);
20421
20422 /* ireg = (freg > op1) ? ireg - 1 : ireg */
20423 label = ix86_expand_sse_compare_and_jump (UNLE,
20424 freg, op1, !do_floor);
20425 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
20426 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
20427 emit_move_insn (ireg, tmp);
20428
20429 emit_label (label);
20430 LABEL_NUSES (label) = 1;
20431
20432 emit_move_insn (op0, ireg);
20433 }
20434
20435 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
20436 result in OPERAND0. */
20437 void
20438 ix86_expand_rint (rtx operand0, rtx operand1)
20439 {
20440 /* C code for the stuff we're doing below:
20441 xa = fabs (operand1);
20442 if (!isless (xa, 2**52))
20443 return operand1;
20444 xa = xa + 2**52 - 2**52;
20445 return copysign (xa, operand1);
20446 */
20447 enum machine_mode mode = GET_MODE (operand0);
20448 rtx res, xa, label, TWO52, mask;
20449
20450 res = gen_reg_rtx (mode);
20451 emit_move_insn (res, operand1);
20452
20453 /* xa = abs (operand1) */
20454 xa = ix86_expand_sse_fabs (res, &mask);
20455
20456 /* if (!isless (xa, TWO52)) goto label; */
20457 TWO52 = ix86_gen_TWO52 (mode);
20458 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20459
20460 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20461 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
20462
20463 ix86_sse_copysign_to_positive (res, xa, res, mask);
20464
20465 emit_label (label);
20466 LABEL_NUSES (label) = 1;
20467
20468 emit_move_insn (operand0, res);
20469 }
20470
20471 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1, storing into
20472 OPERAND0 without relying on DImode truncation (for DFmode on 32-bit targets). */
20473 void
20474 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
20475 {
20476 /* C code for the stuff we expand below.
20477 double xa = fabs (x), x2;
20478 if (!isless (xa, TWO52))
20479 return x;
20480 xa = xa + TWO52 - TWO52;
20481 x2 = copysign (xa, x);
20482 Compensate. Floor:
20483 if (x2 > x)
20484 x2 -= 1;
20485 Compensate. Ceil:
20486 if (x2 < x)
20487 x2 -= -1;
20488 return x2;
20489 */
20490 enum machine_mode mode = GET_MODE (operand0);
20491 rtx xa, TWO52, tmp, label, one, res, mask;
20492
20493 TWO52 = ix86_gen_TWO52 (mode);
20494
20495 /* Temporary for holding the result, initialized to the input
20496 operand to ease control flow. */
20497 res = gen_reg_rtx (mode);
20498 emit_move_insn (res, operand1);
20499
20500 /* xa = abs (operand1) */
20501 xa = ix86_expand_sse_fabs (res, &mask);
20502
20503 /* if (!isless (xa, TWO52)) goto label; */
20504 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20505
20506 /* xa = xa + TWO52 - TWO52; */
20507 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20508 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
20509
20510 /* xa = copysign (xa, operand1) */
20511 ix86_sse_copysign_to_positive (xa, xa, res, mask);
20512
20513 /* generate 1.0 or -1.0 */
20514 one = force_reg (mode,
20515 const_double_from_real_value (do_floor
20516 ? dconst1 : dconstm1, mode));
20517
20518 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
20519 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
20520 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20521 gen_rtx_AND (mode, one, tmp)));
20522 /* We always need to subtract here to preserve signed zero. */
20523 tmp = expand_simple_binop (mode, MINUS,
20524 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20525 emit_move_insn (res, tmp);
20526
20527 emit_label (label);
20528 LABEL_NUSES (label) = 1;
20529
20530 emit_move_insn (operand0, res);
20531 }
20532
20533 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
20534 into OPERAND0. */
20535 void
20536 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
20537 {
20538 /* C code for the stuff we expand below.
20539 double xa = fabs (x), x2;
20540 if (!isless (xa, TWO52))
20541 return x;
20542 x2 = (double)(long)x;
20543 Compensate. Floor:
20544 if (x2 > x)
20545 x2 -= 1;
20546 Compensate. Ceil:
20547 if (x2 < x)
20548 x2 += 1;
20549 if (HONOR_SIGNED_ZEROS (mode))
20550 return copysign (x2, x);
20551 return x2;
20552 */
20553 enum machine_mode mode = GET_MODE (operand0);
20554 rtx xa, xi, TWO52, tmp, label, one, res, mask;
20555
20556 TWO52 = ix86_gen_TWO52 (mode);
20557
20558 /* Temporary for holding the result, initialized to the input
20559 operand to ease control flow. */
20560 res = gen_reg_rtx (mode);
20561 emit_move_insn (res, operand1);
20562
20563 /* xa = abs (operand1) */
20564 xa = ix86_expand_sse_fabs (res, &mask);
20565
20566 /* if (!isless (xa, TWO52)) goto label; */
20567 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20568
20569 /* xa = (double)(long)x */
20570 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20571 expand_fix (xi, res, 0);
20572 expand_float (xa, xi, 0);
20573
20574 /* generate 1.0 */
20575 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
20576
20577 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
20578 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
20579 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20580 gen_rtx_AND (mode, one, tmp)));
20581 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
20582 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20583 emit_move_insn (res, tmp);
20584
20585 if (HONOR_SIGNED_ZEROS (mode))
20586 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
20587
20588 emit_label (label);
20589 LABEL_NUSES (label) = 1;
20590
20591 emit_move_insn (operand0, res);
20592 }
20593
20594 /* Expand SSE sequence for computing round from OPERAND1, storing
20595 into OPERAND0. This sequence works without relying on DImode truncation
20596 via cvttsd2siq, which is only available on 64-bit targets. */
20597 void
20598 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
20599 {
20600 /* C code for the stuff we expand below.
20601 double xa = fabs (x), xa2, x2;
20602 if (!isless (xa, TWO52))
20603 return x;
20604 Using the absolute value and copying back sign makes
20605 -0.0 -> -0.0 correct.
20606 xa2 = xa + TWO52 - TWO52;
20607 Compensate.
20608 dxa = xa2 - xa;
20609 if (dxa <= -0.5)
20610 xa2 += 1;
20611 else if (dxa > 0.5)
20612 xa2 -= 1;
20613 x2 = copysign (xa2, x);
20614 return x2;
20615 */
20616 enum machine_mode mode = GET_MODE (operand0);
20617 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
20618
20619 TWO52 = ix86_gen_TWO52 (mode);
20620
20621 /* Temporary for holding the result, initialized to the input
20622 operand to ease control flow. */
20623 res = gen_reg_rtx (mode);
20624 emit_move_insn (res, operand1);
20625
20626 /* xa = abs (operand1) */
20627 xa = ix86_expand_sse_fabs (res, &mask);
20628
20629 /* if (!isless (xa, TWO52)) goto label; */
20630 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20631
20632 /* xa2 = xa + TWO52 - TWO52; */
20633 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20634 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
20635
20636 /* dxa = xa2 - xa; */
20637 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
20638
20639 /* generate 0.5, 1.0 and -0.5 */
20640 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
20641 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
20642 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
20643 0, OPTAB_DIRECT);
20644
20645 /* Compensate. */
20646 tmp = gen_reg_rtx (mode);
20647 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
20648 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
20649 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20650 gen_rtx_AND (mode, one, tmp)));
20651 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20652 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
20653 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
20654 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20655 gen_rtx_AND (mode, one, tmp)));
20656 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20657
20658 /* res = copysign (xa2, operand1) */
20659 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
20660
20661 emit_label (label);
20662 LABEL_NUSES (label) = 1;
20663
20664 emit_move_insn (operand0, res);
20665 }
20666
20667 /* Expand SSE sequence for computing trunc from OPERAND1 storing
20668 into OPERAND0. */
20669 void
20670 ix86_expand_trunc (rtx operand0, rtx operand1)
20671 {
20672 /* C code for SSE variant we expand below.
20673 double xa = fabs (x), x2;
20674 if (!isless (xa, TWO52))
20675 return x;
20676 x2 = (double)(long)x;
20677 if (HONOR_SIGNED_ZEROS (mode))
20678 return copysign (x2, x);
20679 return x2;
20680 */
20681 enum machine_mode mode = GET_MODE (operand0);
20682 rtx xa, xi, TWO52, label, res, mask;
20683
20684 TWO52 = ix86_gen_TWO52 (mode);
20685
20686 /* Temporary for holding the result, initialized to the input
20687 operand to ease control flow. */
20688 res = gen_reg_rtx (mode);
20689 emit_move_insn (res, operand1);
20690
20691 /* xa = abs (operand1) */
20692 xa = ix86_expand_sse_fabs (res, &mask);
20693
20694 /* if (!isless (xa, TWO52)) goto label; */
20695 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20696
20697 /* x = (double)(long)x */
20698 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20699 expand_fix (xi, res, 0);
20700 expand_float (res, xi, 0);
20701
20702 if (HONOR_SIGNED_ZEROS (mode))
20703 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
20704
20705 emit_label (label);
20706 LABEL_NUSES (label) = 1;
20707
20708 emit_move_insn (operand0, res);
20709 }
20710
20711 /* Expand SSE sequence for computing trunc from OPERAND1, storing into
20712 OPERAND0 without relying on DImode truncation (works on 32-bit targets). */
20713 void
20714 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
20715 {
20716 enum machine_mode mode = GET_MODE (operand0);
20717 rtx xa, mask, TWO52, label, one, res, smask, tmp;
20718
20719 /* C code for SSE variant we expand below.
20720 double xa = fabs (x), x2;
20721 if (!isless (xa, TWO52))
20722 return x;
20723 xa2 = xa + TWO52 - TWO52;
20724 Compensate:
20725 if (xa2 > xa)
20726 xa2 -= 1.0;
20727 x2 = copysign (xa2, x);
20728 return x2;
20729 */
20730
20731 TWO52 = ix86_gen_TWO52 (mode);
20732
20733 /* Temporary for holding the result, initialized to the input
20734 operand to ease control flow. */
20735 res = gen_reg_rtx (mode);
20736 emit_move_insn (res, operand1);
20737
20738 /* xa = abs (operand1) */
20739 xa = ix86_expand_sse_fabs (res, &smask);
20740
20741 /* if (!isless (xa, TWO52)) goto label; */
20742 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20743
20744 /* res = xa + TWO52 - TWO52; */
20745 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20746 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
20747 emit_move_insn (res, tmp);
20748
20749 /* generate 1.0 */
20750 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
20751
20752 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
20753 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
20754 emit_insn (gen_rtx_SET (VOIDmode, mask,
20755 gen_rtx_AND (mode, mask, one)));
20756 tmp = expand_simple_binop (mode, MINUS,
20757 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
20758 emit_move_insn (res, tmp);
20759
20760 /* res = copysign (res, operand1) */
20761 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
20762
20763 emit_label (label);
20764 LABEL_NUSES (label) = 1;
20765
20766 emit_move_insn (operand0, res);
20767 }
20768
20769 /* Expand SSE sequence for computing round from OPERAND1 storing
20770 into OPERAND0. */
20771 void
20772 ix86_expand_round (rtx operand0, rtx operand1)
20773 {
20774 /* C code for the stuff we're doing below:
20775 double xa = fabs (x);
20776 if (!isless (xa, TWO52))
20777 return x;
20778 xa = (double)(long)(xa + nextafter (0.5, 0.0));
20779 return copysign (xa, x);
20780 */
20781 enum machine_mode mode = GET_MODE (operand0);
20782 rtx res, TWO52, xa, label, xi, half, mask;
20783 const struct real_format *fmt;
20784 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
20785
20786 /* Temporary for holding the result, initialized to the input
20787 operand to ease control flow. */
20788 res = gen_reg_rtx (mode);
20789 emit_move_insn (res, operand1);
20790
20791 TWO52 = ix86_gen_TWO52 (mode);
20792 xa = ix86_expand_sse_fabs (res, &mask);
20793 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20794
20795 /* load nextafter (0.5, 0.0) */
20796 fmt = REAL_MODE_FORMAT (mode);
20797 real_2expN (&half_minus_pred_half, -(fmt->p) - 1);
20798 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
20799
20800 /* xa = xa + 0.5 */
20801 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
20802 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
20803
20804 /* xa = (double)(int64_t)xa */
20805 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20806 expand_fix (xi, xa, 0);
20807 expand_float (xa, xi, 0);
20808
20809 /* res = copysign (xa, operand1) */
20810 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
20811
20812 emit_label (label);
20813 LABEL_NUSES (label) = 1;
20814
20815 emit_move_insn (operand0, res);
20816 }
20817
20818 #include "gt-i386.h"