re PR target/30120 (silent miscompilation of argument passing)
1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "tree-gimple.h"
51 #include "dwarf2.h"
52 #include "tm-constrs.h"
53 #include "params.h"
54
55 #ifndef CHECK_STACK_LIMIT
56 #define CHECK_STACK_LIMIT (-1)
57 #endif
58
59 /* Return index of given mode in mult and division cost tables. */
60 #define MODE_INDEX(mode) \
61 ((mode) == QImode ? 0 \
62 : (mode) == HImode ? 1 \
63 : (mode) == SImode ? 2 \
64 : (mode) == DImode ? 3 \
65 : 4)
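/* Illustrative sketch: MODE_INDEX selects the entry of the per-mode multiply
   and divide cost arrays in the processor_costs tables below.  Assuming the
   multiply-cost array is named mult_init (a name assumed here purely for
   illustration), the cost of an SImode multiply for the active tuning would
   be looked up roughly as

     ix86_cost->mult_init[MODE_INDEX (SImode)]

   i.e. index 2; QImode, HImode and DImode map to 0, 1 and 3, and any other
   mode falls through to the final "other" slot, index 4.  */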
66
67 /* Processor costs (relative to an add) */
68 /* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
69 #define COSTS_N_BYTES(N) ((N) * 2)
70
71 #define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
72
73 static const
74 struct processor_costs size_cost = { /* costs for tuning for size */
75 COSTS_N_BYTES (2), /* cost of an add instruction */
76 COSTS_N_BYTES (3), /* cost of a lea instruction */
77 COSTS_N_BYTES (2), /* variable shift costs */
78 COSTS_N_BYTES (3), /* constant shift costs */
79 {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
80 COSTS_N_BYTES (3), /* HI */
81 COSTS_N_BYTES (3), /* SI */
82 COSTS_N_BYTES (3), /* DI */
83 COSTS_N_BYTES (5)}, /* other */
84 0, /* cost of multiply per each bit set */
85 {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
86 COSTS_N_BYTES (3), /* HI */
87 COSTS_N_BYTES (3), /* SI */
88 COSTS_N_BYTES (3), /* DI */
89 COSTS_N_BYTES (5)}, /* other */
90 COSTS_N_BYTES (3), /* cost of movsx */
91 COSTS_N_BYTES (3), /* cost of movzx */
92 0, /* "large" insn */
93 2, /* MOVE_RATIO */
94 2, /* cost for loading QImode using movzbl */
95 {2, 2, 2}, /* cost of loading integer registers
96 in QImode, HImode and SImode.
97 Relative to reg-reg move (2). */
98 {2, 2, 2}, /* cost of storing integer registers */
99 2, /* cost of reg,reg fld/fst */
100 {2, 2, 2}, /* cost of loading fp registers
101 in SFmode, DFmode and XFmode */
102 {2, 2, 2}, /* cost of storing fp registers
103 in SFmode, DFmode and XFmode */
104 3, /* cost of moving MMX register */
105 {3, 3}, /* cost of loading MMX registers
106 in SImode and DImode */
107 {3, 3}, /* cost of storing MMX registers
108 in SImode and DImode */
109 3, /* cost of moving SSE register */
110 {3, 3, 3}, /* cost of loading SSE registers
111 in SImode, DImode and TImode */
112 {3, 3, 3}, /* cost of storing SSE registers
113 in SImode, DImode and TImode */
114 3, /* MMX or SSE register to integer */
115 0, /* size of prefetch block */
116 0, /* number of parallel prefetches */
117 2, /* Branch cost */
118 COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
119 COSTS_N_BYTES (2), /* cost of FMUL instruction. */
120 COSTS_N_BYTES (2), /* cost of FDIV instruction. */
121 COSTS_N_BYTES (2), /* cost of FABS instruction. */
122 COSTS_N_BYTES (2), /* cost of FCHS instruction. */
123 COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
124 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
125 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
126 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
127 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}}
128 };
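/* Rough sketch of how the two trailing stringop tables above are presumably
   consulted (field names are assumptions; only the {max, alg} pair format is
   taken from the initializers): each pair gives the largest block size, in
   bytes, for which the named algorithm should be used, with -1 meaning "no
   upper bound", so a lookup for a known size could scan the pairs in order:

     for (i = 0; ; i++)
       if (algs->size[i].max == -1 || size <= algs->size[i].max)
         return algs->size[i].alg;

   The two tables are presumably the block-copy and block-set strategies, in
   that order, each with a 32bit and a 64bit variant; DUMMY_STRINGOP_ALGS
   fills the variant a tuning does not care about.  */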
129
130 /* Processor costs (relative to an add) */
131 static const
132 struct processor_costs i386_cost = { /* 386 specific costs */
133 COSTS_N_INSNS (1), /* cost of an add instruction */
134 COSTS_N_INSNS (1), /* cost of a lea instruction */
135 COSTS_N_INSNS (3), /* variable shift costs */
136 COSTS_N_INSNS (2), /* constant shift costs */
137 {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
138 COSTS_N_INSNS (6), /* HI */
139 COSTS_N_INSNS (6), /* SI */
140 COSTS_N_INSNS (6), /* DI */
141 COSTS_N_INSNS (6)}, /* other */
142 COSTS_N_INSNS (1), /* cost of multiply per each bit set */
143 {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
144 COSTS_N_INSNS (23), /* HI */
145 COSTS_N_INSNS (23), /* SI */
146 COSTS_N_INSNS (23), /* DI */
147 COSTS_N_INSNS (23)}, /* other */
148 COSTS_N_INSNS (3), /* cost of movsx */
149 COSTS_N_INSNS (2), /* cost of movzx */
150 15, /* "large" insn */
151 3, /* MOVE_RATIO */
152 4, /* cost for loading QImode using movzbl */
153 {2, 4, 2}, /* cost of loading integer registers
154 in QImode, HImode and SImode.
155 Relative to reg-reg move (2). */
156 {2, 4, 2}, /* cost of storing integer registers */
157 2, /* cost of reg,reg fld/fst */
158 {8, 8, 8}, /* cost of loading fp registers
159 in SFmode, DFmode and XFmode */
160 {8, 8, 8}, /* cost of storing fp registers
161 in SFmode, DFmode and XFmode */
162 2, /* cost of moving MMX register */
163 {4, 8}, /* cost of loading MMX registers
164 in SImode and DImode */
165 {4, 8}, /* cost of storing MMX registers
166 in SImode and DImode */
167 2, /* cost of moving SSE register */
168 {4, 8, 16}, /* cost of loading SSE registers
169 in SImode, DImode and TImode */
170 {4, 8, 16}, /* cost of storing SSE registers
171 in SImode, DImode and TImode */
172 3, /* MMX or SSE register to integer */
173 0, /* size of prefetch block */
174 0, /* number of parallel prefetches */
175 1, /* Branch cost */
176 COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
177 COSTS_N_INSNS (27), /* cost of FMUL instruction. */
178 COSTS_N_INSNS (88), /* cost of FDIV instruction. */
179 COSTS_N_INSNS (22), /* cost of FABS instruction. */
180 COSTS_N_INSNS (24), /* cost of FCHS instruction. */
181 COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
182 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
183 DUMMY_STRINGOP_ALGS},
184 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
185 DUMMY_STRINGOP_ALGS},
186 };
187
188 static const
189 struct processor_costs i486_cost = { /* 486 specific costs */
190 COSTS_N_INSNS (1), /* cost of an add instruction */
191 COSTS_N_INSNS (1), /* cost of a lea instruction */
192 COSTS_N_INSNS (3), /* variable shift costs */
193 COSTS_N_INSNS (2), /* constant shift costs */
194 {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
195 COSTS_N_INSNS (12), /* HI */
196 COSTS_N_INSNS (12), /* SI */
197 COSTS_N_INSNS (12), /* DI */
198 COSTS_N_INSNS (12)}, /* other */
199 1, /* cost of multiply per each bit set */
200 {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
201 COSTS_N_INSNS (40), /* HI */
202 COSTS_N_INSNS (40), /* SI */
203 COSTS_N_INSNS (40), /* DI */
204 COSTS_N_INSNS (40)}, /* other */
205 COSTS_N_INSNS (3), /* cost of movsx */
206 COSTS_N_INSNS (2), /* cost of movzx */
207 15, /* "large" insn */
208 3, /* MOVE_RATIO */
209 4, /* cost for loading QImode using movzbl */
210 {2, 4, 2}, /* cost of loading integer registers
211 in QImode, HImode and SImode.
212 Relative to reg-reg move (2). */
213 {2, 4, 2}, /* cost of storing integer registers */
214 2, /* cost of reg,reg fld/fst */
215 {8, 8, 8}, /* cost of loading fp registers
216 in SFmode, DFmode and XFmode */
217 {8, 8, 8}, /* cost of storing fp registers
218 in SFmode, DFmode and XFmode */
219 2, /* cost of moving MMX register */
220 {4, 8}, /* cost of loading MMX registers
221 in SImode and DImode */
222 {4, 8}, /* cost of storing MMX registers
223 in SImode and DImode */
224 2, /* cost of moving SSE register */
225 {4, 8, 16}, /* cost of loading SSE registers
226 in SImode, DImode and TImode */
227 {4, 8, 16}, /* cost of storing SSE registers
228 in SImode, DImode and TImode */
229 3, /* MMX or SSE register to integer */
230 0, /* size of prefetch block */
231 0, /* number of parallel prefetches */
232 1, /* Branch cost */
233 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
234 COSTS_N_INSNS (16), /* cost of FMUL instruction. */
235 COSTS_N_INSNS (73), /* cost of FDIV instruction. */
236 COSTS_N_INSNS (3), /* cost of FABS instruction. */
237 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
238 COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
239 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
240 DUMMY_STRINGOP_ALGS},
241 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
242 DUMMY_STRINGOP_ALGS}
243 };
244
245 static const
246 struct processor_costs pentium_cost = {
247 COSTS_N_INSNS (1), /* cost of an add instruction */
248 COSTS_N_INSNS (1), /* cost of a lea instruction */
249 COSTS_N_INSNS (4), /* variable shift costs */
250 COSTS_N_INSNS (1), /* constant shift costs */
251 {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
252 COSTS_N_INSNS (11), /* HI */
253 COSTS_N_INSNS (11), /* SI */
254 COSTS_N_INSNS (11), /* DI */
255 COSTS_N_INSNS (11)}, /* other */
256 0, /* cost of multiply per each bit set */
257 {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
258 COSTS_N_INSNS (25), /* HI */
259 COSTS_N_INSNS (25), /* SI */
260 COSTS_N_INSNS (25), /* DI */
261 COSTS_N_INSNS (25)}, /* other */
262 COSTS_N_INSNS (3), /* cost of movsx */
263 COSTS_N_INSNS (2), /* cost of movzx */
264 8, /* "large" insn */
265 6, /* MOVE_RATIO */
266 6, /* cost for loading QImode using movzbl */
267 {2, 4, 2}, /* cost of loading integer registers
268 in QImode, HImode and SImode.
269 Relative to reg-reg move (2). */
270 {2, 4, 2}, /* cost of storing integer registers */
271 2, /* cost of reg,reg fld/fst */
272 {2, 2, 6}, /* cost of loading fp registers
273 in SFmode, DFmode and XFmode */
274 {4, 4, 6}, /* cost of storing fp registers
275 in SFmode, DFmode and XFmode */
276 8, /* cost of moving MMX register */
277 {8, 8}, /* cost of loading MMX registers
278 in SImode and DImode */
279 {8, 8}, /* cost of storing MMX registers
280 in SImode and DImode */
281 2, /* cost of moving SSE register */
282 {4, 8, 16}, /* cost of loading SSE registers
283 in SImode, DImode and TImode */
284 {4, 8, 16}, /* cost of storing SSE registers
285 in SImode, DImode and TImode */
286 3, /* MMX or SSE register to integer */
287 0, /* size of prefetch block */
288 0, /* number of parallel prefetches */
289 2, /* Branch cost */
290 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
291 COSTS_N_INSNS (3), /* cost of FMUL instruction. */
292 COSTS_N_INSNS (39), /* cost of FDIV instruction. */
293 COSTS_N_INSNS (1), /* cost of FABS instruction. */
294 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
295 COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
296 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
297 DUMMY_STRINGOP_ALGS},
298 {{libcall, {{-1, rep_prefix_4_byte}}},
299 DUMMY_STRINGOP_ALGS}
300 };
301
302 static const
303 struct processor_costs pentiumpro_cost = {
304 COSTS_N_INSNS (1), /* cost of an add instruction */
305 COSTS_N_INSNS (1), /* cost of a lea instruction */
306 COSTS_N_INSNS (1), /* variable shift costs */
307 COSTS_N_INSNS (1), /* constant shift costs */
308 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
309 COSTS_N_INSNS (4), /* HI */
310 COSTS_N_INSNS (4), /* SI */
311 COSTS_N_INSNS (4), /* DI */
312 COSTS_N_INSNS (4)}, /* other */
313 0, /* cost of multiply per each bit set */
314 {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
315 COSTS_N_INSNS (17), /* HI */
316 COSTS_N_INSNS (17), /* SI */
317 COSTS_N_INSNS (17), /* DI */
318 COSTS_N_INSNS (17)}, /* other */
319 COSTS_N_INSNS (1), /* cost of movsx */
320 COSTS_N_INSNS (1), /* cost of movzx */
321 8, /* "large" insn */
322 6, /* MOVE_RATIO */
323 2, /* cost for loading QImode using movzbl */
324 {4, 4, 4}, /* cost of loading integer registers
325 in QImode, HImode and SImode.
326 Relative to reg-reg move (2). */
327 {2, 2, 2}, /* cost of storing integer registers */
328 2, /* cost of reg,reg fld/fst */
329 {2, 2, 6}, /* cost of loading fp registers
330 in SFmode, DFmode and XFmode */
331 {4, 4, 6}, /* cost of storing fp registers
332 in SFmode, DFmode and XFmode */
333 2, /* cost of moving MMX register */
334 {2, 2}, /* cost of loading MMX registers
335 in SImode and DImode */
336 {2, 2}, /* cost of storing MMX registers
337 in SImode and DImode */
338 2, /* cost of moving SSE register */
339 {2, 2, 8}, /* cost of loading SSE registers
340 in SImode, DImode and TImode */
341 {2, 2, 8}, /* cost of storing SSE registers
342 in SImode, DImode and TImode */
343 3, /* MMX or SSE register to integer */
344 32, /* size of prefetch block */
345 6, /* number of parallel prefetches */
346 2, /* Branch cost */
347 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
348 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
349 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
350 COSTS_N_INSNS (2), /* cost of FABS instruction. */
351 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
352 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
353 /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes (we ensure
354 the alignment). For small blocks an inline loop is still a noticeable win; for bigger
355 blocks either rep movsl or rep movsb is the way to go. Rep movsb apparently has a
356 more expensive startup time in the CPU, but after 4K the difference is down in the noise.
357 */
358 {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
359 {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
360 DUMMY_STRINGOP_ALGS},
361 {{rep_prefix_4_byte, {{1024, unrolled_loop},
362 {8192, rep_prefix_4_byte}, {-1, libcall}}},
363 DUMMY_STRINGOP_ALGS}
364 };
365
366 static const
367 struct processor_costs geode_cost = {
368 COSTS_N_INSNS (1), /* cost of an add instruction */
369 COSTS_N_INSNS (1), /* cost of a lea instruction */
370 COSTS_N_INSNS (2), /* variable shift costs */
371 COSTS_N_INSNS (1), /* constant shift costs */
372 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
373 COSTS_N_INSNS (4), /* HI */
374 COSTS_N_INSNS (7), /* SI */
375 COSTS_N_INSNS (7), /* DI */
376 COSTS_N_INSNS (7)}, /* other */
377 0, /* cost of multiply per each bit set */
378 {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
379 COSTS_N_INSNS (23), /* HI */
380 COSTS_N_INSNS (39), /* SI */
381 COSTS_N_INSNS (39), /* DI */
382 COSTS_N_INSNS (39)}, /* other */
383 COSTS_N_INSNS (1), /* cost of movsx */
384 COSTS_N_INSNS (1), /* cost of movzx */
385 8, /* "large" insn */
386 4, /* MOVE_RATIO */
387 1, /* cost for loading QImode using movzbl */
388 {1, 1, 1}, /* cost of loading integer registers
389 in QImode, HImode and SImode.
390 Relative to reg-reg move (2). */
391 {1, 1, 1}, /* cost of storing integer registers */
392 1, /* cost of reg,reg fld/fst */
393 {1, 1, 1}, /* cost of loading fp registers
394 in SFmode, DFmode and XFmode */
395 {4, 6, 6}, /* cost of storing fp registers
396 in SFmode, DFmode and XFmode */
397
398 1, /* cost of moving MMX register */
399 {1, 1}, /* cost of loading MMX registers
400 in SImode and DImode */
401 {1, 1}, /* cost of storing MMX registers
402 in SImode and DImode */
403 1, /* cost of moving SSE register */
404 {1, 1, 1}, /* cost of loading SSE registers
405 in SImode, DImode and TImode */
406 {1, 1, 1}, /* cost of storing SSE registers
407 in SImode, DImode and TImode */
408 1, /* MMX or SSE register to integer */
409 32, /* size of prefetch block */
410 1, /* number of parallel prefetches */
411 1, /* Branch cost */
412 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
413 COSTS_N_INSNS (11), /* cost of FMUL instruction. */
414 COSTS_N_INSNS (47), /* cost of FDIV instruction. */
415 COSTS_N_INSNS (1), /* cost of FABS instruction. */
416 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
417 COSTS_N_INSNS (54), /* cost of FSQRT instruction. */
418 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
419 DUMMY_STRINGOP_ALGS},
420 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
421 DUMMY_STRINGOP_ALGS}
422 };
423
424 static const
425 struct processor_costs k6_cost = {
426 COSTS_N_INSNS (1), /* cost of an add instruction */
427 COSTS_N_INSNS (2), /* cost of a lea instruction */
428 COSTS_N_INSNS (1), /* variable shift costs */
429 COSTS_N_INSNS (1), /* constant shift costs */
430 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
431 COSTS_N_INSNS (3), /* HI */
432 COSTS_N_INSNS (3), /* SI */
433 COSTS_N_INSNS (3), /* DI */
434 COSTS_N_INSNS (3)}, /* other */
435 0, /* cost of multiply per each bit set */
436 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
437 COSTS_N_INSNS (18), /* HI */
438 COSTS_N_INSNS (18), /* SI */
439 COSTS_N_INSNS (18), /* DI */
440 COSTS_N_INSNS (18)}, /* other */
441 COSTS_N_INSNS (2), /* cost of movsx */
442 COSTS_N_INSNS (2), /* cost of movzx */
443 8, /* "large" insn */
444 4, /* MOVE_RATIO */
445 3, /* cost for loading QImode using movzbl */
446 {4, 5, 4}, /* cost of loading integer registers
447 in QImode, HImode and SImode.
448 Relative to reg-reg move (2). */
449 {2, 3, 2}, /* cost of storing integer registers */
450 4, /* cost of reg,reg fld/fst */
451 {6, 6, 6}, /* cost of loading fp registers
452 in SFmode, DFmode and XFmode */
453 {4, 4, 4}, /* cost of storing fp registers
454 in SFmode, DFmode and XFmode */
455 2, /* cost of moving MMX register */
456 {2, 2}, /* cost of loading MMX registers
457 in SImode and DImode */
458 {2, 2}, /* cost of storing MMX registers
459 in SImode and DImode */
460 2, /* cost of moving SSE register */
461 {2, 2, 8}, /* cost of loading SSE registers
462 in SImode, DImode and TImode */
463 {2, 2, 8}, /* cost of storing SSE registers
464 in SImode, DImode and TImode */
465 6, /* MMX or SSE register to integer */
466 32, /* size of prefetch block */
467 1, /* number of parallel prefetches */
468 1, /* Branch cost */
469 COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
470 COSTS_N_INSNS (2), /* cost of FMUL instruction. */
471 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
472 COSTS_N_INSNS (2), /* cost of FABS instruction. */
473 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
474 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
475 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
476 DUMMY_STRINGOP_ALGS},
477 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
478 DUMMY_STRINGOP_ALGS}
479 };
480
481 static const
482 struct processor_costs athlon_cost = {
483 COSTS_N_INSNS (1), /* cost of an add instruction */
484 COSTS_N_INSNS (2), /* cost of a lea instruction */
485 COSTS_N_INSNS (1), /* variable shift costs */
486 COSTS_N_INSNS (1), /* constant shift costs */
487 {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
488 COSTS_N_INSNS (5), /* HI */
489 COSTS_N_INSNS (5), /* SI */
490 COSTS_N_INSNS (5), /* DI */
491 COSTS_N_INSNS (5)}, /* other */
492 0, /* cost of multiply per each bit set */
493 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
494 COSTS_N_INSNS (26), /* HI */
495 COSTS_N_INSNS (42), /* SI */
496 COSTS_N_INSNS (74), /* DI */
497 COSTS_N_INSNS (74)}, /* other */
498 COSTS_N_INSNS (1), /* cost of movsx */
499 COSTS_N_INSNS (1), /* cost of movzx */
500 8, /* "large" insn */
501 9, /* MOVE_RATIO */
502 4, /* cost for loading QImode using movzbl */
503 {3, 4, 3}, /* cost of loading integer registers
504 in QImode, HImode and SImode.
505 Relative to reg-reg move (2). */
506 {3, 4, 3}, /* cost of storing integer registers */
507 4, /* cost of reg,reg fld/fst */
508 {4, 4, 12}, /* cost of loading fp registers
509 in SFmode, DFmode and XFmode */
510 {6, 6, 8}, /* cost of storing fp registers
511 in SFmode, DFmode and XFmode */
512 2, /* cost of moving MMX register */
513 {4, 4}, /* cost of loading MMX registers
514 in SImode and DImode */
515 {4, 4}, /* cost of storing MMX registers
516 in SImode and DImode */
517 2, /* cost of moving SSE register */
518 {4, 4, 6}, /* cost of loading SSE registers
519 in SImode, DImode and TImode */
520 {4, 4, 5}, /* cost of storing SSE registers
521 in SImode, DImode and TImode */
522 5, /* MMX or SSE register to integer */
523 64, /* size of prefetch block */
524 6, /* number of parallel prefetches */
525 5, /* Branch cost */
526 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
527 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
528 COSTS_N_INSNS (24), /* cost of FDIV instruction. */
529 COSTS_N_INSNS (2), /* cost of FABS instruction. */
530 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
531 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
532 /* For some reason, Athlon deals better with the REP prefix (relative to loops)
533 than K8 does. Alignment becomes important after 8 bytes for memcpy and
534 128 bytes for memset. */
535 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
536 DUMMY_STRINGOP_ALGS},
537 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
538 DUMMY_STRINGOP_ALGS}
539 };
540
541 static const
542 struct processor_costs k8_cost = {
543 COSTS_N_INSNS (1), /* cost of an add instruction */
544 COSTS_N_INSNS (2), /* cost of a lea instruction */
545 COSTS_N_INSNS (1), /* variable shift costs */
546 COSTS_N_INSNS (1), /* constant shift costs */
547 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
548 COSTS_N_INSNS (4), /* HI */
549 COSTS_N_INSNS (3), /* SI */
550 COSTS_N_INSNS (4), /* DI */
551 COSTS_N_INSNS (5)}, /* other */
552 0, /* cost of multiply per each bit set */
553 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
554 COSTS_N_INSNS (26), /* HI */
555 COSTS_N_INSNS (42), /* SI */
556 COSTS_N_INSNS (74), /* DI */
557 COSTS_N_INSNS (74)}, /* other */
558 COSTS_N_INSNS (1), /* cost of movsx */
559 COSTS_N_INSNS (1), /* cost of movzx */
560 8, /* "large" insn */
561 9, /* MOVE_RATIO */
562 4, /* cost for loading QImode using movzbl */
563 {3, 4, 3}, /* cost of loading integer registers
564 in QImode, HImode and SImode.
565 Relative to reg-reg move (2). */
566 {3, 4, 3}, /* cost of storing integer registers */
567 4, /* cost of reg,reg fld/fst */
568 {4, 4, 12}, /* cost of loading fp registers
569 in SFmode, DFmode and XFmode */
570 {6, 6, 8}, /* cost of storing fp registers
571 in SFmode, DFmode and XFmode */
572 2, /* cost of moving MMX register */
573 {3, 3}, /* cost of loading MMX registers
574 in SImode and DImode */
575 {4, 4}, /* cost of storing MMX registers
576 in SImode and DImode */
577 2, /* cost of moving SSE register */
578 {4, 3, 6}, /* cost of loading SSE registers
579 in SImode, DImode and TImode */
580 {4, 4, 5}, /* cost of storing SSE registers
581 in SImode, DImode and TImode */
582 5, /* MMX or SSE register to integer */
583 64, /* size of prefetch block */
584 /* New AMD processors never drop prefetches; if they cannot be performed
585 immediately, they are queued. We set the number of simultaneous prefetches
586 to a large constant to reflect this (it is probably not a good idea to leave
587 the number of prefetches completely unlimited, as their execution also takes
588 some time). */
589 100, /* number of parallel prefetches */
590 5, /* Branch cost */
591 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
592 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
593 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
594 COSTS_N_INSNS (2), /* cost of FABS instruction. */
595 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
596 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
597 /* K8 has an optimized REP instruction for medium-sized blocks, but for very small
598 blocks it is better to use a loop. For large blocks, a libcall can do
599 nontemporal accesses and beat the inline expansion considerably. */
600 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
601 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
602 {{libcall, {{8, loop}, {24, unrolled_loop},
603 {2048, rep_prefix_4_byte}, {-1, libcall}}},
604 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}
605 };
606
607 static const
608 struct processor_costs pentium4_cost = {
609 COSTS_N_INSNS (1), /* cost of an add instruction */
610 COSTS_N_INSNS (3), /* cost of a lea instruction */
611 COSTS_N_INSNS (4), /* variable shift costs */
612 COSTS_N_INSNS (4), /* constant shift costs */
613 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
614 COSTS_N_INSNS (15), /* HI */
615 COSTS_N_INSNS (15), /* SI */
616 COSTS_N_INSNS (15), /* DI */
617 COSTS_N_INSNS (15)}, /* other */
618 0, /* cost of multiply per each bit set */
619 {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
620 COSTS_N_INSNS (56), /* HI */
621 COSTS_N_INSNS (56), /* SI */
622 COSTS_N_INSNS (56), /* DI */
623 COSTS_N_INSNS (56)}, /* other */
624 COSTS_N_INSNS (1), /* cost of movsx */
625 COSTS_N_INSNS (1), /* cost of movzx */
626 16, /* "large" insn */
627 6, /* MOVE_RATIO */
628 2, /* cost for loading QImode using movzbl */
629 {4, 5, 4}, /* cost of loading integer registers
630 in QImode, HImode and SImode.
631 Relative to reg-reg move (2). */
632 {2, 3, 2}, /* cost of storing integer registers */
633 2, /* cost of reg,reg fld/fst */
634 {2, 2, 6}, /* cost of loading fp registers
635 in SFmode, DFmode and XFmode */
636 {4, 4, 6}, /* cost of storing fp registers
637 in SFmode, DFmode and XFmode */
638 2, /* cost of moving MMX register */
639 {2, 2}, /* cost of loading MMX registers
640 in SImode and DImode */
641 {2, 2}, /* cost of storing MMX registers
642 in SImode and DImode */
643 12, /* cost of moving SSE register */
644 {12, 12, 12}, /* cost of loading SSE registers
645 in SImode, DImode and TImode */
646 {2, 2, 8}, /* cost of storing SSE registers
647 in SImode, DImode and TImode */
648 10, /* MMX or SSE register to integer */
649 64, /* size of prefetch block */
650 6, /* number of parallel prefetches */
651 2, /* Branch cost */
652 COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
653 COSTS_N_INSNS (7), /* cost of FMUL instruction. */
654 COSTS_N_INSNS (43), /* cost of FDIV instruction. */
655 COSTS_N_INSNS (2), /* cost of FABS instruction. */
656 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
657 COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
658 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
659 DUMMY_STRINGOP_ALGS},
660 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
661 {-1, libcall}}},
662 DUMMY_STRINGOP_ALGS},
663 };
664
665 static const
666 struct processor_costs nocona_cost = {
667 COSTS_N_INSNS (1), /* cost of an add instruction */
668 COSTS_N_INSNS (1), /* cost of a lea instruction */
669 COSTS_N_INSNS (1), /* variable shift costs */
670 COSTS_N_INSNS (1), /* constant shift costs */
671 {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
672 COSTS_N_INSNS (10), /* HI */
673 COSTS_N_INSNS (10), /* SI */
674 COSTS_N_INSNS (10), /* DI */
675 COSTS_N_INSNS (10)}, /* other */
676 0, /* cost of multiply per each bit set */
677 {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
678 COSTS_N_INSNS (66), /* HI */
679 COSTS_N_INSNS (66), /* SI */
680 COSTS_N_INSNS (66), /* DI */
681 COSTS_N_INSNS (66)}, /* other */
682 COSTS_N_INSNS (1), /* cost of movsx */
683 COSTS_N_INSNS (1), /* cost of movzx */
684 16, /* "large" insn */
685 17, /* MOVE_RATIO */
686 4, /* cost for loading QImode using movzbl */
687 {4, 4, 4}, /* cost of loading integer registers
688 in QImode, HImode and SImode.
689 Relative to reg-reg move (2). */
690 {4, 4, 4}, /* cost of storing integer registers */
691 3, /* cost of reg,reg fld/fst */
692 {12, 12, 12}, /* cost of loading fp registers
693 in SFmode, DFmode and XFmode */
694 {4, 4, 4}, /* cost of storing fp registers
695 in SFmode, DFmode and XFmode */
696 6, /* cost of moving MMX register */
697 {12, 12}, /* cost of loading MMX registers
698 in SImode and DImode */
699 {12, 12}, /* cost of storing MMX registers
700 in SImode and DImode */
701 6, /* cost of moving SSE register */
702 {12, 12, 12}, /* cost of loading SSE registers
703 in SImode, DImode and TImode */
704 {12, 12, 12}, /* cost of storing SSE registers
705 in SImode, DImode and TImode */
706 8, /* MMX or SSE register to integer */
707 128, /* size of prefetch block */
708 8, /* number of parallel prefetches */
709 1, /* Branch cost */
710 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
711 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
712 COSTS_N_INSNS (40), /* cost of FDIV instruction. */
713 COSTS_N_INSNS (3), /* cost of FABS instruction. */
714 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
715 COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
716 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
717 {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
718 {100000, unrolled_loop}, {-1, libcall}}}},
719 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
720 {-1, libcall}}},
721 {libcall, {{24, loop}, {64, unrolled_loop},
722 {8192, rep_prefix_8_byte}, {-1, libcall}}}}
723 };
724
725 static const
726 struct processor_costs core2_cost = {
727 COSTS_N_INSNS (1), /* cost of an add instruction */
728 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
729 COSTS_N_INSNS (1), /* variable shift costs */
730 COSTS_N_INSNS (1), /* constant shift costs */
731 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
732 COSTS_N_INSNS (3), /* HI */
733 COSTS_N_INSNS (3), /* SI */
734 COSTS_N_INSNS (3), /* DI */
735 COSTS_N_INSNS (3)}, /* other */
736 0, /* cost of multiply per each bit set */
737 {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
738 COSTS_N_INSNS (22), /* HI */
739 COSTS_N_INSNS (22), /* SI */
740 COSTS_N_INSNS (22), /* DI */
741 COSTS_N_INSNS (22)}, /* other */
742 COSTS_N_INSNS (1), /* cost of movsx */
743 COSTS_N_INSNS (1), /* cost of movzx */
744 8, /* "large" insn */
745 16, /* MOVE_RATIO */
746 2, /* cost for loading QImode using movzbl */
747 {6, 6, 6}, /* cost of loading integer registers
748 in QImode, HImode and SImode.
749 Relative to reg-reg move (2). */
750 {4, 4, 4}, /* cost of storing integer registers */
751 2, /* cost of reg,reg fld/fst */
752 {6, 6, 6}, /* cost of loading fp registers
753 in SFmode, DFmode and XFmode */
754 {4, 4, 4}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
755 2, /* cost of moving MMX register */
756 {6, 6}, /* cost of loading MMX registers
757 in SImode and DImode */
758 {4, 4}, /* cost of storing MMX registers
759 in SImode and DImode */
760 2, /* cost of moving SSE register */
761 {6, 6, 6}, /* cost of loading SSE registers
762 in SImode, DImode and TImode */
763 {4, 4, 4}, /* cost of storing SSE registers
764 in SImode, DImode and TImode */
765 2, /* MMX or SSE register to integer */
766 128, /* size of prefetch block */
767 8, /* number of parallel prefetches */
768 3, /* Branch cost */
769 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
770 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
771 COSTS_N_INSNS (32), /* cost of FDIV instruction. */
772 COSTS_N_INSNS (1), /* cost of FABS instruction. */
773 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
774 COSTS_N_INSNS (58), /* cost of FSQRT instruction. */
775 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
776 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
777 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
778 {{libcall, {{8, loop}, {15, unrolled_loop},
779 {2048, rep_prefix_4_byte}, {-1, libcall}}},
780 {libcall, {{24, loop}, {32, unrolled_loop},
781 {8192, rep_prefix_8_byte}, {-1, libcall}}}}
782 };
783
784 /* Generic64 should produce code tuned for Nocona and K8. */
785 static const
786 struct processor_costs generic64_cost = {
787 COSTS_N_INSNS (1), /* cost of an add instruction */
788 /* On all chips taken into consideration, lea is 2 cycles or more. With
789 this cost, however, our current implementation of synth_mult results in
790 the use of unnecessary temporary registers, causing regressions on several
791 SPECfp benchmarks. */
792 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
793 COSTS_N_INSNS (1), /* variable shift costs */
794 COSTS_N_INSNS (1), /* constant shift costs */
795 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
796 COSTS_N_INSNS (4), /* HI */
797 COSTS_N_INSNS (3), /* SI */
798 COSTS_N_INSNS (4), /* DI */
799 COSTS_N_INSNS (2)}, /* other */
800 0, /* cost of multiply per each bit set */
801 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
802 COSTS_N_INSNS (26), /* HI */
803 COSTS_N_INSNS (42), /* SI */
804 COSTS_N_INSNS (74), /* DI */
805 COSTS_N_INSNS (74)}, /* other */
806 COSTS_N_INSNS (1), /* cost of movsx */
807 COSTS_N_INSNS (1), /* cost of movzx */
808 8, /* "large" insn */
809 17, /* MOVE_RATIO */
810 4, /* cost for loading QImode using movzbl */
811 {4, 4, 4}, /* cost of loading integer registers
812 in QImode, HImode and SImode.
813 Relative to reg-reg move (2). */
814 {4, 4, 4}, /* cost of storing integer registers */
815 4, /* cost of reg,reg fld/fst */
816 {12, 12, 12}, /* cost of loading fp registers
817 in SFmode, DFmode and XFmode */
818 {6, 6, 8}, /* cost of storing fp registers
819 in SFmode, DFmode and XFmode */
820 2, /* cost of moving MMX register */
821 {8, 8}, /* cost of loading MMX registers
822 in SImode and DImode */
823 {8, 8}, /* cost of storing MMX registers
824 in SImode and DImode */
825 2, /* cost of moving SSE register */
826 {8, 8, 8}, /* cost of loading SSE registers
827 in SImode, DImode and TImode */
828 {8, 8, 8}, /* cost of storing SSE registers
829 in SImode, DImode and TImode */
830 5, /* MMX or SSE register to integer */
831 64, /* size of prefetch block */
832 6, /* number of parallel prefetches */
833 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this value
834 is increased to the perhaps more appropriate value of 5. */
835 3, /* Branch cost */
836 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
837 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
838 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
839 COSTS_N_INSNS (8), /* cost of FABS instruction. */
840 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
841 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
842 {DUMMY_STRINGOP_ALGS,
843 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
844 {DUMMY_STRINGOP_ALGS,
845 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}
846 };
847
848 /* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona and K8. */
849 static const
850 struct processor_costs generic32_cost = {
851 COSTS_N_INSNS (1), /* cost of an add instruction */
852 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
853 COSTS_N_INSNS (1), /* variable shift costs */
854 COSTS_N_INSNS (1), /* constant shift costs */
855 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
856 COSTS_N_INSNS (4), /* HI */
857 COSTS_N_INSNS (3), /* SI */
858 COSTS_N_INSNS (4), /* DI */
859 COSTS_N_INSNS (2)}, /* other */
860 0, /* cost of multiply per each bit set */
861 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
862 COSTS_N_INSNS (26), /* HI */
863 COSTS_N_INSNS (42), /* SI */
864 COSTS_N_INSNS (74), /* DI */
865 COSTS_N_INSNS (74)}, /* other */
866 COSTS_N_INSNS (1), /* cost of movsx */
867 COSTS_N_INSNS (1), /* cost of movzx */
868 8, /* "large" insn */
869 17, /* MOVE_RATIO */
870 4, /* cost for loading QImode using movzbl */
871 {4, 4, 4}, /* cost of loading integer registers
872 in QImode, HImode and SImode.
873 Relative to reg-reg move (2). */
874 {4, 4, 4}, /* cost of storing integer registers */
875 4, /* cost of reg,reg fld/fst */
876 {12, 12, 12}, /* cost of loading fp registers
877 in SFmode, DFmode and XFmode */
878 {6, 6, 8}, /* cost of storing fp registers
879 in SFmode, DFmode and XFmode */
880 2, /* cost of moving MMX register */
881 {8, 8}, /* cost of loading MMX registers
882 in SImode and DImode */
883 {8, 8}, /* cost of storing MMX registers
884 in SImode and DImode */
885 2, /* cost of moving SSE register */
886 {8, 8, 8}, /* cost of loading SSE registers
887 in SImode, DImode and TImode */
888 {8, 8, 8}, /* cost of storing SSE registers
889 in SImode, DImode and TImode */
890 5, /* MMX or SSE register to integer */
891 64, /* size of prefetch block */
892 6, /* number of parallel prefetches */
893 3, /* Branch cost */
894 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
895 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
896 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
897 COSTS_N_INSNS (8), /* cost of FABS instruction. */
898 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
899 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
900 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
901 DUMMY_STRINGOP_ALGS},
902 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
903 DUMMY_STRINGOP_ALGS},
904 };
905
906 const struct processor_costs *ix86_cost = &pentium_cost;
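/* ix86_cost starts out pointing at the Pentium table and is presumably
   repointed during option processing according to -mtune (e.g. to &k8_cost
   for -mtune=k8), so every cost query goes through this one pointer; the
   option-handling code itself lives elsewhere in this file.  */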
907
908 /* Processor feature/optimization bitmasks. */
909 #define m_386 (1<<PROCESSOR_I386)
910 #define m_486 (1<<PROCESSOR_I486)
911 #define m_PENT (1<<PROCESSOR_PENTIUM)
912 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
913 #define m_GEODE (1<<PROCESSOR_GEODE)
914 #define m_K6_GEODE (m_K6 | m_GEODE)
915 #define m_K6 (1<<PROCESSOR_K6)
916 #define m_ATHLON (1<<PROCESSOR_ATHLON)
917 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
918 #define m_K8 (1<<PROCESSOR_K8)
919 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
920 #define m_NOCONA (1<<PROCESSOR_NOCONA)
921 #define m_CORE2 (1<<PROCESSOR_CORE2)
922 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
923 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
924 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
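/* The x86_* masks below are tested against the currently selected tuning;
   the usual pattern (the helper name is assumed here for illustration) is

     #define TUNEMASK (1 << ix86_tune)
     ...
     if (x86_use_leave & TUNEMASK)
       ... emit "leave" in the epilogue ...

   so a feature is enabled for exactly the processors whose m_* bit is set
   in its mask.  */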
925
926 /* Generic instruction choice should be a common subset of the supported CPUs
927 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
928
929 /* Leave does not affect Nocona SPEC2000 results negatively, so enabling it for
930 Generic64 seems like a good code-size tradeoff. We can't enable it for 32bit
931 generic because it does not work well with PPro-based chips. */
932 const int x86_use_leave = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_CORE2 | m_GENERIC64;
933 const int x86_push_memory = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
934 const int x86_zero_extend_with_and = m_486 | m_PENT;
935 const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */;
936 const int x86_double_with_add = ~m_386;
937 const int x86_use_bit_test = m_386;
938 const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6 | m_CORE2 | m_GENERIC;
939 const int x86_cmove = m_PPRO | m_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
940 const int x86_3dnow_a = m_ATHLON_K8;
941 const int x86_deep_branch = m_PPRO | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
942 /* Branch hints were put in the P4 based on simulation results. But
943 after the P4 was made, no performance benefit was observed with
944 branch hints; they also increase the code size. As a result,
945 icc never generates branch hints. */
946 const int x86_branch_hints = 0;
947 const int x86_use_sahf = m_PPRO | m_K6_GEODE | m_PENT4 | m_NOCONA | m_GENERIC32; /*m_GENERIC | m_ATHLON_K8 ? */
948 /* We probably ought to watch for partial register stalls on the Generic32
949 compilation setting as well. However, in the current implementation
950 partial register stalls are not eliminated very well - they can
951 be introduced via subregs synthesized by combine and can happen
952 in caller/callee saving sequences.
953 Because this option pays back little on PPro-based chips and conflicts
954 with the partial register dependencies used by Athlon/P4-based chips, it is better
955 to leave it off for generic32 for now. */
956 const int x86_partial_reg_stall = m_PPRO;
957 const int x86_partial_flag_reg_stall = m_CORE2 | m_GENERIC;
958 const int x86_use_himode_fiop = m_386 | m_486 | m_K6_GEODE;
959 const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT | m_CORE2 | m_GENERIC);
960 const int x86_use_mov0 = m_K6;
961 const int x86_use_cltd = ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC);
962 const int x86_read_modify_write = ~m_PENT;
963 const int x86_read_modify = ~(m_PENT | m_PPRO);
964 const int x86_split_long_moves = m_PPRO;
965 const int x86_promote_QImode = m_K6_GEODE | m_PENT | m_386 | m_486 | m_ATHLON_K8 | m_CORE2 | m_GENERIC; /* m_PENT4 ? */
966 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
967 const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
968 const int x86_qimode_math = ~(0);
969 const int x86_promote_qi_regs = 0;
970 /* On PPro this flag is meant to avoid partial register stalls. Just like
971 x86_partial_reg_stall, this option might be considered for Generic32
972 if our scheme for avoiding partial stalls were more effective. */
973 const int x86_himode_math = ~(m_PPRO);
974 const int x86_promote_hi_regs = m_PPRO;
975 const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
976 const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
977 const int x86_add_esp_4 = m_ATHLON_K8 | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
978 const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6_GEODE | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
979 const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_GEODE);
980 const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
981 const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
982 const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
983 const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
984 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
985 const int x86_shift1 = ~m_486;
986 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
987 /* In the Generic model we have a conflict here between PPro/Pentium4-based chips
988 that treat 128bit SSE registers as single units and K8-based chips that
989 divide SSE registers into two 64bit halves.
990 x86_sse_partial_reg_dependency promotes all store destinations to 128bit
991 to allow register renaming on 128bit SSE units, but usually results in one
992 extra microop on 64bit SSE units. Experimental results show that disabling
993 this option on the P4 brings over a 20% SPECfp regression, while enabling it on
994 the K8 brings roughly a 2.4% regression that can be partly masked by careful
995 scheduling of moves. */
996 const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
997 /* Set for machines where the type and dependencies are resolved on SSE
998 register parts instead of whole registers, so we may maintain just the
999 lower part of scalar values in the proper format, leaving the upper part
1000 undefined. */
1001 const int x86_sse_split_regs = m_ATHLON_K8;
1002 const int x86_sse_typeless_stores = m_ATHLON_K8;
1003 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
1004 const int x86_use_ffreep = m_ATHLON_K8;
1005 const int x86_use_incdec = ~(m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC);
1006
1007 /* ??? Allowing interunit moves makes it all too easy for the compiler to put
1008 integer data in xmm registers, which results in pretty abysmal code. */
1009 const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;
1010
1011 const int x86_ext_80387_constants = m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
1012 /* Some CPU cores are not able to predict more than 4 branch instructions in
1013 the 16 byte window. */
1014 const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
1015 const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC;
1016 const int x86_use_bt = m_ATHLON_K8;
1017 /* Compare and exchange was added for 80486. */
1018 const int x86_cmpxchg = ~m_386;
1019 /* Compare and exchange 8 bytes was added for the Pentium. */
1020 const int x86_cmpxchg8b = ~(m_386 | m_486);
1021 /* Compare and exchange 16 bytes was added for Nocona. */
1022 const int x86_cmpxchg16b = m_NOCONA;
1023 /* Exchange and add was added for 80486. */
1024 const int x86_xadd = ~m_386;
1025 /* Byteswap was added for 80486. */
1026 const int x86_bswap = ~m_386;
1027 const int x86_pad_returns = m_ATHLON_K8 | m_CORE2 | m_GENERIC;
1028
1029 static enum stringop_alg stringop_alg = no_stringop;
1030
1031 /* If the average insn count for a single function invocation is
1032 lower than this constant, emit fast (but longer) prologue and
1033 epilogue code. */
1034 #define FAST_PROLOGUE_INSN_COUNT 20
1035
1036 /* Names for 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
1037 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1038 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1039 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1040
1041 /* Array of the smallest class containing reg number REGNO, indexed by
1042 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1043
1044 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1045 {
1046 /* ax, dx, cx, bx */
1047 AREG, DREG, CREG, BREG,
1048 /* si, di, bp, sp */
1049 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1050 /* FP registers */
1051 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1052 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1053 /* arg pointer */
1054 NON_Q_REGS,
1055 /* flags, fpsr, fpcr, dirflag, frame */
1056 NO_REGS, NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1057 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1058 SSE_REGS, SSE_REGS,
1059 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1060 MMX_REGS, MMX_REGS,
1061 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1062 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1063 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1064 SSE_REGS, SSE_REGS,
1065 };
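/* This table is indexed directly by hard register number; i386.h presumably
   wraps it as something like

     #define REGNO_REG_CLASS(REGNO) (regclass_map[(REGNO)])

   so, per the initializer above, REGNO_REG_CLASS (0) is AREG (%eax) and
   REGNO_REG_CLASS (7) is NON_Q_REGS (%esp).  The wrapper shown is an
   assumption; the comment above only says REGNO_REG_CLASS uses this array.  */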
1066
1067 /* The "default" register map used in 32bit mode. */
1068
1069 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1070 {
1071 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1072 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1073 -1, -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, dir, frame */
1074 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1075 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1076 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1077 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1078 };
1079
1080 static int const x86_64_int_parameter_registers[6] =
1081 {
1082 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
1083 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
1084 };
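/* Per the table above (and the x86-64 ABI), the first six integer arguments
   are passed in RDI, RSI, RDX, RCX, R8 and R9, in that order.  For example,
   for

     void f (int a, int b, int c);

   a arrives in %edi, b in %esi and c in %edx, matching entries 0-2 of
   x86_64_int_parameter_registers.  */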
1085
1086 static int const x86_64_int_return_registers[4] =
1087 {
1088 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
1089 };
1090
1091 /* The "default" register map used in 64bit mode. */
1092 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1093 {
1094 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1095 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1096 -1, -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, dir, frame */
1097 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1098 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1099 8,9,10,11,12,13,14,15, /* extended integer registers */
1100 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1101 };
1102
1103 /* Define the register numbers to be used in Dwarf debugging information.
1104 The SVR4 reference port C compiler uses the following register numbers
1105 in its Dwarf output code:
1106 0 for %eax (gcc regno = 0)
1107 1 for %ecx (gcc regno = 2)
1108 2 for %edx (gcc regno = 1)
1109 3 for %ebx (gcc regno = 3)
1110 4 for %esp (gcc regno = 7)
1111 5 for %ebp (gcc regno = 6)
1112 6 for %esi (gcc regno = 4)
1113 7 for %edi (gcc regno = 5)
1114 The following three DWARF register numbers are never generated by
1115 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1116 believes these numbers have these meanings.
1117 8 for %eip (no gcc equivalent)
1118 9 for %eflags (gcc regno = 17)
1119 10 for %trapno (no gcc equivalent)
1120 It is not at all clear how we should number the FP stack registers
1121 for the x86 architecture. If the version of SDB on x86/svr4 were
1122 a bit less brain dead with respect to floating-point then we would
1123 have a precedent to follow with respect to DWARF register numbers
1124 for x86 FP registers, but the SDB on x86/svr4 is so completely
1125 broken with respect to FP registers that it is hardly worth thinking
1126 of it as something to strive for compatibility with.
1127 The version of x86/svr4 SDB I have at the moment does (partially)
1128 seem to believe that DWARF register number 11 is associated with
1129 the x86 register %st(0), but that's about all. Higher DWARF
1130 register numbers don't seem to be associated with anything in
1131 particular, and even for DWARF regno 11, SDB only seems to under-
1132 stand that it should say that a variable lives in %st(0) (when
1133 asked via an `=' command) if we said it was in DWARF regno 11,
1134 but SDB still prints garbage when asked for the value of the
1135 variable in question (via a `/' command).
1136 (Also note that the labels SDB prints for various FP stack regs
1137 when doing an `x' command are all wrong.)
1138 Note that these problems generally don't affect the native SVR4
1139 C compiler because it doesn't allow the use of -O with -g and
1140 because when it is *not* optimizing, it allocates a memory
1141 location for each floating-point variable, and the memory
1142 location is what gets described in the DWARF AT_location
1143 attribute for the variable in question.
1144 Regardless of the severe mental illness of the x86/svr4 SDB, we
1145 do something sensible here and we use the following DWARF
1146 register numbers. Note that these are all stack-top-relative
1147 numbers.
1148 11 for %st(0) (gcc regno = 8)
1149 12 for %st(1) (gcc regno = 9)
1150 13 for %st(2) (gcc regno = 10)
1151 14 for %st(3) (gcc regno = 11)
1152 15 for %st(4) (gcc regno = 12)
1153 16 for %st(5) (gcc regno = 13)
1154 17 for %st(6) (gcc regno = 14)
1155 18 for %st(7) (gcc regno = 15)
1156 */
1157 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1158 {
1159 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1160 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1161 -1, 9, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, dir, frame */
1162 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1163 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1164 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1165 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1166 };
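/* The map above is indexed by gcc register number and yields the SVR4 DWARF
   number: svr4_dbx_register_map[6] == 5 records that %ebp (gcc regno 6) is
   DWARF register 5, and svr4_dbx_register_map[7] == 4 maps %esp to DWARF
   register 4, matching the numbering listed in the comment above.  */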
1167
1168 /* Test and compare insns in i386.md store the information needed to
1169 generate branch and scc insns here. */
1170
1171 rtx ix86_compare_op0 = NULL_RTX;
1172 rtx ix86_compare_op1 = NULL_RTX;
1173 rtx ix86_compare_emitted = NULL_RTX;
1174
1175 /* Size of the register save area. */
1176 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
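/* Assuming the usual 64-bit values REGPARM_MAX == 6 and SSE_REGPARM_MAX == 8
   (the definitions live in i386.h), this works out to

     6 * 8 + 8 * 16 == 176 bytes

   which is the size of the register save area that the varargs machinery
   spills the parameter registers into.  */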
1177
1178 /* Define the structure for the machine field in struct function. */
1179
1180 struct stack_local_entry GTY(())
1181 {
1182 unsigned short mode;
1183 unsigned short n;
1184 rtx rtl;
1185 struct stack_local_entry *next;
1186 };
1187
1188 /* Structure describing stack frame layout.
1189 Stack grows downward:
1190
1191 [arguments]
1192 <- ARG_POINTER
1193 saved pc
1194
1195 saved frame pointer if frame_pointer_needed
1196 <- HARD_FRAME_POINTER
1197 [saved regs]
1198
1199 [padding1] \
1200 )
1201 [va_arg registers] (
1202 > to_allocate <- FRAME_POINTER
1203 [frame] (
1204 )
1205 [padding2] /
1206 */
1207 struct ix86_frame
1208 {
1209 int nregs;
1210 int padding1;
1211 int va_arg_size;
1212 HOST_WIDE_INT frame;
1213 int padding2;
1214 int outgoing_arguments_size;
1215 int red_zone_size;
1216
1217 HOST_WIDE_INT to_allocate;
1218 /* The offsets relative to ARG_POINTER. */
1219 HOST_WIDE_INT frame_pointer_offset;
1220 HOST_WIDE_INT hard_frame_pointer_offset;
1221 HOST_WIDE_INT stack_pointer_offset;
1222
1223 /* When save_regs_using_mov is set, emit prologue using
1224 move instead of push instructions. */
1225 bool save_regs_using_mov;
1226 };
1227
1228 /* Code model option. */
1229 enum cmodel ix86_cmodel;
1230 /* Asm dialect. */
1231 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1232 /* TLS dialects. */
1233 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1234
1235 /* Which unit we are generating floating point math for. */
1236 enum fpmath_unit ix86_fpmath;
1237
1238 /* Which CPU we are scheduling for. */
1239 enum processor_type ix86_tune;
1240 /* Which instruction set architecture to use. */
1241 enum processor_type ix86_arch;
1242
1243 /* True if the SSE prefetch instruction is not a NOOP. */
1244 int x86_prefetch_sse;
1245
1246 /* ix86_regparm_string as a number */
1247 static int ix86_regparm;
1248
1249 /* -mstackrealign option */
1250 extern int ix86_force_align_arg_pointer;
1251 static const char ix86_force_align_arg_pointer_string[] = "force_align_arg_pointer";
1252
1253 /* Preferred alignment for stack boundary in bits. */
1254 unsigned int ix86_preferred_stack_boundary;
1255
1256 /* Values 1-5: see jump.c */
1257 int ix86_branch_cost;
1258
1259 /* Variables which are this size or smaller are put in the data/bss
1260 or ldata/lbss sections. */
1261
1262 int ix86_section_threshold = 65536;
1263
1264 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1265 char internal_label_prefix[16];
1266 int internal_label_prefix_len;
1267 \f
1268 static bool ix86_handle_option (size_t, const char *, int);
1269 static void output_pic_addr_const (FILE *, rtx, int);
1270 static void put_condition_code (enum rtx_code, enum machine_mode,
1271 int, int, FILE *);
1272 static const char *get_some_local_dynamic_name (void);
1273 static int get_some_local_dynamic_name_1 (rtx *, void *);
1274 static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
1275 static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
1276 rtx *);
1277 static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
1278 static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
1279 enum machine_mode);
1280 static rtx get_thread_pointer (int);
1281 static rtx legitimize_tls_address (rtx, enum tls_model, int);
1282 static void get_pc_thunk_name (char [32], unsigned int);
1283 static rtx gen_push (rtx);
1284 static int ix86_flags_dependent (rtx, rtx, enum attr_type);
1285 static int ix86_agi_dependent (rtx, rtx, enum attr_type);
1286 static struct machine_function * ix86_init_machine_status (void);
1287 static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
1288 static int ix86_nsaved_regs (void);
1289 static void ix86_emit_save_regs (void);
1290 static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
1291 static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
1292 static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
1293 static HOST_WIDE_INT ix86_GOT_alias_set (void);
1294 static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
1295 static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
1296 static int ix86_issue_rate (void);
1297 static int ix86_adjust_cost (rtx, rtx, rtx, int);
1298 static int ia32_multipass_dfa_lookahead (void);
1299 static void ix86_init_mmx_sse_builtins (void);
1300 static rtx x86_this_parameter (tree);
1301 static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
1302 HOST_WIDE_INT, tree);
1303 static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
1304 static void x86_file_start (void);
1305 static void ix86_reorg (void);
1306 static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
1307 static tree ix86_build_builtin_va_list (void);
1308 static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
1309 tree, int *, int);
1310 static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
1311 static bool ix86_scalar_mode_supported_p (enum machine_mode);
1312 static bool ix86_vector_mode_supported_p (enum machine_mode);
1313
1314 static int ix86_address_cost (rtx);
1315 static bool ix86_cannot_force_const_mem (rtx);
1316 static rtx ix86_delegitimize_address (rtx);
1317
1318 static void i386_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
1319
1320 struct builtin_description;
1321 static rtx ix86_expand_sse_comi (const struct builtin_description *,
1322 tree, rtx);
1323 static rtx ix86_expand_sse_compare (const struct builtin_description *,
1324 tree, rtx);
1325 static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
1326 static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
1327 static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
1328 static rtx ix86_expand_store_builtin (enum insn_code, tree);
1329 static rtx safe_vector_operand (rtx, enum machine_mode);
1330 static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
1331 static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
1332 static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
1333 static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
1334 static int ix86_fp_comparison_cost (enum rtx_code code);
1335 static unsigned int ix86_select_alt_pic_regnum (void);
1336 static int ix86_save_reg (unsigned int, int);
1337 static void ix86_compute_frame_layout (struct ix86_frame *);
1338 static int ix86_comp_type_attributes (tree, tree);
1339 static int ix86_function_regparm (tree, tree);
1340 const struct attribute_spec ix86_attribute_table[];
1341 static bool ix86_function_ok_for_sibcall (tree, tree);
1342 static tree ix86_handle_cconv_attribute (tree *, tree, tree, int, bool *);
1343 static int ix86_value_regno (enum machine_mode, tree, tree);
1344 static bool contains_128bit_aligned_vector_p (tree);
1345 static rtx ix86_struct_value_rtx (tree, int);
1346 static bool ix86_ms_bitfield_layout_p (tree);
1347 static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
1348 static int extended_reg_mentioned_1 (rtx *, void *);
1349 static bool ix86_rtx_costs (rtx, int, int, int *);
1350 static int min_insn_size (rtx);
1351 static tree ix86_md_asm_clobbers (tree outputs, tree inputs, tree clobbers);
1352 static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
1353 static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
1354 tree, bool);
1355 static void ix86_init_builtins (void);
1356 static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
1357 static tree ix86_builtin_vectorized_function (enum built_in_function, tree);
1358 static const char *ix86_mangle_fundamental_type (tree);
1359 static tree ix86_stack_protect_fail (void);
1360 static rtx ix86_internal_arg_pointer (void);
1361 static void ix86_dwarf_handle_frame_unspec (const char *, rtx, int);
1362
1363 /* This function is only used on Solaris. */
1364 static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
1365 ATTRIBUTE_UNUSED;
1366
1367 /* Register class used for passing a given 64bit part of the argument.
1368 These represent classes as documented by the PS ABI, with the exception
1369 of the SSESF and SSEDF classes, which are basically the SSE class; gcc just
1370 uses an SFmode or DFmode move instead of DImode to avoid reformatting penalties.
1371
1372 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
1373 whenever possible (i.e. when the upper half is just padding). See the
1374 classification example following the class name table below. */
1375 enum x86_64_reg_class
1376 {
1377 X86_64_NO_CLASS,
1378 X86_64_INTEGER_CLASS,
1379 X86_64_INTEGERSI_CLASS,
1380 X86_64_SSE_CLASS,
1381 X86_64_SSESF_CLASS,
1382 X86_64_SSEDF_CLASS,
1383 X86_64_SSEUP_CLASS,
1384 X86_64_X87_CLASS,
1385 X86_64_X87UP_CLASS,
1386 X86_64_COMPLEX_X87_CLASS,
1387 X86_64_MEMORY_CLASS
1388 };
1389 static const char * const x86_64_reg_class_name[] = {
1390 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
1391 "sseup", "x87", "x87up", "cplx87", "no"
1392 };
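/* An illustrative classification sketch (not part of the compiler itself):
   given the aggregate

       struct example { double d; int i; };

   which occupies two eightbytes, the first eightbyte (d) is classified as
   X86_64_SSEDF_CLASS and passed in an SSE register, while the second
   eightbyte (i followed by padding) is classified as X86_64_INTEGERSI_CLASS
   and passed in a general purpose register using an SImode move, since the
   upper half of that eightbyte is only padding.  */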
1393
1394 #define MAX_CLASSES 4
1395
1396 /* Table of constants used by fldpi, fldln2, etc.... */
1397 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1398 static bool ext_80387_constants_init = 0;
1399 static void init_ext_80387_constants (void);
1400 static bool ix86_in_large_data_p (tree) ATTRIBUTE_UNUSED;
1401 static void ix86_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
1402 static void x86_64_elf_unique_section (tree decl, int reloc) ATTRIBUTE_UNUSED;
1403 static section *x86_64_elf_select_section (tree decl, int reloc,
1404 unsigned HOST_WIDE_INT align)
1405 ATTRIBUTE_UNUSED;
1406 \f
1407 /* Initialize the GCC target structure. */
1408 #undef TARGET_ATTRIBUTE_TABLE
1409 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
1410 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1411 # undef TARGET_MERGE_DECL_ATTRIBUTES
1412 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
1413 #endif
1414
1415 #undef TARGET_COMP_TYPE_ATTRIBUTES
1416 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
1417
1418 #undef TARGET_INIT_BUILTINS
1419 #define TARGET_INIT_BUILTINS ix86_init_builtins
1420 #undef TARGET_EXPAND_BUILTIN
1421 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
1422 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1423 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION ix86_builtin_vectorized_function
1424
1425 #undef TARGET_ASM_FUNCTION_EPILOGUE
1426 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
1427
1428 #undef TARGET_ENCODE_SECTION_INFO
1429 #ifndef SUBTARGET_ENCODE_SECTION_INFO
1430 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
1431 #else
1432 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
1433 #endif
1434
1435 #undef TARGET_ASM_OPEN_PAREN
1436 #define TARGET_ASM_OPEN_PAREN ""
1437 #undef TARGET_ASM_CLOSE_PAREN
1438 #define TARGET_ASM_CLOSE_PAREN ""
1439
1440 #undef TARGET_ASM_ALIGNED_HI_OP
1441 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
1442 #undef TARGET_ASM_ALIGNED_SI_OP
1443 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
1444 #ifdef ASM_QUAD
1445 #undef TARGET_ASM_ALIGNED_DI_OP
1446 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
1447 #endif
1448
1449 #undef TARGET_ASM_UNALIGNED_HI_OP
1450 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
1451 #undef TARGET_ASM_UNALIGNED_SI_OP
1452 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
1453 #undef TARGET_ASM_UNALIGNED_DI_OP
1454 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
1455
1456 #undef TARGET_SCHED_ADJUST_COST
1457 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
1458 #undef TARGET_SCHED_ISSUE_RATE
1459 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
1460 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1461 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1462 ia32_multipass_dfa_lookahead
1463
1464 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1465 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
1466
1467 #ifdef HAVE_AS_TLS
1468 #undef TARGET_HAVE_TLS
1469 #define TARGET_HAVE_TLS true
1470 #endif
1471 #undef TARGET_CANNOT_FORCE_CONST_MEM
1472 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1473 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1474 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_rtx_true
1475
1476 #undef TARGET_DELEGITIMIZE_ADDRESS
1477 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1478
1479 #undef TARGET_MS_BITFIELD_LAYOUT_P
1480 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1481
1482 #if TARGET_MACHO
1483 #undef TARGET_BINDS_LOCAL_P
1484 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1485 #endif
1486
1487 #undef TARGET_ASM_OUTPUT_MI_THUNK
1488 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1489 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1490 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1491
1492 #undef TARGET_ASM_FILE_START
1493 #define TARGET_ASM_FILE_START x86_file_start
1494
1495 #undef TARGET_DEFAULT_TARGET_FLAGS
1496 #define TARGET_DEFAULT_TARGET_FLAGS \
1497 (TARGET_DEFAULT \
1498 | TARGET_64BIT_DEFAULT \
1499 | TARGET_SUBTARGET_DEFAULT \
1500 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
1501
1502 #undef TARGET_HANDLE_OPTION
1503 #define TARGET_HANDLE_OPTION ix86_handle_option
1504
1505 #undef TARGET_RTX_COSTS
1506 #define TARGET_RTX_COSTS ix86_rtx_costs
1507 #undef TARGET_ADDRESS_COST
1508 #define TARGET_ADDRESS_COST ix86_address_cost
1509
1510 #undef TARGET_FIXED_CONDITION_CODE_REGS
1511 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1512 #undef TARGET_CC_MODES_COMPATIBLE
1513 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1514
1515 #undef TARGET_MACHINE_DEPENDENT_REORG
1516 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1517
1518 #undef TARGET_BUILD_BUILTIN_VA_LIST
1519 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1520
1521 #undef TARGET_MD_ASM_CLOBBERS
1522 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1523
1524 #undef TARGET_PROMOTE_PROTOTYPES
1525 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1526 #undef TARGET_STRUCT_VALUE_RTX
1527 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1528 #undef TARGET_SETUP_INCOMING_VARARGS
1529 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1530 #undef TARGET_MUST_PASS_IN_STACK
1531 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1532 #undef TARGET_PASS_BY_REFERENCE
1533 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1534 #undef TARGET_INTERNAL_ARG_POINTER
1535 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
1536 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
1537 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
1538
1539 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1540 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1541
1542 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1543 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
1544
1545 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1546 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1547
1548 #ifdef HAVE_AS_TLS
1549 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1550 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
1551 #endif
1552
1553 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1554 #undef TARGET_INSERT_ATTRIBUTES
1555 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1556 #endif
1557
1558 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
1559 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ix86_mangle_fundamental_type
1560
1561 #undef TARGET_STACK_PROTECT_FAIL
1562 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
1563
1564 #undef TARGET_FUNCTION_VALUE
1565 #define TARGET_FUNCTION_VALUE ix86_function_value
1566
1567 struct gcc_target targetm = TARGET_INITIALIZER;
1568
1569 \f
1570 /* The svr4 ABI for the i386 says that records and unions are returned
1571 in memory. */
1572 #ifndef DEFAULT_PCC_STRUCT_RETURN
1573 #define DEFAULT_PCC_STRUCT_RETURN 1
1574 #endif
1575
1576 /* Implement TARGET_HANDLE_OPTION. */
1577
1578 static bool
1579 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1580 {
1581 switch (code)
1582 {
1583 case OPT_m3dnow:
1584 if (!value)
1585 {
1586 target_flags &= ~MASK_3DNOW_A;
1587 target_flags_explicit |= MASK_3DNOW_A;
1588 }
1589 return true;
1590
1591 case OPT_mmmx:
1592 if (!value)
1593 {
1594 target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
1595 target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
1596 }
1597 return true;
1598
1599 case OPT_msse:
1600 if (!value)
1601 {
1602 target_flags &= ~(MASK_SSE2 | MASK_SSE3);
1603 target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
1604 }
1605 return true;
1606
1607 case OPT_msse2:
1608 if (!value)
1609 {
1610 target_flags &= ~MASK_SSE3;
1611 target_flags_explicit |= MASK_SSE3;
1612 }
1613 return true;
1614
1615 default:
1616 return true;
1617 }
1618 }
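/* Behavior sketch for the negative options handled above: on a command line
   such as "-msse2 -mno-sse", the later -mno-sse reaches OPT_msse with VALUE
   of 0 and also clears MASK_SSE2 and MASK_SSE3 (marking them explicit), so
   override_options will not re-enable them; likewise "-mno-mmx" also turns
   off the 3DNow! masks that depend on MMX.  */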
1619
1620 /* Sometimes certain combinations of command options do not make
1621 sense on a particular target machine. You can define a macro
1622 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1623 defined, is executed once just after all the command options have
1624 been parsed.
1625
1626 Don't use this macro to turn on various extra optimizations for
1627 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1628
1629 void
1630 override_options (void)
1631 {
1632 int i;
1633 int ix86_tune_defaulted = 0;
1634
1635 /* Comes from final.c -- no real reason to change it. */
1636 #define MAX_CODE_ALIGN 16
1637
1638 static struct ptt
1639 {
1640 const struct processor_costs *cost; /* Processor costs */
1641 const int target_enable; /* Target flags to enable. */
1642 const int target_disable; /* Target flags to disable. */
1643 const int align_loop; /* Default alignments. */
1644 const int align_loop_max_skip;
1645 const int align_jump;
1646 const int align_jump_max_skip;
1647 const int align_func;
1648 }
1649 const processor_target_table[PROCESSOR_max] =
1650 {
1651 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1652 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1653 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1654 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1655 {&geode_cost, 0, 0, 0, 0, 0, 0, 0},
1656 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1657 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1658 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1659 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1660 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0},
1661 {&core2_cost, 0, 0, 16, 7, 16, 7, 16},
1662 {&generic32_cost, 0, 0, 16, 7, 16, 7, 16},
1663 {&generic64_cost, 0, 0, 16, 7, 16, 7, 16}
1664 };
1665
1666 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1667 static struct pta
1668 {
1669 const char *const name; /* processor name or nickname. */
1670 const enum processor_type processor;
1671 const enum pta_flags
1672 {
1673 PTA_SSE = 1,
1674 PTA_SSE2 = 2,
1675 PTA_SSE3 = 4,
1676 PTA_MMX = 8,
1677 PTA_PREFETCH_SSE = 16,
1678 PTA_3DNOW = 32,
1679 PTA_3DNOW_A = 64,
1680 PTA_64BIT = 128,
1681 PTA_SSSE3 = 256
1682 } flags;
1683 }
1684 const processor_alias_table[] =
1685 {
1686 {"i386", PROCESSOR_I386, 0},
1687 {"i486", PROCESSOR_I486, 0},
1688 {"i586", PROCESSOR_PENTIUM, 0},
1689 {"pentium", PROCESSOR_PENTIUM, 0},
1690 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1691 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1692 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1693 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1694 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1695 {"i686", PROCESSOR_PENTIUMPRO, 0},
1696 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1697 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1698 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1699 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1700 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1701 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1702 | PTA_MMX | PTA_PREFETCH_SSE},
1703 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1704 | PTA_MMX | PTA_PREFETCH_SSE},
1705 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1706 | PTA_MMX | PTA_PREFETCH_SSE},
1707 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1708 | PTA_MMX | PTA_PREFETCH_SSE},
1709 {"core2", PROCESSOR_CORE2, PTA_SSE | PTA_SSE2 | PTA_SSE3
1710 | PTA_64BIT | PTA_MMX
1711 | PTA_PREFETCH_SSE},
1712 {"geode", PROCESSOR_GEODE, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1713 | PTA_3DNOW_A},
1714 {"k6", PROCESSOR_K6, PTA_MMX},
1715 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1716 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1717 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1718 | PTA_3DNOW_A},
1719 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1720 | PTA_3DNOW | PTA_3DNOW_A},
1721 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1722 | PTA_3DNOW_A | PTA_SSE},
1723 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1724 | PTA_3DNOW_A | PTA_SSE},
1725 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1726 | PTA_3DNOW_A | PTA_SSE},
1727 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1728 | PTA_SSE | PTA_SSE2 },
1729 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1730 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1731 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1732 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1733 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1734 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1735 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1736 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1737 {"generic32", PROCESSOR_GENERIC32, 0 /* flags are only used for -march switch. */ },
1738 {"generic64", PROCESSOR_GENERIC64, PTA_64BIT /* flags are only used for -march switch. */ },
1739 };
1740
1741 int const pta_size = ARRAY_SIZE (processor_alias_table);
1742
1743 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1744 SUBTARGET_OVERRIDE_OPTIONS;
1745 #endif
1746
1747 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
1748 SUBSUBTARGET_OVERRIDE_OPTIONS;
1749 #endif
1750
1751 /* -fPIC is the default for x86_64 Darwin (Mach-O). */
1752 if (TARGET_MACHO && TARGET_64BIT)
1753 flag_pic = 2;
1754
1755 /* Set the default values for switches whose default depends on TARGET_64BIT
1756 in case they weren't overwritten by command line options. */
1757 if (TARGET_64BIT)
1758 {
1759 /* Mach-O doesn't support omitting the frame pointer for now. */
1760 if (flag_omit_frame_pointer == 2)
1761 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
1762 if (flag_asynchronous_unwind_tables == 2)
1763 flag_asynchronous_unwind_tables = 1;
1764 if (flag_pcc_struct_return == 2)
1765 flag_pcc_struct_return = 0;
1766 }
1767 else
1768 {
1769 if (flag_omit_frame_pointer == 2)
1770 flag_omit_frame_pointer = 0;
1771 if (flag_asynchronous_unwind_tables == 2)
1772 flag_asynchronous_unwind_tables = 0;
1773 if (flag_pcc_struct_return == 2)
1774 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1775 }
1776
1777 /* Need to check -mtune=generic first. */
1778 if (ix86_tune_string)
1779 {
1780 if (!strcmp (ix86_tune_string, "generic")
1781 || !strcmp (ix86_tune_string, "i686")
1782 /* As special support for cross compilers we read -mtune=native
1783 as -mtune=generic. With native compilers we won't see the
1784 -mtune=native, as it was changed by the driver. */
1785 || !strcmp (ix86_tune_string, "native"))
1786 {
1787 if (TARGET_64BIT)
1788 ix86_tune_string = "generic64";
1789 else
1790 ix86_tune_string = "generic32";
1791 }
1792 else if (!strncmp (ix86_tune_string, "generic", 7))
1793 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1794 }
1795 else
1796 {
1797 if (ix86_arch_string)
1798 ix86_tune_string = ix86_arch_string;
1799 if (!ix86_tune_string)
1800 {
1801 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1802 ix86_tune_defaulted = 1;
1803 }
1804
1805 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
1806 need to use a sensible tune option. */
1807 if (!strcmp (ix86_tune_string, "generic")
1808 || !strcmp (ix86_tune_string, "x86-64")
1809 || !strcmp (ix86_tune_string, "i686"))
1810 {
1811 if (TARGET_64BIT)
1812 ix86_tune_string = "generic64";
1813 else
1814 ix86_tune_string = "generic32";
1815 }
1816 }
1817 if (ix86_stringop_string)
1818 {
1819 if (!strcmp (ix86_stringop_string, "rep_byte"))
1820 stringop_alg = rep_prefix_1_byte;
1821 else if (!strcmp (ix86_stringop_string, "libcall"))
1822 stringop_alg = libcall;
1823 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
1824 stringop_alg = rep_prefix_4_byte;
1825 else if (!strcmp (ix86_stringop_string, "rep_8byte"))
1826 stringop_alg = rep_prefix_8_byte;
1827 else if (!strcmp (ix86_stringop_string, "byte_loop"))
1828 stringop_alg = loop_1_byte;
1829 else if (!strcmp (ix86_stringop_string, "loop"))
1830 stringop_alg = loop;
1831 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
1832 stringop_alg = unrolled_loop;
1833 else
1834 error ("bad value (%s) for -mstringop-strategy= switch", ix86_stringop_string);
1835 }
1836 if (!strcmp (ix86_tune_string, "x86-64"))
1837 warning (OPT_Wdeprecated, "-mtune=x86-64 is deprecated. Use -mtune=k8 or "
1838 "-mtune=generic instead as appropriate.");
1839
1840 if (!ix86_arch_string)
1841 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1842 if (!strcmp (ix86_arch_string, "generic"))
1843 error ("generic CPU can be used only for -mtune= switch");
1844 if (!strncmp (ix86_arch_string, "generic", 7))
1845 error ("bad value (%s) for -march= switch", ix86_arch_string);
1846
1847 if (ix86_cmodel_string != 0)
1848 {
1849 if (!strcmp (ix86_cmodel_string, "small"))
1850 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1851 else if (!strcmp (ix86_cmodel_string, "medium"))
1852 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
1853 else if (flag_pic)
1854 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1855 else if (!strcmp (ix86_cmodel_string, "32"))
1856 ix86_cmodel = CM_32;
1857 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1858 ix86_cmodel = CM_KERNEL;
1859 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1860 ix86_cmodel = CM_LARGE;
1861 else
1862 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1863 }
1864 else
1865 {
1866 ix86_cmodel = CM_32;
1867 if (TARGET_64BIT)
1868 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1869 }
1870 if (ix86_asm_string != 0)
1871 {
1872 if (! TARGET_MACHO
1873 && !strcmp (ix86_asm_string, "intel"))
1874 ix86_asm_dialect = ASM_INTEL;
1875 else if (!strcmp (ix86_asm_string, "att"))
1876 ix86_asm_dialect = ASM_ATT;
1877 else
1878 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1879 }
1880 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1881 error ("code model %qs not supported in the %s bit mode",
1882 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1883 if (ix86_cmodel == CM_LARGE)
1884 sorry ("code model %<large%> not supported yet");
1885 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1886 sorry ("%i-bit mode not compiled in",
1887 (target_flags & MASK_64BIT) ? 64 : 32);
1888
1889 for (i = 0; i < pta_size; i++)
1890 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1891 {
1892 ix86_arch = processor_alias_table[i].processor;
1893 /* Default cpu tuning to the architecture. */
1894 ix86_tune = ix86_arch;
1895 if (processor_alias_table[i].flags & PTA_MMX
1896 && !(target_flags_explicit & MASK_MMX))
1897 target_flags |= MASK_MMX;
1898 if (processor_alias_table[i].flags & PTA_3DNOW
1899 && !(target_flags_explicit & MASK_3DNOW))
1900 target_flags |= MASK_3DNOW;
1901 if (processor_alias_table[i].flags & PTA_3DNOW_A
1902 && !(target_flags_explicit & MASK_3DNOW_A))
1903 target_flags |= MASK_3DNOW_A;
1904 if (processor_alias_table[i].flags & PTA_SSE
1905 && !(target_flags_explicit & MASK_SSE))
1906 target_flags |= MASK_SSE;
1907 if (processor_alias_table[i].flags & PTA_SSE2
1908 && !(target_flags_explicit & MASK_SSE2))
1909 target_flags |= MASK_SSE2;
1910 if (processor_alias_table[i].flags & PTA_SSE3
1911 && !(target_flags_explicit & MASK_SSE3))
1912 target_flags |= MASK_SSE3;
1913 if (processor_alias_table[i].flags & PTA_SSSE3
1914 && !(target_flags_explicit & MASK_SSSE3))
1915 target_flags |= MASK_SSSE3;
1916 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1917 x86_prefetch_sse = true;
1918 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1919 error ("CPU you selected does not support x86-64 "
1920 "instruction set");
1921 break;
1922 }
1923
1924 if (i == pta_size)
1925 error ("bad value (%s) for -march= switch", ix86_arch_string);
1926
1927 for (i = 0; i < pta_size; i++)
1928 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1929 {
1930 ix86_tune = processor_alias_table[i].processor;
1931 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1932 {
1933 if (ix86_tune_defaulted)
1934 {
1935 ix86_tune_string = "x86-64";
1936 for (i = 0; i < pta_size; i++)
1937 if (! strcmp (ix86_tune_string,
1938 processor_alias_table[i].name))
1939 break;
1940 ix86_tune = processor_alias_table[i].processor;
1941 }
1942 else
1943 error ("CPU you selected does not support x86-64 "
1944 "instruction set");
1945 }
1946 /* Intel CPUs have always interpreted SSE prefetch instructions as
1947 NOPs; so, we can enable SSE prefetch instructions even when
1948 -mtune (rather than -march) points us to a processor that has them.
1949 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1950 higher processors. */
1951 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1952 x86_prefetch_sse = true;
1953 break;
1954 }
1955 if (i == pta_size)
1956 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1957
1958 if (optimize_size)
1959 ix86_cost = &size_cost;
1960 else
1961 ix86_cost = processor_target_table[ix86_tune].cost;
1962 target_flags |= processor_target_table[ix86_tune].target_enable;
1963 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1964
1965 /* Arrange to set up i386_stack_locals for all functions. */
1966 init_machine_status = ix86_init_machine_status;
1967
1968 /* Validate -mregparm= value. */
1969 if (ix86_regparm_string)
1970 {
1971 i = atoi (ix86_regparm_string);
1972 if (i < 0 || i > REGPARM_MAX)
1973 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1974 else
1975 ix86_regparm = i;
1976 }
1977 else
1978 if (TARGET_64BIT)
1979 ix86_regparm = REGPARM_MAX;
1980
1981 /* If the user has provided any of the -malign-* options,
1982 warn and use that value only if -falign-* is not set.
1983 Remove this code in GCC 3.2 or later. */
1984 if (ix86_align_loops_string)
1985 {
1986 warning (0, "-malign-loops is obsolete, use -falign-loops");
1987 if (align_loops == 0)
1988 {
1989 i = atoi (ix86_align_loops_string);
1990 if (i < 0 || i > MAX_CODE_ALIGN)
1991 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1992 else
1993 align_loops = 1 << i;
1994 }
1995 }
1996
1997 if (ix86_align_jumps_string)
1998 {
1999 warning (0, "-malign-jumps is obsolete, use -falign-jumps");
2000 if (align_jumps == 0)
2001 {
2002 i = atoi (ix86_align_jumps_string);
2003 if (i < 0 || i > MAX_CODE_ALIGN)
2004 error ("-malign-jumps=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2005 else
2006 align_jumps = 1 << i;
2007 }
2008 }
2009
2010 if (ix86_align_funcs_string)
2011 {
2012 warning (0, "-malign-functions is obsolete, use -falign-functions");
2013 if (align_functions == 0)
2014 {
2015 i = atoi (ix86_align_funcs_string);
2016 if (i < 0 || i > MAX_CODE_ALIGN)
2017 error ("-malign-functions=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2018 else
2019 align_functions = 1 << i;
2020 }
2021 }
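/* Note that the -malign-* values above are log2 exponents rather than byte
   counts: for example, "-malign-functions=4" yields
   align_functions = 1 << 4, i.e. 16-byte alignment.  */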
2022
2023 /* Default align_* from the processor table. */
2024 if (align_loops == 0)
2025 {
2026 align_loops = processor_target_table[ix86_tune].align_loop;
2027 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
2028 }
2029 if (align_jumps == 0)
2030 {
2031 align_jumps = processor_target_table[ix86_tune].align_jump;
2032 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
2033 }
2034 if (align_functions == 0)
2035 {
2036 align_functions = processor_target_table[ix86_tune].align_func;
2037 }
2038
2039 /* Validate -mbranch-cost= value, or provide default. */
2040 ix86_branch_cost = ix86_cost->branch_cost;
2041 if (ix86_branch_cost_string)
2042 {
2043 i = atoi (ix86_branch_cost_string);
2044 if (i < 0 || i > 5)
2045 error ("-mbranch-cost=%d is not between 0 and 5", i);
2046 else
2047 ix86_branch_cost = i;
2048 }
2049 if (ix86_section_threshold_string)
2050 {
2051 i = atoi (ix86_section_threshold_string);
2052 if (i < 0)
2053 error ("-mlarge-data-threshold=%d is negative", i);
2054 else
2055 ix86_section_threshold = i;
2056 }
2057
2058 if (ix86_tls_dialect_string)
2059 {
2060 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
2061 ix86_tls_dialect = TLS_DIALECT_GNU;
2062 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
2063 ix86_tls_dialect = TLS_DIALECT_GNU2;
2064 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
2065 ix86_tls_dialect = TLS_DIALECT_SUN;
2066 else
2067 error ("bad value (%s) for -mtls-dialect= switch",
2068 ix86_tls_dialect_string);
2069 }
2070
2071 /* Keep nonleaf frame pointers. */
2072 if (flag_omit_frame_pointer)
2073 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
2074 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
2075 flag_omit_frame_pointer = 1;
2076
2077 /* If we may assume there are no NaNs (-ffinite-math-only), we don't care
2078 about comparison order wrt NaNs. This lets us use a shorter comparison sequence. */
2079 if (flag_finite_math_only)
2080 target_flags &= ~MASK_IEEE_FP;
2081
2082 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
2083 since the insns won't need emulation. */
2084 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
2085 target_flags &= ~MASK_NO_FANCY_MATH_387;
2086
2087 /* Likewise, if the target doesn't have a 387, or we've specified
2088 software floating point, don't use 387 inline intrinsics. */
2089 if (!TARGET_80387)
2090 target_flags |= MASK_NO_FANCY_MATH_387;
2091
2092 /* Turn on SSE3 builtins for -mssse3. */
2093 if (TARGET_SSSE3)
2094 target_flags |= MASK_SSE3;
2095
2096 /* Turn on SSE2 builtins for -msse3. */
2097 if (TARGET_SSE3)
2098 target_flags |= MASK_SSE2;
2099
2100 /* Turn on SSE builtins for -msse2. */
2101 if (TARGET_SSE2)
2102 target_flags |= MASK_SSE;
2103
2104 /* Turn on MMX builtins for -msse. */
2105 if (TARGET_SSE)
2106 {
2107 target_flags |= MASK_MMX & ~target_flags_explicit;
2108 x86_prefetch_sse = true;
2109 }
2110
2111 /* Turn on MMX builtins for 3Dnow. */
2112 if (TARGET_3DNOW)
2113 target_flags |= MASK_MMX;
2114
2115 if (TARGET_64BIT)
2116 {
2117 if (TARGET_ALIGN_DOUBLE)
2118 error ("-malign-double makes no sense in the 64bit mode");
2119 if (TARGET_RTD)
2120 error ("-mrtd calling convention not supported in the 64bit mode");
2121
2122 /* Enable by default the SSE and MMX builtins. Do allow the user to
2123 explicitly disable any of these. In particular, disabling SSE and
2124 MMX for kernel code is extremely useful. */
2125 target_flags
2126 |= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
2127 & ~target_flags_explicit);
2128 }
2129 else
2130 {
2131 /* The i386 ABI does not specify a red zone. It still makes sense to use one
2132 when the programmer takes care to keep the stack from being destroyed. */
2133 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
2134 target_flags |= MASK_NO_RED_ZONE;
2135 }
2136
2137 /* Validate -mpreferred-stack-boundary= value, or provide default.
2138 The default of 128 bits is for Pentium III's SSE __m128. We can't
2139 lower it just for optimize_size; otherwise we could not mix object
2140 files compiled with -Os and -On. */
2141 ix86_preferred_stack_boundary = 128;
2142 if (ix86_preferred_stack_boundary_string)
2143 {
2144 i = atoi (ix86_preferred_stack_boundary_string);
2145 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
2146 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
2147 TARGET_64BIT ? 4 : 2);
2148 else
2149 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
2150 }
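/* Worked example of the computation above: "-mpreferred-stack-boundary=4"
   gives (1 << 4) * BITS_PER_UNIT = 128 bits, which matches the 16-byte
   default needed for SSE __m128 values.  */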
2151
2152 /* Accept -msseregparm only if at least SSE support is enabled. */
2153 if (TARGET_SSEREGPARM
2154 && ! TARGET_SSE)
2155 error ("-msseregparm used without SSE enabled");
2156
2157 ix86_fpmath = TARGET_FPMATH_DEFAULT;
2158
2159 if (ix86_fpmath_string != 0)
2160 {
2161 if (! strcmp (ix86_fpmath_string, "387"))
2162 ix86_fpmath = FPMATH_387;
2163 else if (! strcmp (ix86_fpmath_string, "sse"))
2164 {
2165 if (!TARGET_SSE)
2166 {
2167 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2168 ix86_fpmath = FPMATH_387;
2169 }
2170 else
2171 ix86_fpmath = FPMATH_SSE;
2172 }
2173 else if (! strcmp (ix86_fpmath_string, "387,sse")
2174 || ! strcmp (ix86_fpmath_string, "sse,387"))
2175 {
2176 if (!TARGET_SSE)
2177 {
2178 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2179 ix86_fpmath = FPMATH_387;
2180 }
2181 else if (!TARGET_80387)
2182 {
2183 warning (0, "387 instruction set disabled, using SSE arithmetics");
2184 ix86_fpmath = FPMATH_SSE;
2185 }
2186 else
2187 ix86_fpmath = FPMATH_SSE | FPMATH_387;
2188 }
2189 else
2190 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
2191 }
2192
2193 /* If the i387 is disabled, then do not return values in it. */
2194 if (!TARGET_80387)
2195 target_flags &= ~MASK_FLOAT_RETURNS;
2196
2197 if ((x86_accumulate_outgoing_args & TUNEMASK)
2198 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2199 && !optimize_size)
2200 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2201
2202 /* ??? Unwind info is not correct around the CFG unless either a frame
2203 pointer is present or M_A_O_A is set. Fixing this requires rewriting
2204 unwind info generation to be aware of the CFG and propagating states
2205 around edges. */
2206 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
2207 || flag_exceptions || flag_non_call_exceptions)
2208 && flag_omit_frame_pointer
2209 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
2210 {
2211 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2212 warning (0, "unwind tables currently require either a frame pointer "
2213 "or -maccumulate-outgoing-args for correctness");
2214 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2215 }
2216
2217 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
2218 {
2219 char *p;
2220 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
2221 p = strchr (internal_label_prefix, 'X');
2222 internal_label_prefix_len = p - internal_label_prefix;
2223 *p = '\0';
2224 }
2225
2226 /* When scheduling description is not available, disable scheduler pass
2227 so it won't slow down the compilation and make x87 code slower. */
2228 if (!TARGET_SCHEDULE)
2229 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
2230
2231 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
2232 set_param_value ("simultaneous-prefetches",
2233 ix86_cost->simultaneous_prefetches);
2234 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
2235 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
2236 }
2237 \f
2238 /* Switch to the appropriate section for output of DECL.
2239 DECL is either a `VAR_DECL' node or a constant of some sort.
2240 RELOC indicates whether forming the initial value of DECL requires
2241 link-time relocations. */
2242
2243 static section *
2244 x86_64_elf_select_section (tree decl, int reloc,
2245 unsigned HOST_WIDE_INT align)
2246 {
2247 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2248 && ix86_in_large_data_p (decl))
2249 {
2250 const char *sname = NULL;
2251 unsigned int flags = SECTION_WRITE;
2252 switch (categorize_decl_for_section (decl, reloc, flag_pic))
2253 {
2254 case SECCAT_DATA:
2255 sname = ".ldata";
2256 break;
2257 case SECCAT_DATA_REL:
2258 sname = ".ldata.rel";
2259 break;
2260 case SECCAT_DATA_REL_LOCAL:
2261 sname = ".ldata.rel.local";
2262 break;
2263 case SECCAT_DATA_REL_RO:
2264 sname = ".ldata.rel.ro";
2265 break;
2266 case SECCAT_DATA_REL_RO_LOCAL:
2267 sname = ".ldata.rel.ro.local";
2268 break;
2269 case SECCAT_BSS:
2270 sname = ".lbss";
2271 flags |= SECTION_BSS;
2272 break;
2273 case SECCAT_RODATA:
2274 case SECCAT_RODATA_MERGE_STR:
2275 case SECCAT_RODATA_MERGE_STR_INIT:
2276 case SECCAT_RODATA_MERGE_CONST:
2277 sname = ".lrodata";
2278 flags = 0;
2279 break;
2280 case SECCAT_SRODATA:
2281 case SECCAT_SDATA:
2282 case SECCAT_SBSS:
2283 gcc_unreachable ();
2284 case SECCAT_TEXT:
2285 case SECCAT_TDATA:
2286 case SECCAT_TBSS:
2287 /* We don't split these for the medium model. Place them into
2288 default sections and hope for the best. */
2289 break;
2290 }
2291 if (sname)
2292 {
2293 /* We might get called with string constants, but get_named_section
2294 doesn't like them as they are not DECLs. Also, we need to set
2295 flags in that case. */
2296 if (!DECL_P (decl))
2297 return get_section (sname, flags, NULL);
2298 return get_named_section (decl, sname, reloc);
2299 }
2300 }
2301 return default_elf_select_section (decl, reloc, align);
2302 }
2303
2304 /* Build up a unique section name, expressed as a
2305 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
2306 RELOC indicates whether the initial value of EXP requires
2307 link-time relocations. */
2308
2309 static void
2310 x86_64_elf_unique_section (tree decl, int reloc)
2311 {
2312 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2313 && ix86_in_large_data_p (decl))
2314 {
2315 const char *prefix = NULL;
2316 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
2317 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
2318
2319 switch (categorize_decl_for_section (decl, reloc, flag_pic))
2320 {
2321 case SECCAT_DATA:
2322 case SECCAT_DATA_REL:
2323 case SECCAT_DATA_REL_LOCAL:
2324 case SECCAT_DATA_REL_RO:
2325 case SECCAT_DATA_REL_RO_LOCAL:
2326 prefix = one_only ? ".gnu.linkonce.ld." : ".ldata.";
2327 break;
2328 case SECCAT_BSS:
2329 prefix = one_only ? ".gnu.linkonce.lb." : ".lbss.";
2330 break;
2331 case SECCAT_RODATA:
2332 case SECCAT_RODATA_MERGE_STR:
2333 case SECCAT_RODATA_MERGE_STR_INIT:
2334 case SECCAT_RODATA_MERGE_CONST:
2335 prefix = one_only ? ".gnu.linkonce.lr." : ".lrodata.";
2336 break;
2337 case SECCAT_SRODATA:
2338 case SECCAT_SDATA:
2339 case SECCAT_SBSS:
2340 gcc_unreachable ();
2341 case SECCAT_TEXT:
2342 case SECCAT_TDATA:
2343 case SECCAT_TBSS:
2344 /* We don't split these for the medium model. Place them into
2345 default sections and hope for the best. */
2346 break;
2347 }
2348 if (prefix)
2349 {
2350 const char *name;
2351 size_t nlen, plen;
2352 char *string;
2353 plen = strlen (prefix);
2354
2355 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
2356 name = targetm.strip_name_encoding (name);
2357 nlen = strlen (name);
2358
2359 string = alloca (nlen + plen + 1);
2360 memcpy (string, prefix, plen);
2361 memcpy (string + plen, name, nlen + 1);
2362
2363 DECL_SECTION_NAME (decl) = build_string (nlen + plen, string);
2364 return;
2365 }
2366 }
2367 default_unique_section (decl, reloc);
2368 }
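/* Illustrative example for the two section hooks above (assuming
   -mcmodel=medium and the default -mlarge-data-threshold of 65536):

       static char big_table[1 << 20];

   is larger than the threshold, so ix86_in_large_data_p is true and the
   object ends up in an .lbss (or .ldata-prefixed, if initialized) section,
   while objects at or below the threshold keep the ordinary
   default_elf_select_section / default_unique_section placement.  */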
2369
2370 #ifdef COMMON_ASM_OP
2371 /* This says how to output assembler code to declare an
2372 uninitialized external linkage data object.
2373
2374 For medium model x86-64 we need to use the .largecomm directive for
2375 large objects. */
2376 void
2377 x86_elf_aligned_common (FILE *file,
2378 const char *name, unsigned HOST_WIDE_INT size,
2379 int align)
2380 {
2381 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2382 && size > (unsigned int)ix86_section_threshold)
2383 fprintf (file, ".largecomm\t");
2384 else
2385 fprintf (file, "%s", COMMON_ASM_OP);
2386 assemble_name (file, name);
2387 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
2388 size, align / BITS_PER_UNIT);
2389 }
2390 #endif
2391 /* Utility function for targets to use in implementing
2392 ASM_OUTPUT_ALIGNED_BSS. */
2393
2394 void
2395 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
2396 const char *name, unsigned HOST_WIDE_INT size,
2397 int align)
2398 {
2399 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2400 && size > (unsigned int)ix86_section_threshold)
2401 switch_to_section (get_named_section (decl, ".lbss", 0));
2402 else
2403 switch_to_section (bss_section);
2404 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
2405 #ifdef ASM_DECLARE_OBJECT_NAME
2406 last_assemble_variable_decl = decl;
2407 ASM_DECLARE_OBJECT_NAME (file, name, decl);
2408 #else
2409 /* The standard thing is to just output a label for the object. */
2410 ASM_OUTPUT_LABEL (file, name);
2411 #endif /* ASM_DECLARE_OBJECT_NAME */
2412 ASM_OUTPUT_SKIP (file, size ? size : 1);
2413 }
2414 \f
2415 void
2416 optimization_options (int level, int size ATTRIBUTE_UNUSED)
2417 {
2418 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
2419 make the problem with not enough registers even worse. */
2420 #ifdef INSN_SCHEDULING
2421 if (level > 1)
2422 flag_schedule_insns = 0;
2423 #endif
2424
2425 if (TARGET_MACHO)
2426 /* The Darwin libraries never set errno, so we might as well
2427 avoid calling them when that's the only reason we would. */
2428 flag_errno_math = 0;
2429
2430 /* The default values of these switches depend on TARGET_64BIT,
2431 which is not known at this moment. Mark these values with 2 and
2432 let the user override them. If there is no command line option
2433 specifying them, we will set the defaults in override_options. */
2434 if (optimize >= 1)
2435 flag_omit_frame_pointer = 2;
2436 flag_pcc_struct_return = 2;
2437 flag_asynchronous_unwind_tables = 2;
2438 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
2439 SUBTARGET_OPTIMIZATION_OPTIONS;
2440 #endif
2441 }
2442 \f
2443 /* Table of valid machine attributes. */
2444 const struct attribute_spec ix86_attribute_table[] =
2445 {
2446 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2447 /* Stdcall attribute says callee is responsible for popping arguments
2448 if they are not variable. */
2449 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2450 /* Fastcall attribute says callee is responsible for popping arguments
2451 if they are not variable. */
2452 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2453 /* Cdecl attribute says the callee is a normal C declaration */
2454 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2455 /* Regparm attribute specifies how many integer arguments are to be
2456 passed in registers. */
2457 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
2458 /* Sseregparm attribute says we are using x86_64 calling conventions
2459 for FP arguments. */
2460 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2461 /* force_align_arg_pointer says this function realigns the stack at entry. */
2462 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
2463 false, true, true, ix86_handle_cconv_attribute },
2464 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2465 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2466 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2467 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
2468 #endif
2469 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
2470 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
2471 #ifdef SUBTARGET_ATTRIBUTE_TABLE
2472 SUBTARGET_ATTRIBUTE_TABLE,
2473 #endif
2474 { NULL, 0, 0, false, false, false, NULL }
2475 };
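/* User-level sketch of the calling convention attributes listed above
   (illustrative declarations only; the GCC manual is authoritative):

       int __attribute__ ((stdcall))     f1 (int a, int b);   callee pops args
       int __attribute__ ((fastcall))    f2 (int a, int b);   a, b in %ecx, %edx
       int __attribute__ ((regparm (3))) f3 (int a, int b, int c);
                                              a, b, c in %eax, %edx, %ecx
       int __attribute__ ((cdecl))       f4 (int a, int b);   caller pops args

   All of these are 32-bit conventions; in 64-bit mode
   ix86_handle_cconv_attribute warns that they are ignored.  */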
2476
2477 /* Decide whether we can make a sibling call to a function. DECL is the
2478 declaration of the function being targeted by the call and EXP is the
2479 CALL_EXPR representing the call. */
2480
2481 static bool
2482 ix86_function_ok_for_sibcall (tree decl, tree exp)
2483 {
2484 tree func;
2485 rtx a, b;
2486
2487 /* If we are generating position-independent code, we cannot sibcall
2488 optimize any indirect call, or a direct call to a global function,
2489 as the PLT requires %ebx be live. */
2490 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
2491 return false;
2492
2493 if (decl)
2494 func = decl;
2495 else
2496 {
2497 func = TREE_TYPE (TREE_OPERAND (exp, 0));
2498 if (POINTER_TYPE_P (func))
2499 func = TREE_TYPE (func);
2500 }
2501
2502 /* Check that the return value locations are the same. For example,
2503 if we are returning floats on the 80387 register stack, we cannot
2504 make a sibcall from a function that doesn't return a float to a
2505 function that does or, conversely, from a function that does return
2506 a float to a function that doesn't; the necessary stack adjustment
2507 would not be executed. This is also the place we notice
2508 differences in the return value ABI. Note that it is ok for one
2509 of the functions to have void return type as long as the return
2510 value of the other is passed in a register. */
2511 a = ix86_function_value (TREE_TYPE (exp), func, false);
2512 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
2513 cfun->decl, false);
2514 if (STACK_REG_P (a) || STACK_REG_P (b))
2515 {
2516 if (!rtx_equal_p (a, b))
2517 return false;
2518 }
2519 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
2520 ;
2521 else if (!rtx_equal_p (a, b))
2522 return false;
2523
2524 /* If this call is indirect, we'll need to be able to use a call-clobbered
2525 register for the address of the target function. Make sure that all
2526 such registers are not used for passing parameters. */
2527 if (!decl && !TARGET_64BIT)
2528 {
2529 tree type;
2530
2531 /* We're looking at the CALL_EXPR, we need the type of the function. */
2532 type = TREE_OPERAND (exp, 0); /* pointer expression */
2533 type = TREE_TYPE (type); /* pointer type */
2534 type = TREE_TYPE (type); /* function type */
2535
2536 if (ix86_function_regparm (type, NULL) >= 3)
2537 {
2538 /* ??? Need to count the actual number of registers to be used,
2539 not the possible number of registers. Fix later. */
2540 return false;
2541 }
2542 }
2543
2544 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2545 /* Dllimport'd functions are also called indirectly. */
2546 if (decl && DECL_DLLIMPORT_P (decl)
2547 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
2548 return false;
2549 #endif
2550
2551 /* If we force-aligned the stack, then sibcalling would unalign the
2552 stack, which may break the called function. */
2553 if (cfun->machine->force_align_arg_pointer)
2554 return false;
2555
2556 /* Otherwise okay. That also includes certain types of indirect calls. */
2557 return true;
2558 }
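/* Example of the PIC restriction above (a sketch, not compiled here): with
   "-m32 -fpic",

       extern int bar (int);
       int foo (int x) { return bar (x); }

   cannot sibcall bar when bar does not bind locally, because the call goes
   through the PLT and PLT entries require %ebx to hold the GOT pointer.  */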
2559
2560 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
2561 calling convention attributes;
2562 arguments as in struct attribute_spec.handler. */
2563
2564 static tree
2565 ix86_handle_cconv_attribute (tree *node, tree name,
2566 tree args,
2567 int flags ATTRIBUTE_UNUSED,
2568 bool *no_add_attrs)
2569 {
2570 if (TREE_CODE (*node) != FUNCTION_TYPE
2571 && TREE_CODE (*node) != METHOD_TYPE
2572 && TREE_CODE (*node) != FIELD_DECL
2573 && TREE_CODE (*node) != TYPE_DECL)
2574 {
2575 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2576 IDENTIFIER_POINTER (name));
2577 *no_add_attrs = true;
2578 return NULL_TREE;
2579 }
2580
2581 /* Can combine regparm with all attributes but fastcall. */
2582 if (is_attribute_p ("regparm", name))
2583 {
2584 tree cst;
2585
2586 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2587 {
2588 error ("fastcall and regparm attributes are not compatible");
2589 }
2590
2591 cst = TREE_VALUE (args);
2592 if (TREE_CODE (cst) != INTEGER_CST)
2593 {
2594 warning (OPT_Wattributes,
2595 "%qs attribute requires an integer constant argument",
2596 IDENTIFIER_POINTER (name));
2597 *no_add_attrs = true;
2598 }
2599 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
2600 {
2601 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
2602 IDENTIFIER_POINTER (name), REGPARM_MAX);
2603 *no_add_attrs = true;
2604 }
2605
2606 if (!TARGET_64BIT
2607 && lookup_attribute (ix86_force_align_arg_pointer_string,
2608 TYPE_ATTRIBUTES (*node))
2609 && compare_tree_int (cst, REGPARM_MAX-1))
2610 {
2611 error ("%s functions limited to %d register parameters",
2612 ix86_force_align_arg_pointer_string, REGPARM_MAX-1);
2613 }
2614
2615 return NULL_TREE;
2616 }
2617
2618 if (TARGET_64BIT)
2619 {
2620 warning (OPT_Wattributes, "%qs attribute ignored",
2621 IDENTIFIER_POINTER (name));
2622 *no_add_attrs = true;
2623 return NULL_TREE;
2624 }
2625
2626 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
2627 if (is_attribute_p ("fastcall", name))
2628 {
2629 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2630 {
2631 error ("fastcall and cdecl attributes are not compatible");
2632 }
2633 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2634 {
2635 error ("fastcall and stdcall attributes are not compatible");
2636 }
2637 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
2638 {
2639 error ("fastcall and regparm attributes are not compatible");
2640 }
2641 }
2642
2643 /* Can combine stdcall with fastcall (redundant), regparm and
2644 sseregparm. */
2645 else if (is_attribute_p ("stdcall", name))
2646 {
2647 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2648 {
2649 error ("stdcall and cdecl attributes are not compatible");
2650 }
2651 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2652 {
2653 error ("stdcall and fastcall attributes are not compatible");
2654 }
2655 }
2656
2657 /* Can combine cdecl with regparm and sseregparm. */
2658 else if (is_attribute_p ("cdecl", name))
2659 {
2660 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2661 {
2662 error ("stdcall and cdecl attributes are not compatible");
2663 }
2664 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2665 {
2666 error ("fastcall and cdecl attributes are not compatible");
2667 }
2668 }
2669
2670 /* Can combine sseregparm with all attributes. */
2671
2672 return NULL_TREE;
2673 }
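/* For instance, a declaration that combines incompatible conventions, e.g.

       void __attribute__ ((fastcall, regparm (2))) f (int);

   is rejected by the checks above ("fastcall and regparm attributes are not
   compatible"), whereas stdcall may be freely combined with regparm and
   sseregparm.  */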
2674
2675 /* Return 0 if the attributes for two types are incompatible, 1 if they
2676 are compatible, and 2 if they are nearly compatible (which causes a
2677 warning to be generated). */
2678
2679 static int
2680 ix86_comp_type_attributes (tree type1, tree type2)
2681 {
2682 /* Check for mismatch of non-default calling convention. */
2683 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
2684
2685 if (TREE_CODE (type1) != FUNCTION_TYPE)
2686 return 1;
2687
2688 /* Check for mismatched fastcall/regparm types. */
2689 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
2690 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
2691 || (ix86_function_regparm (type1, NULL)
2692 != ix86_function_regparm (type2, NULL)))
2693 return 0;
2694
2695 /* Check for mismatched sseregparm types. */
2696 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
2697 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
2698 return 0;
2699
2700 /* Check for mismatched return types (cdecl vs stdcall). */
2701 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
2702 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
2703 return 0;
2704
2705 return 1;
2706 }
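/* As an example of an incompatible (0) result: assigning the address of a
   default-convention function to a stdcall-typed function pointer, e.g.

       typedef int (__attribute__ ((stdcall)) *stdcall_fp) (int);
       extern int f (int);
       stdcall_fp p = f;

   differs in the stdcall attribute, so ix86_comp_type_attributes returns 0
   and the front end can warn about the incompatible pointer assignment.
   (Sketch only; the exact diagnostic depends on the front end.)  */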
2707 \f
2708 /* Return the regparm value for a function with the indicated TYPE and DECL.
2709 DECL may be NULL when calling function indirectly
2710 or considering a libcall. */
2711
2712 static int
2713 ix86_function_regparm (tree type, tree decl)
2714 {
2715 tree attr;
2716 int regparm = ix86_regparm;
2717 bool user_convention = false;
2718
2719 if (!TARGET_64BIT)
2720 {
2721 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
2722 if (attr)
2723 {
2724 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
2725 user_convention = true;
2726 }
2727
2728 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
2729 {
2730 regparm = 2;
2731 user_convention = true;
2732 }
2733
2734 /* Use register calling convention for local functions when possible. */
2735 if (!TARGET_64BIT && !user_convention && decl
2736 && flag_unit_at_a_time && !profile_flag)
2737 {
2738 struct cgraph_local_info *i = cgraph_local_info (decl);
2739 if (i && i->local)
2740 {
2741 int local_regparm, globals = 0, regno;
2742
2743 /* Make sure no regparm register is taken by a global register
2744 variable. */
2745 for (local_regparm = 0; local_regparm < 3; local_regparm++)
2746 if (global_regs[local_regparm])
2747 break;
2748 /* We can't use regparm(3) for nested functions as these use
2749 the static chain pointer in the third argument. */
2750 if (local_regparm == 3
2751 && decl_function_context (decl)
2752 && !DECL_NO_STATIC_CHAIN (decl))
2753 local_regparm = 2;
2754 /* If the function realigns its stack pointer, the
2755 prologue will clobber %ecx. If we've already
2756 generated code for the callee, the callee
2757 DECL_STRUCT_FUNCTION is gone, so we fall back to
2758 scanning the attributes for the self-realigning
2759 property. */
2760 if ((DECL_STRUCT_FUNCTION (decl)
2761 && DECL_STRUCT_FUNCTION (decl)->machine->force_align_arg_pointer)
2762 || (!DECL_STRUCT_FUNCTION (decl)
2763 && lookup_attribute (ix86_force_align_arg_pointer_string,
2764 TYPE_ATTRIBUTES (TREE_TYPE (decl)))))
2765 local_regparm = 2;
2766 /* Each global register variable increases register pressure,
2767 so the more global register variables there are, the less useful
2768 the regparm optimization is, unless requested by the user explicitly. */
2769 for (regno = 0; regno < 6; regno++)
2770 if (global_regs[regno])
2771 globals++;
2772 local_regparm
2773 = globals < local_regparm ? local_regparm - globals : 0;
2774
2775 if (local_regparm > regparm)
2776 regparm = local_regparm;
2777 }
2778 }
2779 }
2780 return regparm;
2781 }
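/* Sketch of the local function promotion above: when compiling with
   -funit-at-a-time (and without -pg), a 32-bit function such as

       static int sum3 (int a, int b, int c) { return a + b + c; }

   whose address does not escape the translation unit (cgraph says it is
   local) may effectively get regparm (3), passing a, b and c in %eax, %edx
   and %ecx, unless global register variables, a realigned stack or a static
   chain reduce the register count.  */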
2782
2783 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
2784 DFmode (2) arguments in SSE registers for a function with the
2785 indicated TYPE and DECL. DECL may be NULL when calling function
2786 indirectly or considering a libcall. Otherwise return 0. */
2787
2788 static int
2789 ix86_function_sseregparm (tree type, tree decl)
2790 {
2791 /* Use SSE registers to pass SFmode and DFmode arguments if requested
2792 by the sseregparm attribute. */
2793 if (TARGET_SSEREGPARM
2794 || (type
2795 && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
2796 {
2797 if (!TARGET_SSE)
2798 {
2799 if (decl)
2800 error ("Calling %qD with attribute sseregparm without "
2801 "SSE/SSE2 enabled", decl);
2802 else
2803 error ("Calling %qT with attribute sseregparm without "
2804 "SSE/SSE2 enabled", type);
2805 return 0;
2806 }
2807
2808 return 2;
2809 }
2810
2811 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
2812 (and DFmode for SSE2) arguments in SSE registers,
2813 even for 32-bit targets. */
2814 if (!TARGET_64BIT && decl
2815 && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
2816 {
2817 struct cgraph_local_info *i = cgraph_local_info (decl);
2818 if (i && i->local)
2819 return TARGET_SSE2 ? 2 : 1;
2820 }
2821
2822 return 0;
2823 }
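/* Usage sketch for the sseregparm path above (32-bit only, requires SSE):

       float __attribute__ ((sseregparm)) scale (float x, float factor);

   With -msse enabled, SFmode arguments such as x and factor are passed in
   SSE registers, following the x86-64 style FP argument convention; with
   -msse2 the same applies to DFmode arguments.  */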
2824
2825 /* Return true if EAX is live at the start of the function. Used by
2826 ix86_expand_prologue to determine if we need special help before
2827 calling allocate_stack_worker. */
2828
2829 static bool
2830 ix86_eax_live_at_start_p (void)
2831 {
2832 /* Cheat. Don't bother working forward from ix86_function_regparm
2833 to the function type to whether an actual argument is located in
2834 eax. Instead just look at cfg info, which is still close enough
2835 to correct at this point. This gives false positives for broken
2836 functions that might use uninitialized data that happens to be
2837 allocated in eax, but who cares? */
2838 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->il.rtl->global_live_at_end, 0);
2839 }
2840
2841 /* Value is the number of bytes of arguments automatically
2842 popped when returning from a subroutine call.
2843 FUNDECL is the declaration node of the function (as a tree),
2844 FUNTYPE is the data type of the function (as a tree),
2845 or for a library call it is an identifier node for the subroutine name.
2846 SIZE is the number of bytes of arguments passed on the stack.
2847
2848 On the 80386, the RTD insn may be used to pop them if the number
2849 of args is fixed, but if the number is variable then the caller
2850 must pop them all. RTD can't be used for library calls now
2851 because the library is compiled with the Unix compiler.
2852 Use of RTD is a selectable option, since it is incompatible with
2853 standard Unix calling sequences. If the option is not selected,
2854 the caller must always pop the args.
2855
2856 The attribute stdcall is equivalent to RTD on a per module basis. */
2857
2858 int
2859 ix86_return_pops_args (tree fundecl, tree funtype, int size)
2860 {
2861 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
2862
2863 /* Cdecl functions override -mrtd, and never pop the stack. */
2864 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
2865
2866 /* Stdcall and fastcall functions will pop the stack unless they
2867 take variable arguments. */
2868 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
2869 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
2870 rtd = 1;
2871
2872 if (rtd
2873 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
2874 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
2875 == void_type_node)))
2876 return size;
2877 }
2878
2879 /* Lose any fake structure return argument if it is passed on the stack. */
2880 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
2881 && !TARGET_64BIT
2882 && !KEEP_AGGREGATE_RETURN_POINTER)
2883 {
2884 int nregs = ix86_function_regparm (funtype, fundecl);
2885
2886 if (!nregs)
2887 return GET_MODE_SIZE (Pmode);
2888 }
2889
2890 return 0;
2891 }
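/* Worked example for ix86_return_pops_args: for a 32-bit declaration

       int __attribute__ ((stdcall)) f (int a, int b);

   SIZE is 8, the argument list is fixed, and the function returns 8, so the
   callee pops its own arguments (e.g. via "ret $8").  A cdecl or varargs
   function returns 0 and leaves the popping to the caller.  */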
2892 \f
2893 /* Argument support functions. */
2894
2895 /* Return true when register may be used to pass function parameters. */
2896 bool
2897 ix86_function_arg_regno_p (int regno)
2898 {
2899 int i;
2900 if (!TARGET_64BIT)
2901 return (regno < REGPARM_MAX
2902 || (TARGET_MMX && MMX_REGNO_P (regno)
2903 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
2904 || (TARGET_SSE && SSE_REGNO_P (regno)
2905 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
2906
2907 if (TARGET_SSE && SSE_REGNO_P (regno)
2908 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
2909 return true;
2910 /* RAX is used as a hidden argument to varargs functions. */
2911 if (!regno)
2912 return true;
2913 for (i = 0; i < REGPARM_MAX; i++)
2914 if (regno == x86_64_int_parameter_registers[i])
2915 return true;
2916 return false;
2917 }
2918
2919 /* Return true if we do not know how to pass TYPE solely in registers. */
2920
2921 static bool
2922 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
2923 {
2924 if (must_pass_in_stack_var_size_or_pad (mode, type))
2925 return true;
2926
2927 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
2928 The layout_type routine is crafty and tries to trick us into passing
2929 currently unsupported vector types on the stack by using TImode. */
2930 return (!TARGET_64BIT && mode == TImode
2931 && type && TREE_CODE (type) != VECTOR_TYPE);
2932 }
2933
2934 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2935 for a call to a function whose data type is FNTYPE.
2936 For a library call, FNTYPE is 0. */
2937
2938 void
2939 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
2940 tree fntype, /* tree ptr for function decl */
2941 rtx libname, /* SYMBOL_REF of library name or 0 */
2942 tree fndecl)
2943 {
2944 static CUMULATIVE_ARGS zero_cum;
2945 tree param, next_param;
2946
2947 if (TARGET_DEBUG_ARG)
2948 {
2949 fprintf (stderr, "\ninit_cumulative_args (");
2950 if (fntype)
2951 fprintf (stderr, "fntype code = %s, ret code = %s",
2952 tree_code_name[(int) TREE_CODE (fntype)],
2953 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
2954 else
2955 fprintf (stderr, "no fntype");
2956
2957 if (libname)
2958 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
2959 }
2960
2961 *cum = zero_cum;
2962
2963 /* Set up the number of registers to use for passing arguments. */
2964 cum->nregs = ix86_regparm;
2965 if (TARGET_SSE)
2966 cum->sse_nregs = SSE_REGPARM_MAX;
2967 if (TARGET_MMX)
2968 cum->mmx_nregs = MMX_REGPARM_MAX;
2969 cum->warn_sse = true;
2970 cum->warn_mmx = true;
2971 cum->maybe_vaarg = false;
2972
2973 /* Use the ecx and edx registers if the function has the fastcall attribute,
2974 otherwise look for regparm information. */
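/* For instance (hypothetical prototypes): with
       __attribute__ ((fastcall)) int f (int, int, int);
   the first two integer arguments travel in %ecx and %edx and the third on
   the stack, while
       __attribute__ ((regparm (3))) int g (int, int, int);
   uses %eax, %edx and %ecx for all three.  */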
2975 if (fntype && !TARGET_64BIT)
2976 {
2977 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
2978 {
2979 cum->nregs = 2;
2980 cum->fastcall = 1;
2981 }
2982 else
2983 cum->nregs = ix86_function_regparm (fntype, fndecl);
2984 }
2985
2986 /* Set up the number of SSE registers used for passing SFmode
2987 and DFmode arguments. Warn for mismatching ABI. */
2988 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl);
2989
2990 /* Determine if this function has variable arguments. This is
2991 indicated by the last argument being 'void_type_node' if there
2992 are no variable arguments. If there are variable arguments, then
2993 we won't pass anything in registers in 32-bit mode. */
2994
2995 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
2996 {
2997 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
2998 param != 0; param = next_param)
2999 {
3000 next_param = TREE_CHAIN (param);
3001 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3002 {
3003 if (!TARGET_64BIT)
3004 {
3005 cum->nregs = 0;
3006 cum->sse_nregs = 0;
3007 cum->mmx_nregs = 0;
3008 cum->warn_sse = 0;
3009 cum->warn_mmx = 0;
3010 cum->fastcall = 0;
3011 cum->float_in_sse = 0;
3012 }
3013 cum->maybe_vaarg = true;
3014 }
3015 }
3016 }
3017 if ((!fntype && !libname)
3018 || (fntype && !TYPE_ARG_TYPES (fntype)))
3019 cum->maybe_vaarg = true;
3020
3021 if (TARGET_DEBUG_ARG)
3022 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
3023
3024 return;
3025 }
3026
3027 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
3028 But in the case of vector types, it is some vector mode.
3029
3030 When we have only some of our vector isa extensions enabled, then there
3031 are some modes for which vector_mode_supported_p is false. For these
3032 modes, the generic vector support in gcc will choose some non-vector mode
3033 in order to implement the type. By computing the natural mode, we'll
3034 select the proper ABI location for the operand and not depend on whatever
3035 the middle-end decides to do with these vector types. */
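/* For example (hedged sketch, hypothetical typedef): given
       typedef int v4si __attribute__ ((vector_size (16)));
   with SSE disabled, TYPE_MODE may be whatever non-vector mode the
   middle-end picked, but type_natural_mode still reports V4SImode so the
   argument is classified the way the ABI expects.  */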
3036
3037 static enum machine_mode
3038 type_natural_mode (tree type)
3039 {
3040 enum machine_mode mode = TYPE_MODE (type);
3041
3042 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
3043 {
3044 HOST_WIDE_INT size = int_size_in_bytes (type);
3045 if ((size == 8 || size == 16)
3046 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
3047 && TYPE_VECTOR_SUBPARTS (type) > 1)
3048 {
3049 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
3050
3051 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
3052 mode = MIN_MODE_VECTOR_FLOAT;
3053 else
3054 mode = MIN_MODE_VECTOR_INT;
3055
3056 /* Get the mode which has this inner mode and number of units. */
3057 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3058 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
3059 && GET_MODE_INNER (mode) == innermode)
3060 return mode;
3061
3062 gcc_unreachable ();
3063 }
3064 }
3065
3066 return mode;
3067 }
3068
3069 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
3070 this may not agree with the mode that the type system has chosen for the
3071 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
3072 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
3073
3074 static rtx
3075 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
3076 unsigned int regno)
3077 {
3078 rtx tmp;
3079
3080 if (orig_mode != BLKmode)
3081 tmp = gen_rtx_REG (orig_mode, regno);
3082 else
3083 {
3084 tmp = gen_rtx_REG (mode, regno);
3085 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
3086 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
3087 }
3088
3089 return tmp;
3090 }
3091
3092 /* x86-64 register passing implementation. See the x86-64 ABI for details. The
3093 goal of this code is to classify each 8-byte chunk of the incoming argument
3094 by register class and assign registers accordingly. */
3095
3096 /* Return the union class of CLASS1 and CLASS2.
3097 See the x86-64 PS ABI for details. */
3098
3099 static enum x86_64_reg_class
3100 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
3101 {
3102 /* Rule #1: If both classes are equal, this is the resulting class. */
3103 if (class1 == class2)
3104 return class1;
3105
3106 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
3107 the other class. */
3108 if (class1 == X86_64_NO_CLASS)
3109 return class2;
3110 if (class2 == X86_64_NO_CLASS)
3111 return class1;
3112
3113 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
3114 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
3115 return X86_64_MEMORY_CLASS;
3116
3117 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
3118 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
3119 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
3120 return X86_64_INTEGERSI_CLASS;
3121 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
3122 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
3123 return X86_64_INTEGER_CLASS;
3124
3125 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
3126 MEMORY is used. */
3127 if (class1 == X86_64_X87_CLASS
3128 || class1 == X86_64_X87UP_CLASS
3129 || class1 == X86_64_COMPLEX_X87_CLASS
3130 || class2 == X86_64_X87_CLASS
3131 || class2 == X86_64_X87UP_CLASS
3132 || class2 == X86_64_COMPLEX_X87_CLASS)
3133 return X86_64_MEMORY_CLASS;
3134
3135 /* Rule #6: Otherwise class SSE is used. */
3136 return X86_64_SSE_CLASS;
3137 }
3138
3139 /* Classify the argument of type TYPE and mode MODE.
3140 CLASSES will be filled by the register class used to pass each word
3141 of the operand. The number of words is returned. In case the parameter
3142 should be passed in memory, 0 is returned. As a special case for zero
3143 sized containers, classes[0] will be NO_CLASS and 1 is returned.
3144
3145 BIT_OFFSET is used internally for handling records; it is the bit offset
3146 of the current piece within the enclosing argument, taken modulo 256 to avoid overflow cases.
3147
3148 See the x86-64 PS ABI for details.
3149 */
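/* For illustration (hypothetical types, not from this file): under the
   rules above,
       struct { double d; int i; }
   spans two eightbytes classified { SSEDF, INTEGER }, so it is passed in one
   SSE register plus one integer register, while
       struct { int a; float b; }
   fits in a single eightbyte whose INTEGERSI and SSE halves merge to INTEGER
   (rule #4 above), so the whole struct travels in one GPR.  Aggregates larger
   than 16 bytes are classified MEMORY and go on the stack.  */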
3150
3151 static int
3152 classify_argument (enum machine_mode mode, tree type,
3153 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
3154 {
3155 HOST_WIDE_INT bytes =
3156 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3157 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3158
3159 /* Variable sized entities are always passed/returned in memory. */
3160 if (bytes < 0)
3161 return 0;
3162
3163 if (mode != VOIDmode
3164 && targetm.calls.must_pass_in_stack (mode, type))
3165 return 0;
3166
3167 if (type && AGGREGATE_TYPE_P (type))
3168 {
3169 int i;
3170 tree field;
3171 enum x86_64_reg_class subclasses[MAX_CLASSES];
3172
3173 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
3174 if (bytes > 16)
3175 return 0;
3176
3177 for (i = 0; i < words; i++)
3178 classes[i] = X86_64_NO_CLASS;
3179
3180 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
3181 signal the memory class, so handle this as a special case. */
3182 if (!words)
3183 {
3184 classes[0] = X86_64_NO_CLASS;
3185 return 1;
3186 }
3187
3188 /* Classify each field of record and merge classes. */
3189 switch (TREE_CODE (type))
3190 {
3191 case RECORD_TYPE:
3192 /* And now merge the fields of structure. */
3193 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3194 {
3195 if (TREE_CODE (field) == FIELD_DECL)
3196 {
3197 int num;
3198
3199 if (TREE_TYPE (field) == error_mark_node)
3200 continue;
3201
3202 /* Bitfields are always classified as integer. Handle them
3203 early, since later code would consider them to be
3204 misaligned integers. */
3205 if (DECL_BIT_FIELD (field))
3206 {
3207 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3208 i < ((int_bit_position (field) + (bit_offset % 64))
3209 + tree_low_cst (DECL_SIZE (field), 0)
3210 + 63) / 8 / 8; i++)
3211 classes[i] =
3212 merge_classes (X86_64_INTEGER_CLASS,
3213 classes[i]);
3214 }
3215 else
3216 {
3217 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3218 TREE_TYPE (field), subclasses,
3219 (int_bit_position (field)
3220 + bit_offset) % 256);
3221 if (!num)
3222 return 0;
3223 for (i = 0; i < num; i++)
3224 {
3225 int pos =
3226 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3227 classes[i + pos] =
3228 merge_classes (subclasses[i], classes[i + pos]);
3229 }
3230 }
3231 }
3232 }
3233 break;
3234
3235 case ARRAY_TYPE:
3236 /* Arrays are handled as small records. */
3237 {
3238 int num;
3239 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
3240 TREE_TYPE (type), subclasses, bit_offset);
3241 if (!num)
3242 return 0;
3243
3244 /* The partial classes are now full classes. */
3245 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
3246 subclasses[0] = X86_64_SSE_CLASS;
3247 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
3248 subclasses[0] = X86_64_INTEGER_CLASS;
3249
3250 for (i = 0; i < words; i++)
3251 classes[i] = subclasses[i % num];
3252
3253 break;
3254 }
3255 case UNION_TYPE:
3256 case QUAL_UNION_TYPE:
3257 /* Unions are similar to RECORD_TYPE but offset is always 0.
3258 */
3259 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3260 {
3261 if (TREE_CODE (field) == FIELD_DECL)
3262 {
3263 int num;
3264
3265 if (TREE_TYPE (field) == error_mark_node)
3266 continue;
3267
3268 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3269 TREE_TYPE (field), subclasses,
3270 bit_offset);
3271 if (!num)
3272 return 0;
3273 for (i = 0; i < num; i++)
3274 classes[i] = merge_classes (subclasses[i], classes[i]);
3275 }
3276 }
3277 break;
3278
3279 default:
3280 gcc_unreachable ();
3281 }
3282
3283 /* Final merger cleanup. */
3284 for (i = 0; i < words; i++)
3285 {
3286 /* If one class is MEMORY, everything should be passed in
3287 memory. */
3288 if (classes[i] == X86_64_MEMORY_CLASS)
3289 return 0;
3290
3291 /* The X86_64_SSEUP_CLASS should be always preceded by
3292 X86_64_SSE_CLASS. */
3293 if (classes[i] == X86_64_SSEUP_CLASS
3294 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
3295 classes[i] = X86_64_SSE_CLASS;
3296
3297 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
3298 if (classes[i] == X86_64_X87UP_CLASS
3299 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
3300 classes[i] = X86_64_SSE_CLASS;
3301 }
3302 return words;
3303 }
3304
3305 /* Compute the alignment needed. We align all types to their natural boundaries,
3306 except XFmode and XCmode, which are padded out to 128 and 256 bits respectively. */
3307 if (mode != VOIDmode && mode != BLKmode)
3308 {
3309 int mode_alignment = GET_MODE_BITSIZE (mode);
3310
3311 if (mode == XFmode)
3312 mode_alignment = 128;
3313 else if (mode == XCmode)
3314 mode_alignment = 256;
3315 if (COMPLEX_MODE_P (mode))
3316 mode_alignment /= 2;
3317 /* Misaligned fields are always returned in memory. */
3318 if (bit_offset % mode_alignment)
3319 return 0;
3320 }
3321
3322 /* For V1xx modes, just use the base mode. */
3323 if (VECTOR_MODE_P (mode)
3324 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
3325 mode = GET_MODE_INNER (mode);
3326
3327 /* Classification of atomic types. */
3328 switch (mode)
3329 {
3330 case SDmode:
3331 case DDmode:
3332 classes[0] = X86_64_SSE_CLASS;
3333 return 1;
3334 case TDmode:
3335 classes[0] = X86_64_SSE_CLASS;
3336 classes[1] = X86_64_SSEUP_CLASS;
3337 return 2;
3338 case DImode:
3339 case SImode:
3340 case HImode:
3341 case QImode:
3342 case CSImode:
3343 case CHImode:
3344 case CQImode:
3345 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3346 classes[0] = X86_64_INTEGERSI_CLASS;
3347 else
3348 classes[0] = X86_64_INTEGER_CLASS;
3349 return 1;
3350 case CDImode:
3351 case TImode:
3352 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
3353 return 2;
3354 case CTImode:
3355 return 0;
3356 case SFmode:
3357 if (!(bit_offset % 64))
3358 classes[0] = X86_64_SSESF_CLASS;
3359 else
3360 classes[0] = X86_64_SSE_CLASS;
3361 return 1;
3362 case DFmode:
3363 classes[0] = X86_64_SSEDF_CLASS;
3364 return 1;
3365 case XFmode:
3366 classes[0] = X86_64_X87_CLASS;
3367 classes[1] = X86_64_X87UP_CLASS;
3368 return 2;
3369 case TFmode:
3370 classes[0] = X86_64_SSE_CLASS;
3371 classes[1] = X86_64_SSEUP_CLASS;
3372 return 2;
3373 case SCmode:
3374 classes[0] = X86_64_SSE_CLASS;
3375 return 1;
3376 case DCmode:
3377 classes[0] = X86_64_SSEDF_CLASS;
3378 classes[1] = X86_64_SSEDF_CLASS;
3379 return 2;
3380 case XCmode:
3381 classes[0] = X86_64_COMPLEX_X87_CLASS;
3382 return 1;
3383 case TCmode:
3384 /* This mode is larger than 16 bytes. */
3385 return 0;
3386 case V4SFmode:
3387 case V4SImode:
3388 case V16QImode:
3389 case V8HImode:
3390 case V2DFmode:
3391 case V2DImode:
3392 classes[0] = X86_64_SSE_CLASS;
3393 classes[1] = X86_64_SSEUP_CLASS;
3394 return 2;
3395 case V2SFmode:
3396 case V2SImode:
3397 case V4HImode:
3398 case V8QImode:
3399 classes[0] = X86_64_SSE_CLASS;
3400 return 1;
3401 case BLKmode:
3402 case VOIDmode:
3403 return 0;
3404 default:
3405 gcc_assert (VECTOR_MODE_P (mode));
3406
3407 if (bytes > 16)
3408 return 0;
3409
3410 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
3411
3412 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3413 classes[0] = X86_64_INTEGERSI_CLASS;
3414 else
3415 classes[0] = X86_64_INTEGER_CLASS;
3416 classes[1] = X86_64_INTEGER_CLASS;
3417 return 1 + (bytes > 8);
3418 }
3419 }
3420
3421 /* Examine the argument and set the number of registers needed in each
3422 class. Return 0 iff the parameter should be passed in memory. */
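/* E.g. for the hypothetical struct { double d; int i; } discussed above, this
   would set *sse_nregs = 1 and *int_nregs = 1 and return nonzero; for an
   aggregate larger than 16 bytes it returns 0 (memory).  */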
3423 static int
3424 examine_argument (enum machine_mode mode, tree type, int in_return,
3425 int *int_nregs, int *sse_nregs)
3426 {
3427 enum x86_64_reg_class class[MAX_CLASSES];
3428 int n = classify_argument (mode, type, class, 0);
3429
3430 *int_nregs = 0;
3431 *sse_nregs = 0;
3432 if (!n)
3433 return 0;
3434 for (n--; n >= 0; n--)
3435 switch (class[n])
3436 {
3437 case X86_64_INTEGER_CLASS:
3438 case X86_64_INTEGERSI_CLASS:
3439 (*int_nregs)++;
3440 break;
3441 case X86_64_SSE_CLASS:
3442 case X86_64_SSESF_CLASS:
3443 case X86_64_SSEDF_CLASS:
3444 (*sse_nregs)++;
3445 break;
3446 case X86_64_NO_CLASS:
3447 case X86_64_SSEUP_CLASS:
3448 break;
3449 case X86_64_X87_CLASS:
3450 case X86_64_X87UP_CLASS:
3451 if (!in_return)
3452 return 0;
3453 break;
3454 case X86_64_COMPLEX_X87_CLASS:
3455 return in_return ? 2 : 0;
3456 case X86_64_MEMORY_CLASS:
3457 gcc_unreachable ();
3458 }
3459 return 1;
3460 }
3461
3462 /* Construct container for the argument used by GCC interface. See
3463 FUNCTION_ARG for the detailed description. */
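/* Sketch (hypothetical case): for the struct { double d; int i; } example
   above this builds a PARALLEL of two EXPR_LISTs, roughly
       (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                  (expr_list (reg:DI di)   (const_int 8))])
   i.e. the first eightbyte lands in an SSE register at offset 0 and the
   second in an integer register at offset 8.  */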
3464
3465 static rtx
3466 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
3467 tree type, int in_return, int nintregs, int nsseregs,
3468 const int *intreg, int sse_regno)
3469 {
3470 /* The following variables hold the static issued_error state. */
3471 static bool issued_sse_arg_error;
3472 static bool issued_sse_ret_error;
3473 static bool issued_x87_ret_error;
3474
3475 enum machine_mode tmpmode;
3476 int bytes =
3477 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3478 enum x86_64_reg_class class[MAX_CLASSES];
3479 int n;
3480 int i;
3481 int nexps = 0;
3482 int needed_sseregs, needed_intregs;
3483 rtx exp[MAX_CLASSES];
3484 rtx ret;
3485
3486 n = classify_argument (mode, type, class, 0);
3487 if (TARGET_DEBUG_ARG)
3488 {
3489 if (!n)
3490 fprintf (stderr, "Memory class\n");
3491 else
3492 {
3493 fprintf (stderr, "Classes:");
3494 for (i = 0; i < n; i++)
3495 {
3496 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
3497 }
3498 fprintf (stderr, "\n");
3499 }
3500 }
3501 if (!n)
3502 return NULL;
3503 if (!examine_argument (mode, type, in_return, &needed_intregs,
3504 &needed_sseregs))
3505 return NULL;
3506 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
3507 return NULL;
3508
3509 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
3510 some less clueful developer tries to use floating-point anyway. */
3511 if (needed_sseregs && !TARGET_SSE)
3512 {
3513 if (in_return)
3514 {
3515 if (!issued_sse_ret_error)
3516 {
3517 error ("SSE register return with SSE disabled");
3518 issued_sse_ret_error = true;
3519 }
3520 }
3521 else if (!issued_sse_arg_error)
3522 {
3523 error ("SSE register argument with SSE disabled");
3524 issued_sse_arg_error = true;
3525 }
3526 return NULL;
3527 }
3528
3529 /* Likewise, error if the ABI requires us to return values in the
3530 x87 registers and the user specified -mno-80387. */
3531 if (!TARGET_80387 && in_return)
3532 for (i = 0; i < n; i++)
3533 if (class[i] == X86_64_X87_CLASS
3534 || class[i] == X86_64_X87UP_CLASS
3535 || class[i] == X86_64_COMPLEX_X87_CLASS)
3536 {
3537 if (!issued_x87_ret_error)
3538 {
3539 error ("x87 register return with x87 disabled");
3540 issued_x87_ret_error = true;
3541 }
3542 return NULL;
3543 }
3544
3545 /* First construct simple cases. Avoid SCmode, since we want to use
3546 a single register to pass this type. */
3547 if (n == 1 && mode != SCmode)
3548 switch (class[0])
3549 {
3550 case X86_64_INTEGER_CLASS:
3551 case X86_64_INTEGERSI_CLASS:
3552 return gen_rtx_REG (mode, intreg[0]);
3553 case X86_64_SSE_CLASS:
3554 case X86_64_SSESF_CLASS:
3555 case X86_64_SSEDF_CLASS:
3556 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
3557 case X86_64_X87_CLASS:
3558 case X86_64_COMPLEX_X87_CLASS:
3559 return gen_rtx_REG (mode, FIRST_STACK_REG);
3560 case X86_64_NO_CLASS:
3561 /* Zero sized array, struct or class. */
3562 return NULL;
3563 default:
3564 gcc_unreachable ();
3565 }
3566 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
3567 && mode != BLKmode)
3568 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
3569 if (n == 2
3570 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
3571 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
3572 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
3573 && class[1] == X86_64_INTEGER_CLASS
3574 && (mode == CDImode || mode == TImode || mode == TFmode)
3575 && intreg[0] + 1 == intreg[1])
3576 return gen_rtx_REG (mode, intreg[0]);
3577
3578 /* Otherwise figure out the entries of the PARALLEL. */
3579 for (i = 0; i < n; i++)
3580 {
3581 switch (class[i])
3582 {
3583 case X86_64_NO_CLASS:
3584 break;
3585 case X86_64_INTEGER_CLASS:
3586 case X86_64_INTEGERSI_CLASS:
3587 /* Merge TImodes on aligned occasions here too. */
3588 if (i * 8 + 8 > bytes)
3589 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
3590 else if (class[i] == X86_64_INTEGERSI_CLASS)
3591 tmpmode = SImode;
3592 else
3593 tmpmode = DImode;
3594 /* We've requested a size for which there is no integer mode (e.g. 24 bits). Use DImode. */
3595 if (tmpmode == BLKmode)
3596 tmpmode = DImode;
3597 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3598 gen_rtx_REG (tmpmode, *intreg),
3599 GEN_INT (i*8));
3600 intreg++;
3601 break;
3602 case X86_64_SSESF_CLASS:
3603 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3604 gen_rtx_REG (SFmode,
3605 SSE_REGNO (sse_regno)),
3606 GEN_INT (i*8));
3607 sse_regno++;
3608 break;
3609 case X86_64_SSEDF_CLASS:
3610 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3611 gen_rtx_REG (DFmode,
3612 SSE_REGNO (sse_regno)),
3613 GEN_INT (i*8));
3614 sse_regno++;
3615 break;
3616 case X86_64_SSE_CLASS:
3617 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
3618 tmpmode = TImode;
3619 else
3620 tmpmode = DImode;
3621 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3622 gen_rtx_REG (tmpmode,
3623 SSE_REGNO (sse_regno)),
3624 GEN_INT (i*8));
3625 if (tmpmode == TImode)
3626 i++;
3627 sse_regno++;
3628 break;
3629 default:
3630 gcc_unreachable ();
3631 }
3632 }
3633
3634 /* Empty aligned struct, union or class. */
3635 if (nexps == 0)
3636 return NULL;
3637
3638 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
3639 for (i = 0; i < nexps; i++)
3640 XVECEXP (ret, 0, i) = exp [i];
3641 return ret;
3642 }
3643
3644 /* Update the data in CUM to advance over an argument
3645 of mode MODE and data type TYPE.
3646 (TYPE is null for libcalls where that information may not be available.) */
3647
3648 void
3649 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3650 tree type, int named)
3651 {
3652 int bytes =
3653 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3654 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3655
3656 if (type)
3657 mode = type_natural_mode (type);
3658
3659 if (TARGET_DEBUG_ARG)
3660 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
3661 "mode=%s, named=%d)\n\n",
3662 words, cum->words, cum->nregs, cum->sse_nregs,
3663 GET_MODE_NAME (mode), named);
3664
3665 if (TARGET_64BIT)
3666 {
3667 int int_nregs, sse_nregs;
3668 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
3669 cum->words += words;
3670 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
3671 {
3672 cum->nregs -= int_nregs;
3673 cum->sse_nregs -= sse_nregs;
3674 cum->regno += int_nregs;
3675 cum->sse_regno += sse_nregs;
3676 }
3677 else
3678 cum->words += words;
3679 }
3680 else
3681 {
3682 switch (mode)
3683 {
3684 default:
3685 break;
3686
3687 case BLKmode:
3688 if (bytes < 0)
3689 break;
3690 /* FALLTHRU */
3691
3692 case DImode:
3693 case SImode:
3694 case HImode:
3695 case QImode:
3696 cum->words += words;
3697 cum->nregs -= words;
3698 cum->regno += words;
3699
3700 if (cum->nregs <= 0)
3701 {
3702 cum->nregs = 0;
3703 cum->regno = 0;
3704 }
3705 break;
3706
3707 case DFmode:
3708 if (cum->float_in_sse < 2)
3709 break;
3710 case SFmode:
3711 if (cum->float_in_sse < 1)
3712 break;
3713 /* FALLTHRU */
3714
3715 case TImode:
3716 case V16QImode:
3717 case V8HImode:
3718 case V4SImode:
3719 case V2DImode:
3720 case V4SFmode:
3721 case V2DFmode:
3722 if (!type || !AGGREGATE_TYPE_P (type))
3723 {
3724 cum->sse_words += words;
3725 cum->sse_nregs -= 1;
3726 cum->sse_regno += 1;
3727 if (cum->sse_nregs <= 0)
3728 {
3729 cum->sse_nregs = 0;
3730 cum->sse_regno = 0;
3731 }
3732 }
3733 break;
3734
3735 case V8QImode:
3736 case V4HImode:
3737 case V2SImode:
3738 case V2SFmode:
3739 if (!type || !AGGREGATE_TYPE_P (type))
3740 {
3741 cum->mmx_words += words;
3742 cum->mmx_nregs -= 1;
3743 cum->mmx_regno += 1;
3744 if (cum->mmx_nregs <= 0)
3745 {
3746 cum->mmx_nregs = 0;
3747 cum->mmx_regno = 0;
3748 }
3749 }
3750 break;
3751 }
3752 }
3753 }
3754
3755 /* Define where to put the arguments to a function.
3756 Value is zero to push the argument on the stack,
3757 or a hard register in which to store the argument.
3758
3759 MODE is the argument's machine mode.
3760 TYPE is the data type of the argument (as a tree).
3761 This is null for libcalls where that information may
3762 not be available.
3763 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3764 the preceding args and about the function being called.
3765 NAMED is nonzero if this argument is a named parameter
3766 (otherwise it is an extra parameter matching an ellipsis). */
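/* Quick reference (per the x86-64 ABI, for orientation): in 64-bit mode the
   first six integer/pointer arguments go in %rdi, %rsi, %rdx, %rcx, %r8 and
   %r9 and the first eight FP/vector arguments in %xmm0-%xmm7; anything that
   does not fit is passed on the stack.  In 32-bit mode everything defaults to
   the stack unless regparm/fastcall/sseregparm (handled elsewhere in this
   file) says otherwise.  */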
3767
3768 rtx
3769 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
3770 tree type, int named)
3771 {
3772 enum machine_mode mode = orig_mode;
3773 rtx ret = NULL_RTX;
3774 int bytes =
3775 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3776 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3777 static bool warnedsse, warnedmmx;
3778
3779 /* To simplify the code below, represent vector types with a vector mode
3780 even if MMX/SSE are not active. */
3781 if (type && TREE_CODE (type) == VECTOR_TYPE)
3782 mode = type_natural_mode (type);
3783
3784 /* Handle a hidden AL argument containing the number of SSE registers used
3785 when calling a varargs x86-64 function. For the i386 ABI just return
3786 constm1_rtx to avoid any AL settings. */
3787 if (mode == VOIDmode)
3788 {
3789 if (TARGET_64BIT)
3790 return GEN_INT (cum->maybe_vaarg
3791 ? (cum->sse_nregs < 0
3792 ? SSE_REGPARM_MAX
3793 : cum->sse_regno)
3794 : -1);
3795 else
3796 return constm1_rtx;
3797 }
3798 if (TARGET_64BIT)
3799 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
3800 cum->sse_nregs,
3801 &x86_64_int_parameter_registers [cum->regno],
3802 cum->sse_regno);
3803 else
3804 switch (mode)
3805 {
3806 /* For now, pass fp/complex values on the stack. */
3807 default:
3808 break;
3809
3810 case BLKmode:
3811 if (bytes < 0)
3812 break;
3813 /* FALLTHRU */
3814 case DImode:
3815 case SImode:
3816 case HImode:
3817 case QImode:
3818 if (words <= cum->nregs)
3819 {
3820 int regno = cum->regno;
3821
3822 /* Fastcall allocates the first two DWORD (SImode) or
3823 smaller arguments to ECX and EDX. */
3824 if (cum->fastcall)
3825 {
3826 if (mode == BLKmode || mode == DImode)
3827 break;
3828
3829 /* ECX, not EAX, is the first allocated register. */
3830 if (regno == 0)
3831 regno = 2;
3832 }
3833 ret = gen_rtx_REG (mode, regno);
3834 }
3835 break;
3836 case DFmode:
3837 if (cum->float_in_sse < 2)
3838 break;
3839 case SFmode:
3840 if (cum->float_in_sse < 1)
3841 break;
3842 /* FALLTHRU */
3843 case TImode:
3844 case V16QImode:
3845 case V8HImode:
3846 case V4SImode:
3847 case V2DImode:
3848 case V4SFmode:
3849 case V2DFmode:
3850 if (!type || !AGGREGATE_TYPE_P (type))
3851 {
3852 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
3853 {
3854 warnedsse = true;
3855 warning (0, "SSE vector argument without SSE enabled "
3856 "changes the ABI");
3857 }
3858 if (cum->sse_nregs)
3859 ret = gen_reg_or_parallel (mode, orig_mode,
3860 cum->sse_regno + FIRST_SSE_REG);
3861 }
3862 break;
3863 case V8QImode:
3864 case V4HImode:
3865 case V2SImode:
3866 case V2SFmode:
3867 if (!type || !AGGREGATE_TYPE_P (type))
3868 {
3869 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
3870 {
3871 warnedmmx = true;
3872 warning (0, "MMX vector argument without MMX enabled "
3873 "changes the ABI");
3874 }
3875 if (cum->mmx_nregs)
3876 ret = gen_reg_or_parallel (mode, orig_mode,
3877 cum->mmx_regno + FIRST_MMX_REG);
3878 }
3879 break;
3880 }
3881
3882 if (TARGET_DEBUG_ARG)
3883 {
3884 fprintf (stderr,
3885 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
3886 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
3887
3888 if (ret)
3889 print_simple_rtl (stderr, ret);
3890 else
3891 fprintf (stderr, ", stack");
3892
3893 fprintf (stderr, " )\n");
3894 }
3895
3896 return ret;
3897 }
3898
3899 /* A C expression that indicates when an argument must be passed by
3900 reference. If nonzero for an argument, a copy of that argument is
3901 made in memory and a pointer to the argument is passed instead of
3902 the argument itself. The pointer is passed in whatever way is
3903 appropriate for passing a pointer to that type. */
3904
3905 static bool
3906 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3907 enum machine_mode mode ATTRIBUTE_UNUSED,
3908 tree type, bool named ATTRIBUTE_UNUSED)
3909 {
3910 if (!TARGET_64BIT)
3911 return 0;
3912
3913 if (type && int_size_in_bytes (type) == -1)
3914 {
3915 if (TARGET_DEBUG_ARG)
3916 fprintf (stderr, "function_arg_pass_by_reference\n");
3917 return 1;
3918 }
3919
3920 return 0;
3921 }
3922
3923 /* Return true when TYPE should be 128-bit aligned for the 32-bit argument
3924 passing ABI. Only called if TARGET_SSE. */
3925 static bool
3926 contains_128bit_aligned_vector_p (tree type)
3927 {
3928 enum machine_mode mode = TYPE_MODE (type);
3929 if (SSE_REG_MODE_P (mode)
3930 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
3931 return true;
3932 if (TYPE_ALIGN (type) < 128)
3933 return false;
3934
3935 if (AGGREGATE_TYPE_P (type))
3936 {
3937 /* Walk the aggregates recursively. */
3938 switch (TREE_CODE (type))
3939 {
3940 case RECORD_TYPE:
3941 case UNION_TYPE:
3942 case QUAL_UNION_TYPE:
3943 {
3944 tree field;
3945
3946 /* Walk all the structure fields. */
3947 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3948 {
3949 if (TREE_CODE (field) == FIELD_DECL
3950 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
3951 return true;
3952 }
3953 break;
3954 }
3955
3956 case ARRAY_TYPE:
3957 /* Just in case some language passes arrays by value. */
3958 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
3959 return true;
3960 break;
3961
3962 default:
3963 gcc_unreachable ();
3964 }
3965 }
3966 return false;
3967 }
3968
3969 /* Gives the alignment boundary, in bits, of an argument with the
3970 specified mode and type. */
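/* For example (32-bit case, hypothetical typedef): a plain double argument
   keeps the 32-bit PARM_BOUNDARY, while a 16-byte vector argument such as
       typedef float v4sf __attribute__ ((vector_size (16)));
   is aligned to 128 bits, matching the SSE requirement noted below.  */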
3971
3972 int
3973 ix86_function_arg_boundary (enum machine_mode mode, tree type)
3974 {
3975 int align;
3976 if (type)
3977 align = TYPE_ALIGN (type);
3978 else
3979 align = GET_MODE_ALIGNMENT (mode);
3980 if (align < PARM_BOUNDARY)
3981 align = PARM_BOUNDARY;
3982 if (!TARGET_64BIT)
3983 {
3984 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
3985 make an exception for SSE modes since these require 128bit
3986 alignment.
3987
3988 The handling here differs from field_alignment. ICC aligns MMX
3989 arguments to 4 byte boundaries, while structure fields are aligned
3990 to 8 byte boundaries. */
3991 if (!TARGET_SSE)
3992 align = PARM_BOUNDARY;
3993 else if (!type)
3994 {
3995 if (!SSE_REG_MODE_P (mode))
3996 align = PARM_BOUNDARY;
3997 }
3998 else
3999 {
4000 if (!contains_128bit_aligned_vector_p (type))
4001 align = PARM_BOUNDARY;
4002 }
4003 }
4004 if (align > 128)
4005 align = 128;
4006 return align;
4007 }
4008
4009 /* Return true if REGNO is a possible register number for a function value. */
4010 bool
4011 ix86_function_value_regno_p (int regno)
4012 {
4013 if (regno == 0
4014 || (regno == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
4015 || (regno == FIRST_SSE_REG && TARGET_SSE))
4016 return true;
4017
4018 if (!TARGET_64BIT
4019 && (regno == FIRST_MMX_REG && TARGET_MMX))
4020 return true;
4021
4022 return false;
4023 }
4024
4025 /* Define how to find the value returned by a function.
4026 VALTYPE is the data type of the value (as a tree).
4027 If the precise function being called is known, FUNC is its FUNCTION_DECL;
4028 otherwise, FUNC is 0. */
4029 rtx
4030 ix86_function_value (tree valtype, tree fntype_or_decl,
4031 bool outgoing ATTRIBUTE_UNUSED)
4032 {
4033 enum machine_mode natmode = type_natural_mode (valtype);
4034
4035 if (TARGET_64BIT)
4036 {
4037 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
4038 1, REGPARM_MAX, SSE_REGPARM_MAX,
4039 x86_64_int_return_registers, 0);
4040 /* For zero sized structures, construct_container returns NULL, but we
4041 need to keep the rest of the compiler happy by returning a meaningful value. */
4042 if (!ret)
4043 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
4044 return ret;
4045 }
4046 else
4047 {
4048 tree fn = NULL_TREE, fntype;
4049 if (fntype_or_decl
4050 && DECL_P (fntype_or_decl))
4051 fn = fntype_or_decl;
4052 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
4053 return gen_rtx_REG (TYPE_MODE (valtype),
4054 ix86_value_regno (natmode, fn, fntype));
4055 }
4056 }
4057
4058 /* Return true iff TYPE is returned in memory. */
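/* For example (hedged): with -m32 a 16-byte struct is returned via a hidden
   pointer (in memory), whereas with -m64 a struct of two doubles comes back
   in %xmm0/%xmm1 because examine_argument classifies both eightbytes as
   SSE.  */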
4059 int
4060 ix86_return_in_memory (tree type)
4061 {
4062 int needed_intregs, needed_sseregs, size;
4063 enum machine_mode mode = type_natural_mode (type);
4064
4065 if (TARGET_64BIT)
4066 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
4067
4068 if (mode == BLKmode)
4069 return 1;
4070
4071 size = int_size_in_bytes (type);
4072
4073 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
4074 return 0;
4075
4076 if (VECTOR_MODE_P (mode) || mode == TImode)
4077 {
4078 /* User-created vectors small enough to fit in EAX. */
4079 if (size < 8)
4080 return 0;
4081
4082 /* MMX/3dNow values are returned in MM0,
4083 except when it doesn't exist. */
4084 if (size == 8)
4085 return (TARGET_MMX ? 0 : 1);
4086
4087 /* SSE values are returned in XMM0, except when it doesn't exist. */
4088 if (size == 16)
4089 return (TARGET_SSE ? 0 : 1);
4090 }
4091
4092 if (mode == XFmode)
4093 return 0;
4094
4095 if (mode == TDmode)
4096 return 1;
4097
4098 if (size > 12)
4099 return 1;
4100 return 0;
4101 }
4102
4103 /* When returning SSE vector types, we have a choice of either
4104 (1) being ABI incompatible with a -march switch, or
4105 (2) generating an error.
4106 Given no good solution, I think the safest thing is one warning.
4107 The user won't be able to use -Werror, but....
4108
4109 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
4110 called in response to actually generating a caller or callee that
4111 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
4112 via aggregate_value_p for general type probing from tree-ssa. */
4113
4114 static rtx
4115 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
4116 {
4117 static bool warnedsse, warnedmmx;
4118
4119 if (type)
4120 {
4121 /* Look at the return type of the function, not the function type. */
4122 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
4123
4124 if (!TARGET_SSE && !warnedsse)
4125 {
4126 if (mode == TImode
4127 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4128 {
4129 warnedsse = true;
4130 warning (0, "SSE vector return without SSE enabled "
4131 "changes the ABI");
4132 }
4133 }
4134
4135 if (!TARGET_MMX && !warnedmmx)
4136 {
4137 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4138 {
4139 warnedmmx = true;
4140 warning (0, "MMX vector return without MMX enabled "
4141 "changes the ABI");
4142 }
4143 }
4144 }
4145
4146 return NULL;
4147 }
4148
4149 /* Define how to find the value returned by a library function
4150 assuming the value has mode MODE. */
4151 rtx
4152 ix86_libcall_value (enum machine_mode mode)
4153 {
4154 if (TARGET_64BIT)
4155 {
4156 switch (mode)
4157 {
4158 case SFmode:
4159 case SCmode:
4160 case DFmode:
4161 case DCmode:
4162 case TFmode:
4163 case SDmode:
4164 case DDmode:
4165 case TDmode:
4166 return gen_rtx_REG (mode, FIRST_SSE_REG);
4167 case XFmode:
4168 case XCmode:
4169 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
4170 case TCmode:
4171 return NULL;
4172 default:
4173 return gen_rtx_REG (mode, 0);
4174 }
4175 }
4176 else
4177 return gen_rtx_REG (mode, ix86_value_regno (mode, NULL, NULL));
4178 }
4179
4180 /* Given a mode, return the register to use for a return value. */
4181
4182 static int
4183 ix86_value_regno (enum machine_mode mode, tree func, tree fntype)
4184 {
4185 gcc_assert (!TARGET_64BIT);
4186
4187 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
4188 we normally prevent this case when mmx is not available. However
4189 some ABIs may require the result to be returned like DImode. */
4190 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4191 return TARGET_MMX ? FIRST_MMX_REG : 0;
4192
4193 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
4194 we prevent this case when sse is not available. However some ABIs
4195 may require the result to be returned like integer TImode. */
4196 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4197 return TARGET_SSE ? FIRST_SSE_REG : 0;
4198
4199 /* Decimal floating point values can go in %eax, unlike other float modes. */
4200 if (DECIMAL_FLOAT_MODE_P (mode))
4201 return 0;
4202
4203 /* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
4204 if (!SCALAR_FLOAT_MODE_P (mode) || !TARGET_FLOAT_RETURNS_IN_80387)
4205 return 0;
4206
4207 /* Floating point return values in %st(0), except for local functions when
4208 SSE math is enabled or for functions with sseregparm attribute. */
4209 if ((func || fntype)
4210 && (mode == SFmode || mode == DFmode))
4211 {
4212 int sse_level = ix86_function_sseregparm (fntype, func);
4213 if ((sse_level >= 1 && mode == SFmode)
4214 || (sse_level == 2 && mode == DFmode))
4215 return FIRST_SSE_REG;
4216 }
4217
4218 return FIRST_FLOAT_REG;
4219 }
4220 \f
4221 /* Create the va_list data type. */
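/* The 64-bit record built below corresponds to the ABI-mandated layout,
   roughly equivalent to:

       typedef struct __va_list_tag {
         unsigned int gp_offset;
         unsigned int fp_offset;
         void *overflow_arg_area;
         void *reg_save_area;
       } __va_list_tag;
       typedef __va_list_tag va_list[1];
*/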
4222
4223 static tree
4224 ix86_build_builtin_va_list (void)
4225 {
4226 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
4227
4228 /* For i386 we use a plain pointer to the argument area. */
4229 if (!TARGET_64BIT)
4230 return build_pointer_type (char_type_node);
4231
4232 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4233 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
4234
4235 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
4236 unsigned_type_node);
4237 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
4238 unsigned_type_node);
4239 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
4240 ptr_type_node);
4241 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
4242 ptr_type_node);
4243
4244 va_list_gpr_counter_field = f_gpr;
4245 va_list_fpr_counter_field = f_fpr;
4246
4247 DECL_FIELD_CONTEXT (f_gpr) = record;
4248 DECL_FIELD_CONTEXT (f_fpr) = record;
4249 DECL_FIELD_CONTEXT (f_ovf) = record;
4250 DECL_FIELD_CONTEXT (f_sav) = record;
4251
4252 TREE_CHAIN (record) = type_decl;
4253 TYPE_NAME (record) = type_decl;
4254 TYPE_FIELDS (record) = f_gpr;
4255 TREE_CHAIN (f_gpr) = f_fpr;
4256 TREE_CHAIN (f_fpr) = f_ovf;
4257 TREE_CHAIN (f_ovf) = f_sav;
4258
4259 layout_type (record);
4260
4261 /* The correct type is an array type of one element. */
4262 return build_array_type (record, build_index_type (size_zero_node));
4263 }
4264
4265 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
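/* Layout sketch of the register save area set up below (x86-64 only): six
   integer registers * 8 bytes occupy offsets 0..47 and eight SSE registers *
   16 bytes occupy offsets 48..175, so the whole block is
   8*REGPARM_MAX + 16*SSE_REGPARM_MAX = 176 bytes; the va_list fields
   gp_offset and fp_offset index into it.  */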
4266
4267 static void
4268 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4269 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4270 int no_rtl)
4271 {
4272 CUMULATIVE_ARGS next_cum;
4273 rtx save_area = NULL_RTX, mem;
4274 rtx label;
4275 rtx label_ref;
4276 rtx tmp_reg;
4277 rtx nsse_reg;
4278 int set;
4279 tree fntype;
4280 int stdarg_p;
4281 int i;
4282
4283 if (!TARGET_64BIT)
4284 return;
4285
4286 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
4287 return;
4288
4289 /* Indicate that space must be allocated on the stack for the varargs save area. */
4290 ix86_save_varrargs_registers = 1;
4291
4292 cfun->stack_alignment_needed = 128;
4293
4294 fntype = TREE_TYPE (current_function_decl);
4295 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
4296 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
4297 != void_type_node));
4298
4299 /* For varargs, we do not want to skip the dummy va_dcl argument.
4300 For stdargs, we do want to skip the last named argument. */
4301 next_cum = *cum;
4302 if (stdarg_p)
4303 function_arg_advance (&next_cum, mode, type, 1);
4304
4305 if (!no_rtl)
4306 save_area = frame_pointer_rtx;
4307
4308 set = get_varargs_alias_set ();
4309
4310 for (i = next_cum.regno;
4311 i < ix86_regparm
4312 && i < next_cum.regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
4313 i++)
4314 {
4315 mem = gen_rtx_MEM (Pmode,
4316 plus_constant (save_area, i * UNITS_PER_WORD));
4317 MEM_NOTRAP_P (mem) = 1;
4318 set_mem_alias_set (mem, set);
4319 emit_move_insn (mem, gen_rtx_REG (Pmode,
4320 x86_64_int_parameter_registers[i]));
4321 }
4322
4323 if (next_cum.sse_nregs && cfun->va_list_fpr_size)
4324 {
4325 /* Now emit code to save SSE registers. The AX parameter contains the number
4326 of SSE parameter registers used to call this function. We use the
4327 sse_prologue_save insn template, which produces a computed jump across
4328 the SSE saves. We need some preparation work to get this working. */
4329
4330 label = gen_label_rtx ();
4331 label_ref = gen_rtx_LABEL_REF (Pmode, label);
4332
4333 /* Compute the address to jump to:
4334 label - 4*eax + nnamed_sse_arguments*4 */
4335 tmp_reg = gen_reg_rtx (Pmode);
4336 nsse_reg = gen_reg_rtx (Pmode);
4337 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
4338 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
4339 gen_rtx_MULT (Pmode, nsse_reg,
4340 GEN_INT (4))));
4341 if (next_cum.sse_regno)
4342 emit_move_insn
4343 (nsse_reg,
4344 gen_rtx_CONST (DImode,
4345 gen_rtx_PLUS (DImode,
4346 label_ref,
4347 GEN_INT (next_cum.sse_regno * 4))));
4348 else
4349 emit_move_insn (nsse_reg, label_ref);
4350 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
4351
4352 /* Compute the address of the memory block we save into. We always use a
4353 pointer pointing 127 bytes after the first byte to store - this is needed
4354 to keep the instruction size limited to 4 bytes. */
4355 tmp_reg = gen_reg_rtx (Pmode);
4356 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
4357 plus_constant (save_area,
4358 8 * REGPARM_MAX + 127)));
4359 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
4360 MEM_NOTRAP_P (mem) = 1;
4361 set_mem_alias_set (mem, set);
4362 set_mem_align (mem, BITS_PER_WORD);
4363
4364 /* And finally do the dirty job! */
4365 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
4366 GEN_INT (next_cum.sse_regno), label));
4367 }
4368
4369 }
4370
4371 /* Implement va_start. */
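/* Example (hypothetical prototype): for
       void f (int a, int b, double c, ...);
   the named arguments use two GPRs and one SSE register, so va_start
   initializes gp_offset = 2*8 = 16 and fp_offset = 6*8 + 1*16 = 64, with
   overflow_arg_area pointing at the first stack-passed argument.  */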
4372
4373 void
4374 ix86_va_start (tree valist, rtx nextarg)
4375 {
4376 HOST_WIDE_INT words, n_gpr, n_fpr;
4377 tree f_gpr, f_fpr, f_ovf, f_sav;
4378 tree gpr, fpr, ovf, sav, t;
4379 tree type;
4380
4381 /* Only 64bit target needs something special. */
4382 if (!TARGET_64BIT)
4383 {
4384 std_expand_builtin_va_start (valist, nextarg);
4385 return;
4386 }
4387
4388 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
4389 f_fpr = TREE_CHAIN (f_gpr);
4390 f_ovf = TREE_CHAIN (f_fpr);
4391 f_sav = TREE_CHAIN (f_ovf);
4392
4393 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
4394 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
4395 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
4396 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
4397 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4398
4399 /* Count number of gp and fp argument registers used. */
4400 words = current_function_args_info.words;
4401 n_gpr = current_function_args_info.regno;
4402 n_fpr = current_function_args_info.sse_regno;
4403
4404 if (TARGET_DEBUG_ARG)
4405 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
4406 (int) words, (int) n_gpr, (int) n_fpr);
4407
4408 if (cfun->va_list_gpr_size)
4409 {
4410 type = TREE_TYPE (gpr);
4411 t = build2 (GIMPLE_MODIFY_STMT, type, gpr,
4412 build_int_cst (type, n_gpr * 8));
4413 TREE_SIDE_EFFECTS (t) = 1;
4414 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4415 }
4416
4417 if (cfun->va_list_fpr_size)
4418 {
4419 type = TREE_TYPE (fpr);
4420 t = build2 (GIMPLE_MODIFY_STMT, type, fpr,
4421 build_int_cst (type, n_fpr * 16 + 8*REGPARM_MAX));
4422 TREE_SIDE_EFFECTS (t) = 1;
4423 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4424 }
4425
4426 /* Find the overflow area. */
4427 type = TREE_TYPE (ovf);
4428 t = make_tree (type, virtual_incoming_args_rtx);
4429 if (words != 0)
4430 t = build2 (PLUS_EXPR, type, t,
4431 build_int_cst (type, words * UNITS_PER_WORD));
4432 t = build2 (GIMPLE_MODIFY_STMT, type, ovf, t);
4433 TREE_SIDE_EFFECTS (t) = 1;
4434 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4435
4436 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
4437 {
4438 /* Find the register save area.
4439 The function prologue saves it right above the stack frame. */
4440 type = TREE_TYPE (sav);
4441 t = make_tree (type, frame_pointer_rtx);
4442 t = build2 (GIMPLE_MODIFY_STMT, type, sav, t);
4443 TREE_SIDE_EFFECTS (t) = 1;
4444 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4445 }
4446 }
4447
4448 /* Implement va_arg. */
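/* Rough sketch (pseudo-code, not the exact trees built below) of what this
   gimplifies for a simple `va_arg (ap, int)' on x86-64:

       if (ap->gp_offset >= 6*8) goto lab_false;
       addr = ap->reg_save_area + ap->gp_offset;
       ap->gp_offset += 8;
       goto lab_over;
     lab_false:
       addr = ap->overflow_arg_area;      (plus alignment, if required)
       ap->overflow_arg_area += 8;
     lab_over:
       result = *(int *) addr;  */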
4449
4450 tree
4451 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
4452 {
4453 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
4454 tree f_gpr, f_fpr, f_ovf, f_sav;
4455 tree gpr, fpr, ovf, sav, t;
4456 int size, rsize;
4457 tree lab_false, lab_over = NULL_TREE;
4458 tree addr, t2;
4459 rtx container;
4460 int indirect_p = 0;
4461 tree ptrtype;
4462 enum machine_mode nat_mode;
4463
4464 /* Only 64bit target needs something special. */
4465 if (!TARGET_64BIT)
4466 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4467
4468 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
4469 f_fpr = TREE_CHAIN (f_gpr);
4470 f_ovf = TREE_CHAIN (f_fpr);
4471 f_sav = TREE_CHAIN (f_ovf);
4472
4473 valist = build_va_arg_indirect_ref (valist);
4474 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
4475 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
4476 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
4477 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4478
4479 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
4480 if (indirect_p)
4481 type = build_pointer_type (type);
4482 size = int_size_in_bytes (type);
4483 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4484
4485 nat_mode = type_natural_mode (type);
4486 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
4487 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
4488
4489 /* Pull the value out of the saved registers. */
4490
4491 addr = create_tmp_var (ptr_type_node, "addr");
4492 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
4493
4494 if (container)
4495 {
4496 int needed_intregs, needed_sseregs;
4497 bool need_temp;
4498 tree int_addr, sse_addr;
4499
4500 lab_false = create_artificial_label ();
4501 lab_over = create_artificial_label ();
4502
4503 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
4504
4505 need_temp = (!REG_P (container)
4506 && ((needed_intregs && TYPE_ALIGN (type) > 64)
4507 || TYPE_ALIGN (type) > 128));
4508
4509 /* If we are passing a structure, verify that it is a consecutive block
4510 in the register save area. If not, we need to do moves. */
4511 if (!need_temp && !REG_P (container))
4512 {
4513 /* Verify that all registers are strictly consecutive */
4514 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
4515 {
4516 int i;
4517
4518 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4519 {
4520 rtx slot = XVECEXP (container, 0, i);
4521 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
4522 || INTVAL (XEXP (slot, 1)) != i * 16)
4523 need_temp = 1;
4524 }
4525 }
4526 else
4527 {
4528 int i;
4529
4530 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4531 {
4532 rtx slot = XVECEXP (container, 0, i);
4533 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
4534 || INTVAL (XEXP (slot, 1)) != i * 8)
4535 need_temp = 1;
4536 }
4537 }
4538 }
4539 if (!need_temp)
4540 {
4541 int_addr = addr;
4542 sse_addr = addr;
4543 }
4544 else
4545 {
4546 int_addr = create_tmp_var (ptr_type_node, "int_addr");
4547 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
4548 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
4549 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
4550 }
4551
4552 /* First ensure that we fit completely in registers. */
4553 if (needed_intregs)
4554 {
4555 t = build_int_cst (TREE_TYPE (gpr),
4556 (REGPARM_MAX - needed_intregs + 1) * 8);
4557 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
4558 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4559 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4560 gimplify_and_add (t, pre_p);
4561 }
4562 if (needed_sseregs)
4563 {
4564 t = build_int_cst (TREE_TYPE (fpr),
4565 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
4566 + REGPARM_MAX * 8);
4567 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
4568 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4569 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4570 gimplify_and_add (t, pre_p);
4571 }
4572
4573 /* Compute index to start of area used for integer regs. */
4574 if (needed_intregs)
4575 {
4576 /* int_addr = gpr + sav; */
4577 t = fold_convert (ptr_type_node, gpr);
4578 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4579 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, int_addr, t);
4580 gimplify_and_add (t, pre_p);
4581 }
4582 if (needed_sseregs)
4583 {
4584 /* sse_addr = fpr + sav; */
4585 t = fold_convert (ptr_type_node, fpr);
4586 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4587 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, sse_addr, t);
4588 gimplify_and_add (t, pre_p);
4589 }
4590 if (need_temp)
4591 {
4592 int i;
4593 tree temp = create_tmp_var (type, "va_arg_tmp");
4594
4595 /* addr = &temp; */
4596 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
4597 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
4598 gimplify_and_add (t, pre_p);
4599
4600 for (i = 0; i < XVECLEN (container, 0); i++)
4601 {
4602 rtx slot = XVECEXP (container, 0, i);
4603 rtx reg = XEXP (slot, 0);
4604 enum machine_mode mode = GET_MODE (reg);
4605 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
4606 tree addr_type = build_pointer_type (piece_type);
4607 tree src_addr, src;
4608 int src_offset;
4609 tree dest_addr, dest;
4610
4611 if (SSE_REGNO_P (REGNO (reg)))
4612 {
4613 src_addr = sse_addr;
4614 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
4615 }
4616 else
4617 {
4618 src_addr = int_addr;
4619 src_offset = REGNO (reg) * 8;
4620 }
4621 src_addr = fold_convert (addr_type, src_addr);
4622 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
4623 size_int (src_offset)));
4624 src = build_va_arg_indirect_ref (src_addr);
4625
4626 dest_addr = fold_convert (addr_type, addr);
4627 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
4628 size_int (INTVAL (XEXP (slot, 1)))));
4629 dest = build_va_arg_indirect_ref (dest_addr);
4630
4631 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, dest, src);
4632 gimplify_and_add (t, pre_p);
4633 }
4634 }
4635
4636 if (needed_intregs)
4637 {
4638 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
4639 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
4640 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gpr), gpr, t);
4641 gimplify_and_add (t, pre_p);
4642 }
4643 if (needed_sseregs)
4644 {
4645 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
4646 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
4647 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (fpr), fpr, t);
4648 gimplify_and_add (t, pre_p);
4649 }
4650
4651 t = build1 (GOTO_EXPR, void_type_node, lab_over);
4652 gimplify_and_add (t, pre_p);
4653
4654 t = build1 (LABEL_EXPR, void_type_node, lab_false);
4655 append_to_statement_list (t, pre_p);
4656 }
4657
4658 /* ... otherwise out of the overflow area. */
4659
4660 /* Care for on-stack alignment if needed. */
4661 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64
4662 || integer_zerop (TYPE_SIZE (type)))
4663 t = ovf;
4664 else
4665 {
4666 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
4667 t = build2 (PLUS_EXPR, TREE_TYPE (ovf), ovf,
4668 build_int_cst (TREE_TYPE (ovf), align - 1));
4669 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4670 build_int_cst (TREE_TYPE (t), -align));
4671 }
4672 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
4673
4674 t2 = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
4675 gimplify_and_add (t2, pre_p);
4676
4677 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
4678 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
4679 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
4680 gimplify_and_add (t, pre_p);
4681
4682 if (container)
4683 {
4684 t = build1 (LABEL_EXPR, void_type_node, lab_over);
4685 append_to_statement_list (t, pre_p);
4686 }
4687
4688 ptrtype = build_pointer_type (type);
4689 addr = fold_convert (ptrtype, addr);
4690
4691 if (indirect_p)
4692 addr = build_va_arg_indirect_ref (addr);
4693 return build_va_arg_indirect_ref (addr);
4694 }
4695 \f
4696 /* Return nonzero if OPNUM's MEM should be matched
4697 in movabs* patterns. */
4698
4699 int
4700 ix86_check_movabs (rtx insn, int opnum)
4701 {
4702 rtx set, mem;
4703
4704 set = PATTERN (insn);
4705 if (GET_CODE (set) == PARALLEL)
4706 set = XVECEXP (set, 0, 0);
4707 gcc_assert (GET_CODE (set) == SET);
4708 mem = XEXP (set, opnum);
4709 while (GET_CODE (mem) == SUBREG)
4710 mem = SUBREG_REG (mem);
4711 gcc_assert (GET_CODE (mem) == MEM);
4712 return (volatile_ok || !MEM_VOLATILE_P (mem));
4713 }
4714 \f
4715 /* Initialize the table of extra 80387 mathematical constants. */
4716
4717 static void
4718 init_ext_80387_constants (void)
4719 {
4720 static const char * cst[5] =
4721 {
4722 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
4723 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
4724 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
4725 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
4726 "3.1415926535897932385128089594061862044", /* 4: fldpi */
4727 };
4728 int i;
4729
4730 for (i = 0; i < 5; i++)
4731 {
4732 real_from_string (&ext_80387_constants_table[i], cst[i]);
4733 /* Ensure each constant is rounded to XFmode precision. */
4734 real_convert (&ext_80387_constants_table[i],
4735 XFmode, &ext_80387_constants_table[i]);
4736 }
4737
4738 ext_80387_constants_init = 1;
4739 }
4740
4741 /* Return a nonzero code identifying the special instruction that can load the
4742 constant X, 0 if there is none, or -1 if X is not a floating point CONST_DOUBLE. */
4743
4744 int
4745 standard_80387_constant_p (rtx x)
4746 {
4747 REAL_VALUE_TYPE r;
4748
4749 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
4750 return -1;
4751
4752 if (x == CONST0_RTX (GET_MODE (x)))
4753 return 1;
4754 if (x == CONST1_RTX (GET_MODE (x)))
4755 return 2;
4756
4757 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4758
4759 /* For XFmode constants, try to find a special 80387 instruction when
4760 optimizing for size or on those CPUs that benefit from them. */
4761 if (GET_MODE (x) == XFmode
4762 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
4763 {
4764 int i;
4765
4766 if (! ext_80387_constants_init)
4767 init_ext_80387_constants ();
4768
4769 for (i = 0; i < 5; i++)
4770 if (real_identical (&r, &ext_80387_constants_table[i]))
4771 return i + 3;
4772 }
4773
4774 /* Load of the constant -0.0 or -1.0 will be split as
4775 fldz;fchs or fld1;fchs sequence. */
4776 if (real_isnegzero (&r))
4777 return 8;
4778 if (real_identical (&r, &dconstm1))
4779 return 9;
4780
4781 return 0;
4782 }
4783
4784 /* Return the opcode of the special instruction to be used to load
4785 the constant X. */
4786
4787 const char *
4788 standard_80387_constant_opcode (rtx x)
4789 {
4790 switch (standard_80387_constant_p (x))
4791 {
4792 case 1:
4793 return "fldz";
4794 case 2:
4795 return "fld1";
4796 case 3:
4797 return "fldlg2";
4798 case 4:
4799 return "fldln2";
4800 case 5:
4801 return "fldl2e";
4802 case 6:
4803 return "fldl2t";
4804 case 7:
4805 return "fldpi";
4806 case 8:
4807 case 9:
4808 return "#";
4809 default:
4810 gcc_unreachable ();
4811 }
4812 }
4813
4814 /* Return the CONST_DOUBLE representing the 80387 constant that is
4815 loaded by the specified special instruction. The argument IDX
4816 matches the return value from standard_80387_constant_p. */
4817
4818 rtx
4819 standard_80387_constant_rtx (int idx)
4820 {
4821 int i;
4822
4823 if (! ext_80387_constants_init)
4824 init_ext_80387_constants ();
4825
4826 switch (idx)
4827 {
4828 case 3:
4829 case 4:
4830 case 5:
4831 case 6:
4832 case 7:
4833 i = idx - 3;
4834 break;
4835
4836 default:
4837 gcc_unreachable ();
4838 }
4839
4840 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
4841 XFmode);
4842 }
4843
4844 /* Return 1 if MODE is a valid vector mode for SSE. */
4845 static int
4846 standard_sse_mode_p (enum machine_mode mode)
4847 {
4848 switch (mode)
4849 {
4850 case V16QImode:
4851 case V8HImode:
4852 case V4SImode:
4853 case V2DImode:
4854 case V4SFmode:
4855 case V2DFmode:
4856 return 1;
4857
4858 default:
4859 return 0;
4860 }
4861 }
4862
4863 /* Return 1 if X is an FP constant that we can load into an SSE register
4864 without using memory. */
4865 int
4866 standard_sse_constant_p (rtx x)
4867 {
4868 enum machine_mode mode = GET_MODE (x);
4869
4870 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
4871 return 1;
4872 if (vector_all_ones_operand (x, mode)
4873 && standard_sse_mode_p (mode))
4874 return TARGET_SSE2 ? 2 : -1;
4875
4876 return 0;
4877 }
4878
4879 /* Return the opcode of the special instruction to be used to load
4880 the constant X. */
4881
4882 const char *
4883 standard_sse_constant_opcode (rtx insn, rtx x)
4884 {
4885 switch (standard_sse_constant_p (x))
4886 {
4887 case 1:
4888 if (get_attr_mode (insn) == MODE_V4SF)
4889 return "xorps\t%0, %0";
4890 else if (get_attr_mode (insn) == MODE_V2DF)
4891 return "xorpd\t%0, %0";
4892 else
4893 return "pxor\t%0, %0";
4894 case 2:
4895 return "pcmpeqd\t%0, %0";
4896 }
4897 gcc_unreachable ();
4898 }
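
/* For example: a CONST0_RTX vector destined for an SSE register is
   cleared with "xorps %xmm0, %xmm0" (or xorpd/pxor, depending on the
   insn's mode attribute), while an all-ones vector is materialized with
   "pcmpeqd %xmm0, %xmm0"; %xmm0 here is just an example operand. */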
4899
4900 /* Return 1 if OP contains a symbol reference. */
4901
4902 int
4903 symbolic_reference_mentioned_p (rtx op)
4904 {
4905 const char *fmt;
4906 int i;
4907
4908 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
4909 return 1;
4910
4911 fmt = GET_RTX_FORMAT (GET_CODE (op));
4912 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
4913 {
4914 if (fmt[i] == 'E')
4915 {
4916 int j;
4917
4918 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
4919 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
4920 return 1;
4921 }
4922
4923 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
4924 return 1;
4925 }
4926
4927 return 0;
4928 }
4929
4930 /* Return 1 if it is appropriate to emit `ret' instructions in the
4931 body of a function. Do this only if the epilogue is simple, needing a
4932 couple of insns. Prior to reloading, we can't tell how many registers
4933 must be saved, so return 0 then. Return 0 if there is no frame
4934 marker to de-allocate. */
4935
4936 int
4937 ix86_can_use_return_insn_p (void)
4938 {
4939 struct ix86_frame frame;
4940
4941 if (! reload_completed || frame_pointer_needed)
4942 return 0;
4943
4944 /* Don't allow more than 32K bytes of pop, since that's all we can do
4945 with one instruction. */
4946 if (current_function_pops_args
4947 && current_function_args_size >= 32768)
4948 return 0;
4949
4950 ix86_compute_frame_layout (&frame);
4951 return frame.to_allocate == 0 && frame.nregs == 0;
4952 }
4953 \f
4954 /* Value should be nonzero if functions must have frame pointers.
4955 Zero means the frame pointer need not be set up (and parms may
4956 be accessed via the stack pointer) in functions that seem suitable. */
4957
4958 int
4959 ix86_frame_pointer_required (void)
4960 {
4961 /* If we accessed previous frames, then the generated code expects
4962 to be able to access the saved ebp value in our frame. */
4963 if (cfun->machine->accesses_prev_frame)
4964 return 1;
4965
4966 /* Several x86 OSes need a frame pointer for other reasons,
4967 usually pertaining to setjmp. */
4968 if (SUBTARGET_FRAME_POINTER_REQUIRED)
4969 return 1;
4970
4971 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
4972 the frame pointer by default. Turn it back on now if we've not
4973 got a leaf function. */
4974 if (TARGET_OMIT_LEAF_FRAME_POINTER
4975 && (!current_function_is_leaf
4976 || ix86_current_function_calls_tls_descriptor))
4977 return 1;
4978
4979 if (current_function_profile)
4980 return 1;
4981
4982 return 0;
4983 }
4984
4985 /* Record that the current function accesses previous call frames. */
4986
4987 void
4988 ix86_setup_frame_addresses (void)
4989 {
4990 cfun->machine->accesses_prev_frame = 1;
4991 }
4992 \f
4993 #if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
4994 # define USE_HIDDEN_LINKONCE 1
4995 #else
4996 # define USE_HIDDEN_LINKONCE 0
4997 #endif
4998
4999 static int pic_labels_used;
5000
5001 /* Fills in the label name that should be used for a pc thunk for
5002 the given register. */
5003
5004 static void
5005 get_pc_thunk_name (char name[32], unsigned int regno)
5006 {
5007 gcc_assert (!TARGET_64BIT);
5008
5009 if (USE_HIDDEN_LINKONCE)
5010 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
5011 else
5012 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
5013 }
5014
5015
5016 /* This function emits the pc thunks used by -fpic code: each thunk loads
5017 its register with the return address of the caller and then returns. */
5018
5019 void
5020 ix86_file_end (void)
5021 {
5022 rtx xops[2];
5023 int regno;
5024
5025 for (regno = 0; regno < 8; ++regno)
5026 {
5027 char name[32];
5028
5029 if (! ((pic_labels_used >> regno) & 1))
5030 continue;
5031
5032 get_pc_thunk_name (name, regno);
5033
5034 #if TARGET_MACHO
5035 if (TARGET_MACHO)
5036 {
5037 switch_to_section (darwin_sections[text_coal_section]);
5038 fputs ("\t.weak_definition\t", asm_out_file);
5039 assemble_name (asm_out_file, name);
5040 fputs ("\n\t.private_extern\t", asm_out_file);
5041 assemble_name (asm_out_file, name);
5042 fputs ("\n", asm_out_file);
5043 ASM_OUTPUT_LABEL (asm_out_file, name);
5044 }
5045 else
5046 #endif
5047 if (USE_HIDDEN_LINKONCE)
5048 {
5049 tree decl;
5050
5051 decl = build_decl (FUNCTION_DECL, get_identifier (name),
5052 error_mark_node);
5053 TREE_PUBLIC (decl) = 1;
5054 TREE_STATIC (decl) = 1;
5055 DECL_ONE_ONLY (decl) = 1;
5056
5057 (*targetm.asm_out.unique_section) (decl, 0);
5058 switch_to_section (get_named_section (decl, NULL, 0));
5059
5060 (*targetm.asm_out.globalize_label) (asm_out_file, name);
5061 fputs ("\t.hidden\t", asm_out_file);
5062 assemble_name (asm_out_file, name);
5063 fputc ('\n', asm_out_file);
5064 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
5065 }
5066 else
5067 {
5068 switch_to_section (text_section);
5069 ASM_OUTPUT_LABEL (asm_out_file, name);
5070 }
5071
5072 xops[0] = gen_rtx_REG (SImode, regno);
5073 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
5074 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
5075 output_asm_insn ("ret", xops);
5076 }
5077
5078 if (NEED_INDICATE_EXEC_STACK)
5079 file_end_indicate_exec_stack ();
5080 }
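
/* For reference, a thunk emitted above for %ebx looks roughly like:

	__i686.get_pc_thunk.bx:
		movl	(%esp), %ebx
		ret

   i.e. it copies the caller's return address (the new PIC base) into
   the requested register. */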
5081
5082 /* Emit code for the SET_GOT patterns. */
5083
5084 const char *
5085 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
5086 {
5087 rtx xops[3];
5088
5089 xops[0] = dest;
5090 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
5091
5092 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
5093 {
5094 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
5095
5096 if (!flag_pic)
5097 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
5098 else
5099 output_asm_insn ("call\t%a2", xops);
5100
5101 #if TARGET_MACHO
5102 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5103 is what will be referenced by the Mach-O PIC subsystem. */
5104 if (!label)
5105 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5106 #endif
5107
5108 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5109 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
5110
5111 if (flag_pic)
5112 output_asm_insn ("pop{l}\t%0", xops);
5113 }
5114 else
5115 {
5116 char name[32];
5117 get_pc_thunk_name (name, REGNO (dest));
5118 pic_labels_used |= 1 << REGNO (dest);
5119
5120 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
5121 xops[2] = gen_rtx_MEM (QImode, xops[2]);
5122 output_asm_insn ("call\t%X2", xops);
5123 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5124 is what will be referenced by the Mach-O PIC subsystem. */
5125 #if TARGET_MACHO
5126 if (!label)
5127 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5128 else
5129 targetm.asm_out.internal_label (asm_out_file, "L",
5130 CODE_LABEL_NUMBER (label));
5131 #endif
5132 }
5133
5134 if (TARGET_MACHO)
5135 return "";
5136
5137 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
5138 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
5139 else
5140 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
5141
5142 return "";
5143 }
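
/* The two -fpic sequences emitted above look roughly like (using %ebx
   as an example destination):

	call	__i686.get_pc_thunk.bx
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx

   with TARGET_DEEP_BRANCH_PREDICTION, or otherwise:

	call	.L2
   .L2:	popl	%ebx
	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L2], %ebx  */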
5144
5145 /* Generate a "push" pattern for input ARG. */
5146
5147 static rtx
5148 gen_push (rtx arg)
5149 {
5150 return gen_rtx_SET (VOIDmode,
5151 gen_rtx_MEM (Pmode,
5152 gen_rtx_PRE_DEC (Pmode,
5153 stack_pointer_rtx)),
5154 arg);
5155 }
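
/* E.g. gen_push (eax) builds, in 32-bit mode, roughly
	(set (mem:SI (pre_dec:SI (reg:SI sp))) (reg:SI ax))
   which the push pattern outputs as "pushl %eax". */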
5156
5157 /* Return the number of an unused call-clobbered register if one is
5158 available for the entire function, otherwise return INVALID_REGNUM. */
5159
5160 static unsigned int
5161 ix86_select_alt_pic_regnum (void)
5162 {
5163 if (current_function_is_leaf && !current_function_profile
5164 && !ix86_current_function_calls_tls_descriptor)
5165 {
5166 int i;
5167 for (i = 2; i >= 0; --i)
5168 if (!regs_ever_live[i])
5169 return i;
5170 }
5171
5172 return INVALID_REGNUM;
5173 }
5174
5175 /* Return 1 if we need to save REGNO. */
5176 static int
5177 ix86_save_reg (unsigned int regno, int maybe_eh_return)
5178 {
5179 if (pic_offset_table_rtx
5180 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
5181 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5182 || current_function_profile
5183 || current_function_calls_eh_return
5184 || current_function_uses_const_pool))
5185 {
5186 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
5187 return 0;
5188 return 1;
5189 }
5190
5191 if (current_function_calls_eh_return && maybe_eh_return)
5192 {
5193 unsigned i;
5194 for (i = 0; ; i++)
5195 {
5196 unsigned test = EH_RETURN_DATA_REGNO (i);
5197 if (test == INVALID_REGNUM)
5198 break;
5199 if (test == regno)
5200 return 1;
5201 }
5202 }
5203
5204 if (cfun->machine->force_align_arg_pointer
5205 && regno == REGNO (cfun->machine->force_align_arg_pointer))
5206 return 1;
5207
5208 return (regs_ever_live[regno]
5209 && !call_used_regs[regno]
5210 && !fixed_regs[regno]
5211 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
5212 }
5213
5214 /* Return number of registers to be saved on the stack. */
5215
5216 static int
5217 ix86_nsaved_regs (void)
5218 {
5219 int nregs = 0;
5220 int regno;
5221
5222 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
5223 if (ix86_save_reg (regno, true))
5224 nregs++;
5225 return nregs;
5226 }
5227
5228 /* Return the offset between two registers, one to be eliminated, and the other
5229 its replacement, at the start of a routine. */
5230
5231 HOST_WIDE_INT
5232 ix86_initial_elimination_offset (int from, int to)
5233 {
5234 struct ix86_frame frame;
5235 ix86_compute_frame_layout (&frame);
5236
5237 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
5238 return frame.hard_frame_pointer_offset;
5239 else if (from == FRAME_POINTER_REGNUM
5240 && to == HARD_FRAME_POINTER_REGNUM)
5241 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
5242 else
5243 {
5244 gcc_assert (to == STACK_POINTER_REGNUM);
5245
5246 if (from == ARG_POINTER_REGNUM)
5247 return frame.stack_pointer_offset;
5248
5249 gcc_assert (from == FRAME_POINTER_REGNUM);
5250 return frame.stack_pointer_offset - frame.frame_pointer_offset;
5251 }
5252 }
5253
5254 /* Fill in the ix86_frame structure FRAME describing the frame of the currently compiled function. */
5255
5256 static void
5257 ix86_compute_frame_layout (struct ix86_frame *frame)
5258 {
5259 HOST_WIDE_INT total_size;
5260 unsigned int stack_alignment_needed;
5261 HOST_WIDE_INT offset;
5262 unsigned int preferred_alignment;
5263 HOST_WIDE_INT size = get_frame_size ();
5264
5265 frame->nregs = ix86_nsaved_regs ();
5266 total_size = size;
5267
5268 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
5269 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
5270
5271 /* During reload iterations the number of registers saved can change.
5272 Recompute the value as needed. Do not recompute when the number of
5273 registers didn't change, as reload makes multiple calls to this function
5274 and does not expect the decision to change within a single iteration. */
5275 if (!optimize_size
5276 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
5277 {
5278 int count = frame->nregs;
5279
5280 cfun->machine->use_fast_prologue_epilogue_nregs = count;
5281 /* The fast prologue uses moves instead of pushes to save registers. This
5282 is significantly longer, but also executes faster, as modern hardware
5283 can execute the moves in parallel but can't do that for push/pop.
5284
5285 Be careful about choosing which prologue to emit: when the function
5286 takes many instructions to execute, we may as well use the slow version,
5287 and likewise when the function is known to be outside a hot spot (this
5288 is known with feedback only). Weight the size of the function by the
5289 number of registers to save, as it is cheap to use one or two push
5290 instructions but very slow to use many of them. */
5291 if (count)
5292 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
5293 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
5294 || (flag_branch_probabilities
5295 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
5296 cfun->machine->use_fast_prologue_epilogue = false;
5297 else
5298 cfun->machine->use_fast_prologue_epilogue
5299 = !expensive_function_p (count);
5300 }
5301 if (TARGET_PROLOGUE_USING_MOVE
5302 && cfun->machine->use_fast_prologue_epilogue)
5303 frame->save_regs_using_mov = true;
5304 else
5305 frame->save_regs_using_mov = false;
5306
5307
5308 /* Skip return address and saved base pointer. */
5309 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
5310
5311 frame->hard_frame_pointer_offset = offset;
5312
5313 /* Do some sanity checking of stack_alignment_needed and
5314 preferred_alignment, since the i386 port is the only one using these
5315 features, and they may break easily. */
5316
5317 gcc_assert (!size || stack_alignment_needed);
5318 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
5319 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
5320 gcc_assert (stack_alignment_needed
5321 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
5322
5323 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
5324 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
5325
5326 /* Register save area */
5327 offset += frame->nregs * UNITS_PER_WORD;
5328
5329 /* Va-arg area */
5330 if (ix86_save_varrargs_registers)
5331 {
5332 offset += X86_64_VARARGS_SIZE;
5333 frame->va_arg_size = X86_64_VARARGS_SIZE;
5334 }
5335 else
5336 frame->va_arg_size = 0;
5337
5338 /* Align start of frame for local function. */
5339 frame->padding1 = ((offset + stack_alignment_needed - 1)
5340 & -stack_alignment_needed) - offset;
5341
5342 offset += frame->padding1;
5343
5344 /* Frame pointer points here. */
5345 frame->frame_pointer_offset = offset;
5346
5347 offset += size;
5348
5349 /* Add the outgoing arguments area. It can be skipped if we eliminated
5350 all the function calls as dead code.
5351 Skipping is however impossible when the function calls alloca, since the
5352 alloca expander assumes that the last current_function_outgoing_args_size
5353 bytes of the stack frame are unused. */
5354 if (ACCUMULATE_OUTGOING_ARGS
5355 && (!current_function_is_leaf || current_function_calls_alloca
5356 || ix86_current_function_calls_tls_descriptor))
5357 {
5358 offset += current_function_outgoing_args_size;
5359 frame->outgoing_arguments_size = current_function_outgoing_args_size;
5360 }
5361 else
5362 frame->outgoing_arguments_size = 0;
5363
5364 /* Align stack boundary. Only needed if we're calling another function
5365 or using alloca. */
5366 if (!current_function_is_leaf || current_function_calls_alloca
5367 || ix86_current_function_calls_tls_descriptor)
5368 frame->padding2 = ((offset + preferred_alignment - 1)
5369 & -preferred_alignment) - offset;
5370 else
5371 frame->padding2 = 0;
5372
5373 offset += frame->padding2;
5374
5375 /* We've reached end of stack frame. */
5376 frame->stack_pointer_offset = offset;
5377
5378 /* Size prologue needs to allocate. */
5379 frame->to_allocate =
5380 (size + frame->padding1 + frame->padding2
5381 + frame->outgoing_arguments_size + frame->va_arg_size);
5382
5383 if ((!frame->to_allocate && frame->nregs <= 1)
5384 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
5385 frame->save_regs_using_mov = false;
5386
5387 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
5388 && current_function_is_leaf
5389 && !ix86_current_function_calls_tls_descriptor)
5390 {
5391 frame->red_zone_size = frame->to_allocate;
5392 if (frame->save_regs_using_mov)
5393 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
5394 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
5395 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
5396 }
5397 else
5398 frame->red_zone_size = 0;
5399 frame->to_allocate -= frame->red_zone_size;
5400 frame->stack_pointer_offset -= frame->red_zone_size;
5401 #if 0
5402 fprintf (stderr, "nregs: %i\n", frame->nregs);
5403 fprintf (stderr, "size: %i\n", size);
5404 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
5405 fprintf (stderr, "padding1: %i\n", frame->padding1);
5406 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
5407 fprintf (stderr, "padding2: %i\n", frame->padding2);
5408 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
5409 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
5410 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
5411 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
5412 frame->hard_frame_pointer_offset);
5413 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
5414 #endif
5415 }
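
/* Rough picture of the layout computed above, from higher towards lower
   addresses:

	return address
	saved %ebp (only if frame_pointer_needed)
	register save area (nregs words)
	va-arg register save area (if any)
	padding1
	local variables (get_frame_size ())
	outgoing arguments area
	padding2

   frame_pointer_offset marks the boundary between padding1 and the
   locals, and stack_pointer_offset the end of the whole frame; on
   targets with a red zone, qualifying leaf functions may leave up to
   red_zone_size of this unallocated below the stack pointer. */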
5416
5417 /* Emit code to save registers in the prologue. */
5418
5419 static void
5420 ix86_emit_save_regs (void)
5421 {
5422 unsigned int regno;
5423 rtx insn;
5424
5425 for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
5426 if (ix86_save_reg (regno, true))
5427 {
5428 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
5429 RTX_FRAME_RELATED_P (insn) = 1;
5430 }
5431 }
5432
5433 /* Emit code to save registers using MOV insns. The first register
5434 is stored at POINTER + OFFSET. */
5435 static void
5436 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
5437 {
5438 unsigned int regno;
5439 rtx insn;
5440
5441 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5442 if (ix86_save_reg (regno, true))
5443 {
5444 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
5445 Pmode, offset),
5446 gen_rtx_REG (Pmode, regno));
5447 RTX_FRAME_RELATED_P (insn) = 1;
5448 offset += UNITS_PER_WORD;
5449 }
5450 }
5451
5452 /* Expand prologue or epilogue stack adjustment.
5453 The pattern exists to put a dependency on all ebp-based memory accesses.
5454 STYLE should be negative if instructions should be marked as frame related,
5455 zero if the %r11 register is live and cannot be freely used, and positive
5456 otherwise. */
5457
5458 static void
5459 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
5460 {
5461 rtx insn;
5462
5463 if (! TARGET_64BIT)
5464 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
5465 else if (x86_64_immediate_operand (offset, DImode))
5466 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
5467 else
5468 {
5469 rtx r11;
5470 /* %r11 is used by the indirect sibcall return as well: it is set before
5471 the epilogue and used after it. At the moment an indirect sibcall
5472 shouldn't be used together with huge frame sizes in one
5473 function because of the frame_size check in sibcall.c. */
5474 gcc_assert (style);
5475 r11 = gen_rtx_REG (DImode, R11_REG);
5476 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
5477 if (style < 0)
5478 RTX_FRAME_RELATED_P (insn) = 1;
5479 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
5480 offset));
5481 }
5482 if (style < 0)
5483 RTX_FRAME_RELATED_P (insn) = 1;
5484 }
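
/* E.g. a 64-bit adjustment whose constant does not satisfy
   x86_64_immediate_operand cannot be added directly; the code above
   first loads the offset into %r11 and then performs the adjustment
   through the *_rex64_2 pattern using that register. */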
5485
5486 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
5487
5488 static rtx
5489 ix86_internal_arg_pointer (void)
5490 {
5491 bool has_force_align_arg_pointer =
5492 (0 != lookup_attribute (ix86_force_align_arg_pointer_string,
5493 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))));
5494 if ((FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
5495 && DECL_NAME (current_function_decl)
5496 && MAIN_NAME_P (DECL_NAME (current_function_decl))
5497 && DECL_FILE_SCOPE_P (current_function_decl))
5498 || ix86_force_align_arg_pointer
5499 || has_force_align_arg_pointer)
5500 {
5501 /* Nested functions can't realign the stack due to a register
5502 conflict. */
5503 if (DECL_CONTEXT (current_function_decl)
5504 && TREE_CODE (DECL_CONTEXT (current_function_decl)) == FUNCTION_DECL)
5505 {
5506 if (ix86_force_align_arg_pointer)
5507 warning (0, "-mstackrealign ignored for nested functions");
5508 if (has_force_align_arg_pointer)
5509 error ("%s not supported for nested functions",
5510 ix86_force_align_arg_pointer_string);
5511 return virtual_incoming_args_rtx;
5512 }
5513 cfun->machine->force_align_arg_pointer = gen_rtx_REG (Pmode, 2);
5514 return copy_to_reg (cfun->machine->force_align_arg_pointer);
5515 }
5516 else
5517 return virtual_incoming_args_rtx;
5518 }
5519
5520 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
5521 This is called from dwarf2out.c to emit call frame instructions
5522 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
5523 static void
5524 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
5525 {
5526 rtx unspec = SET_SRC (pattern);
5527 gcc_assert (GET_CODE (unspec) == UNSPEC);
5528
5529 switch (index)
5530 {
5531 case UNSPEC_REG_SAVE:
5532 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
5533 SET_DEST (pattern));
5534 break;
5535 case UNSPEC_DEF_CFA:
5536 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
5537 INTVAL (XVECEXP (unspec, 0, 0)));
5538 break;
5539 default:
5540 gcc_unreachable ();
5541 }
5542 }
5543
5544 /* Expand the prologue into a bunch of separate insns. */
5545
5546 void
5547 ix86_expand_prologue (void)
5548 {
5549 rtx insn;
5550 bool pic_reg_used;
5551 struct ix86_frame frame;
5552 HOST_WIDE_INT allocate;
5553
5554 ix86_compute_frame_layout (&frame);
5555
5556 if (cfun->machine->force_align_arg_pointer)
5557 {
5558 rtx x, y;
5559
5560 /* Grab the argument pointer. */
5561 x = plus_constant (stack_pointer_rtx, 4);
5562 y = cfun->machine->force_align_arg_pointer;
5563 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
5564 RTX_FRAME_RELATED_P (insn) = 1;
5565
5566 /* The unwind info consists of two parts: install the fafp as the cfa,
5567 and record the fafp as the "save register" of the stack pointer.
5568 The latter is there so that the unwinder can see where it
5569 should restore the stack pointer across the "and" insn. */
5570 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_DEF_CFA);
5571 x = gen_rtx_SET (VOIDmode, y, x);
5572 RTX_FRAME_RELATED_P (x) = 1;
5573 y = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, stack_pointer_rtx),
5574 UNSPEC_REG_SAVE);
5575 y = gen_rtx_SET (VOIDmode, cfun->machine->force_align_arg_pointer, y);
5576 RTX_FRAME_RELATED_P (y) = 1;
5577 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y));
5578 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5579 REG_NOTES (insn) = x;
5580
5581 /* Align the stack. */
5582 emit_insn (gen_andsi3 (stack_pointer_rtx, stack_pointer_rtx,
5583 GEN_INT (-16)));
5584
5585 /* And here we cheat like madmen with the unwind info. We force the
5586 cfa register back to sp+4, which is exactly what it was at the
5587 start of the function. Re-pushing the return address results in
5588 the return at the same spot relative to the cfa, and thus is
5589 correct wrt the unwind info. */
5590 x = cfun->machine->force_align_arg_pointer;
5591 x = gen_frame_mem (Pmode, plus_constant (x, -4));
5592 insn = emit_insn (gen_push (x));
5593 RTX_FRAME_RELATED_P (insn) = 1;
5594
5595 x = GEN_INT (4);
5596 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, x), UNSPEC_DEF_CFA);
5597 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
5598 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5599 REG_NOTES (insn) = x;
5600 }
5601
5602 /* Note: AT&T enter does NOT have reversed args. Enter is probably
5603 slower on all targets. Also sdb doesn't like it. */
5604
5605 if (frame_pointer_needed)
5606 {
5607 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
5608 RTX_FRAME_RELATED_P (insn) = 1;
5609
5610 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
5611 RTX_FRAME_RELATED_P (insn) = 1;
5612 }
5613
5614 allocate = frame.to_allocate;
5615
5616 if (!frame.save_regs_using_mov)
5617 ix86_emit_save_regs ();
5618 else
5619 allocate += frame.nregs * UNITS_PER_WORD;
5620
5621 /* When using the red zone we may start saving registers before allocating
5622 the stack frame, saving one cycle of the prologue. */
5623 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
5624 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
5625 : stack_pointer_rtx,
5626 -frame.nregs * UNITS_PER_WORD);
5627
5628 if (allocate == 0)
5629 ;
5630 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
5631 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5632 GEN_INT (-allocate), -1);
5633 else
5634 {
5635 /* Only valid for Win32. */
5636 rtx eax = gen_rtx_REG (SImode, 0);
5637 bool eax_live = ix86_eax_live_at_start_p ();
5638 rtx t;
5639
5640 gcc_assert (!TARGET_64BIT);
5641
5642 if (eax_live)
5643 {
5644 emit_insn (gen_push (eax));
5645 allocate -= 4;
5646 }
5647
5648 emit_move_insn (eax, GEN_INT (allocate));
5649
5650 insn = emit_insn (gen_allocate_stack_worker (eax));
5651 RTX_FRAME_RELATED_P (insn) = 1;
5652 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
5653 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
5654 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
5655 t, REG_NOTES (insn));
5656
5657 if (eax_live)
5658 {
5659 if (frame_pointer_needed)
5660 t = plus_constant (hard_frame_pointer_rtx,
5661 allocate
5662 - frame.to_allocate
5663 - frame.nregs * UNITS_PER_WORD);
5664 else
5665 t = plus_constant (stack_pointer_rtx, allocate);
5666 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
5667 }
5668 }
5669
5670 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
5671 {
5672 if (!frame_pointer_needed || !frame.to_allocate)
5673 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
5674 else
5675 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
5676 -frame.nregs * UNITS_PER_WORD);
5677 }
5678
5679 pic_reg_used = false;
5680 if (pic_offset_table_rtx
5681 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5682 || current_function_profile))
5683 {
5684 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
5685
5686 if (alt_pic_reg_used != INVALID_REGNUM)
5687 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
5688
5689 pic_reg_used = true;
5690 }
5691
5692 if (pic_reg_used)
5693 {
5694 if (TARGET_64BIT)
5695 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
5696 else
5697 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
5698
5699 /* Even with accurate pre-reload life analysis, we can wind up
5700 deleting all references to the pic register after reload.
5701 Consider the case where cross-jumping unifies two sides of a branch
5702 controlled by a comparison vs. the only read from a global.
5703 In that case, allow the set_got to be deleted, though we're
5704 too late to do anything about the ebx save in the prologue. */
5705 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
5706 }
5707
5708 /* Prevent function calls from being scheduled before the call to mcount.
5709 In the pic_reg_used case, make sure that the got load isn't deleted. */
5710 if (current_function_profile)
5711 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
5712 }
5713
5714 /* Emit code to restore saved registers using MOV insns. First register
5715 is restored from POINTER + OFFSET. */
5716 static void
5717 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
5718 int maybe_eh_return)
5719 {
5720 int regno;
5721 rtx base_address = gen_rtx_MEM (Pmode, pointer);
5722
5723 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5724 if (ix86_save_reg (regno, maybe_eh_return))
5725 {
5726 /* Ensure that adjust_address won't be forced to produce pointer
5727 out of range allowed by x86-64 instruction set. */
5728 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
5729 {
5730 rtx r11;
5731
5732 r11 = gen_rtx_REG (DImode, R11_REG);
5733 emit_move_insn (r11, GEN_INT (offset));
5734 emit_insn (gen_adddi3 (r11, r11, pointer));
5735 base_address = gen_rtx_MEM (Pmode, r11);
5736 offset = 0;
5737 }
5738 emit_move_insn (gen_rtx_REG (Pmode, regno),
5739 adjust_address (base_address, Pmode, offset));
5740 offset += UNITS_PER_WORD;
5741 }
5742 }
5743
5744 /* Restore function stack, frame, and registers. */
5745
5746 void
5747 ix86_expand_epilogue (int style)
5748 {
5749 int regno;
5750 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
5751 struct ix86_frame frame;
5752 HOST_WIDE_INT offset;
5753
5754 ix86_compute_frame_layout (&frame);
5755
5756 /* Calculate start of saved registers relative to ebp. Special care
5757 must be taken for the normal return case of a function using
5758 eh_return: the eax and edx registers are marked as saved, but not
5759 restored along this path. */
5760 offset = frame.nregs;
5761 if (current_function_calls_eh_return && style != 2)
5762 offset -= 2;
5763 offset *= -UNITS_PER_WORD;
5764
5765 /* If we're only restoring one register and sp is not valid, then
5766 use a move instruction to restore the register, since it's
5767 less work than reloading sp and popping the register.
5768
5769 The default code results in a stack adjustment using an add/lea instruction,
5770 while this code results in a LEAVE instruction (or discrete equivalent),
5771 so it is profitable in some other cases as well, especially when there
5772 are no registers to restore. We also use this code when TARGET_USE_LEAVE
5773 is set and there is exactly one register to pop. This heuristic may need
5774 some tuning in the future. */
5775 if ((!sp_valid && frame.nregs <= 1)
5776 || (TARGET_EPILOGUE_USING_MOVE
5777 && cfun->machine->use_fast_prologue_epilogue
5778 && (frame.nregs > 1 || frame.to_allocate))
5779 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
5780 || (frame_pointer_needed && TARGET_USE_LEAVE
5781 && cfun->machine->use_fast_prologue_epilogue
5782 && frame.nregs == 1)
5783 || current_function_calls_eh_return)
5784 {
5785 /* Restore registers. We can use ebp or esp to address the memory
5786 locations. If both are available, default to ebp, since offsets
5787 are known to be small. The only exception is when esp points directly
5788 to the end of the block of saved registers, where we may simplify the
5789 addressing mode. */
5790
5791 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
5792 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
5793 frame.to_allocate, style == 2);
5794 else
5795 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
5796 offset, style == 2);
5797
5798 /* eh_return epilogues need %ecx added to the stack pointer. */
5799 if (style == 2)
5800 {
5801 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
5802
5803 if (frame_pointer_needed)
5804 {
5805 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
5806 tmp = plus_constant (tmp, UNITS_PER_WORD);
5807 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
5808
5809 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
5810 emit_move_insn (hard_frame_pointer_rtx, tmp);
5811
5812 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
5813 const0_rtx, style);
5814 }
5815 else
5816 {
5817 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
5818 tmp = plus_constant (tmp, (frame.to_allocate
5819 + frame.nregs * UNITS_PER_WORD));
5820 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
5821 }
5822 }
5823 else if (!frame_pointer_needed)
5824 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5825 GEN_INT (frame.to_allocate
5826 + frame.nregs * UNITS_PER_WORD),
5827 style);
5828 /* If not an i386, mov & pop is faster than "leave". */
5829 else if (TARGET_USE_LEAVE || optimize_size
5830 || !cfun->machine->use_fast_prologue_epilogue)
5831 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5832 else
5833 {
5834 pro_epilogue_adjust_stack (stack_pointer_rtx,
5835 hard_frame_pointer_rtx,
5836 const0_rtx, style);
5837 if (TARGET_64BIT)
5838 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5839 else
5840 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5841 }
5842 }
5843 else
5844 {
5845 /* First step is to deallocate the stack frame so that we can
5846 pop the registers. */
5847 if (!sp_valid)
5848 {
5849 gcc_assert (frame_pointer_needed);
5850 pro_epilogue_adjust_stack (stack_pointer_rtx,
5851 hard_frame_pointer_rtx,
5852 GEN_INT (offset), style);
5853 }
5854 else if (frame.to_allocate)
5855 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5856 GEN_INT (frame.to_allocate), style);
5857
5858 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5859 if (ix86_save_reg (regno, false))
5860 {
5861 if (TARGET_64BIT)
5862 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
5863 else
5864 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
5865 }
5866 if (frame_pointer_needed)
5867 {
5868 /* Leave results in shorter dependency chains on CPUs that are
5869 able to grok it fast. */
5870 if (TARGET_USE_LEAVE)
5871 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5872 else if (TARGET_64BIT)
5873 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5874 else
5875 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5876 }
5877 }
5878
5879 if (cfun->machine->force_align_arg_pointer)
5880 {
5881 emit_insn (gen_addsi3 (stack_pointer_rtx,
5882 cfun->machine->force_align_arg_pointer,
5883 GEN_INT (-4)));
5884 }
5885
5886 /* Sibcall epilogues don't want a return instruction. */
5887 if (style == 0)
5888 return;
5889
5890 if (current_function_pops_args && current_function_args_size)
5891 {
5892 rtx popc = GEN_INT (current_function_pops_args);
5893
5894 /* i386 can only pop 64K bytes. If asked to pop more, pop
5895 return address, do explicit add, and jump indirectly to the
5896 caller. */
5897
5898 if (current_function_pops_args >= 65536)
5899 {
5900 rtx ecx = gen_rtx_REG (SImode, 2);
5901
5902 /* There is no "pascal" calling convention in 64bit ABI. */
5903 gcc_assert (!TARGET_64BIT);
5904
5905 emit_insn (gen_popsi1 (ecx));
5906 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
5907 emit_jump_insn (gen_return_indirect_internal (ecx));
5908 }
5909 else
5910 emit_jump_insn (gen_return_pop_internal (popc));
5911 }
5912 else
5913 emit_jump_insn (gen_return_internal ());
5914 }
5915
5916 /* Reset state that compiling the function may have modified. */
5917
5918 static void
5919 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
5920 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5921 {
5922 if (pic_offset_table_rtx)
5923 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
5924 #if TARGET_MACHO
5925 /* Mach-O doesn't support labels at the end of objects, so if
5926 it looks like we might want one, insert a NOP. */
5927 {
5928 rtx insn = get_last_insn ();
5929 while (insn
5930 && NOTE_P (insn)
5931 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED_LABEL)
5932 insn = PREV_INSN (insn);
5933 if (insn
5934 && (LABEL_P (insn)
5935 || (NOTE_P (insn)
5936 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL)))
5937 fputs ("\tnop\n", file);
5938 }
5939 #endif
5940
5941 }
5942 \f
5943 /* Extract the parts of an RTL expression that is a valid memory address
5944 for an instruction. Return 0 if the structure of the address is
5945 grossly off. Return -1 if the address contains ASHIFT, so it is not
5946 strictly valid, but is still used for computing the length of the lea instruction. */
5947
5948 int
5949 ix86_decompose_address (rtx addr, struct ix86_address *out)
5950 {
5951 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
5952 rtx base_reg, index_reg;
5953 HOST_WIDE_INT scale = 1;
5954 rtx scale_rtx = NULL_RTX;
5955 int retval = 1;
5956 enum ix86_address_seg seg = SEG_DEFAULT;
5957
5958 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
5959 base = addr;
5960 else if (GET_CODE (addr) == PLUS)
5961 {
5962 rtx addends[4], op;
5963 int n = 0, i;
5964
5965 op = addr;
5966 do
5967 {
5968 if (n >= 4)
5969 return 0;
5970 addends[n++] = XEXP (op, 1);
5971 op = XEXP (op, 0);
5972 }
5973 while (GET_CODE (op) == PLUS);
5974 if (n >= 4)
5975 return 0;
5976 addends[n] = op;
5977
5978 for (i = n; i >= 0; --i)
5979 {
5980 op = addends[i];
5981 switch (GET_CODE (op))
5982 {
5983 case MULT:
5984 if (index)
5985 return 0;
5986 index = XEXP (op, 0);
5987 scale_rtx = XEXP (op, 1);
5988 break;
5989
5990 case UNSPEC:
5991 if (XINT (op, 1) == UNSPEC_TP
5992 && TARGET_TLS_DIRECT_SEG_REFS
5993 && seg == SEG_DEFAULT)
5994 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
5995 else
5996 return 0;
5997 break;
5998
5999 case REG:
6000 case SUBREG:
6001 if (!base)
6002 base = op;
6003 else if (!index)
6004 index = op;
6005 else
6006 return 0;
6007 break;
6008
6009 case CONST:
6010 case CONST_INT:
6011 case SYMBOL_REF:
6012 case LABEL_REF:
6013 if (disp)
6014 return 0;
6015 disp = op;
6016 break;
6017
6018 default:
6019 return 0;
6020 }
6021 }
6022 }
6023 else if (GET_CODE (addr) == MULT)
6024 {
6025 index = XEXP (addr, 0); /* index*scale */
6026 scale_rtx = XEXP (addr, 1);
6027 }
6028 else if (GET_CODE (addr) == ASHIFT)
6029 {
6030 rtx tmp;
6031
6032 /* We're called for lea too, which implements ashift on occasion. */
6033 index = XEXP (addr, 0);
6034 tmp = XEXP (addr, 1);
6035 if (GET_CODE (tmp) != CONST_INT)
6036 return 0;
6037 scale = INTVAL (tmp);
6038 if ((unsigned HOST_WIDE_INT) scale > 3)
6039 return 0;
6040 scale = 1 << scale;
6041 retval = -1;
6042 }
6043 else
6044 disp = addr; /* displacement */
6045
6046 /* Extract the integral value of scale. */
6047 if (scale_rtx)
6048 {
6049 if (GET_CODE (scale_rtx) != CONST_INT)
6050 return 0;
6051 scale = INTVAL (scale_rtx);
6052 }
6053
6054 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
6055 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
6056
6057 /* Allow the arg pointer and stack pointer as the index if there is no scaling. */
6058 if (base_reg && index_reg && scale == 1
6059 && (index_reg == arg_pointer_rtx
6060 || index_reg == frame_pointer_rtx
6061 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
6062 {
6063 rtx tmp;
6064 tmp = base, base = index, index = tmp;
6065 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
6066 }
6067
6068 /* Special case: %ebp cannot be encoded as a base without a displacement. */
6069 if ((base_reg == hard_frame_pointer_rtx
6070 || base_reg == frame_pointer_rtx
6071 || base_reg == arg_pointer_rtx) && !disp)
6072 disp = const0_rtx;
6073
6074 /* Special case: on K6, [%esi] makes the instruction vector decoded.
6075 Avoid this by transforming to [%esi+0]. */
6076 if (ix86_tune == PROCESSOR_K6 && !optimize_size
6077 && base_reg && !index_reg && !disp
6078 && REG_P (base_reg)
6079 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
6080 disp = const0_rtx;
6081
6082 /* Special case: encode reg+reg instead of reg*2. */
6083 if (!base && index && scale && scale == 2)
6084 base = index, base_reg = index_reg, scale = 1;
6085
6086 /* Special case: scaling cannot be encoded without base or displacement. */
6087 if (!base && !disp && index && scale != 1)
6088 disp = const0_rtx;
6089
6090 out->base = base;
6091 out->index = index;
6092 out->disp = disp;
6093 out->scale = scale;
6094 out->seg = seg;
6095
6096 return retval;
6097 }
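
/* Worked example: the address RTL
	(plus (plus (reg %ebx) (mult (reg %esi) (const_int 4))) (const_int 8))
   decomposes into base = %ebx, index = %esi, scale = 4, disp = 8,
   i.e. the operand 8(%ebx,%esi,4). */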
6098 \f
6099 /* Return the cost of the memory address X.
6100 For the i386, it is better to use a complex address than to let gcc copy
6101 the address into a reg and make a new pseudo. But not if the address
6102 requires two regs - that would mean more pseudos with longer
6103 lifetimes. */
6104 static int
6105 ix86_address_cost (rtx x)
6106 {
6107 struct ix86_address parts;
6108 int cost = 1;
6109 int ok = ix86_decompose_address (x, &parts);
6110
6111 gcc_assert (ok);
6112
6113 if (parts.base && GET_CODE (parts.base) == SUBREG)
6114 parts.base = SUBREG_REG (parts.base);
6115 if (parts.index && GET_CODE (parts.index) == SUBREG)
6116 parts.index = SUBREG_REG (parts.index);
6117
6118 /* More complex memory references are better. */
6119 if (parts.disp && parts.disp != const0_rtx)
6120 cost--;
6121 if (parts.seg != SEG_DEFAULT)
6122 cost--;
6123
6124 /* Attempt to minimize number of registers in the address. */
6125 if ((parts.base
6126 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
6127 || (parts.index
6128 && (!REG_P (parts.index)
6129 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
6130 cost++;
6131
6132 if (parts.base
6133 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
6134 && parts.index
6135 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
6136 && parts.base != parts.index)
6137 cost++;
6138
6139 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
6140 since its predecode logic can't detect the length of such instructions
6141 and decoding degenerates to vector decoding. Increase the cost of such
6142 addresses here. The penalty is at least 2 cycles. It may be worthwhile
6143 to split such addresses or even to refuse them altogether.
6144
6145 The following addressing modes are affected:
6146 [base+scale*index]
6147 [scale*index+disp]
6148 [base+index]
6149
6150 The first and last cases may be avoidable by explicitly coding a zero
6151 displacement into the memory address, but I don't have an AMD-K6 machine
6152 handy to check this theory. */
6153
6154 if (TARGET_K6
6155 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
6156 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
6157 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
6158 cost += 10;
6159
6160 return cost;
6161 }
6162 \f
6163 /* If X is a machine specific address (i.e. a symbol or label being
6164 referenced as a displacement from the GOT implemented using an
6165 UNSPEC), then return the base term. Otherwise return X. */
6166
6167 rtx
6168 ix86_find_base_term (rtx x)
6169 {
6170 rtx term;
6171
6172 if (TARGET_64BIT)
6173 {
6174 if (GET_CODE (x) != CONST)
6175 return x;
6176 term = XEXP (x, 0);
6177 if (GET_CODE (term) == PLUS
6178 && (GET_CODE (XEXP (term, 1)) == CONST_INT
6179 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
6180 term = XEXP (term, 0);
6181 if (GET_CODE (term) != UNSPEC
6182 || XINT (term, 1) != UNSPEC_GOTPCREL)
6183 return x;
6184
6185 term = XVECEXP (term, 0, 0);
6186
6187 if (GET_CODE (term) != SYMBOL_REF
6188 && GET_CODE (term) != LABEL_REF)
6189 return x;
6190
6191 return term;
6192 }
6193
6194 term = ix86_delegitimize_address (x);
6195
6196 if (GET_CODE (term) != SYMBOL_REF
6197 && GET_CODE (term) != LABEL_REF)
6198 return x;
6199
6200 return term;
6201 }
6202
6203 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
6204 this is used to form addresses of local data when -fPIC is in
6205 use. */
6206
6207 static bool
6208 darwin_local_data_pic (rtx disp)
6209 {
6210 if (GET_CODE (disp) == MINUS)
6211 {
6212 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
6213 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
6214 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
6215 {
6216 const char *sym_name = XSTR (XEXP (disp, 1), 0);
6217 if (! strcmp (sym_name, "<pic base>"))
6218 return true;
6219 }
6220 }
6221
6222 return false;
6223 }
6224 \f
6225 /* Determine if a given RTX is a valid constant. We already know this
6226 satisfies CONSTANT_P. */
6227
6228 bool
6229 legitimate_constant_p (rtx x)
6230 {
6231 switch (GET_CODE (x))
6232 {
6233 case CONST:
6234 x = XEXP (x, 0);
6235
6236 if (GET_CODE (x) == PLUS)
6237 {
6238 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6239 return false;
6240 x = XEXP (x, 0);
6241 }
6242
6243 if (TARGET_MACHO && darwin_local_data_pic (x))
6244 return true;
6245
6246 /* Only some unspecs are valid as "constants". */
6247 if (GET_CODE (x) == UNSPEC)
6248 switch (XINT (x, 1))
6249 {
6250 case UNSPEC_GOTOFF:
6251 return TARGET_64BIT;
6252 case UNSPEC_TPOFF:
6253 case UNSPEC_NTPOFF:
6254 x = XVECEXP (x, 0, 0);
6255 return (GET_CODE (x) == SYMBOL_REF
6256 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
6257 case UNSPEC_DTPOFF:
6258 x = XVECEXP (x, 0, 0);
6259 return (GET_CODE (x) == SYMBOL_REF
6260 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
6261 default:
6262 return false;
6263 }
6264
6265 /* We must have drilled down to a symbol. */
6266 if (GET_CODE (x) == LABEL_REF)
6267 return true;
6268 if (GET_CODE (x) != SYMBOL_REF)
6269 return false;
6270 /* FALLTHRU */
6271
6272 case SYMBOL_REF:
6273 /* TLS symbols are never valid. */
6274 if (SYMBOL_REF_TLS_MODEL (x))
6275 return false;
6276 break;
6277
6278 case CONST_DOUBLE:
6279 if (GET_MODE (x) == TImode
6280 && x != CONST0_RTX (TImode)
6281 && !TARGET_64BIT)
6282 return false;
6283 break;
6284
6285 case CONST_VECTOR:
6286 if (x == CONST0_RTX (GET_MODE (x)))
6287 return true;
6288 return false;
6289
6290 default:
6291 break;
6292 }
6293
6294 /* Otherwise we handle everything else in the move patterns. */
6295 return true;
6296 }
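
/* Some examples of the above: (symbol_ref "foo") is a legitimate
   constant unless foo is thread-local; (const (plus (symbol_ref "foo")
   (const_int 4))) is likewise fine; and a non-zero TImode CONST_DOUBLE
   is rejected on 32-bit targets. "foo" is of course just a placeholder. */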
6297
6298 /* Determine if it's legal to put X into the constant pool. This
6299 is not possible for the address of thread-local symbols, which
6300 is checked above. */
6301
6302 static bool
6303 ix86_cannot_force_const_mem (rtx x)
6304 {
6305 /* We can always put integral constants and vectors in memory. */
6306 switch (GET_CODE (x))
6307 {
6308 case CONST_INT:
6309 case CONST_DOUBLE:
6310 case CONST_VECTOR:
6311 return false;
6312
6313 default:
6314 break;
6315 }
6316 return !legitimate_constant_p (x);
6317 }
6318
6319 /* Determine if a given RTX is a valid constant address. */
6320
6321 bool
6322 constant_address_p (rtx x)
6323 {
6324 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
6325 }
6326
6327 /* Nonzero if the constant value X is a legitimate general operand
6328 when generating PIC code. It is given that flag_pic is on and
6329 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
6330
6331 bool
6332 legitimate_pic_operand_p (rtx x)
6333 {
6334 rtx inner;
6335
6336 switch (GET_CODE (x))
6337 {
6338 case CONST:
6339 inner = XEXP (x, 0);
6340 if (GET_CODE (inner) == PLUS
6341 && GET_CODE (XEXP (inner, 1)) == CONST_INT)
6342 inner = XEXP (inner, 0);
6343
6344 /* Only some unspecs are valid as "constants". */
6345 if (GET_CODE (inner) == UNSPEC)
6346 switch (XINT (inner, 1))
6347 {
6348 case UNSPEC_GOTOFF:
6349 return TARGET_64BIT;
6350 case UNSPEC_TPOFF:
6351 x = XVECEXP (inner, 0, 0);
6352 return (GET_CODE (x) == SYMBOL_REF
6353 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
6354 default:
6355 return false;
6356 }
6357 /* FALLTHRU */
6358
6359 case SYMBOL_REF:
6360 case LABEL_REF:
6361 return legitimate_pic_address_disp_p (x);
6362
6363 default:
6364 return true;
6365 }
6366 }
6367
6368 /* Determine if a given CONST RTX is a valid memory displacement
6369 in PIC mode. */
6370
6371 int
6372 legitimate_pic_address_disp_p (rtx disp)
6373 {
6374 bool saw_plus;
6375
6376 /* In 64bit mode we can allow direct addresses of symbols and labels
6377 when they are not dynamic symbols. */
6378 if (TARGET_64BIT)
6379 {
6380 rtx op0 = disp, op1;
6381
6382 switch (GET_CODE (disp))
6383 {
6384 case LABEL_REF:
6385 return true;
6386
6387 case CONST:
6388 if (GET_CODE (XEXP (disp, 0)) != PLUS)
6389 break;
6390 op0 = XEXP (XEXP (disp, 0), 0);
6391 op1 = XEXP (XEXP (disp, 0), 1);
6392 if (GET_CODE (op1) != CONST_INT
6393 || INTVAL (op1) >= 16*1024*1024
6394 || INTVAL (op1) < -16*1024*1024)
6395 break;
6396 if (GET_CODE (op0) == LABEL_REF)
6397 return true;
6398 if (GET_CODE (op0) != SYMBOL_REF)
6399 break;
6400 /* FALLTHRU */
6401
6402 case SYMBOL_REF:
6403 /* TLS references should always be enclosed in UNSPEC. */
6404 if (SYMBOL_REF_TLS_MODEL (op0))
6405 return false;
6406 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0))
6407 return true;
6408 break;
6409
6410 default:
6411 break;
6412 }
6413 }
6414 if (GET_CODE (disp) != CONST)
6415 return 0;
6416 disp = XEXP (disp, 0);
6417
6418 if (TARGET_64BIT)
6419 {
6420 /* It is unsafe to allow PLUS expressions here because of the limited
6421 allowed distance within GOT tables. We should not need them anyway. */
6422 if (GET_CODE (disp) != UNSPEC
6423 || (XINT (disp, 1) != UNSPEC_GOTPCREL
6424 && XINT (disp, 1) != UNSPEC_GOTOFF))
6425 return 0;
6426
6427 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
6428 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
6429 return 0;
6430 return 1;
6431 }
6432
6433 saw_plus = false;
6434 if (GET_CODE (disp) == PLUS)
6435 {
6436 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
6437 return 0;
6438 disp = XEXP (disp, 0);
6439 saw_plus = true;
6440 }
6441
6442 if (TARGET_MACHO && darwin_local_data_pic (disp))
6443 return 1;
6444
6445 if (GET_CODE (disp) != UNSPEC)
6446 return 0;
6447
6448 switch (XINT (disp, 1))
6449 {
6450 case UNSPEC_GOT:
6451 if (saw_plus)
6452 return false;
6453 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
6454 case UNSPEC_GOTOFF:
6455 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
6456 While the ABI also specifies a 32bit relocation, we don't produce it in
6457 the small PIC model at all. */
6458 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
6459 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
6460 && !TARGET_64BIT)
6461 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
6462 return false;
6463 case UNSPEC_GOTTPOFF:
6464 case UNSPEC_GOTNTPOFF:
6465 case UNSPEC_INDNTPOFF:
6466 if (saw_plus)
6467 return false;
6468 disp = XVECEXP (disp, 0, 0);
6469 return (GET_CODE (disp) == SYMBOL_REF
6470 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
6471 case UNSPEC_NTPOFF:
6472 disp = XVECEXP (disp, 0, 0);
6473 return (GET_CODE (disp) == SYMBOL_REF
6474 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
6475 case UNSPEC_DTPOFF:
6476 disp = XVECEXP (disp, 0, 0);
6477 return (GET_CODE (disp) == SYMBOL_REF
6478 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
6479 }
6480
6481 return 0;
6482 }
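
/* Typical accepted displacements look like
	(const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF))	foo@GOTOFF (local symbols)
	(const (unspec [(symbol_ref "foo")] UNSPEC_GOT))	foo@GOT
   on 32-bit targets, while 64-bit PIC only accepts the GOTPCREL and
   GOTOFF unspecs checked above (e.g. foo@GOTPCREL(%rip)). */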
6483
6484 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
6485 memory address for an instruction. The MODE argument is the machine mode
6486 for the MEM expression that wants to use this address.
6487
6488 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
6489 convert common non-canonical forms to canonical form so that they will
6490 be recognized. */
6491
6492 int
6493 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
6494 {
6495 struct ix86_address parts;
6496 rtx base, index, disp;
6497 HOST_WIDE_INT scale;
6498 const char *reason = NULL;
6499 rtx reason_rtx = NULL_RTX;
6500
6501 if (TARGET_DEBUG_ADDR)
6502 {
6503 fprintf (stderr,
6504 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
6505 GET_MODE_NAME (mode), strict);
6506 debug_rtx (addr);
6507 }
6508
6509 if (ix86_decompose_address (addr, &parts) <= 0)
6510 {
6511 reason = "decomposition failed";
6512 goto report_error;
6513 }
6514
6515 base = parts.base;
6516 index = parts.index;
6517 disp = parts.disp;
6518 scale = parts.scale;
6519
6520 /* Validate base register.
6521
6522 Don't allow SUBREG's that span more than a word here. It can lead to spill
6523 failures when the base is one word out of a two word structure, which is
6524 represented internally as a DImode int. */
6525
6526 if (base)
6527 {
6528 rtx reg;
6529 reason_rtx = base;
6530
6531 if (REG_P (base))
6532 reg = base;
6533 else if (GET_CODE (base) == SUBREG
6534 && REG_P (SUBREG_REG (base))
6535 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
6536 <= UNITS_PER_WORD)
6537 reg = SUBREG_REG (base);
6538 else
6539 {
6540 reason = "base is not a register";
6541 goto report_error;
6542 }
6543
6544 if (GET_MODE (base) != Pmode)
6545 {
6546 reason = "base is not in Pmode";
6547 goto report_error;
6548 }
6549
6550 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
6551 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
6552 {
6553 reason = "base is not valid";
6554 goto report_error;
6555 }
6556 }
6557
6558 /* Validate index register.
6559
6560 Don't allow SUBREG's that span more than a word here -- same as above. */
6561
6562 if (index)
6563 {
6564 rtx reg;
6565 reason_rtx = index;
6566
6567 if (REG_P (index))
6568 reg = index;
6569 else if (GET_CODE (index) == SUBREG
6570 && REG_P (SUBREG_REG (index))
6571 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
6572 <= UNITS_PER_WORD)
6573 reg = SUBREG_REG (index);
6574 else
6575 {
6576 reason = "index is not a register";
6577 goto report_error;
6578 }
6579
6580 if (GET_MODE (index) != Pmode)
6581 {
6582 reason = "index is not in Pmode";
6583 goto report_error;
6584 }
6585
6586 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
6587 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
6588 {
6589 reason = "index is not valid";
6590 goto report_error;
6591 }
6592 }
6593
6594 /* Validate scale factor. */
6595 if (scale != 1)
6596 {
6597 reason_rtx = GEN_INT (scale);
6598 if (!index)
6599 {
6600 reason = "scale without index";
6601 goto report_error;
6602 }
6603
6604 if (scale != 2 && scale != 4 && scale != 8)
6605 {
6606 reason = "scale is not a valid multiplier";
6607 goto report_error;
6608 }
6609 }
6610
6611 /* Validate displacement. */
6612 if (disp)
6613 {
6614 reason_rtx = disp;
6615
6616 if (GET_CODE (disp) == CONST
6617 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
6618 switch (XINT (XEXP (disp, 0), 1))
6619 {
6620 /* Refuse GOTOFF and GOT in 64bit mode since they are always 64bit when
6621 used. While the ABI also specifies 32bit relocations, we don't produce
6622 them at all and use IP-relative addressing instead. */
6623 case UNSPEC_GOT:
6624 case UNSPEC_GOTOFF:
6625 gcc_assert (flag_pic);
6626 if (!TARGET_64BIT)
6627 goto is_legitimate_pic;
6628 reason = "64bit address unspec";
6629 goto report_error;
6630
6631 case UNSPEC_GOTPCREL:
6632 gcc_assert (flag_pic);
6633 goto is_legitimate_pic;
6634
6635 case UNSPEC_GOTTPOFF:
6636 case UNSPEC_GOTNTPOFF:
6637 case UNSPEC_INDNTPOFF:
6638 case UNSPEC_NTPOFF:
6639 case UNSPEC_DTPOFF:
6640 break;
6641
6642 default:
6643 reason = "invalid address unspec";
6644 goto report_error;
6645 }
6646
6647 else if (SYMBOLIC_CONST (disp)
6648 && (flag_pic
6649 || (TARGET_MACHO
6650 #if TARGET_MACHO
6651 && MACHOPIC_INDIRECT
6652 && !machopic_operand_p (disp)
6653 #endif
6654 )))
6655 {
6656
6657 is_legitimate_pic:
6658 if (TARGET_64BIT && (index || base))
6659 {
6660 /* foo@dtpoff(%rX) is ok. */
6661 if (GET_CODE (disp) != CONST
6662 || GET_CODE (XEXP (disp, 0)) != PLUS
6663 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
6664 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
6665 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
6666 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
6667 {
6668 reason = "non-constant pic memory reference";
6669 goto report_error;
6670 }
6671 }
6672 else if (! legitimate_pic_address_disp_p (disp))
6673 {
6674 reason = "displacement is an invalid pic construct";
6675 goto report_error;
6676 }
6677
6678 /* This code used to verify that a symbolic pic displacement
6679 includes the pic_offset_table_rtx register.
6680
6681 While this is a good idea, unfortunately these constructs may
6682 be created by the "adds using lea" optimization for incorrect
6683 code like:
6684
6685 int a;
6686 int foo(int i)
6687 {
6688 return *(&a+i);
6689 }
6690
6691 This code is nonsensical, but results in addressing the
6692 GOT table with pic_offset_table_rtx as the base. We can't
6693 just refuse it easily, since it gets matched by the
6694 "addsi3" pattern, which later gets split into an lea when
6695 the output register differs from the input. While this
6696 could be handled by a separate addsi pattern for that case
6697 which never results in an lea, disabling this test seems to
6698 be the easier and correct fix for the crash. */
6699 }
6700 else if (GET_CODE (disp) != LABEL_REF
6701 && GET_CODE (disp) != CONST_INT
6702 && (GET_CODE (disp) != CONST
6703 || !legitimate_constant_p (disp))
6704 && (GET_CODE (disp) != SYMBOL_REF
6705 || !legitimate_constant_p (disp)))
6706 {
6707 reason = "displacement is not constant";
6708 goto report_error;
6709 }
6710 else if (TARGET_64BIT
6711 && !x86_64_immediate_operand (disp, VOIDmode))
6712 {
6713 reason = "displacement is out of range";
6714 goto report_error;
6715 }
6716 }
6717
6718 /* Everything looks valid. */
6719 if (TARGET_DEBUG_ADDR)
6720 fprintf (stderr, "Success.\n");
6721 return TRUE;
6722
6723 report_error:
6724 if (TARGET_DEBUG_ADDR)
6725 {
6726 fprintf (stderr, "Error: %s\n", reason);
6727 debug_rtx (reason_rtx);
6728 }
6729 return FALSE;
6730 }
6731 \f
6732 /* Return a unique alias set for the GOT. */
6733
6734 static HOST_WIDE_INT
6735 ix86_GOT_alias_set (void)
6736 {
6737 static HOST_WIDE_INT set = -1;
6738 if (set == -1)
6739 set = new_alias_set ();
6740 return set;
6741 }
6742
6743 /* Return a legitimate reference for ORIG (an address) using the
6744 register REG. If REG is 0, a new pseudo is generated.
6745
6746 There are two types of references that must be handled:
6747
6748 1. Global data references must load the address from the GOT, via
6749 the PIC reg. An insn is emitted to do this load, and the reg is
6750 returned.
6751
6752 2. Static data references, constant pool addresses, and code labels
6753 compute the address as an offset from the GOT, whose base is in
6754 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
6755 differentiate them from global data objects. The returned
6756 address is the PIC reg + an unspec constant.
6757
6758 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
6759 reg also appears in the address. */
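/* As an illustration (assuming the usual ELF/IA-32 conventions), case 1
typically ends up as a load such as "movl foo@GOT(%ebx), %reg", while
case 2 becomes an address computation of the form
"leal bar@GOTOFF(%ebx), %reg".  */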
6760
6761 static rtx
6762 legitimize_pic_address (rtx orig, rtx reg)
6763 {
6764 rtx addr = orig;
6765 rtx new = orig;
6766 rtx base;
6767
6768 #if TARGET_MACHO
6769 if (TARGET_MACHO && !TARGET_64BIT)
6770 {
6771 if (reg == 0)
6772 reg = gen_reg_rtx (Pmode);
6773 /* Use the generic Mach-O PIC machinery. */
6774 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
6775 }
6776 #endif
6777
6778 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
6779 new = addr;
6780 else if (TARGET_64BIT
6781 && ix86_cmodel != CM_SMALL_PIC
6782 && local_symbolic_operand (addr, Pmode))
6783 {
6784 rtx tmpreg;
6785 /* This symbol may be referenced via a displacement from the PIC
6786 base address (@GOTOFF). */
6787
6788 if (reload_in_progress)
6789 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6790 if (GET_CODE (addr) == CONST)
6791 addr = XEXP (addr, 0);
6792 if (GET_CODE (addr) == PLUS)
6793 {
6794 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6795 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6796 }
6797 else
6798 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6799 new = gen_rtx_CONST (Pmode, new);
6800 if (!reg)
6801 tmpreg = gen_reg_rtx (Pmode);
6802 else
6803 tmpreg = reg;
6804 emit_move_insn (tmpreg, new);
6805
6806 if (reg != 0)
6807 {
6808 new = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
6809 tmpreg, 1, OPTAB_DIRECT);
6810 new = reg;
6811 }
6812 else new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
6813 }
6814 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
6815 {
6816 /* This symbol may be referenced via a displacement from the PIC
6817 base address (@GOTOFF). */
6818
6819 if (reload_in_progress)
6820 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6821 if (GET_CODE (addr) == CONST)
6822 addr = XEXP (addr, 0);
6823 if (GET_CODE (addr) == PLUS)
6824 {
6825 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6826 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6827 }
6828 else
6829 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6830 new = gen_rtx_CONST (Pmode, new);
6831 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6832
6833 if (reg != 0)
6834 {
6835 emit_move_insn (reg, new);
6836 new = reg;
6837 }
6838 }
6839 else if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
6840 {
6841 if (TARGET_64BIT)
6842 {
6843 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
6844 new = gen_rtx_CONST (Pmode, new);
6845 new = gen_const_mem (Pmode, new);
6846 set_mem_alias_set (new, ix86_GOT_alias_set ());
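/* NEW is now (mem (const (unspec [symbol] UNSPEC_GOTPCREL))), which the
output machinery prints as "foo@GOTPCREL(%rip)", i.e. a RIP-relative
load of the symbol's GOT slot.  */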
6847
6848 if (reg == 0)
6849 reg = gen_reg_rtx (Pmode);
6850 /* Use gen_movsi directly, otherwise the address is loaded
6851 into a register for CSE.  We don't want to CSE these addresses;
6852 instead we CSE addresses from the GOT table, so skip this. */
6853 emit_insn (gen_movsi (reg, new));
6854 new = reg;
6855 }
6856 else
6857 {
6858 /* This symbol must be referenced via a load from the
6859 Global Offset Table (@GOT). */
6860
6861 if (reload_in_progress)
6862 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6863 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
6864 new = gen_rtx_CONST (Pmode, new);
6865 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6866 new = gen_const_mem (Pmode, new);
6867 set_mem_alias_set (new, ix86_GOT_alias_set ());
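/* NEW is now (mem (plus pic_offset_table_rtx
(const (unspec [symbol] UNSPEC_GOT)))), i.e. a load of the symbol's
address from its GOT slot at foo@GOT(%ebx).  */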
6868
6869 if (reg == 0)
6870 reg = gen_reg_rtx (Pmode);
6871 emit_move_insn (reg, new);
6872 new = reg;
6873 }
6874 }
6875 else
6876 {
6877 if (GET_CODE (addr) == CONST_INT
6878 && !x86_64_immediate_operand (addr, VOIDmode))
6879 {
6880 if (reg)
6881 {
6882 emit_move_insn (reg, addr);
6883 new = reg;
6884 }
6885 else
6886 new = force_reg (Pmode, addr);
6887 }
6888 else if (GET_CODE (addr) == CONST)
6889 {
6890 addr = XEXP (addr, 0);
6891
6892 /* We must match the constructs we generated before.  Assume the only
6893 unspecs that can get here are ours.  Not that we could do
6894 anything with them anyway.... */
6895 if (GET_CODE (addr) == UNSPEC
6896 || (GET_CODE (addr) == PLUS
6897 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
6898 return orig;
6899 gcc_assert (GET_CODE (addr) == PLUS);
6900 }
6901 if (GET_CODE (addr) == PLUS)
6902 {
6903 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
6904
6905 /* Check first to see if this is a constant offset from a @GOTOFF
6906 symbol reference. */
6907 if (local_symbolic_operand (op0, Pmode)
6908 && GET_CODE (op1) == CONST_INT)
6909 {
6910 if (!TARGET_64BIT)
6911 {
6912 if (reload_in_progress)
6913 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6914 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
6915 UNSPEC_GOTOFF);
6916 new = gen_rtx_PLUS (Pmode, new, op1);
6917 new = gen_rtx_CONST (Pmode, new);
6918 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6919
6920 if (reg != 0)
6921 {
6922 emit_move_insn (reg, new);
6923 new = reg;
6924 }
6925 }
6926 else
6927 {
6928 if (INTVAL (op1) < -16*1024*1024
6929 || INTVAL (op1) >= 16*1024*1024)
6930 {
6931 if (!x86_64_immediate_operand (op1, Pmode))
6932 op1 = force_reg (Pmode, op1);
6933 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
6934 }
6935 }
6936 }
6937 else
6938 {
6939 base = legitimize_pic_address (XEXP (addr, 0), reg);
6940 new = legitimize_pic_address (XEXP (addr, 1),
6941 base == reg ? NULL_RTX : reg);
6942
6943 if (GET_CODE (new) == CONST_INT)
6944 new = plus_constant (base, INTVAL (new));
6945 else
6946 {
6947 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
6948 {
6949 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
6950 new = XEXP (new, 1);
6951 }
6952 new = gen_rtx_PLUS (Pmode, base, new);
6953 }
6954 }
6955 }
6956 }
6957 return new;
6958 }
6959 \f
6960 /* Load the thread pointer. If TO_REG is true, force it into a register. */
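/* On GNU systems the thread pointer lives in a segment register
(%gs for 32-bit code, %fs for 64-bit code), so the UNSPEC_TP built
below is eventually printed as a %gs:0 / %fs:0 access by the move
patterns.  */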
6961
6962 static rtx
6963 get_thread_pointer (int to_reg)
6964 {
6965 rtx tp, reg, insn;
6966
6967 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
6968 if (!to_reg)
6969 return tp;
6970
6971 reg = gen_reg_rtx (Pmode);
6972 insn = gen_rtx_SET (VOIDmode, reg, tp);
6973 insn = emit_insn (insn);
6974
6975 return reg;
6976 }
6977
6978 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
6979 false if we expect this to be used for a memory address and true if
6980 we expect to load the address into a register. */
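/* As a rough example of what the models below expand to on IA-32 with
GNU TLS and no PIC: initial-exec loads the variable's TP offset with
"movl x@indntpoff, %reg" and adds the thread pointer, while local-exec
just uses the link-time constant "x@ntpoff" (or "x@tpoff" for the
non-GNU variant).  */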
6981
6982 static rtx
6983 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
6984 {
6985 rtx dest, base, off, pic, tp;
6986 int type;
6987
6988 switch (model)
6989 {
6990 case TLS_MODEL_GLOBAL_DYNAMIC:
6991 dest = gen_reg_rtx (Pmode);
6992 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
6993
6994 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
6995 {
6996 rtx rax = gen_rtx_REG (Pmode, 0), insns;
6997
6998 start_sequence ();
6999 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
7000 insns = get_insns ();
7001 end_sequence ();
7002
7003 emit_libcall_block (insns, dest, rax, x);
7004 }
7005 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7006 emit_insn (gen_tls_global_dynamic_64 (dest, x));
7007 else
7008 emit_insn (gen_tls_global_dynamic_32 (dest, x));
7009
7010 if (TARGET_GNU2_TLS)
7011 {
7012 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
7013
7014 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7015 }
7016 break;
7017
7018 case TLS_MODEL_LOCAL_DYNAMIC:
7019 base = gen_reg_rtx (Pmode);
7020 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
7021
7022 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
7023 {
7024 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
7025
7026 start_sequence ();
7027 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
7028 insns = get_insns ();
7029 end_sequence ();
7030
7031 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
7032 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
7033 emit_libcall_block (insns, base, rax, note);
7034 }
7035 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7036 emit_insn (gen_tls_local_dynamic_base_64 (base));
7037 else
7038 emit_insn (gen_tls_local_dynamic_base_32 (base));
7039
7040 if (TARGET_GNU2_TLS)
7041 {
7042 rtx x = ix86_tls_module_base ();
7043
7044 set_unique_reg_note (get_last_insn (), REG_EQUIV,
7045 gen_rtx_MINUS (Pmode, x, tp));
7046 }
7047
7048 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
7049 off = gen_rtx_CONST (Pmode, off);
7050
7051 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
7052
7053 if (TARGET_GNU2_TLS)
7054 {
7055 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
7056
7057 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7058 }
7059
7060 break;
7061
7062 case TLS_MODEL_INITIAL_EXEC:
7063 if (TARGET_64BIT)
7064 {
7065 pic = NULL;
7066 type = UNSPEC_GOTNTPOFF;
7067 }
7068 else if (flag_pic)
7069 {
7070 if (reload_in_progress)
7071 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
7072 pic = pic_offset_table_rtx;
7073 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
7074 }
7075 else if (!TARGET_ANY_GNU_TLS)
7076 {
7077 pic = gen_reg_rtx (Pmode);
7078 emit_insn (gen_set_got (pic));
7079 type = UNSPEC_GOTTPOFF;
7080 }
7081 else
7082 {
7083 pic = NULL;
7084 type = UNSPEC_INDNTPOFF;
7085 }
7086
7087 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
7088 off = gen_rtx_CONST (Pmode, off);
7089 if (pic)
7090 off = gen_rtx_PLUS (Pmode, pic, off);
7091 off = gen_const_mem (Pmode, off);
7092 set_mem_alias_set (off, ix86_GOT_alias_set ());
7093
7094 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7095 {
7096 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7097 off = force_reg (Pmode, off);
7098 return gen_rtx_PLUS (Pmode, base, off);
7099 }
7100 else
7101 {
7102 base = get_thread_pointer (true);
7103 dest = gen_reg_rtx (Pmode);
7104 emit_insn (gen_subsi3 (dest, base, off));
7105 }
7106 break;
7107
7108 case TLS_MODEL_LOCAL_EXEC:
7109 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
7110 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7111 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
7112 off = gen_rtx_CONST (Pmode, off);
7113
7114 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7115 {
7116 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7117 return gen_rtx_PLUS (Pmode, base, off);
7118 }
7119 else
7120 {
7121 base = get_thread_pointer (true);
7122 dest = gen_reg_rtx (Pmode);
7123 emit_insn (gen_subsi3 (dest, base, off));
7124 }
7125 break;
7126
7127 default:
7128 gcc_unreachable ();
7129 }
7130
7131 return dest;
7132 }
7133
7134 /* Try machine-dependent ways of modifying an illegitimate address
7135 to be legitimate. If we find one, return the new, valid address.
7136 This macro is used in only one place: `memory_address' in explow.c.
7137
7138 OLDX is the address as it was before break_out_memory_refs was called.
7139 In some cases it is useful to look at this to decide what needs to be done.
7140
7141 MODE and WIN are passed so that this macro can use
7142 GO_IF_LEGITIMATE_ADDRESS.
7143
7144 It is always safe for this macro to do nothing. It exists to recognize
7145 opportunities to optimize the output.
7146
7147 For the 80386, we handle X+REG by loading X into a register R and
7148 using R+REG. R will go in a general reg and indexing will be used.
7149 However, if REG is a broken-out memory address or multiplication,
7150 nothing needs to be done because REG can certainly go in a general reg.
7151
7152 When -fpic is used, special handling is needed for symbolic references.
7153 See comments by legitimize_pic_address in i386.c for details. */
7154
7155 rtx
7156 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
7157 {
7158 int changed = 0;
7159 unsigned log;
7160
7161 if (TARGET_DEBUG_ADDR)
7162 {
7163 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
7164 GET_MODE_NAME (mode));
7165 debug_rtx (x);
7166 }
7167
7168 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
7169 if (log)
7170 return legitimize_tls_address (x, log, false);
7171 if (GET_CODE (x) == CONST
7172 && GET_CODE (XEXP (x, 0)) == PLUS
7173 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
7174 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
7175 {
7176 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
7177 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
7178 }
7179
7180 if (flag_pic && SYMBOLIC_CONST (x))
7181 return legitimize_pic_address (x, 0);
7182
7183 /* Canonicalize shifts by 0, 1, 2, 3 into multiply.  */
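/* E.g. (ashift (reg) (const_int 3)) becomes (mult (reg) (const_int 8)),
which matches the scaled-index part of an x86 address.  */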
7184 if (GET_CODE (x) == ASHIFT
7185 && GET_CODE (XEXP (x, 1)) == CONST_INT
7186 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
7187 {
7188 changed = 1;
7189 log = INTVAL (XEXP (x, 1));
7190 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
7191 GEN_INT (1 << log));
7192 }
7193
7194 if (GET_CODE (x) == PLUS)
7195 {
7196 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
7197
7198 if (GET_CODE (XEXP (x, 0)) == ASHIFT
7199 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7200 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
7201 {
7202 changed = 1;
7203 log = INTVAL (XEXP (XEXP (x, 0), 1));
7204 XEXP (x, 0) = gen_rtx_MULT (Pmode,
7205 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
7206 GEN_INT (1 << log));
7207 }
7208
7209 if (GET_CODE (XEXP (x, 1)) == ASHIFT
7210 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
7211 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
7212 {
7213 changed = 1;
7214 log = INTVAL (XEXP (XEXP (x, 1), 1));
7215 XEXP (x, 1) = gen_rtx_MULT (Pmode,
7216 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
7217 GEN_INT (1 << log));
7218 }
7219
7220 /* Put multiply first if it isn't already. */
7221 if (GET_CODE (XEXP (x, 1)) == MULT)
7222 {
7223 rtx tmp = XEXP (x, 0);
7224 XEXP (x, 0) = XEXP (x, 1);
7225 XEXP (x, 1) = tmp;
7226 changed = 1;
7227 }
7228
7229 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
7230 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
7231 created by virtual register instantiation, register elimination, and
7232 similar optimizations. */
7233 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
7234 {
7235 changed = 1;
7236 x = gen_rtx_PLUS (Pmode,
7237 gen_rtx_PLUS (Pmode, XEXP (x, 0),
7238 XEXP (XEXP (x, 1), 0)),
7239 XEXP (XEXP (x, 1), 1));
7240 }
7241
7242 /* Canonicalize
7243 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
7244 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
7245 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
7246 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
7247 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
7248 && CONSTANT_P (XEXP (x, 1)))
7249 {
7250 rtx constant;
7251 rtx other = NULL_RTX;
7252
7253 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
7254 {
7255 constant = XEXP (x, 1);
7256 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
7257 }
7258 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
7259 {
7260 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
7261 other = XEXP (x, 1);
7262 }
7263 else
7264 constant = 0;
7265
7266 if (constant)
7267 {
7268 changed = 1;
7269 x = gen_rtx_PLUS (Pmode,
7270 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
7271 XEXP (XEXP (XEXP (x, 0), 1), 0)),
7272 plus_constant (other, INTVAL (constant)));
7273 }
7274 }
7275
7276 if (changed && legitimate_address_p (mode, x, FALSE))
7277 return x;
7278
7279 if (GET_CODE (XEXP (x, 0)) == MULT)
7280 {
7281 changed = 1;
7282 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
7283 }
7284
7285 if (GET_CODE (XEXP (x, 1)) == MULT)
7286 {
7287 changed = 1;
7288 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
7289 }
7290
7291 if (changed
7292 && GET_CODE (XEXP (x, 1)) == REG
7293 && GET_CODE (XEXP (x, 0)) == REG)
7294 return x;
7295
7296 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
7297 {
7298 changed = 1;
7299 x = legitimize_pic_address (x, 0);
7300 }
7301
7302 if (changed && legitimate_address_p (mode, x, FALSE))
7303 return x;
7304
7305 if (GET_CODE (XEXP (x, 0)) == REG)
7306 {
7307 rtx temp = gen_reg_rtx (Pmode);
7308 rtx val = force_operand (XEXP (x, 1), temp);
7309 if (val != temp)
7310 emit_move_insn (temp, val);
7311
7312 XEXP (x, 1) = temp;
7313 return x;
7314 }
7315
7316 else if (GET_CODE (XEXP (x, 1)) == REG)
7317 {
7318 rtx temp = gen_reg_rtx (Pmode);
7319 rtx val = force_operand (XEXP (x, 0), temp);
7320 if (val != temp)
7321 emit_move_insn (temp, val);
7322
7323 XEXP (x, 0) = temp;
7324 return x;
7325 }
7326 }
7327
7328 return x;
7329 }
7330 \f
7331 /* Print an integer constant expression in assembler syntax. Addition
7332 and subtraction are the only arithmetic that may appear in these
7333 expressions. FILE is the stdio stream to write to, X is the rtx, and
7334 CODE is the operand print code from the output string. */
7335
7336 static void
7337 output_pic_addr_const (FILE *file, rtx x, int code)
7338 {
7339 char buf[256];
7340
7341 switch (GET_CODE (x))
7342 {
7343 case PC:
7344 gcc_assert (flag_pic);
7345 putc ('.', file);
7346 break;
7347
7348 case SYMBOL_REF:
7349 output_addr_const (file, x);
7350 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
7351 fputs ("@PLT", file);
7352 break;
7353
7354 case LABEL_REF:
7355 x = XEXP (x, 0);
7356 /* FALLTHRU */
7357 case CODE_LABEL:
7358 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
7359 assemble_name (asm_out_file, buf);
7360 break;
7361
7362 case CONST_INT:
7363 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
7364 break;
7365
7366 case CONST:
7367 /* This used to output parentheses around the expression,
7368 but that does not work on the 386 (either ATT or BSD assembler). */
7369 output_pic_addr_const (file, XEXP (x, 0), code);
7370 break;
7371
7372 case CONST_DOUBLE:
7373 if (GET_MODE (x) == VOIDmode)
7374 {
7375 /* We can use %d if the number is <32 bits and positive. */
7376 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
7377 fprintf (file, "0x%lx%08lx",
7378 (unsigned long) CONST_DOUBLE_HIGH (x),
7379 (unsigned long) CONST_DOUBLE_LOW (x));
7380 else
7381 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
7382 }
7383 else
7384 /* We can't handle floating point constants;
7385 PRINT_OPERAND must handle them. */
7386 output_operand_lossage ("floating constant misused");
7387 break;
7388
7389 case PLUS:
7390 /* Some assemblers need integer constants to appear first. */
7391 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
7392 {
7393 output_pic_addr_const (file, XEXP (x, 0), code);
7394 putc ('+', file);
7395 output_pic_addr_const (file, XEXP (x, 1), code);
7396 }
7397 else
7398 {
7399 gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
7400 output_pic_addr_const (file, XEXP (x, 1), code);
7401 putc ('+', file);
7402 output_pic_addr_const (file, XEXP (x, 0), code);
7403 }
7404 break;
7405
7406 case MINUS:
7407 if (!TARGET_MACHO)
7408 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
7409 output_pic_addr_const (file, XEXP (x, 0), code);
7410 putc ('-', file);
7411 output_pic_addr_const (file, XEXP (x, 1), code);
7412 if (!TARGET_MACHO)
7413 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
7414 break;
7415
7416 case UNSPEC:
7417 gcc_assert (XVECLEN (x, 0) == 1);
7418 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
7419 switch (XINT (x, 1))
7420 {
7421 case UNSPEC_GOT:
7422 fputs ("@GOT", file);
7423 break;
7424 case UNSPEC_GOTOFF:
7425 fputs ("@GOTOFF", file);
7426 break;
7427 case UNSPEC_GOTPCREL:
7428 fputs ("@GOTPCREL(%rip)", file);
7429 break;
7430 case UNSPEC_GOTTPOFF:
7431 /* FIXME: This might be @TPOFF in Sun ld too. */
7432 fputs ("@GOTTPOFF", file);
7433 break;
7434 case UNSPEC_TPOFF:
7435 fputs ("@TPOFF", file);
7436 break;
7437 case UNSPEC_NTPOFF:
7438 if (TARGET_64BIT)
7439 fputs ("@TPOFF", file);
7440 else
7441 fputs ("@NTPOFF", file);
7442 break;
7443 case UNSPEC_DTPOFF:
7444 fputs ("@DTPOFF", file);
7445 break;
7446 case UNSPEC_GOTNTPOFF:
7447 if (TARGET_64BIT)
7448 fputs ("@GOTTPOFF(%rip)", file);
7449 else
7450 fputs ("@GOTNTPOFF", file);
7451 break;
7452 case UNSPEC_INDNTPOFF:
7453 fputs ("@INDNTPOFF", file);
7454 break;
7455 default:
7456 output_operand_lossage ("invalid UNSPEC as operand");
7457 break;
7458 }
7459 break;
7460
7461 default:
7462 output_operand_lossage ("invalid expression as operand");
7463 }
7464 }
7465
7466 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
7467 We need to emit DTP-relative relocations. */
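/* E.g. for a 4-byte entry this emits ".long foo@DTPOFF"; the 8-byte
case appends ", 0" for the (zero) upper half.  */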
7468
7469 static void
7470 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
7471 {
7472 fputs (ASM_LONG, file);
7473 output_addr_const (file, x);
7474 fputs ("@DTPOFF", file);
7475 switch (size)
7476 {
7477 case 4:
7478 break;
7479 case 8:
7480 fputs (", 0", file);
7481 break;
7482 default:
7483 gcc_unreachable ();
7484 }
7485 }
7486
7487 /* In the name of slightly smaller debug output, and to cater to
7488 general assembler lossage, recognize PIC+GOTOFF and turn it back
7489 into a direct symbol reference.
7490
7491 On Darwin, this is necessary to avoid a crash, because Darwin
7492 has a different PIC label for each routine but the DWARF debugging
7493 information is not associated with any particular routine, so it's
7494 necessary to remove references to the PIC label from RTL stored by
7495 the DWARF output code. */
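/* E.g. (plus pic_offset_table_rtx (const (unspec [foo] UNSPEC_GOTOFF)))
is turned back into plain "foo", plus any constant addend that had been
folded into the displacement.  */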
7496
7497 static rtx
7498 ix86_delegitimize_address (rtx orig_x)
7499 {
7500 rtx x = orig_x;
7501 /* reg_addend is NULL or a multiple of some register. */
7502 rtx reg_addend = NULL_RTX;
7503 /* const_addend is NULL or a const_int. */
7504 rtx const_addend = NULL_RTX;
7505 /* This is the result, or NULL. */
7506 rtx result = NULL_RTX;
7507
7508 if (GET_CODE (x) == MEM)
7509 x = XEXP (x, 0);
7510
7511 if (TARGET_64BIT)
7512 {
7513 if (GET_CODE (x) != CONST
7514 || GET_CODE (XEXP (x, 0)) != UNSPEC
7515 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
7516 || GET_CODE (orig_x) != MEM)
7517 return orig_x;
7518 return XVECEXP (XEXP (x, 0), 0, 0);
7519 }
7520
7521 if (GET_CODE (x) != PLUS
7522 || GET_CODE (XEXP (x, 1)) != CONST)
7523 return orig_x;
7524
7525 if (GET_CODE (XEXP (x, 0)) == REG
7526 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
7527 /* %ebx + GOT/GOTOFF */
7528 ;
7529 else if (GET_CODE (XEXP (x, 0)) == PLUS)
7530 {
7531 /* %ebx + %reg * scale + GOT/GOTOFF */
7532 reg_addend = XEXP (x, 0);
7533 if (GET_CODE (XEXP (reg_addend, 0)) == REG
7534 && REGNO (XEXP (reg_addend, 0)) == PIC_OFFSET_TABLE_REGNUM)
7535 reg_addend = XEXP (reg_addend, 1);
7536 else if (GET_CODE (XEXP (reg_addend, 1)) == REG
7537 && REGNO (XEXP (reg_addend, 1)) == PIC_OFFSET_TABLE_REGNUM)
7538 reg_addend = XEXP (reg_addend, 0);
7539 else
7540 return orig_x;
7541 if (GET_CODE (reg_addend) != REG
7542 && GET_CODE (reg_addend) != MULT
7543 && GET_CODE (reg_addend) != ASHIFT)
7544 return orig_x;
7545 }
7546 else
7547 return orig_x;
7548
7549 x = XEXP (XEXP (x, 1), 0);
7550 if (GET_CODE (x) == PLUS
7551 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7552 {
7553 const_addend = XEXP (x, 1);
7554 x = XEXP (x, 0);
7555 }
7556
7557 if (GET_CODE (x) == UNSPEC
7558 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
7559 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
7560 result = XVECEXP (x, 0, 0);
7561
7562 if (TARGET_MACHO && darwin_local_data_pic (x)
7563 && GET_CODE (orig_x) != MEM)
7564 result = XEXP (x, 0);
7565
7566 if (! result)
7567 return orig_x;
7568
7569 if (const_addend)
7570 result = gen_rtx_PLUS (Pmode, result, const_addend);
7571 if (reg_addend)
7572 result = gen_rtx_PLUS (Pmode, reg_addend, result);
7573 return result;
7574 }
7575 \f
7576 static void
7577 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
7578 int fp, FILE *file)
7579 {
7580 const char *suffix;
7581
7582 if (mode == CCFPmode || mode == CCFPUmode)
7583 {
7584 enum rtx_code second_code, bypass_code;
7585 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
7586 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
7587 code = ix86_fp_compare_code_to_integer (code);
7588 mode = CCmode;
7589 }
7590 if (reverse)
7591 code = reverse_condition (code);
7592
7593 switch (code)
7594 {
7595 case EQ:
7596 suffix = "e";
7597 break;
7598 case NE:
7599 suffix = "ne";
7600 break;
7601 case GT:
7602 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
7603 suffix = "g";
7604 break;
7605 case GTU:
7606 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
7607 Those same assemblers have the same but opposite lossage on cmov. */
7608 gcc_assert (mode == CCmode);
7609 suffix = fp ? "nbe" : "a";
7610 break;
7611 case LT:
7612 switch (mode)
7613 {
7614 case CCNOmode:
7615 case CCGOCmode:
7616 suffix = "s";
7617 break;
7618
7619 case CCmode:
7620 case CCGCmode:
7621 suffix = "l";
7622 break;
7623
7624 default:
7625 gcc_unreachable ();
7626 }
7627 break;
7628 case LTU:
7629 gcc_assert (mode == CCmode);
7630 suffix = "b";
7631 break;
7632 case GE:
7633 switch (mode)
7634 {
7635 case CCNOmode:
7636 case CCGOCmode:
7637 suffix = "ns";
7638 break;
7639
7640 case CCmode:
7641 case CCGCmode:
7642 suffix = "ge";
7643 break;
7644
7645 default:
7646 gcc_unreachable ();
7647 }
7648 break;
7649 case GEU:
7650 /* ??? As above. */
7651 gcc_assert (mode == CCmode);
7652 suffix = fp ? "nb" : "ae";
7653 break;
7654 case LE:
7655 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
7656 suffix = "le";
7657 break;
7658 case LEU:
7659 gcc_assert (mode == CCmode);
7660 suffix = "be";
7661 break;
7662 case UNORDERED:
7663 suffix = fp ? "u" : "p";
7664 break;
7665 case ORDERED:
7666 suffix = fp ? "nu" : "np";
7667 break;
7668 default:
7669 gcc_unreachable ();
7670 }
7671 fputs (suffix, file);
7672 }
7673
7674 /* Print the name of register X to FILE based on its machine mode and number.
7675 If CODE is 'w', pretend the mode is HImode.
7676 If CODE is 'b', pretend the mode is QImode.
7677 If CODE is 'k', pretend the mode is SImode.
7678 If CODE is 'q', pretend the mode is DImode.
7679 If CODE is 'h', pretend the reg is the 'high' byte register.
7680 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
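/* For example, printing the DImode register rax with CODE 'k' gives
"%eax" in AT&T syntax; the AMD extended registers instead come out as
"r8d", "r9w", "r10b" etc., handled separately below.  */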
7681
7682 void
7683 print_reg (rtx x, int code, FILE *file)
7684 {
7685 gcc_assert (REGNO (x) != ARG_POINTER_REGNUM
7686 && REGNO (x) != FRAME_POINTER_REGNUM
7687 && REGNO (x) != FLAGS_REG
7688 && REGNO (x) != FPSR_REG
7689 && REGNO (x) != FPCR_REG);
7690
7691 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
7692 putc ('%', file);
7693
7694 if (code == 'w' || MMX_REG_P (x))
7695 code = 2;
7696 else if (code == 'b')
7697 code = 1;
7698 else if (code == 'k')
7699 code = 4;
7700 else if (code == 'q')
7701 code = 8;
7702 else if (code == 'y')
7703 code = 3;
7704 else if (code == 'h')
7705 code = 0;
7706 else
7707 code = GET_MODE_SIZE (GET_MODE (x));
7708
7709 /* Irritatingly, AMD extended registers use a different naming convention
7710 from the normal registers. */
7711 if (REX_INT_REG_P (x))
7712 {
7713 gcc_assert (TARGET_64BIT);
7714 switch (code)
7715 {
7716 case 0:
7717 error ("extended registers have no high halves");
7718 break;
7719 case 1:
7720 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
7721 break;
7722 case 2:
7723 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
7724 break;
7725 case 4:
7726 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
7727 break;
7728 case 8:
7729 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
7730 break;
7731 default:
7732 error ("unsupported operand size for extended register");
7733 break;
7734 }
7735 return;
7736 }
7737 switch (code)
7738 {
7739 case 3:
7740 if (STACK_TOP_P (x))
7741 {
7742 fputs ("st(0)", file);
7743 break;
7744 }
7745 /* FALLTHRU */
7746 case 8:
7747 case 4:
7748 case 12:
7749 if (! ANY_FP_REG_P (x))
7750 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
7751 /* FALLTHRU */
7752 case 16:
7753 case 2:
7754 normal:
7755 fputs (hi_reg_name[REGNO (x)], file);
7756 break;
7757 case 1:
7758 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
7759 goto normal;
7760 fputs (qi_reg_name[REGNO (x)], file);
7761 break;
7762 case 0:
7763 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
7764 goto normal;
7765 fputs (qi_high_reg_name[REGNO (x)], file);
7766 break;
7767 default:
7768 gcc_unreachable ();
7769 }
7770 }
7771
7772 /* Locate some local-dynamic symbol still in use by this function
7773 so that we can print its name in some tls_local_dynamic_base
7774 pattern. */
7775
7776 static const char *
7777 get_some_local_dynamic_name (void)
7778 {
7779 rtx insn;
7780
7781 if (cfun->machine->some_ld_name)
7782 return cfun->machine->some_ld_name;
7783
7784 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
7785 if (INSN_P (insn)
7786 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
7787 return cfun->machine->some_ld_name;
7788
7789 gcc_unreachable ();
7790 }
7791
7792 static int
7793 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
7794 {
7795 rtx x = *px;
7796
7797 if (GET_CODE (x) == SYMBOL_REF
7798 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
7799 {
7800 cfun->machine->some_ld_name = XSTR (x, 0);
7801 return 1;
7802 }
7803
7804 return 0;
7805 }
7806
7807 /* Meaning of CODE:
7808 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
7809 C -- print opcode suffix for set/cmov insn.
7810 c -- like C, but print reversed condition
7811 F,f -- likewise, but for floating-point.
7812 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
7813 otherwise nothing
7814 R -- print the prefix for register names.
7815 z -- print the opcode suffix for the size of the current operand.
7816 * -- print a star (in certain assembler syntax)
7817 A -- print an absolute memory reference.
7818 w -- print the operand as if it's a "word" (HImode) even if it isn't.
7819 s -- print a shift double count, followed by the assembler's argument
7820 delimiter.
7821 b -- print the QImode name of the register for the indicated operand.
7822 %b0 would print %al if operands[0] is reg 0.
7823 w -- likewise, print the HImode name of the register.
7824 k -- likewise, print the SImode name of the register.
7825 q -- likewise, print the DImode name of the register.
7826 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
7827 y -- print "st(0)" instead of "st" as a register.
7828 D -- print condition for SSE cmp instruction.
7829 P -- if PIC, print an @PLT suffix.
7830 X -- don't print any sort of PIC '@' suffix for a symbol.
7831 & -- print some in-use local-dynamic symbol name.
7832 H -- print a memory address offset by 8; used for sse high-parts
7833 */
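/* For instance, in the template "fistp%z0\t%0" used below, %z0 expands
to the x87 size suffix of operand 0 ("s", "l" or "q"/"ll"), while %b1,
%w1 and %k1 would print the QImode, HImode and SImode register names
(%al, %ax, %eax) of operand 1.  */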
7834
7835 void
7836 print_operand (FILE *file, rtx x, int code)
7837 {
7838 if (code)
7839 {
7840 switch (code)
7841 {
7842 case '*':
7843 if (ASSEMBLER_DIALECT == ASM_ATT)
7844 putc ('*', file);
7845 return;
7846
7847 case '&':
7848 assemble_name (file, get_some_local_dynamic_name ());
7849 return;
7850
7851 case 'A':
7852 switch (ASSEMBLER_DIALECT)
7853 {
7854 case ASM_ATT:
7855 putc ('*', file);
7856 break;
7857
7858 case ASM_INTEL:
7859 /* Intel syntax.  For absolute addresses, registers should not
7860 be surrounded by brackets. */
7861 if (GET_CODE (x) != REG)
7862 {
7863 putc ('[', file);
7864 PRINT_OPERAND (file, x, 0);
7865 putc (']', file);
7866 return;
7867 }
7868 break;
7869
7870 default:
7871 gcc_unreachable ();
7872 }
7873
7874 PRINT_OPERAND (file, x, 0);
7875 return;
7876
7877
7878 case 'L':
7879 if (ASSEMBLER_DIALECT == ASM_ATT)
7880 putc ('l', file);
7881 return;
7882
7883 case 'W':
7884 if (ASSEMBLER_DIALECT == ASM_ATT)
7885 putc ('w', file);
7886 return;
7887
7888 case 'B':
7889 if (ASSEMBLER_DIALECT == ASM_ATT)
7890 putc ('b', file);
7891 return;
7892
7893 case 'Q':
7894 if (ASSEMBLER_DIALECT == ASM_ATT)
7895 putc ('l', file);
7896 return;
7897
7898 case 'S':
7899 if (ASSEMBLER_DIALECT == ASM_ATT)
7900 putc ('s', file);
7901 return;
7902
7903 case 'T':
7904 if (ASSEMBLER_DIALECT == ASM_ATT)
7905 putc ('t', file);
7906 return;
7907
7908 case 'z':
7909 /* 387 opcodes don't get size suffixes if the operands are
7910 registers. */
7911 if (STACK_REG_P (x))
7912 return;
7913
7914 /* Likewise if using Intel opcodes. */
7915 if (ASSEMBLER_DIALECT == ASM_INTEL)
7916 return;
7917
7918 /* This is the size of op from size of operand. */
7919 switch (GET_MODE_SIZE (GET_MODE (x)))
7920 {
7921 case 2:
7922 #ifdef HAVE_GAS_FILDS_FISTS
7923 putc ('s', file);
7924 #endif
7925 return;
7926
7927 case 4:
7928 if (GET_MODE (x) == SFmode)
7929 {
7930 putc ('s', file);
7931 return;
7932 }
7933 else
7934 putc ('l', file);
7935 return;
7936
7937 case 12:
7938 case 16:
7939 putc ('t', file);
7940 return;
7941
7942 case 8:
7943 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
7944 {
7945 #ifdef GAS_MNEMONICS
7946 putc ('q', file);
7947 #else
7948 putc ('l', file);
7949 putc ('l', file);
7950 #endif
7951 }
7952 else
7953 putc ('l', file);
7954 return;
7955
7956 default:
7957 gcc_unreachable ();
7958 }
7959
7960 case 'b':
7961 case 'w':
7962 case 'k':
7963 case 'q':
7964 case 'h':
7965 case 'y':
7966 case 'X':
7967 case 'P':
7968 break;
7969
7970 case 's':
7971 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
7972 {
7973 PRINT_OPERAND (file, x, 0);
7974 putc (',', file);
7975 }
7976 return;
7977
7978 case 'D':
7979 /* Little bit of braindamage here.  The SSE compare instructions
7980 use completely different names for the comparisons than the
7981 fp conditional moves do. */
7982 switch (GET_CODE (x))
7983 {
7984 case EQ:
7985 case UNEQ:
7986 fputs ("eq", file);
7987 break;
7988 case LT:
7989 case UNLT:
7990 fputs ("lt", file);
7991 break;
7992 case LE:
7993 case UNLE:
7994 fputs ("le", file);
7995 break;
7996 case UNORDERED:
7997 fputs ("unord", file);
7998 break;
7999 case NE:
8000 case LTGT:
8001 fputs ("neq", file);
8002 break;
8003 case UNGE:
8004 case GE:
8005 fputs ("nlt", file);
8006 break;
8007 case UNGT:
8008 case GT:
8009 fputs ("nle", file);
8010 break;
8011 case ORDERED:
8012 fputs ("ord", file);
8013 break;
8014 default:
8015 gcc_unreachable ();
8016 }
8017 return;
8018 case 'O':
8019 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8020 if (ASSEMBLER_DIALECT == ASM_ATT)
8021 {
8022 switch (GET_MODE (x))
8023 {
8024 case HImode: putc ('w', file); break;
8025 case SImode:
8026 case SFmode: putc ('l', file); break;
8027 case DImode:
8028 case DFmode: putc ('q', file); break;
8029 default: gcc_unreachable ();
8030 }
8031 putc ('.', file);
8032 }
8033 #endif
8034 return;
8035 case 'C':
8036 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
8037 return;
8038 case 'F':
8039 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8040 if (ASSEMBLER_DIALECT == ASM_ATT)
8041 putc ('.', file);
8042 #endif
8043 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
8044 return;
8045
8046 /* Like above, but reverse condition */
8047 case 'c':
8048 /* Check to see if argument to %c is really a constant
8049 and not a condition code which needs to be reversed. */
8050 if (!COMPARISON_P (x))
8051 {
8052 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
8053 return;
8054 }
8055 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
8056 return;
8057 case 'f':
8058 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8059 if (ASSEMBLER_DIALECT == ASM_ATT)
8060 putc ('.', file);
8061 #endif
8062 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
8063 return;
8064
8065 case 'H':
8066 /* It doesn't actually matter what mode we use here, as we're
8067 only going to use this for printing. */
8068 x = adjust_address_nv (x, DImode, 8);
8069 break;
8070
8071 case '+':
8072 {
8073 rtx x;
8074
8075 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
8076 return;
8077
8078 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
8079 if (x)
8080 {
8081 int pred_val = INTVAL (XEXP (x, 0));
8082
8083 if (pred_val < REG_BR_PROB_BASE * 45 / 100
8084 || pred_val > REG_BR_PROB_BASE * 55 / 100)
8085 {
8086 int taken = pred_val > REG_BR_PROB_BASE / 2;
8087 int cputaken = final_forward_branch_p (current_output_insn) == 0;
8088
8089 /* Emit hints only in the case default branch prediction
8090 heuristics would fail. */
8091 if (taken != cputaken)
8092 {
8093 /* We use 3e (DS) prefix for taken branches and
8094 2e (CS) prefix for not taken branches. */
8095 if (taken)
8096 fputs ("ds ; ", file);
8097 else
8098 fputs ("cs ; ", file);
8099 }
8100 }
8101 }
8102 return;
8103 }
8104 default:
8105 output_operand_lossage ("invalid operand code '%c'", code);
8106 }
8107 }
8108
8109 if (GET_CODE (x) == REG)
8110 print_reg (x, code, file);
8111
8112 else if (GET_CODE (x) == MEM)
8113 {
8114 /* No `byte ptr' prefix for call instructions. */
8115 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
8116 {
8117 const char * size;
8118 switch (GET_MODE_SIZE (GET_MODE (x)))
8119 {
8120 case 1: size = "BYTE"; break;
8121 case 2: size = "WORD"; break;
8122 case 4: size = "DWORD"; break;
8123 case 8: size = "QWORD"; break;
8124 case 12: size = "XWORD"; break;
8125 case 16: size = "XMMWORD"; break;
8126 default:
8127 gcc_unreachable ();
8128 }
8129
8130 /* Check for explicit size override (codes 'b', 'w' and 'k') */
8131 if (code == 'b')
8132 size = "BYTE";
8133 else if (code == 'w')
8134 size = "WORD";
8135 else if (code == 'k')
8136 size = "DWORD";
8137
8138 fputs (size, file);
8139 fputs (" PTR ", file);
8140 }
8141
8142 x = XEXP (x, 0);
8143 /* Avoid (%rip) for call operands. */
8144 if (CONSTANT_ADDRESS_P (x) && code == 'P'
8145 && GET_CODE (x) != CONST_INT)
8146 output_addr_const (file, x);
8147 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
8148 output_operand_lossage ("invalid constraints for operand");
8149 else
8150 output_address (x);
8151 }
8152
8153 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
8154 {
8155 REAL_VALUE_TYPE r;
8156 long l;
8157
8158 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8159 REAL_VALUE_TO_TARGET_SINGLE (r, l);
8160
8161 if (ASSEMBLER_DIALECT == ASM_ATT)
8162 putc ('$', file);
8163 fprintf (file, "0x%08lx", l);
8164 }
8165
8166 /* These float cases don't actually occur as immediate operands. */
8167 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
8168 {
8169 char dstr[30];
8170
8171 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
8172 fprintf (file, "%s", dstr);
8173 }
8174
8175 else if (GET_CODE (x) == CONST_DOUBLE
8176 && GET_MODE (x) == XFmode)
8177 {
8178 char dstr[30];
8179
8180 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
8181 fprintf (file, "%s", dstr);
8182 }
8183
8184 else
8185 {
8186 /* We have patterns that allow zero sets of memory, for instance.
8187 In 64-bit mode, we should probably support all 8-byte vectors,
8188 since we can in fact encode that into an immediate. */
8189 if (GET_CODE (x) == CONST_VECTOR)
8190 {
8191 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
8192 x = const0_rtx;
8193 }
8194
8195 if (code != 'P')
8196 {
8197 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
8198 {
8199 if (ASSEMBLER_DIALECT == ASM_ATT)
8200 putc ('$', file);
8201 }
8202 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
8203 || GET_CODE (x) == LABEL_REF)
8204 {
8205 if (ASSEMBLER_DIALECT == ASM_ATT)
8206 putc ('$', file);
8207 else
8208 fputs ("OFFSET FLAT:", file);
8209 }
8210 }
8211 if (GET_CODE (x) == CONST_INT)
8212 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
8213 else if (flag_pic)
8214 output_pic_addr_const (file, x, code);
8215 else
8216 output_addr_const (file, x);
8217 }
8218 }
8219 \f
8220 /* Print a memory operand whose address is ADDR. */
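/* E.g. base %ebx, index %esi, scale 4 and displacement 8 come out as
"8(%ebx,%esi,4)" in AT&T syntax and as "[ebx+esi*4+8]" in Intel syntax.  */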
8221
8222 void
8223 print_operand_address (FILE *file, rtx addr)
8224 {
8225 struct ix86_address parts;
8226 rtx base, index, disp;
8227 int scale;
8228 int ok = ix86_decompose_address (addr, &parts);
8229
8230 gcc_assert (ok);
8231
8232 base = parts.base;
8233 index = parts.index;
8234 disp = parts.disp;
8235 scale = parts.scale;
8236
8237 switch (parts.seg)
8238 {
8239 case SEG_DEFAULT:
8240 break;
8241 case SEG_FS:
8242 case SEG_GS:
8243 if (USER_LABEL_PREFIX[0] == 0)
8244 putc ('%', file);
8245 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
8246 break;
8247 default:
8248 gcc_unreachable ();
8249 }
8250
8251 if (!base && !index)
8252 {
8253 /* Displacement only requires special attention. */
8254
8255 if (GET_CODE (disp) == CONST_INT)
8256 {
8257 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
8258 {
8259 if (USER_LABEL_PREFIX[0] == 0)
8260 putc ('%', file);
8261 fputs ("ds:", file);
8262 }
8263 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
8264 }
8265 else if (flag_pic)
8266 output_pic_addr_const (file, disp, 0);
8267 else
8268 output_addr_const (file, disp);
8269
8270 /* Use one byte shorter RIP relative addressing for 64bit mode. */
8271 if (TARGET_64BIT)
8272 {
8273 if (GET_CODE (disp) == CONST
8274 && GET_CODE (XEXP (disp, 0)) == PLUS
8275 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
8276 disp = XEXP (XEXP (disp, 0), 0);
8277 if (GET_CODE (disp) == LABEL_REF
8278 || (GET_CODE (disp) == SYMBOL_REF
8279 && SYMBOL_REF_TLS_MODEL (disp) == 0))
8280 fputs ("(%rip)", file);
8281 }
8282 }
8283 else
8284 {
8285 if (ASSEMBLER_DIALECT == ASM_ATT)
8286 {
8287 if (disp)
8288 {
8289 if (flag_pic)
8290 output_pic_addr_const (file, disp, 0);
8291 else if (GET_CODE (disp) == LABEL_REF)
8292 output_asm_label (disp);
8293 else
8294 output_addr_const (file, disp);
8295 }
8296
8297 putc ('(', file);
8298 if (base)
8299 print_reg (base, 0, file);
8300 if (index)
8301 {
8302 putc (',', file);
8303 print_reg (index, 0, file);
8304 if (scale != 1)
8305 fprintf (file, ",%d", scale);
8306 }
8307 putc (')', file);
8308 }
8309 else
8310 {
8311 rtx offset = NULL_RTX;
8312
8313 if (disp)
8314 {
8315 /* Pull out the offset of a symbol; print any symbol itself. */
8316 if (GET_CODE (disp) == CONST
8317 && GET_CODE (XEXP (disp, 0)) == PLUS
8318 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
8319 {
8320 offset = XEXP (XEXP (disp, 0), 1);
8321 disp = gen_rtx_CONST (VOIDmode,
8322 XEXP (XEXP (disp, 0), 0));
8323 }
8324
8325 if (flag_pic)
8326 output_pic_addr_const (file, disp, 0);
8327 else if (GET_CODE (disp) == LABEL_REF)
8328 output_asm_label (disp);
8329 else if (GET_CODE (disp) == CONST_INT)
8330 offset = disp;
8331 else
8332 output_addr_const (file, disp);
8333 }
8334
8335 putc ('[', file);
8336 if (base)
8337 {
8338 print_reg (base, 0, file);
8339 if (offset)
8340 {
8341 if (INTVAL (offset) >= 0)
8342 putc ('+', file);
8343 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
8344 }
8345 }
8346 else if (offset)
8347 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
8348 else
8349 putc ('0', file);
8350
8351 if (index)
8352 {
8353 putc ('+', file);
8354 print_reg (index, 0, file);
8355 if (scale != 1)
8356 fprintf (file, "*%d", scale);
8357 }
8358 putc (']', file);
8359 }
8360 }
8361 }
8362
8363 bool
8364 output_addr_const_extra (FILE *file, rtx x)
8365 {
8366 rtx op;
8367
8368 if (GET_CODE (x) != UNSPEC)
8369 return false;
8370
8371 op = XVECEXP (x, 0, 0);
8372 switch (XINT (x, 1))
8373 {
8374 case UNSPEC_GOTTPOFF:
8375 output_addr_const (file, op);
8376 /* FIXME: This might be @TPOFF in Sun ld. */
8377 fputs ("@GOTTPOFF", file);
8378 break;
8379 case UNSPEC_TPOFF:
8380 output_addr_const (file, op);
8381 fputs ("@TPOFF", file);
8382 break;
8383 case UNSPEC_NTPOFF:
8384 output_addr_const (file, op);
8385 if (TARGET_64BIT)
8386 fputs ("@TPOFF", file);
8387 else
8388 fputs ("@NTPOFF", file);
8389 break;
8390 case UNSPEC_DTPOFF:
8391 output_addr_const (file, op);
8392 fputs ("@DTPOFF", file);
8393 break;
8394 case UNSPEC_GOTNTPOFF:
8395 output_addr_const (file, op);
8396 if (TARGET_64BIT)
8397 fputs ("@GOTTPOFF(%rip)", file);
8398 else
8399 fputs ("@GOTNTPOFF", file);
8400 break;
8401 case UNSPEC_INDNTPOFF:
8402 output_addr_const (file, op);
8403 fputs ("@INDNTPOFF", file);
8404 break;
8405
8406 default:
8407 return false;
8408 }
8409
8410 return true;
8411 }
8412 \f
8413 /* Split one or more DImode RTL references into pairs of SImode
8414 references. The RTL can be REG, offsettable MEM, integer constant, or
8415 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
8416 split and "num" is its length. lo_half and hi_half are output arrays
8417 that parallel "operands". */
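/* E.g. an offsettable DImode MEM is split into two SImode MEMs at byte
offsets 0 and 4: the low word first, matching little-endian layout.  */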
8418
8419 void
8420 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
8421 {
8422 while (num--)
8423 {
8424 rtx op = operands[num];
8425
8426 /* simplify_subreg refuses to split volatile memory addresses,
8427 but we still have to handle them. */
8428 if (GET_CODE (op) == MEM)
8429 {
8430 lo_half[num] = adjust_address (op, SImode, 0);
8431 hi_half[num] = adjust_address (op, SImode, 4);
8432 }
8433 else
8434 {
8435 lo_half[num] = simplify_gen_subreg (SImode, op,
8436 GET_MODE (op) == VOIDmode
8437 ? DImode : GET_MODE (op), 0);
8438 hi_half[num] = simplify_gen_subreg (SImode, op,
8439 GET_MODE (op) == VOIDmode
8440 ? DImode : GET_MODE (op), 4);
8441 }
8442 }
8443 }
8444 /* Split one or more TImode RTL references into pairs of DImode
8445 references. The RTL can be REG, offsettable MEM, integer constant, or
8446 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
8447 split and "num" is its length. lo_half and hi_half are output arrays
8448 that parallel "operands". */
8449
8450 void
8451 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
8452 {
8453 while (num--)
8454 {
8455 rtx op = operands[num];
8456
8457 /* simplify_subreg refuses to split volatile memory addresses, but we
8458 still have to handle them. */
8459 if (GET_CODE (op) == MEM)
8460 {
8461 lo_half[num] = adjust_address (op, DImode, 0);
8462 hi_half[num] = adjust_address (op, DImode, 8);
8463 }
8464 else
8465 {
8466 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
8467 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
8468 }
8469 }
8470 }
8471 \f
8472 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
8473 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
8474 is the expression of the binary operation. The output may either be
8475 emitted here, or returned to the caller, like all output_* functions.
8476
8477 There is no guarantee that the operands are the same mode, as they
8478 might be within FLOAT or FLOAT_EXTEND expressions. */
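/* The templates built below use the "{att|intel}" output syntax: the
part before '|' is emitted when ASSEMBLER_DIALECT == ASM_ATT and the
part after it when it is ASM_INTEL.  */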
8479
8480 #ifndef SYSV386_COMPAT
8481 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
8482 wants to fix the assemblers because that causes incompatibility
8483 with gcc. No-one wants to fix gcc because that causes
8484 incompatibility with assemblers... You can use the option of
8485 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
8486 #define SYSV386_COMPAT 1
8487 #endif
8488
8489 const char *
8490 output_387_binary_op (rtx insn, rtx *operands)
8491 {
8492 static char buf[30];
8493 const char *p;
8494 const char *ssep;
8495 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
8496
8497 #ifdef ENABLE_CHECKING
8498 /* Even if we do not want to check the inputs, this documents the input
8499 constraints, which helps in understanding the following code. */
8500 if (STACK_REG_P (operands[0])
8501 && ((REG_P (operands[1])
8502 && REGNO (operands[0]) == REGNO (operands[1])
8503 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
8504 || (REG_P (operands[2])
8505 && REGNO (operands[0]) == REGNO (operands[2])
8506 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
8507 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
8508 ; /* ok */
8509 else
8510 gcc_assert (is_sse);
8511 #endif
8512
8513 switch (GET_CODE (operands[3]))
8514 {
8515 case PLUS:
8516 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8517 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8518 p = "fiadd";
8519 else
8520 p = "fadd";
8521 ssep = "add";
8522 break;
8523
8524 case MINUS:
8525 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8526 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8527 p = "fisub";
8528 else
8529 p = "fsub";
8530 ssep = "sub";
8531 break;
8532
8533 case MULT:
8534 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8535 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8536 p = "fimul";
8537 else
8538 p = "fmul";
8539 ssep = "mul";
8540 break;
8541
8542 case DIV:
8543 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8544 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8545 p = "fidiv";
8546 else
8547 p = "fdiv";
8548 ssep = "div";
8549 break;
8550
8551 default:
8552 gcc_unreachable ();
8553 }
8554
8555 if (is_sse)
8556 {
8557 strcpy (buf, ssep);
8558 if (GET_MODE (operands[0]) == SFmode)
8559 strcat (buf, "ss\t{%2, %0|%0, %2}");
8560 else
8561 strcat (buf, "sd\t{%2, %0|%0, %2}");
8562 return buf;
8563 }
8564 strcpy (buf, p);
8565
8566 switch (GET_CODE (operands[3]))
8567 {
8568 case MULT:
8569 case PLUS:
8570 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
8571 {
8572 rtx temp = operands[2];
8573 operands[2] = operands[1];
8574 operands[1] = temp;
8575 }
8576
8577 /* We know operands[0] == operands[1]. */
8578
8579 if (GET_CODE (operands[2]) == MEM)
8580 {
8581 p = "%z2\t%2";
8582 break;
8583 }
8584
8585 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
8586 {
8587 if (STACK_TOP_P (operands[0]))
8588 /* How is it that we are storing to a dead operand[2]?
8589 Well, presumably operands[1] is dead too. We can't
8590 store the result to st(0) as st(0) gets popped on this
8591 instruction. Instead store to operands[2] (which I
8592 think has to be st(1)). st(1) will be popped later.
8593 gcc <= 2.8.1 didn't have this check and generated
8594 assembly code that the Unixware assembler rejected. */
8595 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
8596 else
8597 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
8598 break;
8599 }
8600
8601 if (STACK_TOP_P (operands[0]))
8602 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
8603 else
8604 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
8605 break;
8606
8607 case MINUS:
8608 case DIV:
8609 if (GET_CODE (operands[1]) == MEM)
8610 {
8611 p = "r%z1\t%1";
8612 break;
8613 }
8614
8615 if (GET_CODE (operands[2]) == MEM)
8616 {
8617 p = "%z2\t%2";
8618 break;
8619 }
8620
8621 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
8622 {
8623 #if SYSV386_COMPAT
8624 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
8625 derived assemblers, confusingly reverse the direction of
8626 the operation for fsub{r} and fdiv{r} when the
8627 destination register is not st(0). The Intel assembler
8628 doesn't have this brain damage. Read !SYSV386_COMPAT to
8629 figure out what the hardware really does. */
8630 if (STACK_TOP_P (operands[0]))
8631 p = "{p\t%0, %2|rp\t%2, %0}";
8632 else
8633 p = "{rp\t%2, %0|p\t%0, %2}";
8634 #else
8635 if (STACK_TOP_P (operands[0]))
8636 /* As above for fmul/fadd, we can't store to st(0). */
8637 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
8638 else
8639 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
8640 #endif
8641 break;
8642 }
8643
8644 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
8645 {
8646 #if SYSV386_COMPAT
8647 if (STACK_TOP_P (operands[0]))
8648 p = "{rp\t%0, %1|p\t%1, %0}";
8649 else
8650 p = "{p\t%1, %0|rp\t%0, %1}";
8651 #else
8652 if (STACK_TOP_P (operands[0]))
8653 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
8654 else
8655 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
8656 #endif
8657 break;
8658 }
8659
8660 if (STACK_TOP_P (operands[0]))
8661 {
8662 if (STACK_TOP_P (operands[1]))
8663 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
8664 else
8665 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
8666 break;
8667 }
8668 else if (STACK_TOP_P (operands[1]))
8669 {
8670 #if SYSV386_COMPAT
8671 p = "{\t%1, %0|r\t%0, %1}";
8672 #else
8673 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
8674 #endif
8675 }
8676 else
8677 {
8678 #if SYSV386_COMPAT
8679 p = "{r\t%2, %0|\t%0, %2}";
8680 #else
8681 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
8682 #endif
8683 }
8684 break;
8685
8686 default:
8687 gcc_unreachable ();
8688 }
8689
8690 strcat (buf, p);
8691 return buf;
8692 }
8693
8694 /* Return needed mode for entity in optimize_mode_switching pass. */
8695
8696 int
8697 ix86_mode_needed (int entity, rtx insn)
8698 {
8699 enum attr_i387_cw mode;
8700
8701 /* The mode UNINITIALIZED is used to store the control word after a
8702 function call or ASM pattern.  The mode ANY specifies that the function
8703 has no requirements on the control word and makes no changes to the
8704 bits we are interested in. */
8705
8706 if (CALL_P (insn)
8707 || (NONJUMP_INSN_P (insn)
8708 && (asm_noperands (PATTERN (insn)) >= 0
8709 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
8710 return I387_CW_UNINITIALIZED;
8711
8712 if (recog_memoized (insn) < 0)
8713 return I387_CW_ANY;
8714
8715 mode = get_attr_i387_cw (insn);
8716
8717 switch (entity)
8718 {
8719 case I387_TRUNC:
8720 if (mode == I387_CW_TRUNC)
8721 return mode;
8722 break;
8723
8724 case I387_FLOOR:
8725 if (mode == I387_CW_FLOOR)
8726 return mode;
8727 break;
8728
8729 case I387_CEIL:
8730 if (mode == I387_CW_CEIL)
8731 return mode;
8732 break;
8733
8734 case I387_MASK_PM:
8735 if (mode == I387_CW_MASK_PM)
8736 return mode;
8737 break;
8738
8739 default:
8740 gcc_unreachable ();
8741 }
8742
8743 return I387_CW_ANY;
8744 }
8745
8746 /* Output code to initialize control word copies used by trunc?f?i and
8747 rounding patterns.  MODE selects the variant; the current control word
8748 is saved to one stack slot and a suitably modified copy is stored in another. */
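/* Reminder about the x87 control word layout used below: bits 10-11
select the rounding mode (00 nearest, 01 down, 10 up, 11 toward zero),
and bit 5 (0x0020) masks the precision exception.  */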
8749
8750 void
8751 emit_i387_cw_initialization (int mode)
8752 {
8753 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
8754 rtx new_mode;
8755
8756 int slot;
8757
8758 rtx reg = gen_reg_rtx (HImode);
8759
8760 emit_insn (gen_x86_fnstcw_1 (stored_mode));
8761 emit_move_insn (reg, copy_rtx (stored_mode));
8762
8763 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
8764 {
8765 switch (mode)
8766 {
8767 case I387_CW_TRUNC:
8768 /* round toward zero (truncate) */
8769 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
8770 slot = SLOT_CW_TRUNC;
8771 break;
8772
8773 case I387_CW_FLOOR:
8774 /* round down toward -oo */
8775 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
8776 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
8777 slot = SLOT_CW_FLOOR;
8778 break;
8779
8780 case I387_CW_CEIL:
8781 /* round up toward +oo */
8782 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
8783 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
8784 slot = SLOT_CW_CEIL;
8785 break;
8786
8787 case I387_CW_MASK_PM:
8788 /* mask precision exception for nearbyint() */
8789 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8790 slot = SLOT_CW_MASK_PM;
8791 break;
8792
8793 default:
8794 gcc_unreachable ();
8795 }
8796 }
8797 else
8798 {
8799 switch (mode)
8800 {
8801 case I387_CW_TRUNC:
8802 /* round toward zero (truncate) */
8803 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
8804 slot = SLOT_CW_TRUNC;
8805 break;
8806
8807 case I387_CW_FLOOR:
8808 /* round down toward -oo */
8809 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
8810 slot = SLOT_CW_FLOOR;
8811 break;
8812
8813 case I387_CW_CEIL:
8814 /* round up toward +oo */
8815 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
8816 slot = SLOT_CW_CEIL;
8817 break;
8818
8819 case I387_CW_MASK_PM:
8820 /* mask precision exception for nearbyint() */
8821 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8822 slot = SLOT_CW_MASK_PM;
8823 break;
8824
8825 default:
8826 gcc_unreachable ();
8827 }
8828 }
8829
8830 gcc_assert (slot < MAX_386_STACK_LOCALS);
8831
8832 new_mode = assign_386_stack_local (HImode, slot);
8833 emit_move_insn (new_mode, reg);
8834 }
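
/* Illustrative note on the control word bits manipulated above (standard
   x87 FCW layout): bits 11:10 form the rounding-control field and bit 5 is
   the precision-exception mask.  ORing in 0x0c00 therefore selects
   round-toward-zero, while clearing the field with ~0x0c00 and ORing in
   0x0400 or 0x0800 selects round-down or round-up respectively, and ORing
   in 0x0020 masks the precision (inexact) exception for nearbyint.  */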
8835
8836 /* Output code for INSN to convert a float to a signed int. OPERANDS
8837 are the insn operands. The output may be [HSD]Imode and the input
8838 operand may be [SDX]Fmode. */
8839
8840 const char *
8841 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
8842 {
8843 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8844 int dimode_p = GET_MODE (operands[0]) == DImode;
8845 int round_mode = get_attr_i387_cw (insn);
8846
8847 /* Jump through a hoop or two for DImode, since the hardware has no
8848 non-popping instruction. We used to do this a different way, but
8849 that was somewhat fragile and broke with post-reload splitters. */
8850 if ((dimode_p || fisttp) && !stack_top_dies)
8851 output_asm_insn ("fld\t%y1", operands);
8852
8853 gcc_assert (STACK_TOP_P (operands[1]));
8854 gcc_assert (GET_CODE (operands[0]) == MEM);
8855
8856 if (fisttp)
8857 output_asm_insn ("fisttp%z0\t%0", operands);
8858 else
8859 {
8860 if (round_mode != I387_CW_ANY)
8861 output_asm_insn ("fldcw\t%3", operands);
8862 if (stack_top_dies || dimode_p)
8863 output_asm_insn ("fistp%z0\t%0", operands);
8864 else
8865 output_asm_insn ("fist%z0\t%0", operands);
8866 if (round_mode != I387_CW_ANY)
8867 output_asm_insn ("fldcw\t%2", operands);
8868 }
8869
8870 return "";
8871 }
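
/* For illustration, the non-fisttp path above typically assembles to
   something along the lines of (AT&T syntax, SImode destination):

	fldcw	%3		# switch to the truncating control word
	fistpl	%0		# convert, store and pop
	fldcw	%2		# restore the original control word

   while fisttp (SSE3) always truncates and so needs no control-word
   switching at all.  */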
8872
8873 /* Output code for x87 ffreep insn. The OPNO argument, which may only
8874 have the values zero or one, indicates the ffreep insn's operand
8875 from the OPERANDS array. */
8876
8877 static const char *
8878 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
8879 {
8880 if (TARGET_USE_FFREEP)
8881 #if HAVE_AS_IX86_FFREEP
8882 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
8883 #else
8884 {
8885 static char retval[] = ".word\t0xc_df";
8886 int regno = REGNO (operands[opno]);
8887
8888 gcc_assert (FP_REGNO_P (regno));
8889
8890 retval[9] = '0' + (regno - FIRST_STACK_REG);
8891 return retval;
8892 }
8893 #endif
8894
8895 return opno ? "fstp\t%y1" : "fstp\t%y0";
8896 }
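
/* Illustrative note: when the assembler does not understand "ffreep", the
   .word above hand-assembles it.  ffreep %st(N) is the two-byte opcode
   DF C0+N, and since x86 is little-endian ".word 0xc3df", for example,
   emits the bytes DF C3, i.e. ffreep %st(3); the '_' placeholder patched
   at retval[9] is exactly that register digit.  */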
8897
8898
8899 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
8900 should be used. UNORDERED_P is true when fucom should be used. */
8901
8902 const char *
8903 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
8904 {
8905 int stack_top_dies;
8906 rtx cmp_op0, cmp_op1;
8907 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
8908
8909 if (eflags_p)
8910 {
8911 cmp_op0 = operands[0];
8912 cmp_op1 = operands[1];
8913 }
8914 else
8915 {
8916 cmp_op0 = operands[1];
8917 cmp_op1 = operands[2];
8918 }
8919
8920 if (is_sse)
8921 {
8922 if (GET_MODE (operands[0]) == SFmode)
8923 if (unordered_p)
8924 return "ucomiss\t{%1, %0|%0, %1}";
8925 else
8926 return "comiss\t{%1, %0|%0, %1}";
8927 else
8928 if (unordered_p)
8929 return "ucomisd\t{%1, %0|%0, %1}";
8930 else
8931 return "comisd\t{%1, %0|%0, %1}";
8932 }
8933
8934 gcc_assert (STACK_TOP_P (cmp_op0));
8935
8936 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8937
8938 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
8939 {
8940 if (stack_top_dies)
8941 {
8942 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
8943 return output_387_ffreep (operands, 1);
8944 }
8945 else
8946 return "ftst\n\tfnstsw\t%0";
8947 }
8948
8949 if (STACK_REG_P (cmp_op1)
8950 && stack_top_dies
8951 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
8952 && REGNO (cmp_op1) != FIRST_STACK_REG)
8953 {
8954 	  /* If both the top of the 387 stack and the other operand (also a
8955 	     stack register) die, then this must be a `fcompp' float
8956 	     compare.  */
8957
8958 if (eflags_p)
8959 {
8960 /* There is no double popping fcomi variant. Fortunately,
8961 eflags is immune from the fstp's cc clobbering. */
8962 if (unordered_p)
8963 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
8964 else
8965 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
8966 return output_387_ffreep (operands, 0);
8967 }
8968 else
8969 {
8970 if (unordered_p)
8971 return "fucompp\n\tfnstsw\t%0";
8972 else
8973 return "fcompp\n\tfnstsw\t%0";
8974 }
8975 }
8976 else
8977 {
8978 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
8979
8980 static const char * const alt[16] =
8981 {
8982 "fcom%z2\t%y2\n\tfnstsw\t%0",
8983 "fcomp%z2\t%y2\n\tfnstsw\t%0",
8984 "fucom%z2\t%y2\n\tfnstsw\t%0",
8985 "fucomp%z2\t%y2\n\tfnstsw\t%0",
8986
8987 "ficom%z2\t%y2\n\tfnstsw\t%0",
8988 "ficomp%z2\t%y2\n\tfnstsw\t%0",
8989 NULL,
8990 NULL,
8991
8992 "fcomi\t{%y1, %0|%0, %y1}",
8993 "fcomip\t{%y1, %0|%0, %y1}",
8994 "fucomi\t{%y1, %0|%0, %y1}",
8995 "fucomip\t{%y1, %0|%0, %y1}",
8996
8997 NULL,
8998 NULL,
8999 NULL,
9000 NULL
9001 };
9002
9003 int mask;
9004 const char *ret;
9005
9006 mask = eflags_p << 3;
9007 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
9008 mask |= unordered_p << 1;
9009 mask |= stack_top_dies;
9010
9011 gcc_assert (mask < 16);
9012 ret = alt[mask];
9013 gcc_assert (ret);
9014
9015 return ret;
9016 }
9017 }
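
/* Worked example for the table above: a flags-setting, unordered compare
   of two stack registers where the top of the stack dies gives
   mask = (1 << 3) | (0 << 2) | (1 << 1) | 1 = 11, selecting the popping
   "fucomip" alternative.  The NULL entries are combinations that cannot
   occur, such as an integer-operand fcomi.  */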
9018
9019 void
9020 ix86_output_addr_vec_elt (FILE *file, int value)
9021 {
9022 const char *directive = ASM_LONG;
9023
9024 #ifdef ASM_QUAD
9025 if (TARGET_64BIT)
9026 directive = ASM_QUAD;
9027 #else
9028 gcc_assert (!TARGET_64BIT);
9029 #endif
9030
9031 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
9032 }
9033
9034 void
9035 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
9036 {
9037 if (TARGET_64BIT)
9038 fprintf (file, "%s%s%d-%s%d\n",
9039 ASM_LONG, LPREFIX, value, LPREFIX, rel);
9040 else if (HAVE_AS_GOTOFF_IN_DATA)
9041 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
9042 #if TARGET_MACHO
9043 else if (TARGET_MACHO)
9044 {
9045 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
9046 machopic_output_function_base_name (file);
9047 fprintf(file, "\n");
9048 }
9049 #endif
9050 else
9051 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
9052 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
9053 }
9054 \f
9055 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
9056 for the target. */
9057
9058 void
9059 ix86_expand_clear (rtx dest)
9060 {
9061 rtx tmp;
9062
9063 /* We play register width games, which are only valid after reload. */
9064 gcc_assert (reload_completed);
9065
9066 /* Avoid HImode and its attendant prefix byte. */
9067 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
9068 dest = gen_rtx_REG (SImode, REGNO (dest));
9069
9070 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
9071
9072 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
9073 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
9074 {
9075 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
9076 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
9077 }
9078
9079 emit_insn (tmp);
9080 }
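
/* For illustration: the PARALLEL built above typically ends up as
   "xorl %eax, %eax" (two bytes, but it clobbers the flags, hence the CC
   register clobber), whereas the plain SET is kept for CPUs with
   TARGET_USE_MOV0 and becomes "movl $0, %eax" (five bytes, flags
   untouched), which can be preferable when a flags consumer is still
   pending.  */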
9081
9082 /* X is an unchanging MEM. If it is a constant pool reference, return
9083 the constant pool rtx, else NULL. */
9084
9085 rtx
9086 maybe_get_pool_constant (rtx x)
9087 {
9088 x = ix86_delegitimize_address (XEXP (x, 0));
9089
9090 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
9091 return get_pool_constant (x);
9092
9093 return NULL_RTX;
9094 }
9095
9096 void
9097 ix86_expand_move (enum machine_mode mode, rtx operands[])
9098 {
9099 int strict = (reload_in_progress || reload_completed);
9100 rtx op0, op1;
9101 enum tls_model model;
9102
9103 op0 = operands[0];
9104 op1 = operands[1];
9105
9106 if (GET_CODE (op1) == SYMBOL_REF)
9107 {
9108 model = SYMBOL_REF_TLS_MODEL (op1);
9109 if (model)
9110 {
9111 op1 = legitimize_tls_address (op1, model, true);
9112 op1 = force_operand (op1, op0);
9113 if (op1 == op0)
9114 return;
9115 }
9116 }
9117 else if (GET_CODE (op1) == CONST
9118 && GET_CODE (XEXP (op1, 0)) == PLUS
9119 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
9120 {
9121 model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
9122 if (model)
9123 {
9124 rtx addend = XEXP (XEXP (op1, 0), 1);
9125 op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
9126 op1 = force_operand (op1, NULL);
9127 op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
9128 op0, 1, OPTAB_DIRECT);
9129 if (op1 == op0)
9130 return;
9131 }
9132 }
9133
9134 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
9135 {
9136 if (TARGET_MACHO && !TARGET_64BIT)
9137 {
9138 #if TARGET_MACHO
9139 if (MACHOPIC_PURE)
9140 {
9141 rtx temp = ((reload_in_progress
9142 || ((op0 && GET_CODE (op0) == REG)
9143 && mode == Pmode))
9144 ? op0 : gen_reg_rtx (Pmode));
9145 op1 = machopic_indirect_data_reference (op1, temp);
9146 op1 = machopic_legitimize_pic_address (op1, mode,
9147 temp == op1 ? 0 : temp);
9148 }
9149 else if (MACHOPIC_INDIRECT)
9150 op1 = machopic_indirect_data_reference (op1, 0);
9151 if (op0 == op1)
9152 return;
9153 #endif
9154 }
9155 else
9156 {
9157 if (GET_CODE (op0) == MEM)
9158 op1 = force_reg (Pmode, op1);
9159 else
9160 op1 = legitimize_address (op1, op1, Pmode);
9161 }
9162 }
9163 else
9164 {
9165 if (GET_CODE (op0) == MEM
9166 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
9167 || !push_operand (op0, mode))
9168 && GET_CODE (op1) == MEM)
9169 op1 = force_reg (mode, op1);
9170
9171 if (push_operand (op0, mode)
9172 && ! general_no_elim_operand (op1, mode))
9173 op1 = copy_to_mode_reg (mode, op1);
9174
9175       /* Force large constants in 64-bit compilation into a register
9176 	 so that they get CSEd.  */
9177 if (TARGET_64BIT && mode == DImode
9178 && immediate_operand (op1, mode)
9179 && !x86_64_zext_immediate_operand (op1, VOIDmode)
9180 && !register_operand (op0, mode)
9181 && optimize && !reload_completed && !reload_in_progress)
9182 op1 = copy_to_mode_reg (mode, op1);
9183
9184 if (FLOAT_MODE_P (mode))
9185 {
9186 /* If we are loading a floating point constant to a register,
9187 force the value to memory now, since we'll get better code
9188 	     out of the back end.  */
9189
9190 if (strict)
9191 ;
9192 else if (GET_CODE (op1) == CONST_DOUBLE)
9193 {
9194 op1 = validize_mem (force_const_mem (mode, op1));
9195 if (!register_operand (op0, mode))
9196 {
9197 rtx temp = gen_reg_rtx (mode);
9198 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
9199 emit_move_insn (op0, temp);
9200 return;
9201 }
9202 }
9203 }
9204 }
9205
9206 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
9207 }
9208
9209 void
9210 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
9211 {
9212 rtx op0 = operands[0], op1 = operands[1];
9213
9214 /* Force constants other than zero into memory. We do not know how
9215 the instructions used to build constants modify the upper 64 bits
9216      of the register; once we have that information we may be able
9217      to handle some of them more efficiently.  */
9218 if ((reload_in_progress | reload_completed) == 0
9219 && register_operand (op0, mode)
9220 && CONSTANT_P (op1)
9221 && standard_sse_constant_p (op1) <= 0)
9222 op1 = validize_mem (force_const_mem (mode, op1));
9223
9224 /* Make operand1 a register if it isn't already. */
9225 if (!no_new_pseudos
9226 && !register_operand (op0, mode)
9227 && !register_operand (op1, mode))
9228 {
9229 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
9230 return;
9231 }
9232
9233 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
9234 }
9235
9236 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
9237 straight to ix86_expand_vector_move. */
9238
9239 void
9240 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
9241 {
9242 rtx op0, op1, m;
9243
9244 op0 = operands[0];
9245 op1 = operands[1];
9246
9247 if (MEM_P (op1))
9248 {
9249 /* If we're optimizing for size, movups is the smallest. */
9250 if (optimize_size)
9251 {
9252 op0 = gen_lowpart (V4SFmode, op0);
9253 op1 = gen_lowpart (V4SFmode, op1);
9254 emit_insn (gen_sse_movups (op0, op1));
9255 return;
9256 }
9257
9258 /* ??? If we have typed data, then it would appear that using
9259 movdqu is the only way to get unaligned data loaded with
9260 integer type. */
9261 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
9262 {
9263 op0 = gen_lowpart (V16QImode, op0);
9264 op1 = gen_lowpart (V16QImode, op1);
9265 emit_insn (gen_sse2_movdqu (op0, op1));
9266 return;
9267 }
9268
9269 if (TARGET_SSE2 && mode == V2DFmode)
9270 {
9271 rtx zero;
9272
9273 /* When SSE registers are split into halves, we can avoid
9274 writing to the top half twice. */
9275 if (TARGET_SSE_SPLIT_REGS)
9276 {
9277 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9278 zero = op0;
9279 }
9280 else
9281 {
9282 /* ??? Not sure about the best option for the Intel chips.
9283 The following would seem to satisfy; the register is
9284 entirely cleared, breaking the dependency chain. We
9285 then store to the upper half, with a dependency depth
9286 of one. A rumor has it that Intel recommends two movsd
9287 followed by an unpacklpd, but this is unconfirmed. And
9288 given that the dependency depth of the unpacklpd would
9289 still be one, I'm not sure why this would be better. */
9290 zero = CONST0_RTX (V2DFmode);
9291 }
9292
9293 m = adjust_address (op1, DFmode, 0);
9294 emit_insn (gen_sse2_loadlpd (op0, zero, m));
9295 m = adjust_address (op1, DFmode, 8);
9296 emit_insn (gen_sse2_loadhpd (op0, op0, m));
9297 }
9298 else
9299 {
9300 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
9301 emit_move_insn (op0, CONST0_RTX (mode));
9302 else
9303 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9304
9305 if (mode != V4SFmode)
9306 op0 = gen_lowpart (V4SFmode, op0);
9307 m = adjust_address (op1, V2SFmode, 0);
9308 emit_insn (gen_sse_loadlps (op0, op0, m));
9309 m = adjust_address (op1, V2SFmode, 8);
9310 emit_insn (gen_sse_loadhps (op0, op0, m));
9311 }
9312 }
9313 else if (MEM_P (op0))
9314 {
9315 /* If we're optimizing for size, movups is the smallest. */
9316 if (optimize_size)
9317 {
9318 op0 = gen_lowpart (V4SFmode, op0);
9319 op1 = gen_lowpart (V4SFmode, op1);
9320 emit_insn (gen_sse_movups (op0, op1));
9321 return;
9322 }
9323
9324       /* ??? Similar to above, only less clear because of "typeless
9325 	 stores".  */
9326 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
9327 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
9328 {
9329 op0 = gen_lowpart (V16QImode, op0);
9330 op1 = gen_lowpart (V16QImode, op1);
9331 emit_insn (gen_sse2_movdqu (op0, op1));
9332 return;
9333 }
9334
9335 if (TARGET_SSE2 && mode == V2DFmode)
9336 {
9337 m = adjust_address (op0, DFmode, 0);
9338 emit_insn (gen_sse2_storelpd (m, op1));
9339 m = adjust_address (op0, DFmode, 8);
9340 emit_insn (gen_sse2_storehpd (m, op1));
9341 }
9342 else
9343 {
9344 if (mode != V4SFmode)
9345 op1 = gen_lowpart (V4SFmode, op1);
9346 m = adjust_address (op0, V2SFmode, 0);
9347 emit_insn (gen_sse_storelps (m, op1));
9348 m = adjust_address (op0, V2SFmode, 8);
9349 emit_insn (gen_sse_storehps (m, op1));
9350 }
9351 }
9352 else
9353 gcc_unreachable ();
9354 }
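
/* For illustration, an unaligned V2DFmode load on a generic SSE2 target is
   expanded by the code above roughly as

	xorpd	%xmm0, %xmm0		# clear reg, break dependency chain
	movlpd	(addr), %xmm0		# low double
	movhpd	8(addr), %xmm0		# high double

   while with -Os everything collapses to a single unaligned movups.  The
   exact instructions chosen depend on the loadlpd/loadhpd patterns and the
   tuning flags, so take this only as a sketch.  */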
9355
9356 /* Expand a push in MODE. This is some mode for which we do not support
9357 proper push instructions, at least from the registers that we expect
9358 the value to live in. */
9359
9360 void
9361 ix86_expand_push (enum machine_mode mode, rtx x)
9362 {
9363 rtx tmp;
9364
9365 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
9366 GEN_INT (-GET_MODE_SIZE (mode)),
9367 stack_pointer_rtx, 1, OPTAB_DIRECT);
9368 if (tmp != stack_pointer_rtx)
9369 emit_move_insn (stack_pointer_rtx, tmp);
9370
9371 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
9372 emit_move_insn (tmp, x);
9373 }
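
/* For illustration: for a mode with no native push instruction (say a
   16-byte value being passed on the stack) this emits the obvious two-step
   sequence, e.g. "subl $16, %esp" followed by a plain move of the value
   to (%esp).  */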
9374
9375 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
9376 destination to use for the operation. If different from the true
9377 destination in operands[0], a copy operation will be required. */
9378
9379 rtx
9380 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
9381 rtx operands[])
9382 {
9383 int matching_memory;
9384 rtx src1, src2, dst;
9385
9386 dst = operands[0];
9387 src1 = operands[1];
9388 src2 = operands[2];
9389
9390 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
9391 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9392 && (rtx_equal_p (dst, src2)
9393 || immediate_operand (src1, mode)))
9394 {
9395 rtx temp = src1;
9396 src1 = src2;
9397 src2 = temp;
9398 }
9399
9400 /* If the destination is memory, and we do not have matching source
9401 operands, do things in registers. */
9402 matching_memory = 0;
9403 if (GET_CODE (dst) == MEM)
9404 {
9405 if (rtx_equal_p (dst, src1))
9406 matching_memory = 1;
9407 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9408 && rtx_equal_p (dst, src2))
9409 matching_memory = 2;
9410 else
9411 dst = gen_reg_rtx (mode);
9412 }
9413
9414 /* Both source operands cannot be in memory. */
9415 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
9416 {
9417 if (matching_memory != 2)
9418 src2 = force_reg (mode, src2);
9419 else
9420 src1 = force_reg (mode, src1);
9421 }
9422
9423   /* If the operation is not commutative, source 1 cannot be a constant
9424      or non-matching memory.  */
9425 if ((CONSTANT_P (src1)
9426 || (!matching_memory && GET_CODE (src1) == MEM))
9427 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
9428 src1 = force_reg (mode, src1);
9429
9430 src1 = operands[1] = src1;
9431 src2 = operands[2] = src2;
9432 return dst;
9433 }
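
/* Worked example: for (set (mem:SI A) (plus:SI (mem:SI B) (mem:SI C)))
   nothing matches the destination, so DST is replaced by a fresh pseudo and
   one memory source is forced into a register, reflecting the x86 rule that
   an ALU instruction may reference memory through at most one operand and
   that a memory destination must also be the matching source.  The caller
   (ix86_expand_binary_operator) then stores the pseudo back to A.  */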
9434
9435 /* Similarly, but assume that the destination has already been
9436 set up properly. */
9437
9438 void
9439 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
9440 enum machine_mode mode, rtx operands[])
9441 {
9442 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
9443 gcc_assert (dst == operands[0]);
9444 }
9445
9446 /* Attempt to expand a binary operator. Make the expansion closer to the
9447    actual machine than just general_operand, which would allow 3 separate
9448    memory references (one output, two input) in a single insn.  */
9449
9450 void
9451 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
9452 rtx operands[])
9453 {
9454 rtx src1, src2, dst, op, clob;
9455
9456 dst = ix86_fixup_binary_operands (code, mode, operands);
9457 src1 = operands[1];
9458 src2 = operands[2];
9459
9460 /* Emit the instruction. */
9461
9462 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
9463 if (reload_in_progress)
9464 {
9465 /* Reload doesn't know about the flags register, and doesn't know that
9466 it doesn't want to clobber it. We can only do this with PLUS. */
9467 gcc_assert (code == PLUS);
9468 emit_insn (op);
9469 }
9470 else
9471 {
9472 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9473 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
9474 }
9475
9476 /* Fix up the destination if needed. */
9477 if (dst != operands[0])
9478 emit_move_insn (operands[0], dst);
9479 }
9480
9481 /* Return TRUE or FALSE depending on whether the binary operator meets the
9482 appropriate constraints. */
9483
9484 int
9485 ix86_binary_operator_ok (enum rtx_code code,
9486 enum machine_mode mode ATTRIBUTE_UNUSED,
9487 rtx operands[3])
9488 {
9489 /* Both source operands cannot be in memory. */
9490 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
9491 return 0;
9492   /* If the operation is not commutative, source 1 cannot be a constant.  */
9493 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
9494 return 0;
9495 /* If the destination is memory, we must have a matching source operand. */
9496 if (GET_CODE (operands[0]) == MEM
9497 && ! (rtx_equal_p (operands[0], operands[1])
9498 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9499 && rtx_equal_p (operands[0], operands[2]))))
9500 return 0;
9501   /* If the operation is not commutative and source 1 is memory, we must
9502      have a matching destination.  */
9503 if (GET_CODE (operands[1]) == MEM
9504 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
9505 && ! rtx_equal_p (operands[0], operands[1]))
9506 return 0;
9507 return 1;
9508 }
9509
9510 /* Attempt to expand a unary operator. Make the expansion closer to the
9511    actual machine than just general_operand, which would allow 2 separate
9512    memory references (one output, one input) in a single insn.  */
9513
9514 void
9515 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
9516 rtx operands[])
9517 {
9518 int matching_memory;
9519 rtx src, dst, op, clob;
9520
9521 dst = operands[0];
9522 src = operands[1];
9523
9524 /* If the destination is memory, and we do not have matching source
9525 operands, do things in registers. */
9526 matching_memory = 0;
9527 if (MEM_P (dst))
9528 {
9529 if (rtx_equal_p (dst, src))
9530 matching_memory = 1;
9531 else
9532 dst = gen_reg_rtx (mode);
9533 }
9534
9535 /* When source operand is memory, destination must match. */
9536 if (MEM_P (src) && !matching_memory)
9537 src = force_reg (mode, src);
9538
9539 /* Emit the instruction. */
9540
9541 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
9542 if (reload_in_progress || code == NOT)
9543 {
9544 /* Reload doesn't know about the flags register, and doesn't know that
9545 it doesn't want to clobber it. */
9546 gcc_assert (code == NOT);
9547 emit_insn (op);
9548 }
9549 else
9550 {
9551 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9552 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
9553 }
9554
9555 /* Fix up the destination if needed. */
9556 if (dst != operands[0])
9557 emit_move_insn (operands[0], dst);
9558 }
9559
9560 /* Return TRUE or FALSE depending on whether the unary operator meets the
9561 appropriate constraints. */
9562
9563 int
9564 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
9565 enum machine_mode mode ATTRIBUTE_UNUSED,
9566 rtx operands[2] ATTRIBUTE_UNUSED)
9567 {
9568 /* If one of operands is memory, source and destination must match. */
9569 if ((GET_CODE (operands[0]) == MEM
9570 || GET_CODE (operands[1]) == MEM)
9571 && ! rtx_equal_p (operands[0], operands[1]))
9572 return FALSE;
9573 return TRUE;
9574 }
9575
9576 /* A subroutine of ix86_expand_fp_absneg_operator and copysign expanders.
9577 Create a mask for the sign bit in MODE for an SSE register. If VECT is
9578 true, then replicate the mask for all elements of the vector register.
9579 If INVERT is true, then create a mask excluding the sign bit. */
9580
9581 rtx
9582 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
9583 {
9584 enum machine_mode vec_mode;
9585 HOST_WIDE_INT hi, lo;
9586 int shift = 63;
9587 rtvec v;
9588 rtx mask;
9589
9590 /* Find the sign bit, sign extended to 2*HWI. */
9591 if (mode == SFmode)
9592 lo = 0x80000000, hi = lo < 0;
9593 else if (HOST_BITS_PER_WIDE_INT >= 64)
9594 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
9595 else
9596 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
9597
9598 if (invert)
9599 lo = ~lo, hi = ~hi;
9600
9601 /* Force this value into the low part of a fp vector constant. */
9602 mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
9603 mask = gen_lowpart (mode, mask);
9604
9605 if (mode == SFmode)
9606 {
9607 if (vect)
9608 v = gen_rtvec (4, mask, mask, mask, mask);
9609 else
9610 v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
9611 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
9612 vec_mode = V4SFmode;
9613 }
9614 else
9615 {
9616 if (vect)
9617 v = gen_rtvec (2, mask, mask);
9618 else
9619 v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
9620 vec_mode = V2DFmode;
9621 }
9622
9623 return force_reg (vec_mode, gen_rtx_CONST_VECTOR (vec_mode, v));
9624 }
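
/* For illustration: for DFmode with VECT and INVERT false this builds the
   V2DF constant whose low element has only bit 63 set (0x8000000000000000),
   so the absneg expander below can negate a scalar double with what is in
   effect a single xorpd against the mask, and take fabs with an andpd
   against the inverted mask (low element 0x7fffffffffffffff).  */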
9625
9626 /* Generate code for floating point ABS or NEG. */
9627
9628 void
9629 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
9630 rtx operands[])
9631 {
9632 rtx mask, set, use, clob, dst, src;
9633 bool matching_memory;
9634 bool use_sse = false;
9635 bool vector_mode = VECTOR_MODE_P (mode);
9636 enum machine_mode elt_mode = mode;
9637
9638 if (vector_mode)
9639 {
9640 elt_mode = GET_MODE_INNER (mode);
9641 use_sse = true;
9642 }
9643 else if (TARGET_SSE_MATH)
9644 use_sse = SSE_FLOAT_MODE_P (mode);
9645
9646 /* NEG and ABS performed with SSE use bitwise mask operations.
9647 Create the appropriate mask now. */
9648 if (use_sse)
9649 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
9650 else
9651 mask = NULL_RTX;
9652
9653 dst = operands[0];
9654 src = operands[1];
9655
9656 /* If the destination is memory, and we don't have matching source
9657 operands or we're using the x87, do things in registers. */
9658 matching_memory = false;
9659 if (MEM_P (dst))
9660 {
9661 if (use_sse && rtx_equal_p (dst, src))
9662 matching_memory = true;
9663 else
9664 dst = gen_reg_rtx (mode);
9665 }
9666 if (MEM_P (src) && !matching_memory)
9667 src = force_reg (mode, src);
9668
9669 if (vector_mode)
9670 {
9671 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
9672 set = gen_rtx_SET (VOIDmode, dst, set);
9673 emit_insn (set);
9674 }
9675 else
9676 {
9677 set = gen_rtx_fmt_e (code, mode, src);
9678 set = gen_rtx_SET (VOIDmode, dst, set);
9679 if (mask)
9680 {
9681 use = gen_rtx_USE (VOIDmode, mask);
9682 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9683 emit_insn (gen_rtx_PARALLEL (VOIDmode,
9684 gen_rtvec (3, set, use, clob)));
9685 }
9686 else
9687 emit_insn (set);
9688 }
9689
9690 if (dst != operands[0])
9691 emit_move_insn (operands[0], dst);
9692 }
9693
9694 /* Expand a copysign operation. Special case operand 0 being a constant. */
9695
9696 void
9697 ix86_expand_copysign (rtx operands[])
9698 {
9699 enum machine_mode mode, vmode;
9700 rtx dest, op0, op1, mask, nmask;
9701
9702 dest = operands[0];
9703 op0 = operands[1];
9704 op1 = operands[2];
9705
9706 mode = GET_MODE (dest);
9707 vmode = mode == SFmode ? V4SFmode : V2DFmode;
9708
9709 if (GET_CODE (op0) == CONST_DOUBLE)
9710 {
9711 rtvec v;
9712
9713 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
9714 op0 = simplify_unary_operation (ABS, mode, op0, mode);
9715
9716 if (op0 == CONST0_RTX (mode))
9717 op0 = CONST0_RTX (vmode);
9718 else
9719 {
9720 if (mode == SFmode)
9721 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
9722 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
9723 else
9724 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
9725 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
9726 }
9727
9728 mask = ix86_build_signbit_mask (mode, 0, 0);
9729
9730 if (mode == SFmode)
9731 emit_insn (gen_copysignsf3_const (dest, op0, op1, mask));
9732 else
9733 emit_insn (gen_copysigndf3_const (dest, op0, op1, mask));
9734 }
9735 else
9736 {
9737 nmask = ix86_build_signbit_mask (mode, 0, 1);
9738 mask = ix86_build_signbit_mask (mode, 0, 0);
9739
9740 if (mode == SFmode)
9741 emit_insn (gen_copysignsf3_var (dest, NULL, op0, op1, nmask, mask));
9742 else
9743 emit_insn (gen_copysigndf3_var (dest, NULL, op0, op1, nmask, mask));
9744 }
9745 }
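
/* The bit-level identity behind the two splitters below is the usual

	copysign (x, y) = (x & ~SIGN_MASK) | (y & SIGN_MASK)

   where SIGN_MASK has only the sign bit of the mode set.  When x is a
   constant its magnitude is precomputed above, so only the AND that
   extracts the sign of y and the final IOR remain to be emitted.  */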
9746
9747 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
9748 be a constant, and so has already been expanded into a vector constant. */
9749
9750 void
9751 ix86_split_copysign_const (rtx operands[])
9752 {
9753 enum machine_mode mode, vmode;
9754 rtx dest, op0, op1, mask, x;
9755
9756 dest = operands[0];
9757 op0 = operands[1];
9758 op1 = operands[2];
9759 mask = operands[3];
9760
9761 mode = GET_MODE (dest);
9762 vmode = GET_MODE (mask);
9763
9764 dest = simplify_gen_subreg (vmode, dest, mode, 0);
9765 x = gen_rtx_AND (vmode, dest, mask);
9766 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9767
9768 if (op0 != CONST0_RTX (vmode))
9769 {
9770 x = gen_rtx_IOR (vmode, dest, op0);
9771 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9772 }
9773 }
9774
9775 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
9776 so we have to do two masks. */
9777
9778 void
9779 ix86_split_copysign_var (rtx operands[])
9780 {
9781 enum machine_mode mode, vmode;
9782 rtx dest, scratch, op0, op1, mask, nmask, x;
9783
9784 dest = operands[0];
9785 scratch = operands[1];
9786 op0 = operands[2];
9787 op1 = operands[3];
9788 nmask = operands[4];
9789 mask = operands[5];
9790
9791 mode = GET_MODE (dest);
9792 vmode = GET_MODE (mask);
9793
9794 if (rtx_equal_p (op0, op1))
9795 {
9796 /* Shouldn't happen often (it's useless, obviously), but when it does
9797 we'd generate incorrect code if we continue below. */
9798 emit_move_insn (dest, op0);
9799 return;
9800 }
9801
9802 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
9803 {
9804 gcc_assert (REGNO (op1) == REGNO (scratch));
9805
9806 x = gen_rtx_AND (vmode, scratch, mask);
9807 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9808
9809 dest = mask;
9810 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9811 x = gen_rtx_NOT (vmode, dest);
9812 x = gen_rtx_AND (vmode, x, op0);
9813 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9814 }
9815 else
9816 {
9817 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
9818 {
9819 x = gen_rtx_AND (vmode, scratch, mask);
9820 }
9821 else /* alternative 2,4 */
9822 {
9823 gcc_assert (REGNO (mask) == REGNO (scratch));
9824 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
9825 x = gen_rtx_AND (vmode, scratch, op1);
9826 }
9827 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9828
9829 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
9830 {
9831 dest = simplify_gen_subreg (vmode, op0, mode, 0);
9832 x = gen_rtx_AND (vmode, dest, nmask);
9833 }
9834 else /* alternative 3,4 */
9835 {
9836 gcc_assert (REGNO (nmask) == REGNO (dest));
9837 dest = nmask;
9838 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9839 x = gen_rtx_AND (vmode, dest, op0);
9840 }
9841 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9842 }
9843
9844 x = gen_rtx_IOR (vmode, dest, scratch);
9845 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9846 }
9847
9848 /* Return TRUE or FALSE depending on whether the first SET in INSN
9849 has source and destination with matching CC modes, and that the
9850 CC mode is at least as constrained as REQ_MODE. */
9851
9852 int
9853 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
9854 {
9855 rtx set;
9856 enum machine_mode set_mode;
9857
9858 set = PATTERN (insn);
9859 if (GET_CODE (set) == PARALLEL)
9860 set = XVECEXP (set, 0, 0);
9861 gcc_assert (GET_CODE (set) == SET);
9862 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
9863
9864 set_mode = GET_MODE (SET_DEST (set));
9865 switch (set_mode)
9866 {
9867 case CCNOmode:
9868 if (req_mode != CCNOmode
9869 && (req_mode != CCmode
9870 || XEXP (SET_SRC (set), 1) != const0_rtx))
9871 return 0;
9872 break;
9873 case CCmode:
9874 if (req_mode == CCGCmode)
9875 return 0;
9876 /* FALLTHRU */
9877 case CCGCmode:
9878 if (req_mode == CCGOCmode || req_mode == CCNOmode)
9879 return 0;
9880 /* FALLTHRU */
9881 case CCGOCmode:
9882 if (req_mode == CCZmode)
9883 return 0;
9884 /* FALLTHRU */
9885 case CCZmode:
9886 break;
9887
9888 default:
9889 gcc_unreachable ();
9890 }
9891
9892 return (GET_MODE (SET_SRC (set)) == set_mode);
9893 }
9894
9895 /* Generate insn patterns to do an integer compare of OPERANDS. */
9896
9897 static rtx
9898 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
9899 {
9900 enum machine_mode cmpmode;
9901 rtx tmp, flags;
9902
9903 cmpmode = SELECT_CC_MODE (code, op0, op1);
9904 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
9905
9906 /* This is very simple, but making the interface the same as in the
9907 FP case makes the rest of the code easier. */
9908 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
9909 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
9910
9911 /* Return the test that should be put into the flags user, i.e.
9912 the bcc, scc, or cmov instruction. */
9913 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
9914 }
9915
9916 /* Figure out whether to use ordered or unordered fp comparisons.
9917 Return the appropriate mode to use. */
9918
9919 enum machine_mode
9920 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
9921 {
9922 /* ??? In order to make all comparisons reversible, we do all comparisons
9923      non-trapping when compiling for IEEE.  Once gcc is able to distinguish
9924      between trapping and nontrapping forms of comparisons, we can make
9925      inequality comparisons trapping again, since that results in better code
9926      when using FCOM based compares.  */
9927 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
9928 }
9929
9930 enum machine_mode
9931 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
9932 {
9933 if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
9934 return ix86_fp_compare_mode (code);
9935 switch (code)
9936 {
9937 /* Only zero flag is needed. */
9938 case EQ: /* ZF=0 */
9939 case NE: /* ZF!=0 */
9940 return CCZmode;
9941 /* Codes needing carry flag. */
9942 case GEU: /* CF=0 */
9943 case GTU: /* CF=0 & ZF=0 */
9944 case LTU: /* CF=1 */
9945 case LEU: /* CF=1 | ZF=1 */
9946 return CCmode;
9947 /* Codes possibly doable only with sign flag when
9948 comparing against zero. */
9949 case GE: /* SF=OF or SF=0 */
9950 case LT: /* SF<>OF or SF=1 */
9951 if (op1 == const0_rtx)
9952 return CCGOCmode;
9953 else
9954 /* For other cases Carry flag is not required. */
9955 return CCGCmode;
9956 	/* Codes doable only with the sign flag when comparing
9957 	   against zero, but we lack a jump instruction for them,
9958 	   so we need to use relational tests against overflow,
9959 	   which thus needs to be zero.  */
9960 case GT: /* ZF=0 & SF=OF */
9961 case LE: /* ZF=1 | SF<>OF */
9962 if (op1 == const0_rtx)
9963 return CCNOmode;
9964 else
9965 return CCGCmode;
9966 	/* The strcmp pattern does (use flags), and combine may ask us for the
9967 	   proper mode.  */
9968 case USE:
9969 return CCmode;
9970 default:
9971 gcc_unreachable ();
9972 }
9973 }
9974
9975 /* Return the fixed registers used for condition codes. */
9976
9977 static bool
9978 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9979 {
9980 *p1 = FLAGS_REG;
9981 *p2 = FPSR_REG;
9982 return true;
9983 }
9984
9985 /* If two condition code modes are compatible, return a condition code
9986 mode which is compatible with both. Otherwise, return
9987 VOIDmode. */
9988
9989 static enum machine_mode
9990 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
9991 {
9992 if (m1 == m2)
9993 return m1;
9994
9995 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
9996 return VOIDmode;
9997
9998 if ((m1 == CCGCmode && m2 == CCGOCmode)
9999 || (m1 == CCGOCmode && m2 == CCGCmode))
10000 return CCGCmode;
10001
10002 switch (m1)
10003 {
10004 default:
10005 gcc_unreachable ();
10006
10007 case CCmode:
10008 case CCGCmode:
10009 case CCGOCmode:
10010 case CCNOmode:
10011 case CCZmode:
10012 switch (m2)
10013 {
10014 default:
10015 return VOIDmode;
10016
10017 case CCmode:
10018 case CCGCmode:
10019 case CCGOCmode:
10020 case CCNOmode:
10021 case CCZmode:
10022 return CCmode;
10023 }
10024
10025 case CCFPmode:
10026 case CCFPUmode:
10027 /* These are only compatible with themselves, which we already
10028 checked above. */
10029 return VOIDmode;
10030 }
10031 }
10032
10033 /* Return true if we should use an FCOMI instruction for this fp comparison. */
10034
10035 int
10036 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
10037 {
10038 enum rtx_code swapped_code = swap_condition (code);
10039 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
10040 || (ix86_fp_comparison_cost (swapped_code)
10041 == ix86_fp_comparison_fcomi_cost (swapped_code)));
10042 }
10043
10044 /* Swap, force into registers, or otherwise massage the two operands
10045 to a fp comparison. The operands are updated in place; the new
10046 comparison code is returned. */
10047
10048 static enum rtx_code
10049 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
10050 {
10051 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
10052 rtx op0 = *pop0, op1 = *pop1;
10053 enum machine_mode op_mode = GET_MODE (op0);
10054 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
10055
10056 /* All of the unordered compare instructions only work on registers.
10057 The same is true of the fcomi compare instructions. The XFmode
10058 compare instructions require registers except when comparing
10059 against zero or when converting operand 1 from fixed point to
10060 floating point. */
10061
10062 if (!is_sse
10063 && (fpcmp_mode == CCFPUmode
10064 || (op_mode == XFmode
10065 && ! (standard_80387_constant_p (op0) == 1
10066 || standard_80387_constant_p (op1) == 1)
10067 && GET_CODE (op1) != FLOAT)
10068 || ix86_use_fcomi_compare (code)))
10069 {
10070 op0 = force_reg (op_mode, op0);
10071 op1 = force_reg (op_mode, op1);
10072 }
10073 else
10074 {
10075 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
10076 things around if they appear profitable, otherwise force op0
10077 into a register. */
10078
10079 if (standard_80387_constant_p (op0) == 0
10080 || (GET_CODE (op0) == MEM
10081 && ! (standard_80387_constant_p (op1) == 0
10082 || GET_CODE (op1) == MEM)))
10083 {
10084 rtx tmp;
10085 tmp = op0, op0 = op1, op1 = tmp;
10086 code = swap_condition (code);
10087 }
10088
10089 if (GET_CODE (op0) != REG)
10090 op0 = force_reg (op_mode, op0);
10091
10092 if (CONSTANT_P (op1))
10093 {
10094 int tmp = standard_80387_constant_p (op1);
10095 if (tmp == 0)
10096 op1 = validize_mem (force_const_mem (op_mode, op1));
10097 else if (tmp == 1)
10098 {
10099 if (TARGET_CMOVE)
10100 op1 = force_reg (op_mode, op1);
10101 }
10102 else
10103 op1 = force_reg (op_mode, op1);
10104 }
10105 }
10106
10107 /* Try to rearrange the comparison to make it cheaper. */
10108 if (ix86_fp_comparison_cost (code)
10109 > ix86_fp_comparison_cost (swap_condition (code))
10110 && (GET_CODE (op1) == REG || !no_new_pseudos))
10111 {
10112 rtx tmp;
10113 tmp = op0, op0 = op1, op1 = tmp;
10114 code = swap_condition (code);
10115 if (GET_CODE (op0) != REG)
10116 op0 = force_reg (op_mode, op0);
10117 }
10118
10119 *pop0 = op0;
10120 *pop1 = op1;
10121 return code;
10122 }
10123
10124 /* Convert the comparison codes we use to represent FP comparisons to the
10125    integer codes that will result in proper branches.  Return UNKNOWN if no
10126    such code is available.  */
10127
10128 enum rtx_code
10129 ix86_fp_compare_code_to_integer (enum rtx_code code)
10130 {
10131 switch (code)
10132 {
10133 case GT:
10134 return GTU;
10135 case GE:
10136 return GEU;
10137 case ORDERED:
10138 case UNORDERED:
10139 return code;
10140 break;
10141 case UNEQ:
10142 return EQ;
10143 break;
10144 case UNLT:
10145 return LTU;
10146 break;
10147 case UNLE:
10148 return LEU;
10149 break;
10150 case LTGT:
10151 return NE;
10152 break;
10153 default:
10154 return UNKNOWN;
10155 }
10156 }
10157
10158 /* Split comparison code CODE into comparisons we can do using branch
10159    instructions.  BYPASS_CODE is the comparison code for a branch that will
10160    branch around FIRST_CODE and SECOND_CODE.  If one of the branches
10161    is not required, its value is set to UNKNOWN.
10162 We never require more than two branches. */
10163
10164 void
10165 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
10166 enum rtx_code *first_code,
10167 enum rtx_code *second_code)
10168 {
10169 *first_code = code;
10170 *bypass_code = UNKNOWN;
10171 *second_code = UNKNOWN;
10172
10173 /* The fcomi comparison sets flags as follows:
10174
10175 cmp ZF PF CF
10176 > 0 0 0
10177 < 0 0 1
10178 = 1 0 0
10179 un 1 1 1 */
10180
10181 switch (code)
10182 {
10183 case GT: /* GTU - CF=0 & ZF=0 */
10184 case GE: /* GEU - CF=0 */
10185 case ORDERED: /* PF=0 */
10186 case UNORDERED: /* PF=1 */
10187 case UNEQ: /* EQ - ZF=1 */
10188 case UNLT: /* LTU - CF=1 */
10189 case UNLE: /* LEU - CF=1 | ZF=1 */
10190 case LTGT: /* EQ - ZF=0 */
10191 break;
10192 case LT: /* LTU - CF=1 - fails on unordered */
10193 *first_code = UNLT;
10194 *bypass_code = UNORDERED;
10195 break;
10196 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
10197 *first_code = UNLE;
10198 *bypass_code = UNORDERED;
10199 break;
10200 case EQ: /* EQ - ZF=1 - fails on unordered */
10201 *first_code = UNEQ;
10202 *bypass_code = UNORDERED;
10203 break;
10204 case NE: /* NE - ZF=0 - fails on unordered */
10205 *first_code = LTGT;
10206 *second_code = UNORDERED;
10207 break;
10208 case UNGE: /* GEU - CF=0 - fails on unordered */
10209 *first_code = GE;
10210 *second_code = UNORDERED;
10211 break;
10212 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
10213 *first_code = GT;
10214 *second_code = UNORDERED;
10215 break;
10216 default:
10217 gcc_unreachable ();
10218 }
10219 if (!TARGET_IEEE_FP)
10220 {
10221 *second_code = UNKNOWN;
10222 *bypass_code = UNKNOWN;
10223 }
10224 }
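
/* Worked example of the splitting above: an IEEE-safe EQ becomes
   first_code = UNEQ with bypass_code = UNORDERED, so after an fucomi the
   branch sequence is roughly

	jp	.Lskip		# unordered (PF set) must not take the EQ path
	je	.Ltarget	# ZF set on the remaining, ordered, equal case
   .Lskip:

   and when !TARGET_IEEE_FP the bypass is dropped so a plain je suffices.  */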
10225
10226 /* Return the cost of a comparison done using fcom + arithmetic operations on AX.
10227    All of the following functions use the number of instructions as the cost metric.
10228    In the future this should be tweaked to compute bytes for optimize_size and to
10229    take into account the performance of various instructions on various CPUs.  */
10230 static int
10231 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
10232 {
10233 if (!TARGET_IEEE_FP)
10234 return 4;
10235 /* The cost of code output by ix86_expand_fp_compare. */
10236 switch (code)
10237 {
10238 case UNLE:
10239 case UNLT:
10240 case LTGT:
10241 case GT:
10242 case GE:
10243 case UNORDERED:
10244 case ORDERED:
10245 case UNEQ:
10246 return 4;
10247 break;
10248 case LT:
10249 case NE:
10250 case EQ:
10251 case UNGE:
10252 return 5;
10253 break;
10254 case LE:
10255 case UNGT:
10256 return 6;
10257 break;
10258 default:
10259 gcc_unreachable ();
10260 }
10261 }
10262
10263 /* Return cost of comparison done using fcomi operation.
10264 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10265 static int
10266 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
10267 {
10268 enum rtx_code bypass_code, first_code, second_code;
10269 /* Return arbitrarily high cost when instruction is not supported - this
10270 prevents gcc from using it. */
10271 if (!TARGET_CMOVE)
10272 return 1024;
10273 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10274 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
10275 }
10276
10277 /* Return cost of comparison done using sahf operation.
10278 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10279 static int
10280 ix86_fp_comparison_sahf_cost (enum rtx_code code)
10281 {
10282 enum rtx_code bypass_code, first_code, second_code;
10283   /* Return arbitrarily high cost when instruction is not preferred - this
10284      prevents gcc from using it.  */
10285 if (!TARGET_USE_SAHF && !optimize_size)
10286 return 1024;
10287 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10288 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
10289 }
10290
10291 /* Compute cost of the comparison done using any method.
10292 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10293 static int
10294 ix86_fp_comparison_cost (enum rtx_code code)
10295 {
10296 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
10297 int min;
10298
10299 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
10300 sahf_cost = ix86_fp_comparison_sahf_cost (code);
10301
10302 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
10303 if (min > sahf_cost)
10304 min = sahf_cost;
10305 if (min > fcomi_cost)
10306 min = fcomi_cost;
10307 return min;
10308 }
10309
10310 /* Generate insn patterns to do a floating point compare of OPERANDS. */
10311
10312 static rtx
10313 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
10314 rtx *second_test, rtx *bypass_test)
10315 {
10316 enum machine_mode fpcmp_mode, intcmp_mode;
10317 rtx tmp, tmp2;
10318 int cost = ix86_fp_comparison_cost (code);
10319 enum rtx_code bypass_code, first_code, second_code;
10320
10321 fpcmp_mode = ix86_fp_compare_mode (code);
10322 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
10323
10324 if (second_test)
10325 *second_test = NULL_RTX;
10326 if (bypass_test)
10327 *bypass_test = NULL_RTX;
10328
10329 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10330
10331 /* Do fcomi/sahf based test when profitable. */
10332 if ((bypass_code == UNKNOWN || bypass_test)
10333 && (second_code == UNKNOWN || second_test)
10334 && ix86_fp_comparison_arithmetics_cost (code) > cost)
10335 {
10336 if (TARGET_CMOVE)
10337 {
10338 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10339 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
10340 tmp);
10341 emit_insn (tmp);
10342 }
10343 else
10344 {
10345 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10346 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
10347 if (!scratch)
10348 scratch = gen_reg_rtx (HImode);
10349 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
10350 emit_insn (gen_x86_sahf_1 (scratch));
10351 }
10352
10353 /* The FP codes work out to act like unsigned. */
10354 intcmp_mode = fpcmp_mode;
10355 code = first_code;
10356 if (bypass_code != UNKNOWN)
10357 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
10358 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10359 const0_rtx);
10360 if (second_code != UNKNOWN)
10361 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
10362 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10363 const0_rtx);
10364 }
10365 else
10366 {
10367 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
10368 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10369 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
10370 if (!scratch)
10371 scratch = gen_reg_rtx (HImode);
10372 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
10373
10374 /* In the unordered case, we have to check C2 for NaN's, which
10375 doesn't happen to work out to anything nice combination-wise.
10376 So do some bit twiddling on the value we've got in AH to come
10377 up with an appropriate set of condition codes. */
10378
10379 intcmp_mode = CCNOmode;
10380 switch (code)
10381 {
10382 case GT:
10383 case UNGT:
10384 if (code == GT || !TARGET_IEEE_FP)
10385 {
10386 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
10387 code = EQ;
10388 }
10389 else
10390 {
10391 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10392 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
10393 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
10394 intcmp_mode = CCmode;
10395 code = GEU;
10396 }
10397 break;
10398 case LT:
10399 case UNLT:
10400 if (code == LT && TARGET_IEEE_FP)
10401 {
10402 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10403 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
10404 intcmp_mode = CCmode;
10405 code = EQ;
10406 }
10407 else
10408 {
10409 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
10410 code = NE;
10411 }
10412 break;
10413 case GE:
10414 case UNGE:
10415 if (code == GE || !TARGET_IEEE_FP)
10416 {
10417 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
10418 code = EQ;
10419 }
10420 else
10421 {
10422 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10423 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
10424 GEN_INT (0x01)));
10425 code = NE;
10426 }
10427 break;
10428 case LE:
10429 case UNLE:
10430 if (code == LE && TARGET_IEEE_FP)
10431 {
10432 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10433 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
10434 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
10435 intcmp_mode = CCmode;
10436 code = LTU;
10437 }
10438 else
10439 {
10440 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
10441 code = NE;
10442 }
10443 break;
10444 case EQ:
10445 case UNEQ:
10446 if (code == EQ && TARGET_IEEE_FP)
10447 {
10448 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10449 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
10450 intcmp_mode = CCmode;
10451 code = EQ;
10452 }
10453 else
10454 {
10455 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
10456 code = NE;
10457 break;
10458 }
10459 break;
10460 case NE:
10461 case LTGT:
10462 if (code == NE && TARGET_IEEE_FP)
10463 {
10464 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10465 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
10466 GEN_INT (0x40)));
10467 code = NE;
10468 }
10469 else
10470 {
10471 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
10472 code = EQ;
10473 }
10474 break;
10475
10476 case UNORDERED:
10477 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
10478 code = NE;
10479 break;
10480 case ORDERED:
10481 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
10482 code = EQ;
10483 break;
10484
10485 default:
10486 gcc_unreachable ();
10487 }
10488 }
10489
10490 /* Return the test that should be put into the flags user, i.e.
10491 the bcc, scc, or cmov instruction. */
10492 return gen_rtx_fmt_ee (code, VOIDmode,
10493 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10494 const0_rtx);
10495 }
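
/* Illustrative note on the magic constants in the fnstsw path above: after
   "fnstsw %ax" the x87 condition bits appear in %ah as C0 = 0x01,
   C2 = 0x04 and C3 = 0x40, and a compare leaves C3 C2 C0 = 000 for ">",
   001 for "<", 100 for "=" and 111 for unordered.  So, for instance,
   "testb $0x45, %ah ; je" implements GT (all three bits clear), while the
   and/add/cmp games against 0x40 and 0x44 fold the unordered case in or
   out as the IEEE-safe variants require.  */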
10496
10497 rtx
10498 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
10499 {
10500 rtx op0, op1, ret;
10501 op0 = ix86_compare_op0;
10502 op1 = ix86_compare_op1;
10503
10504 if (second_test)
10505 *second_test = NULL_RTX;
10506 if (bypass_test)
10507 *bypass_test = NULL_RTX;
10508
10509 if (ix86_compare_emitted)
10510 {
10511 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
10512 ix86_compare_emitted = NULL_RTX;
10513 }
10514 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
10515 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
10516 second_test, bypass_test);
10517 else
10518 ret = ix86_expand_int_compare (code, op0, op1);
10519
10520 return ret;
10521 }
10522
10523 /* Return true if the CODE will result in nontrivial jump sequence. */
10524 bool
10525 ix86_fp_jump_nontrivial_p (enum rtx_code code)
10526 {
10527 enum rtx_code bypass_code, first_code, second_code;
10528 if (!TARGET_CMOVE)
10529 return true;
10530 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10531 return bypass_code != UNKNOWN || second_code != UNKNOWN;
10532 }
10533
10534 void
10535 ix86_expand_branch (enum rtx_code code, rtx label)
10536 {
10537 rtx tmp;
10538
10539 /* If we have emitted a compare insn, go straight to simple.
10540 ix86_expand_compare won't emit anything if ix86_compare_emitted
10541 is non NULL. */
10542 if (ix86_compare_emitted)
10543 goto simple;
10544
10545 switch (GET_MODE (ix86_compare_op0))
10546 {
10547 case QImode:
10548 case HImode:
10549 case SImode:
10550 simple:
10551 tmp = ix86_expand_compare (code, NULL, NULL);
10552 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
10553 gen_rtx_LABEL_REF (VOIDmode, label),
10554 pc_rtx);
10555 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
10556 return;
10557
10558 case SFmode:
10559 case DFmode:
10560 case XFmode:
10561 {
10562 rtvec vec;
10563 int use_fcomi;
10564 enum rtx_code bypass_code, first_code, second_code;
10565
10566 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
10567 &ix86_compare_op1);
10568
10569 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10570
10571 	  /* Check whether we will use the natural sequence with one jump.  If
10572 	     so, we can expand the jump early.  Otherwise delay expansion by
10573 	     creating a compound insn so as not to confuse the optimizers.  */
10574 if (bypass_code == UNKNOWN && second_code == UNKNOWN
10575 && TARGET_CMOVE)
10576 {
10577 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
10578 gen_rtx_LABEL_REF (VOIDmode, label),
10579 pc_rtx, NULL_RTX, NULL_RTX);
10580 }
10581 else
10582 {
10583 tmp = gen_rtx_fmt_ee (code, VOIDmode,
10584 ix86_compare_op0, ix86_compare_op1);
10585 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
10586 gen_rtx_LABEL_REF (VOIDmode, label),
10587 pc_rtx);
10588 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
10589
10590 use_fcomi = ix86_use_fcomi_compare (code);
10591 vec = rtvec_alloc (3 + !use_fcomi);
10592 RTVEC_ELT (vec, 0) = tmp;
10593 RTVEC_ELT (vec, 1)
10594 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
10595 RTVEC_ELT (vec, 2)
10596 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
10597 if (! use_fcomi)
10598 RTVEC_ELT (vec, 3)
10599 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
10600
10601 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
10602 }
10603 return;
10604 }
10605
10606 case DImode:
10607 if (TARGET_64BIT)
10608 goto simple;
10609 case TImode:
10610 /* Expand DImode branch into multiple compare+branch. */
10611 {
10612 rtx lo[2], hi[2], label2;
10613 enum rtx_code code1, code2, code3;
10614 enum machine_mode submode;
10615
10616 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
10617 {
10618 tmp = ix86_compare_op0;
10619 ix86_compare_op0 = ix86_compare_op1;
10620 ix86_compare_op1 = tmp;
10621 code = swap_condition (code);
10622 }
10623 if (GET_MODE (ix86_compare_op0) == DImode)
10624 {
10625 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
10626 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
10627 submode = SImode;
10628 }
10629 else
10630 {
10631 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
10632 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
10633 submode = DImode;
10634 }
10635
10636 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
10637 avoid two branches. This costs one extra insn, so disable when
10638 optimizing for size. */
10639
10640 if ((code == EQ || code == NE)
10641 && (!optimize_size
10642 || hi[1] == const0_rtx || lo[1] == const0_rtx))
10643 {
10644 rtx xor0, xor1;
10645
10646 xor1 = hi[0];
10647 if (hi[1] != const0_rtx)
10648 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
10649 NULL_RTX, 0, OPTAB_WIDEN);
10650
10651 xor0 = lo[0];
10652 if (lo[1] != const0_rtx)
10653 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
10654 NULL_RTX, 0, OPTAB_WIDEN);
10655
10656 tmp = expand_binop (submode, ior_optab, xor1, xor0,
10657 NULL_RTX, 0, OPTAB_WIDEN);
10658
10659 ix86_compare_op0 = tmp;
10660 ix86_compare_op1 = const0_rtx;
10661 ix86_expand_branch (code, label);
10662 return;
10663 }
10664
10665 /* Otherwise, if we are doing less-than or greater-or-equal-than,
10666 op1 is a constant and the low word is zero, then we can just
10667 examine the high word. */
10668
10669 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
10670 switch (code)
10671 {
10672 case LT: case LTU: case GE: case GEU:
10673 ix86_compare_op0 = hi[0];
10674 ix86_compare_op1 = hi[1];
10675 ix86_expand_branch (code, label);
10676 return;
10677 default:
10678 break;
10679 }
10680
10681 /* Otherwise, we need two or three jumps. */
10682
10683 label2 = gen_label_rtx ();
10684
10685 code1 = code;
10686 code2 = swap_condition (code);
10687 code3 = unsigned_condition (code);
10688
10689 switch (code)
10690 {
10691 case LT: case GT: case LTU: case GTU:
10692 break;
10693
10694 case LE: code1 = LT; code2 = GT; break;
10695 case GE: code1 = GT; code2 = LT; break;
10696 case LEU: code1 = LTU; code2 = GTU; break;
10697 case GEU: code1 = GTU; code2 = LTU; break;
10698
10699 case EQ: code1 = UNKNOWN; code2 = NE; break;
10700 case NE: code2 = UNKNOWN; break;
10701
10702 default:
10703 gcc_unreachable ();
10704 }
10705
10706 /*
10707 * a < b =>
10708 * if (hi(a) < hi(b)) goto true;
10709 * if (hi(a) > hi(b)) goto false;
10710 * if (lo(a) < lo(b)) goto true;
10711 * false:
10712 */
10713
10714 ix86_compare_op0 = hi[0];
10715 ix86_compare_op1 = hi[1];
10716
10717 if (code1 != UNKNOWN)
10718 ix86_expand_branch (code1, label);
10719 if (code2 != UNKNOWN)
10720 ix86_expand_branch (code2, label2);
10721
10722 ix86_compare_op0 = lo[0];
10723 ix86_compare_op1 = lo[1];
10724 ix86_expand_branch (code3, label);
10725
10726 if (code2 != UNKNOWN)
10727 emit_label (label2);
10728 return;
10729 }
10730
10731 default:
10732 gcc_unreachable ();
10733 }
10734 }
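
/* For illustration, the double-word splitting above turns a signed
   "if (a < b)" on DImode (32-bit target) into roughly

	cmpl	hi(b), hi(a)
	jl	.Ltrue			# decided by the high words
	jg	.Lfalse
	cmpl	lo(b), lo(a)
	jb	.Ltrue			# low words compare unsigned

   which is also why code3 above is unsigned_condition (code).  */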
10735
10736 /* Split branch based on floating point condition. */
10737 void
10738 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
10739 rtx target1, rtx target2, rtx tmp, rtx pushed)
10740 {
10741 rtx second, bypass;
10742 rtx label = NULL_RTX;
10743 rtx condition;
10744 int bypass_probability = -1, second_probability = -1, probability = -1;
10745 rtx i;
10746
10747 if (target2 != pc_rtx)
10748 {
10749 rtx tmp = target2;
10750 code = reverse_condition_maybe_unordered (code);
10751 target2 = target1;
10752 target1 = tmp;
10753 }
10754
10755 condition = ix86_expand_fp_compare (code, op1, op2,
10756 tmp, &second, &bypass);
10757
10758 /* Remove pushed operand from stack. */
10759 if (pushed)
10760 ix86_free_from_memory (GET_MODE (pushed));
10761
10762 if (split_branch_probability >= 0)
10763 {
10764 /* Distribute the probabilities across the jumps.
10765 Assume that BYPASS and SECOND always test
10766 for UNORDERED. */
10767 probability = split_branch_probability;
10768
10769 /* A value of 1 is low enough that the probability does not need
10770 to be updated. Later we may run some experiments and see
10771 if unordered values are more frequent in practice. */
10772 if (bypass)
10773 bypass_probability = 1;
10774 if (second)
10775 second_probability = 1;
10776 }
10777 if (bypass != NULL_RTX)
10778 {
10779 label = gen_label_rtx ();
10780 i = emit_jump_insn (gen_rtx_SET
10781 (VOIDmode, pc_rtx,
10782 gen_rtx_IF_THEN_ELSE (VOIDmode,
10783 bypass,
10784 gen_rtx_LABEL_REF (VOIDmode,
10785 label),
10786 pc_rtx)));
10787 if (bypass_probability >= 0)
10788 REG_NOTES (i)
10789 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10790 GEN_INT (bypass_probability),
10791 REG_NOTES (i));
10792 }
10793 i = emit_jump_insn (gen_rtx_SET
10794 (VOIDmode, pc_rtx,
10795 gen_rtx_IF_THEN_ELSE (VOIDmode,
10796 condition, target1, target2)));
10797 if (probability >= 0)
10798 REG_NOTES (i)
10799 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10800 GEN_INT (probability),
10801 REG_NOTES (i));
10802 if (second != NULL_RTX)
10803 {
10804 i = emit_jump_insn (gen_rtx_SET
10805 (VOIDmode, pc_rtx,
10806 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
10807 target2)));
10808 if (second_probability >= 0)
10809 REG_NOTES (i)
10810 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10811 GEN_INT (second_probability),
10812 REG_NOTES (i));
10813 }
10814 if (label != NULL_RTX)
10815 emit_label (label);
10816 }
10817
10818 int
10819 ix86_expand_setcc (enum rtx_code code, rtx dest)
10820 {
10821 rtx ret, tmp, tmpreg, equiv;
10822 rtx second_test, bypass_test;
10823
10824 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
10825 return 0; /* FAIL */
10826
10827 gcc_assert (GET_MODE (dest) == QImode);
10828
10829 ret = ix86_expand_compare (code, &second_test, &bypass_test);
10830 PUT_MODE (ret, QImode);
10831
10832 tmp = dest;
10833 tmpreg = dest;
10834
10835 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
10836 if (bypass_test || second_test)
10837 {
10838 rtx test = second_test;
10839 int bypass = 0;
10840 rtx tmp2 = gen_reg_rtx (QImode);
10841 if (bypass_test)
10842 {
10843 gcc_assert (!second_test);
10844 test = bypass_test;
10845 bypass = 1;
10846 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
10847 }
10848 PUT_MODE (test, QImode);
10849 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
10850
10851 if (bypass)
10852 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
10853 else
10854 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
10855 }
10856
10857 /* Attach a REG_EQUAL note describing the comparison result. */
10858 if (ix86_compare_op0 && ix86_compare_op1)
10859 {
10860 equiv = simplify_gen_relational (code, QImode,
10861 GET_MODE (ix86_compare_op0),
10862 ix86_compare_op0, ix86_compare_op1);
10863 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
10864 }
10865
10866 return 1; /* DONE */
10867 }
10868
10869 /* Expand comparison setting or clearing carry flag. Return true when
10870 successful and set pop for the operation. */
10871 static bool
10872 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
10873 {
10874 enum machine_mode mode =
10875 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
10876
10877 /* Do not handle DImode compares that go through the special path. Also we
10878 can't deal with FP compares yet; this would be possible to add. */
10879 if (mode == (TARGET_64BIT ? TImode : DImode))
10880 return false;
10881 if (FLOAT_MODE_P (mode))
10882 {
10883 rtx second_test = NULL, bypass_test = NULL;
10884 rtx compare_op, compare_seq;
10885
10886 /* Shortcut: the following common codes never translate into carry flag compares. */
10887 if (code == EQ || code == NE || code == UNEQ || code == LTGT
10888 || code == ORDERED || code == UNORDERED)
10889 return false;
10890
10891 /* These comparisons require the zero flag; swap the operands so they won't. */
10892 if ((code == GT || code == UNLE || code == LE || code == UNGT)
10893 && !TARGET_IEEE_FP)
10894 {
10895 rtx tmp = op0;
10896 op0 = op1;
10897 op1 = tmp;
10898 code = swap_condition (code);
10899 }
10900
10901 /* Try to expand the comparison and verify that we end up with a carry flag
10902 based comparison. This fails to be true only when we decide to expand the
10903 comparison using arithmetic, which is not a common scenario. */
10904 start_sequence ();
10905 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
10906 &second_test, &bypass_test);
10907 compare_seq = get_insns ();
10908 end_sequence ();
10909
10910 if (second_test || bypass_test)
10911 return false;
10912 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10913 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10914 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
10915 else
10916 code = GET_CODE (compare_op);
10917 if (code != LTU && code != GEU)
10918 return false;
10919 emit_insn (compare_seq);
10920 *pop = compare_op;
10921 return true;
10922 }
10923 if (!INTEGRAL_MODE_P (mode))
10924 return false;
10925 switch (code)
10926 {
10927 case LTU:
10928 case GEU:
10929 break;
10930
10931 /* Convert a==0 into (unsigned)a<1. */
10932 case EQ:
10933 case NE:
10934 if (op1 != const0_rtx)
10935 return false;
10936 op1 = const1_rtx;
10937 code = (code == EQ ? LTU : GEU);
10938 break;
10939
10940 /* Convert a>b into b<a or a>=b+1. */
10941 case GTU:
10942 case LEU:
10943 if (GET_CODE (op1) == CONST_INT)
10944 {
10945 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
10946 /* Bail out on overflow. We still can swap operands but that
10947 would force loading of the constant into register. */
10948 if (op1 == const0_rtx
10949 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
10950 return false;
10951 code = (code == GTU ? GEU : LTU);
10952 }
10953 else
10954 {
10955 rtx tmp = op1;
10956 op1 = op0;
10957 op0 = tmp;
10958 code = (code == GTU ? LTU : GEU);
10959 }
10960 break;
10961
10962 /* Convert a>=0 into (unsigned)a<0x80000000. */
10963 case LT:
10964 case GE:
10965 if (mode == DImode || op1 != const0_rtx)
10966 return false;
10967 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10968 code = (code == LT ? GEU : LTU);
10969 break;
10970 case LE:
10971 case GT:
10972 if (mode == DImode || op1 != constm1_rtx)
10973 return false;
10974 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10975 code = (code == LE ? GEU : LTU);
10976 break;
10977
10978 default:
10979 return false;
10980 }
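  /* At this point CODE is LTU or GEU.  A worked illustration of the rewrites
     above (purely explanatory):
       a == 0              became  (unsigned) a < 1            (LTU)
       (unsigned) a > 7    became  (unsigned) a >= 8           (GEU)
       a >= 0              became  (unsigned) a < 0x80000000   (LTU)
     so the final test maps directly onto the carry flag.  */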
10981 /* Swapping operands may cause a constant to appear as the first operand. */
10982 if (!nonimmediate_operand (op0, VOIDmode))
10983 {
10984 if (no_new_pseudos)
10985 return false;
10986 op0 = force_reg (mode, op0);
10987 }
10988 ix86_compare_op0 = op0;
10989 ix86_compare_op1 = op1;
10990 *pop = ix86_expand_compare (code, NULL, NULL);
10991 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
10992 return true;
10993 }
10994
10995 int
10996 ix86_expand_int_movcc (rtx operands[])
10997 {
10998 enum rtx_code code = GET_CODE (operands[1]), compare_code;
10999 rtx compare_seq, compare_op;
11000 rtx second_test, bypass_test;
11001 enum machine_mode mode = GET_MODE (operands[0]);
11002 bool sign_bit_compare_p = false;
11003
11004 start_sequence ();
11005 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11006 compare_seq = get_insns ();
11007 end_sequence ();
11008
11009 compare_code = GET_CODE (compare_op);
11010
11011 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
11012 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
11013 sign_bit_compare_p = true;
11014
11015 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
11016 HImode insns, we'd be swallowed in word prefix ops. */
11017
11018 if ((mode != HImode || TARGET_FAST_PREFIX)
11019 && (mode != (TARGET_64BIT ? TImode : DImode))
11020 && GET_CODE (operands[2]) == CONST_INT
11021 && GET_CODE (operands[3]) == CONST_INT)
11022 {
11023 rtx out = operands[0];
11024 HOST_WIDE_INT ct = INTVAL (operands[2]);
11025 HOST_WIDE_INT cf = INTVAL (operands[3]);
11026 HOST_WIDE_INT diff;
11027
11028 diff = ct - cf;
11029 /* Sign bit compares are better done using shifts than by using
11030 sbb. */
11031 if (sign_bit_compare_p
11032 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11033 ix86_compare_op1, &compare_op))
11034 {
11035 /* Detect overlap between destination and compare sources. */
11036 rtx tmp = out;
11037
11038 if (!sign_bit_compare_p)
11039 {
11040 bool fpcmp = false;
11041
11042 compare_code = GET_CODE (compare_op);
11043
11044 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11045 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11046 {
11047 fpcmp = true;
11048 compare_code = ix86_fp_compare_code_to_integer (compare_code);
11049 }
11050
11051 /* To simplify rest of code, restrict to the GEU case. */
11052 if (compare_code == LTU)
11053 {
11054 HOST_WIDE_INT tmp = ct;
11055 ct = cf;
11056 cf = tmp;
11057 compare_code = reverse_condition (compare_code);
11058 code = reverse_condition (code);
11059 }
11060 else
11061 {
11062 if (fpcmp)
11063 PUT_CODE (compare_op,
11064 reverse_condition_maybe_unordered
11065 (GET_CODE (compare_op)));
11066 else
11067 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11068 }
11069 diff = ct - cf;
11070
11071 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
11072 || reg_overlap_mentioned_p (out, ix86_compare_op1))
11073 tmp = gen_reg_rtx (mode);
11074
11075 if (mode == DImode)
11076 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
11077 else
11078 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
11079 }
11080 else
11081 {
11082 if (code == GT || code == GE)
11083 code = reverse_condition (code);
11084 else
11085 {
11086 HOST_WIDE_INT tmp = ct;
11087 ct = cf;
11088 cf = tmp;
11089 diff = ct - cf;
11090 }
11091 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
11092 ix86_compare_op1, VOIDmode, 0, -1);
11093 }
11094
11095 if (diff == 1)
11096 {
11097 /*
11098 * cmpl op0,op1
11099 * sbbl dest,dest
11100 * [addl dest, ct]
11101 *
11102 * Size 5 - 8.
11103 */
11104 if (ct)
11105 tmp = expand_simple_binop (mode, PLUS,
11106 tmp, GEN_INT (ct),
11107 copy_rtx (tmp), 1, OPTAB_DIRECT);
11108 }
11109 else if (cf == -1)
11110 {
11111 /*
11112 * cmpl op0,op1
11113 * sbbl dest,dest
11114 * orl $ct, dest
11115 *
11116 * Size 8.
11117 */
11118 tmp = expand_simple_binop (mode, IOR,
11119 tmp, GEN_INT (ct),
11120 copy_rtx (tmp), 1, OPTAB_DIRECT);
11121 }
11122 else if (diff == -1 && ct)
11123 {
11124 /*
11125 * cmpl op0,op1
11126 * sbbl dest,dest
11127 * notl dest
11128 * [addl dest, cf]
11129 *
11130 * Size 8 - 11.
11131 */
11132 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
11133 if (cf)
11134 tmp = expand_simple_binop (mode, PLUS,
11135 copy_rtx (tmp), GEN_INT (cf),
11136 copy_rtx (tmp), 1, OPTAB_DIRECT);
11137 }
11138 else
11139 {
11140 /*
11141 * cmpl op0,op1
11142 * sbbl dest,dest
11143 * [notl dest]
11144 * andl cf - ct, dest
11145 * [addl dest, ct]
11146 *
11147 * Size 8 - 11.
11148 */
11149
11150 if (cf == 0)
11151 {
11152 cf = ct;
11153 ct = 0;
11154 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
11155 }
11156
11157 tmp = expand_simple_binop (mode, AND,
11158 copy_rtx (tmp),
11159 gen_int_mode (cf - ct, mode),
11160 copy_rtx (tmp), 1, OPTAB_DIRECT);
11161 if (ct)
11162 tmp = expand_simple_binop (mode, PLUS,
11163 copy_rtx (tmp), GEN_INT (ct),
11164 copy_rtx (tmp), 1, OPTAB_DIRECT);
11165 }
11166
11167 if (!rtx_equal_p (tmp, out))
11168 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
11169
11170 return 1; /* DONE */
11171 }
11172
11173 if (diff < 0)
11174 {
11175 HOST_WIDE_INT tmp;
11176 tmp = ct, ct = cf, cf = tmp;
11177 diff = -diff;
11178 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
11179 {
11180 /* We may be reversing an unordered compare to a normal compare, which
11181 is not valid in general (we may convert a non-trapping condition
11182 to a trapping one); however, on i386 we currently emit all
11183 comparisons unordered. */
11184 compare_code = reverse_condition_maybe_unordered (compare_code);
11185 code = reverse_condition_maybe_unordered (code);
11186 }
11187 else
11188 {
11189 compare_code = reverse_condition (compare_code);
11190 code = reverse_condition (code);
11191 }
11192 }
11193
11194 compare_code = UNKNOWN;
11195 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
11196 && GET_CODE (ix86_compare_op1) == CONST_INT)
11197 {
11198 if (ix86_compare_op1 == const0_rtx
11199 && (code == LT || code == GE))
11200 compare_code = code;
11201 else if (ix86_compare_op1 == constm1_rtx)
11202 {
11203 if (code == LE)
11204 compare_code = LT;
11205 else if (code == GT)
11206 compare_code = GE;
11207 }
11208 }
11209
11210 /* Optimize dest = (op0 < 0) ? -1 : cf. */
11211 if (compare_code != UNKNOWN
11212 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
11213 && (cf == -1 || ct == -1))
11214 {
11215 /* If lea code below could be used, only optimize
11216 if it results in a 2 insn sequence. */
11217
11218 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
11219 || diff == 3 || diff == 5 || diff == 9)
11220 || (compare_code == LT && ct == -1)
11221 || (compare_code == GE && cf == -1))
11222 {
11223 /*
11224 * notl op1 (if necessary)
11225 * sarl $31, op1
11226 * orl cf, op1
11227 */
11228 if (ct != -1)
11229 {
11230 cf = ct;
11231 ct = -1;
11232 code = reverse_condition (code);
11233 }
11234
11235 out = emit_store_flag (out, code, ix86_compare_op0,
11236 ix86_compare_op1, VOIDmode, 0, -1);
11237
11238 out = expand_simple_binop (mode, IOR,
11239 out, GEN_INT (cf),
11240 out, 1, OPTAB_DIRECT);
11241 if (out != operands[0])
11242 emit_move_insn (operands[0], out);
11243
11244 return 1; /* DONE */
11245 }
11246 }
11247
11248
11249 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
11250 || diff == 3 || diff == 5 || diff == 9)
11251 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
11252 && (mode != DImode
11253 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
11254 {
11255 /*
11256 * xorl dest,dest
11257 * cmpl op1,op2
11258 * setcc dest
11259 * lea cf(dest*(ct-cf)),dest
11260 *
11261 * Size 14.
11262 *
11263 * This also catches the degenerate setcc-only case.
11264 */
11265
11266 rtx tmp;
11267 int nops;
11268
11269 out = emit_store_flag (out, code, ix86_compare_op0,
11270 ix86_compare_op1, VOIDmode, 0, 1);
11271
11272 nops = 0;
11273 /* On x86_64 the lea instruction operates on Pmode, so we need
11274 to get the arithmetic done in the proper mode to match. */
11275 if (diff == 1)
11276 tmp = copy_rtx (out);
11277 else
11278 {
11279 rtx out1;
11280 out1 = copy_rtx (out);
11281 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
11282 nops++;
11283 if (diff & 1)
11284 {
11285 tmp = gen_rtx_PLUS (mode, tmp, out1);
11286 nops++;
11287 }
11288 }
11289 if (cf != 0)
11290 {
11291 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
11292 nops++;
11293 }
11294 if (!rtx_equal_p (tmp, out))
11295 {
11296 if (nops == 1)
11297 out = force_operand (tmp, copy_rtx (out));
11298 else
11299 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
11300 }
11301 if (!rtx_equal_p (out, operands[0]))
11302 emit_move_insn (operands[0], copy_rtx (out));
11303
11304 return 1; /* DONE */
11305 }
11306
11307 /*
11308 * General case: Jumpful:
11309 * xorl dest,dest cmpl op1, op2
11310 * cmpl op1, op2 movl ct, dest
11311 * setcc dest jcc 1f
11312 * decl dest movl cf, dest
11313 * andl (cf-ct),dest 1:
11314 * addl ct,dest
11315 *
11316 * Size 20. Size 14.
11317 *
11318 * This is reasonably steep, but branch mispredict costs are
11319 * high on modern cpus, so consider failing only if optimizing
11320 * for space.
11321 */
11322
11323 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
11324 && BRANCH_COST >= 2)
11325 {
11326 if (cf == 0)
11327 {
11328 cf = ct;
11329 ct = 0;
11330 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
11331 /* We may be reversing an unordered compare to a normal compare,
11332 which is not valid in general (we may convert a non-trapping
11333 condition to a trapping one); however, on i386 we currently
11334 emit all comparisons unordered. */
11335 code = reverse_condition_maybe_unordered (code);
11336 else
11337 {
11338 code = reverse_condition (code);
11339 if (compare_code != UNKNOWN)
11340 compare_code = reverse_condition (compare_code);
11341 }
11342 }
11343
11344 if (compare_code != UNKNOWN)
11345 {
11346 /* notl op1 (if needed)
11347 sarl $31, op1
11348 andl (cf-ct), op1
11349 addl ct, op1
11350
11351 For x < 0 (resp. x <= -1) there will be no notl,
11352 so if possible swap the constants to get rid of the
11353 complement.
11354 True/false will be -1/0 while code below (store flag
11355 followed by decrement) is 0/-1, so the constants need
11356 to be exchanged once more. */
11357
11358 if (compare_code == GE || !cf)
11359 {
11360 code = reverse_condition (code);
11361 compare_code = LT;
11362 }
11363 else
11364 {
11365 HOST_WIDE_INT tmp = cf;
11366 cf = ct;
11367 ct = tmp;
11368 }
11369
11370 out = emit_store_flag (out, code, ix86_compare_op0,
11371 ix86_compare_op1, VOIDmode, 0, -1);
11372 }
11373 else
11374 {
11375 out = emit_store_flag (out, code, ix86_compare_op0,
11376 ix86_compare_op1, VOIDmode, 0, 1);
11377
11378 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
11379 copy_rtx (out), 1, OPTAB_DIRECT);
11380 }
11381
11382 out = expand_simple_binop (mode, AND, copy_rtx (out),
11383 gen_int_mode (cf - ct, mode),
11384 copy_rtx (out), 1, OPTAB_DIRECT);
11385 if (ct)
11386 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
11387 copy_rtx (out), 1, OPTAB_DIRECT);
11388 if (!rtx_equal_p (out, operands[0]))
11389 emit_move_insn (operands[0], copy_rtx (out));
11390
11391 return 1; /* DONE */
11392 }
11393 }
11394
11395 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
11396 {
11397 /* Try a few things more with specific constants and a variable. */
11398
11399 optab op;
11400 rtx var, orig_out, out, tmp;
11401
11402 if (BRANCH_COST <= 2)
11403 return 0; /* FAIL */
11404
11405 /* If one of the two operands is an interesting constant, load a
11406 constant with the above and mask it in with a logical operation. */
11407
11408 if (GET_CODE (operands[2]) == CONST_INT)
11409 {
11410 var = operands[3];
11411 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
11412 operands[3] = constm1_rtx, op = and_optab;
11413 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
11414 operands[3] = const0_rtx, op = ior_optab;
11415 else
11416 return 0; /* FAIL */
11417 }
11418 else if (GET_CODE (operands[3]) == CONST_INT)
11419 {
11420 var = operands[2];
11421 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
11422 operands[2] = constm1_rtx, op = and_optab;
11423 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
11424 operands[2] = const0_rtx, op = ior_optab;
11425 else
11426 return 0; /* FAIL */
11427 }
11428 else
11429 return 0; /* FAIL */
11430
11431 orig_out = operands[0];
11432 tmp = gen_reg_rtx (mode);
11433 operands[0] = tmp;
11434
11435 /* Recurse to get the constant loaded. */
11436 if (ix86_expand_int_movcc (operands) == 0)
11437 return 0; /* FAIL */
11438
11439 /* Mask in the interesting variable. */
11440 out = expand_binop (mode, op, var, tmp, orig_out, 0,
11441 OPTAB_WIDEN);
11442 if (!rtx_equal_p (out, orig_out))
11443 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
11444
11445 return 1; /* DONE */
11446 }
11447
11448 /*
11449 * For comparison with above,
11450 *
11451 * movl cf,dest
11452 * movl ct,tmp
11453 * cmpl op1,op2
11454 * cmovcc tmp,dest
11455 *
11456 * Size 15.
11457 */
11458
11459 if (! nonimmediate_operand (operands[2], mode))
11460 operands[2] = force_reg (mode, operands[2]);
11461 if (! nonimmediate_operand (operands[3], mode))
11462 operands[3] = force_reg (mode, operands[3]);
11463
11464 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
11465 {
11466 rtx tmp = gen_reg_rtx (mode);
11467 emit_move_insn (tmp, operands[3]);
11468 operands[3] = tmp;
11469 }
11470 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
11471 {
11472 rtx tmp = gen_reg_rtx (mode);
11473 emit_move_insn (tmp, operands[2]);
11474 operands[2] = tmp;
11475 }
11476
11477 if (! register_operand (operands[2], VOIDmode)
11478 && (mode == QImode
11479 || ! register_operand (operands[3], VOIDmode)))
11480 operands[2] = force_reg (mode, operands[2]);
11481
11482 if (mode == QImode
11483 && ! register_operand (operands[3], VOIDmode))
11484 operands[3] = force_reg (mode, operands[3]);
11485
11486 emit_insn (compare_seq);
11487 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11488 gen_rtx_IF_THEN_ELSE (mode,
11489 compare_op, operands[2],
11490 operands[3])));
11491 if (bypass_test)
11492 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
11493 gen_rtx_IF_THEN_ELSE (mode,
11494 bypass_test,
11495 copy_rtx (operands[3]),
11496 copy_rtx (operands[0]))));
11497 if (second_test)
11498 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
11499 gen_rtx_IF_THEN_ELSE (mode,
11500 second_test,
11501 copy_rtx (operands[2]),
11502 copy_rtx (operands[0]))));
11503
11504 return 1; /* DONE */
11505 }
11506
11507 /* Swap, force into registers, or otherwise massage the two operands
11508 to an sse comparison with a mask result. Thus we differ a bit from
11509 ix86_prepare_fp_compare_args which expects to produce a flags result.
11510
11511 The DEST operand exists to help determine whether to commute commutative
11512 operators. The POP0/POP1 operands are updated in place. The new
11513 comparison code is returned, or UNKNOWN if not implementable. */
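/* For example (illustrative only): a GT or GE request is rewritten by
   swapping the operands into LT or LE, since only LT/LE/UNGT/UNGE are
   handled directly by the SSE compare expansion below.  */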
11514
11515 static enum rtx_code
11516 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
11517 rtx *pop0, rtx *pop1)
11518 {
11519 rtx tmp;
11520
11521 switch (code)
11522 {
11523 case LTGT:
11524 case UNEQ:
11525 /* We have no LTGT as an operator. We could implement it with
11526 NE & ORDERED, but this requires an extra temporary. It's
11527 not clear that it's worth it. */
11528 return UNKNOWN;
11529
11530 case LT:
11531 case LE:
11532 case UNGT:
11533 case UNGE:
11534 /* These are supported directly. */
11535 break;
11536
11537 case EQ:
11538 case NE:
11539 case UNORDERED:
11540 case ORDERED:
11541 /* For commutative operators, try to canonicalize the destination
11542 operand to be first in the comparison - this helps reload to
11543 avoid extra moves. */
11544 if (!dest || !rtx_equal_p (dest, *pop1))
11545 break;
11546 /* FALLTHRU */
11547
11548 case GE:
11549 case GT:
11550 case UNLE:
11551 case UNLT:
11552 /* These are not supported directly. Swap the comparison operands
11553 to transform into something that is supported. */
11554 tmp = *pop0;
11555 *pop0 = *pop1;
11556 *pop1 = tmp;
11557 code = swap_condition (code);
11558 break;
11559
11560 default:
11561 gcc_unreachable ();
11562 }
11563
11564 return code;
11565 }
11566
11567 /* Detect conditional moves that exactly match min/max operational
11568 semantics. Note that this is IEEE safe, as long as we don't
11569 interchange the operands.
11570
11571 Returns FALSE if this conditional move doesn't match a MIN/MAX,
11572 and TRUE if the operation is successful and instructions are emitted. */
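/* As a hedged illustration: "a < b ? a : b" matches the MIN pattern here.
   With both flag_finite_math_only and flag_unsafe_math_optimizations it
   becomes a plain SMIN; otherwise an UNSPEC_IEEE_MIN is emitted so that the
   operand order (and hence the NaN and -0.0 behaviour of the underlying SSE
   min instruction) is preserved.  */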
11573
11574 static bool
11575 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
11576 rtx cmp_op1, rtx if_true, rtx if_false)
11577 {
11578 enum machine_mode mode;
11579 bool is_min;
11580 rtx tmp;
11581
11582 if (code == LT)
11583 ;
11584 else if (code == UNGE)
11585 {
11586 tmp = if_true;
11587 if_true = if_false;
11588 if_false = tmp;
11589 }
11590 else
11591 return false;
11592
11593 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
11594 is_min = true;
11595 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
11596 is_min = false;
11597 else
11598 return false;
11599
11600 mode = GET_MODE (dest);
11601
11602 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
11603 but MODE may be a vector mode and thus not appropriate. */
11604 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
11605 {
11606 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
11607 rtvec v;
11608
11609 if_true = force_reg (mode, if_true);
11610 v = gen_rtvec (2, if_true, if_false);
11611 tmp = gen_rtx_UNSPEC (mode, v, u);
11612 }
11613 else
11614 {
11615 code = is_min ? SMIN : SMAX;
11616 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
11617 }
11618
11619 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
11620 return true;
11621 }
11622
11623 /* Expand an sse vector comparison. Return the register with the result. */
11624
11625 static rtx
11626 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
11627 rtx op_true, rtx op_false)
11628 {
11629 enum machine_mode mode = GET_MODE (dest);
11630 rtx x;
11631
11632 cmp_op0 = force_reg (mode, cmp_op0);
11633 if (!nonimmediate_operand (cmp_op1, mode))
11634 cmp_op1 = force_reg (mode, cmp_op1);
11635
11636 if (optimize
11637 || reg_overlap_mentioned_p (dest, op_true)
11638 || reg_overlap_mentioned_p (dest, op_false))
11639 dest = gen_reg_rtx (mode);
11640
11641 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
11642 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11643
11644 return dest;
11645 }
11646
11647 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
11648 operations. This is used for both scalar and vector conditional moves. */
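/* In other words (a sketch of the intended semantics, with CMP already an
   all-ones/all-zeros mask per element):
     dest = (cmp & op_true) | (~cmp & op_false)
   The two special cases below simply drop the AND/ANDN half whose operand
   is the zero constant.  */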
11649
11650 static void
11651 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
11652 {
11653 enum machine_mode mode = GET_MODE (dest);
11654 rtx t2, t3, x;
11655
11656 if (op_false == CONST0_RTX (mode))
11657 {
11658 op_true = force_reg (mode, op_true);
11659 x = gen_rtx_AND (mode, cmp, op_true);
11660 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11661 }
11662 else if (op_true == CONST0_RTX (mode))
11663 {
11664 op_false = force_reg (mode, op_false);
11665 x = gen_rtx_NOT (mode, cmp);
11666 x = gen_rtx_AND (mode, x, op_false);
11667 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11668 }
11669 else
11670 {
11671 op_true = force_reg (mode, op_true);
11672 op_false = force_reg (mode, op_false);
11673
11674 t2 = gen_reg_rtx (mode);
11675 if (optimize)
11676 t3 = gen_reg_rtx (mode);
11677 else
11678 t3 = dest;
11679
11680 x = gen_rtx_AND (mode, op_true, cmp);
11681 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
11682
11683 x = gen_rtx_NOT (mode, cmp);
11684 x = gen_rtx_AND (mode, x, op_false);
11685 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
11686
11687 x = gen_rtx_IOR (mode, t3, t2);
11688 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11689 }
11690 }
11691
11692 /* Expand a floating-point conditional move. Return true if successful. */
11693
11694 int
11695 ix86_expand_fp_movcc (rtx operands[])
11696 {
11697 enum machine_mode mode = GET_MODE (operands[0]);
11698 enum rtx_code code = GET_CODE (operands[1]);
11699 rtx tmp, compare_op, second_test, bypass_test;
11700
11701 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
11702 {
11703 enum machine_mode cmode;
11704
11705 /* Since we've no cmove for sse registers, don't force bad register
11706 allocation just to gain access to it. Deny movcc when the
11707 comparison mode doesn't match the move mode. */
11708 cmode = GET_MODE (ix86_compare_op0);
11709 if (cmode == VOIDmode)
11710 cmode = GET_MODE (ix86_compare_op1);
11711 if (cmode != mode)
11712 return 0;
11713
11714 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
11715 &ix86_compare_op0,
11716 &ix86_compare_op1);
11717 if (code == UNKNOWN)
11718 return 0;
11719
11720 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
11721 ix86_compare_op1, operands[2],
11722 operands[3]))
11723 return 1;
11724
11725 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
11726 ix86_compare_op1, operands[2], operands[3]);
11727 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
11728 return 1;
11729 }
11730
11731 /* The floating point conditional move instructions don't directly
11732 support conditions resulting from a signed integer comparison. */
11733
11734 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11735
11739 if (!fcmov_comparison_operator (compare_op, VOIDmode))
11740 {
11741 gcc_assert (!second_test && !bypass_test);
11742 tmp = gen_reg_rtx (QImode);
11743 ix86_expand_setcc (code, tmp);
11744 code = NE;
11745 ix86_compare_op0 = tmp;
11746 ix86_compare_op1 = const0_rtx;
11747 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11748 }
11749 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
11750 {
11751 tmp = gen_reg_rtx (mode);
11752 emit_move_insn (tmp, operands[3]);
11753 operands[3] = tmp;
11754 }
11755 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
11756 {
11757 tmp = gen_reg_rtx (mode);
11758 emit_move_insn (tmp, operands[2]);
11759 operands[2] = tmp;
11760 }
11761
11762 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11763 gen_rtx_IF_THEN_ELSE (mode, compare_op,
11764 operands[2], operands[3])));
11765 if (bypass_test)
11766 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11767 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
11768 operands[3], operands[0])));
11769 if (second_test)
11770 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11771 gen_rtx_IF_THEN_ELSE (mode, second_test,
11772 operands[2], operands[0])));
11773
11774 return 1;
11775 }
11776
11777 /* Expand a floating-point vector conditional move; a vcond operation
11778 rather than a movcc operation. */
11779
11780 bool
11781 ix86_expand_fp_vcond (rtx operands[])
11782 {
11783 enum rtx_code code = GET_CODE (operands[3]);
11784 rtx cmp;
11785
11786 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
11787 &operands[4], &operands[5]);
11788 if (code == UNKNOWN)
11789 return false;
11790
11791 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
11792 operands[5], operands[1], operands[2]))
11793 return true;
11794
11795 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
11796 operands[1], operands[2]);
11797 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
11798 return true;
11799 }
11800
11801 /* Expand a signed integral vector conditional move. */
11802
11803 bool
11804 ix86_expand_int_vcond (rtx operands[])
11805 {
11806 enum machine_mode mode = GET_MODE (operands[0]);
11807 enum rtx_code code = GET_CODE (operands[3]);
11808 bool negate = false;
11809 rtx x, cop0, cop1;
11810
11811 cop0 = operands[4];
11812 cop1 = operands[5];
11813
11814 /* Canonicalize the comparison to EQ, GT, GTU. */
11815 switch (code)
11816 {
11817 case EQ:
11818 case GT:
11819 case GTU:
11820 break;
11821
11822 case NE:
11823 case LE:
11824 case LEU:
11825 code = reverse_condition (code);
11826 negate = true;
11827 break;
11828
11829 case GE:
11830 case GEU:
11831 code = reverse_condition (code);
11832 negate = true;
11833 /* FALLTHRU */
11834
11835 case LT:
11836 case LTU:
11837 code = swap_condition (code);
11838 x = cop0, cop0 = cop1, cop1 = x;
11839 break;
11840
11841 default:
11842 gcc_unreachable ();
11843 }
11844
11845 /* Unsigned parallel compare is not supported by the hardware. Play some
11846 tricks to turn this into a signed comparison against 0. */
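  /* A hedged note on the tricks: for V16QI/V8HI the identity
       a >u b  <=>  (a -us b) != 0
     (unsigned saturating subtraction) is used, expanded as EQ plus a negated
     selection; for V4SI a subtraction plus a sign-bit adjustment (see the
     comments in the case below) reduces it to a signed GT against zero.  */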
11847 if (code == GTU)
11848 {
11849 cop0 = force_reg (mode, cop0);
11850
11851 switch (mode)
11852 {
11853 case V4SImode:
11854 {
11855 rtx t1, t2, mask;
11856
11857 /* Perform a parallel modulo subtraction. */
11858 t1 = gen_reg_rtx (mode);
11859 emit_insn (gen_subv4si3 (t1, cop0, cop1));
11860
11861 /* Extract the original sign bit of op0. */
11862 mask = GEN_INT (-0x80000000);
11863 mask = gen_rtx_CONST_VECTOR (mode,
11864 gen_rtvec (4, mask, mask, mask, mask));
11865 mask = force_reg (mode, mask);
11866 t2 = gen_reg_rtx (mode);
11867 emit_insn (gen_andv4si3 (t2, cop0, mask));
11868
11869 /* XOR it back into the result of the subtraction. This results
11870 in the sign bit set iff we saw unsigned underflow. */
11871 x = gen_reg_rtx (mode);
11872 emit_insn (gen_xorv4si3 (x, t1, t2));
11873
11874 code = GT;
11875 }
11876 break;
11877
11878 case V16QImode:
11879 case V8HImode:
11880 /* Perform a parallel unsigned saturating subtraction. */
11881 x = gen_reg_rtx (mode);
11882 emit_insn (gen_rtx_SET (VOIDmode, x,
11883 gen_rtx_US_MINUS (mode, cop0, cop1)));
11884
11885 code = EQ;
11886 negate = !negate;
11887 break;
11888
11889 default:
11890 gcc_unreachable ();
11891 }
11892
11893 cop0 = x;
11894 cop1 = CONST0_RTX (mode);
11895 }
11896
11897 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
11898 operands[1+negate], operands[2-negate]);
11899
11900 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
11901 operands[2-negate]);
11902 return true;
11903 }
11904
11905 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
11906 true if we should do zero extension, else sign extension. HIGH_P is
11907 true if we want the N/2 high elements, else the low elements. */
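/* For instance (an informal sketch): unpacking the low half of a V8HI to
   V4SI interleaves each 16-bit element either with zeros (zero extension)
   or with a computed "0 > x" mask whose bits equal the sign bits (sign
   extension), so each 32-bit lane ends up properly extended.  */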
11908
11909 void
11910 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
11911 {
11912 enum machine_mode imode = GET_MODE (operands[1]);
11913 rtx (*unpack)(rtx, rtx, rtx);
11914 rtx se, dest;
11915
11916 switch (imode)
11917 {
11918 case V16QImode:
11919 if (high_p)
11920 unpack = gen_vec_interleave_highv16qi;
11921 else
11922 unpack = gen_vec_interleave_lowv16qi;
11923 break;
11924 case V8HImode:
11925 if (high_p)
11926 unpack = gen_vec_interleave_highv8hi;
11927 else
11928 unpack = gen_vec_interleave_lowv8hi;
11929 break;
11930 case V4SImode:
11931 if (high_p)
11932 unpack = gen_vec_interleave_highv4si;
11933 else
11934 unpack = gen_vec_interleave_lowv4si;
11935 break;
11936 default:
11937 gcc_unreachable ();
11938 }
11939
11940 dest = gen_lowpart (imode, operands[0]);
11941
11942 if (unsigned_p)
11943 se = force_reg (imode, CONST0_RTX (imode));
11944 else
11945 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
11946 operands[1], pc_rtx, pc_rtx);
11947
11948 emit_insn (unpack (dest, operands[1], se));
11949 }
11950
11951 /* Expand conditional increment or decrement using adc/sbb instructions.
11952 The default case using setcc followed by the conditional move can be
11953 done by generic code. */
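/* For illustration (operand names hypothetical, AT&T syntax): a statement
   like
     x += (a < b);    (unsigned comparison)
   is intended to reach this expander and come out roughly as
     cmpl  b, a       ; sets the carry flag when a < b
     adcl  $0, x      ; x += carry
   i.e. the comparison result is consumed directly through the carry flag.  */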
11954 int
11955 ix86_expand_int_addcc (rtx operands[])
11956 {
11957 enum rtx_code code = GET_CODE (operands[1]);
11958 rtx compare_op;
11959 rtx val = const0_rtx;
11960 bool fpcmp = false;
11961 enum machine_mode mode = GET_MODE (operands[0]);
11962
11963 if (operands[3] != const1_rtx
11964 && operands[3] != constm1_rtx)
11965 return 0;
11966 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11967 ix86_compare_op1, &compare_op))
11968 return 0;
11969 code = GET_CODE (compare_op);
11970
11971 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11972 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11973 {
11974 fpcmp = true;
11975 code = ix86_fp_compare_code_to_integer (code);
11976 }
11977
11978 if (code != LTU)
11979 {
11980 val = constm1_rtx;
11981 if (fpcmp)
11982 PUT_CODE (compare_op,
11983 reverse_condition_maybe_unordered
11984 (GET_CODE (compare_op)));
11985 else
11986 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11987 }
11988 PUT_MODE (compare_op, mode);
11989
11990 /* Construct either adc or sbb insn. */
11991 if ((code == LTU) == (operands[3] == constm1_rtx))
11992 {
11993 switch (GET_MODE (operands[0]))
11994 {
11995 case QImode:
11996 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
11997 break;
11998 case HImode:
11999 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
12000 break;
12001 case SImode:
12002 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
12003 break;
12004 case DImode:
12005 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
12006 break;
12007 default:
12008 gcc_unreachable ();
12009 }
12010 }
12011 else
12012 {
12013 switch (GET_MODE (operands[0]))
12014 {
12015 case QImode:
12016 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
12017 break;
12018 case HImode:
12019 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
12020 break;
12021 case SImode:
12022 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
12023 break;
12024 case DImode:
12025 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
12026 break;
12027 default:
12028 gcc_unreachable ();
12029 }
12030 }
12031 return 1; /* DONE */
12032 }
12033
12034
12035 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
12036 works for floating point parameters and non-offsettable memories.
12037 For pushes, it returns just stack offsets; the values will be saved
12038 in the right order. At most three parts are generated. */
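/* As an example (not exhaustive): on a 32-bit target an XFmode constant is
   returned as three SImode immediates, a DFmode hard register as two
   consecutive SImode registers, and a push as the (retyped) push operand
   replicated for each part.  */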
12039
12040 static int
12041 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
12042 {
12043 int size;
12044
12045 if (!TARGET_64BIT)
12046 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
12047 else
12048 size = (GET_MODE_SIZE (mode) + 4) / 8;
12049
12050 gcc_assert (GET_CODE (operand) != REG || !MMX_REGNO_P (REGNO (operand)));
12051 gcc_assert (size >= 2 && size <= 3);
12052
12053 /* Optimize constant pool reference to immediates. This is used by fp
12054 moves, which force all constants to memory to allow combining. */
12055 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
12056 {
12057 rtx tmp = maybe_get_pool_constant (operand);
12058 if (tmp)
12059 operand = tmp;
12060 }
12061
12062 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
12063 {
12064 /* The only non-offsettable memories we handle are pushes. */
12065 int ok = push_operand (operand, VOIDmode);
12066
12067 gcc_assert (ok);
12068
12069 operand = copy_rtx (operand);
12070 PUT_MODE (operand, Pmode);
12071 parts[0] = parts[1] = parts[2] = operand;
12072 return size;
12073 }
12074
12075 if (GET_CODE (operand) == CONST_VECTOR)
12076 {
12077 enum machine_mode imode = int_mode_for_mode (mode);
12078 /* Caution: if we looked through a constant pool memory above,
12079 the operand may actually have a different mode now. That's
12080 ok, since we want to pun this all the way back to an integer. */
12081 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
12082 gcc_assert (operand != NULL);
12083 mode = imode;
12084 }
12085
12086 if (!TARGET_64BIT)
12087 {
12088 if (mode == DImode)
12089 split_di (&operand, 1, &parts[0], &parts[1]);
12090 else
12091 {
12092 if (REG_P (operand))
12093 {
12094 gcc_assert (reload_completed);
12095 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
12096 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
12097 if (size == 3)
12098 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
12099 }
12100 else if (offsettable_memref_p (operand))
12101 {
12102 operand = adjust_address (operand, SImode, 0);
12103 parts[0] = operand;
12104 parts[1] = adjust_address (operand, SImode, 4);
12105 if (size == 3)
12106 parts[2] = adjust_address (operand, SImode, 8);
12107 }
12108 else if (GET_CODE (operand) == CONST_DOUBLE)
12109 {
12110 REAL_VALUE_TYPE r;
12111 long l[4];
12112
12113 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
12114 switch (mode)
12115 {
12116 case XFmode:
12117 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
12118 parts[2] = gen_int_mode (l[2], SImode);
12119 break;
12120 case DFmode:
12121 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
12122 break;
12123 default:
12124 gcc_unreachable ();
12125 }
12126 parts[1] = gen_int_mode (l[1], SImode);
12127 parts[0] = gen_int_mode (l[0], SImode);
12128 }
12129 else
12130 gcc_unreachable ();
12131 }
12132 }
12133 else
12134 {
12135 if (mode == TImode)
12136 split_ti (&operand, 1, &parts[0], &parts[1]);
12137 if (mode == XFmode || mode == TFmode)
12138 {
12139 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
12140 if (REG_P (operand))
12141 {
12142 gcc_assert (reload_completed);
12143 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
12144 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
12145 }
12146 else if (offsettable_memref_p (operand))
12147 {
12148 operand = adjust_address (operand, DImode, 0);
12149 parts[0] = operand;
12150 parts[1] = adjust_address (operand, upper_mode, 8);
12151 }
12152 else if (GET_CODE (operand) == CONST_DOUBLE)
12153 {
12154 REAL_VALUE_TYPE r;
12155 long l[4];
12156
12157 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
12158 real_to_target (l, &r, mode);
12159
12160 /* Do not use shift by 32 to avoid warning on 32bit systems. */
12161 if (HOST_BITS_PER_WIDE_INT >= 64)
12162 parts[0]
12163 = gen_int_mode
12164 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
12165 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
12166 DImode);
12167 else
12168 parts[0] = immed_double_const (l[0], l[1], DImode);
12169
12170 if (upper_mode == SImode)
12171 parts[1] = gen_int_mode (l[2], SImode);
12172 else if (HOST_BITS_PER_WIDE_INT >= 64)
12173 parts[1]
12174 = gen_int_mode
12175 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
12176 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
12177 DImode);
12178 else
12179 parts[1] = immed_double_const (l[2], l[3], DImode);
12180 }
12181 else
12182 gcc_unreachable ();
12183 }
12184 }
12185
12186 return size;
12187 }
12188
12189 /* Emit insns to perform a move or push of DI, DF, and XF values.
12190 Return false when normal moves are needed; true when all required
12191 insns have been emitted. Operands 2-4 contain the input values
12192 in the correct order; operands 5-7 contain the output values. */
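/* For instance (a simplified sketch): a 32-bit DImode copy from memory into
   a register pair is rewritten as two SImode moves, and the ordering chosen
   below ensures an address register that is also a destination half is not
   clobbered before the second load uses it.  */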
12193
12194 void
12195 ix86_split_long_move (rtx operands[])
12196 {
12197 rtx part[2][3];
12198 int nparts;
12199 int push = 0;
12200 int collisions = 0;
12201 enum machine_mode mode = GET_MODE (operands[0]);
12202
12203 /* The DFmode expanders may ask us to move a double.
12204 For a 64-bit target this is a single move. By hiding that fact
12205 here we simplify the i386.md splitters. */
12206 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
12207 {
12208 /* Optimize constant pool reference to immediates. This is used by
12209 fp moves, which force all constants to memory to allow combining. */
12210
12211 if (GET_CODE (operands[1]) == MEM
12212 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
12213 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
12214 operands[1] = get_pool_constant (XEXP (operands[1], 0));
12215 if (push_operand (operands[0], VOIDmode))
12216 {
12217 operands[0] = copy_rtx (operands[0]);
12218 PUT_MODE (operands[0], Pmode);
12219 }
12220 else
12221 operands[0] = gen_lowpart (DImode, operands[0]);
12222 operands[1] = gen_lowpart (DImode, operands[1]);
12223 emit_move_insn (operands[0], operands[1]);
12224 return;
12225 }
12226
12227 /* The only non-offsettable memory we handle is push. */
12228 if (push_operand (operands[0], VOIDmode))
12229 push = 1;
12230 else
12231 gcc_assert (GET_CODE (operands[0]) != MEM
12232 || offsettable_memref_p (operands[0]));
12233
12234 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
12235 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
12236
12237 /* When emitting a push, take care of source operands on the stack. */
12238 if (push && GET_CODE (operands[1]) == MEM
12239 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
12240 {
12241 if (nparts == 3)
12242 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
12243 XEXP (part[1][2], 0));
12244 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
12245 XEXP (part[1][1], 0));
12246 }
12247
12248 /* We need to do the copy in the right order in case an address register
12249 of the source overlaps the destination. */
12250 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
12251 {
12252 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
12253 collisions++;
12254 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
12255 collisions++;
12256 if (nparts == 3
12257 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
12258 collisions++;
12259
12260 /* Collision in the middle part can be handled by reordering. */
12261 if (collisions == 1 && nparts == 3
12262 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
12263 {
12264 rtx tmp;
12265 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
12266 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
12267 }
12268
12269 /* If there are more collisions, we can't handle it by reordering.
12270 Do an lea to the last part and use only one colliding move. */
12271 else if (collisions > 1)
12272 {
12273 rtx base;
12274
12275 collisions = 1;
12276
12277 base = part[0][nparts - 1];
12278
12279 /* Handle the case when the last part isn't valid for lea.
12280 Happens in 64-bit mode storing the 12-byte XFmode. */
12281 if (GET_MODE (base) != Pmode)
12282 base = gen_rtx_REG (Pmode, REGNO (base));
12283
12284 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
12285 part[1][0] = replace_equiv_address (part[1][0], base);
12286 part[1][1] = replace_equiv_address (part[1][1],
12287 plus_constant (base, UNITS_PER_WORD));
12288 if (nparts == 3)
12289 part[1][2] = replace_equiv_address (part[1][2],
12290 plus_constant (base, 8));
12291 }
12292 }
12293
12294 if (push)
12295 {
12296 if (!TARGET_64BIT)
12297 {
12298 if (nparts == 3)
12299 {
12300 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
12301 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
12302 emit_move_insn (part[0][2], part[1][2]);
12303 }
12304 }
12305 else
12306 {
12307 /* In 64-bit mode we don't have a 32-bit push available. In case this is a
12308 register, it is OK - we will just use the larger counterpart. We also
12309 retype memory - this comes from an attempt to avoid the REX prefix on
12310 moving the second half of a TFmode value. */
12311 if (GET_MODE (part[1][1]) == SImode)
12312 {
12313 switch (GET_CODE (part[1][1]))
12314 {
12315 case MEM:
12316 part[1][1] = adjust_address (part[1][1], DImode, 0);
12317 break;
12318
12319 case REG:
12320 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
12321 break;
12322
12323 default:
12324 gcc_unreachable ();
12325 }
12326
12327 if (GET_MODE (part[1][0]) == SImode)
12328 part[1][0] = part[1][1];
12329 }
12330 }
12331 emit_move_insn (part[0][1], part[1][1]);
12332 emit_move_insn (part[0][0], part[1][0]);
12333 return;
12334 }
12335
12336 /* Choose the correct order so as not to overwrite the source before it is copied. */
12337 if ((REG_P (part[0][0])
12338 && REG_P (part[1][1])
12339 && (REGNO (part[0][0]) == REGNO (part[1][1])
12340 || (nparts == 3
12341 && REGNO (part[0][0]) == REGNO (part[1][2]))))
12342 || (collisions > 0
12343 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
12344 {
12345 if (nparts == 3)
12346 {
12347 operands[2] = part[0][2];
12348 operands[3] = part[0][1];
12349 operands[4] = part[0][0];
12350 operands[5] = part[1][2];
12351 operands[6] = part[1][1];
12352 operands[7] = part[1][0];
12353 }
12354 else
12355 {
12356 operands[2] = part[0][1];
12357 operands[3] = part[0][0];
12358 operands[5] = part[1][1];
12359 operands[6] = part[1][0];
12360 }
12361 }
12362 else
12363 {
12364 if (nparts == 3)
12365 {
12366 operands[2] = part[0][0];
12367 operands[3] = part[0][1];
12368 operands[4] = part[0][2];
12369 operands[5] = part[1][0];
12370 operands[6] = part[1][1];
12371 operands[7] = part[1][2];
12372 }
12373 else
12374 {
12375 operands[2] = part[0][0];
12376 operands[3] = part[0][1];
12377 operands[5] = part[1][0];
12378 operands[6] = part[1][1];
12379 }
12380 }
12381
12382 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
12383 if (optimize_size)
12384 {
12385 if (GET_CODE (operands[5]) == CONST_INT
12386 && operands[5] != const0_rtx
12387 && REG_P (operands[2]))
12388 {
12389 if (GET_CODE (operands[6]) == CONST_INT
12390 && INTVAL (operands[6]) == INTVAL (operands[5]))
12391 operands[6] = operands[2];
12392
12393 if (nparts == 3
12394 && GET_CODE (operands[7]) == CONST_INT
12395 && INTVAL (operands[7]) == INTVAL (operands[5]))
12396 operands[7] = operands[2];
12397 }
12398
12399 if (nparts == 3
12400 && GET_CODE (operands[6]) == CONST_INT
12401 && operands[6] != const0_rtx
12402 && REG_P (operands[3])
12403 && GET_CODE (operands[7]) == CONST_INT
12404 && INTVAL (operands[7]) == INTVAL (operands[6]))
12405 operands[7] = operands[3];
12406 }
12407
12408 emit_move_insn (operands[2], operands[5]);
12409 emit_move_insn (operands[3], operands[6]);
12410 if (nparts == 3)
12411 emit_move_insn (operands[4], operands[7]);
12412
12413 return;
12414 }
12415
12416 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
12417 left shift by a constant, either using a single shift or
12418 a sequence of add instructions. */
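/* E.g. (registers hypothetical) when the cost tables say adds are cheap and
   we are not optimizing for size, shifting one half left by 2 may come out
   as two self-adds
     addl %eax, %eax
     addl %eax, %eax
   instead of a single "shll $2"; the choice is purely the cost comparison
   below.  */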
12419
12420 static void
12421 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
12422 {
12423 if (count == 1)
12424 {
12425 emit_insn ((mode == DImode
12426 ? gen_addsi3
12427 : gen_adddi3) (operand, operand, operand));
12428 }
12429 else if (!optimize_size
12430 && count * ix86_cost->add <= ix86_cost->shift_const)
12431 {
12432 int i;
12433 for (i=0; i<count; i++)
12434 {
12435 emit_insn ((mode == DImode
12436 ? gen_addsi3
12437 : gen_adddi3) (operand, operand, operand));
12438 }
12439 }
12440 else
12441 emit_insn ((mode == DImode
12442 ? gen_ashlsi3
12443 : gen_ashldi3) (operand, operand, GEN_INT (count)));
12444 }
12445
12446 void
12447 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
12448 {
12449 rtx low[2], high[2];
12450 int count;
12451 const int single_width = mode == DImode ? 32 : 64;
12452
12453 if (GET_CODE (operands[2]) == CONST_INT)
12454 {
12455 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12456 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12457
12458 if (count >= single_width)
12459 {
12460 emit_move_insn (high[0], low[1]);
12461 emit_move_insn (low[0], const0_rtx);
12462
12463 if (count > single_width)
12464 ix86_expand_ashl_const (high[0], count - single_width, mode);
12465 }
12466 else
12467 {
12468 if (!rtx_equal_p (operands[0], operands[1]))
12469 emit_move_insn (operands[0], operands[1]);
12470 emit_insn ((mode == DImode
12471 ? gen_x86_shld_1
12472 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
12473 ix86_expand_ashl_const (low[0], count, mode);
12474 }
12475 return;
12476 }
12477
12478 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12479
12480 if (operands[1] == const1_rtx)
12481 {
12482 /* Assuming we've chosen QImode-capable registers, 1 << N
12483 can be done with two 32/64-bit shifts, no branches, no cmoves. */
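      /* An illustrative 32-bit sequence (registers hypothetical):
	   xorl   %eax, %eax        ; low  = 0
	   xorl   %edx, %edx        ; high = 0
	   testb  $32, %cl
	   sete   %al               ; low  = (count < 32)
	   setne  %dl               ; high = (count >= 32)
	   sall   %cl, %eax         ; hardware uses count mod 32
	   sall   %cl, %edx  */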
12484 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
12485 {
12486 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
12487
12488 ix86_expand_clear (low[0]);
12489 ix86_expand_clear (high[0]);
12490 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
12491
12492 d = gen_lowpart (QImode, low[0]);
12493 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
12494 s = gen_rtx_EQ (QImode, flags, const0_rtx);
12495 emit_insn (gen_rtx_SET (VOIDmode, d, s));
12496
12497 d = gen_lowpart (QImode, high[0]);
12498 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
12499 s = gen_rtx_NE (QImode, flags, const0_rtx);
12500 emit_insn (gen_rtx_SET (VOIDmode, d, s));
12501 }
12502
12503 /* Otherwise, we can get the same results by manually performing
12504 a bit extract operation on bit 5/6, and then performing the two
12505 shifts. The two methods of getting 0/1 into low/high are exactly
12506 the same size. Avoiding the shift in the bit extract case helps
12507 pentium4 a bit; no one else seems to care much either way. */
12508 else
12509 {
12510 rtx x;
12511
12512 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
12513 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
12514 else
12515 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
12516 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
12517
12518 emit_insn ((mode == DImode
12519 ? gen_lshrsi3
12520 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
12521 emit_insn ((mode == DImode
12522 ? gen_andsi3
12523 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
12524 emit_move_insn (low[0], high[0]);
12525 emit_insn ((mode == DImode
12526 ? gen_xorsi3
12527 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
12528 }
12529
12530 emit_insn ((mode == DImode
12531 ? gen_ashlsi3
12532 : gen_ashldi3) (low[0], low[0], operands[2]));
12533 emit_insn ((mode == DImode
12534 ? gen_ashlsi3
12535 : gen_ashldi3) (high[0], high[0], operands[2]));
12536 return;
12537 }
12538
12539 if (operands[1] == constm1_rtx)
12540 {
12541 /* For -1 << N, we can avoid the shld instruction, because we
12542 know that we're shifting 0...31/63 ones into a -1. */
12543 emit_move_insn (low[0], constm1_rtx);
12544 if (optimize_size)
12545 emit_move_insn (high[0], low[0]);
12546 else
12547 emit_move_insn (high[0], constm1_rtx);
12548 }
12549 else
12550 {
12551 if (!rtx_equal_p (operands[0], operands[1]))
12552 emit_move_insn (operands[0], operands[1]);
12553
12554 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12555 emit_insn ((mode == DImode
12556 ? gen_x86_shld_1
12557 : gen_x86_64_shld) (high[0], low[0], operands[2]));
12558 }
12559
12560 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
12561
12562 if (TARGET_CMOVE && scratch)
12563 {
12564 ix86_expand_clear (scratch);
12565 emit_insn ((mode == DImode
12566 ? gen_x86_shift_adj_1
12567 : gen_x86_64_shift_adj) (high[0], low[0], operands[2], scratch));
12568 }
12569 else
12570 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
12571 }
12572
12573 void
12574 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
12575 {
12576 rtx low[2], high[2];
12577 int count;
12578 const int single_width = mode == DImode ? 32 : 64;
12579
12580 if (GET_CODE (operands[2]) == CONST_INT)
12581 {
12582 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12583 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12584
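      /* E.g. (informally) an arithmetic right shift of a 64-bit value by 63
	 on a 32-bit target only needs the sign: copy the high word, "sarl $31"
	 it, and duplicate it into the low word, as done just below.  */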
12585 if (count == single_width * 2 - 1)
12586 {
12587 emit_move_insn (high[0], high[1]);
12588 emit_insn ((mode == DImode
12589 ? gen_ashrsi3
12590 : gen_ashrdi3) (high[0], high[0],
12591 GEN_INT (single_width - 1)));
12592 emit_move_insn (low[0], high[0]);
12593
12594 }
12595 else if (count >= single_width)
12596 {
12597 emit_move_insn (low[0], high[1]);
12598 emit_move_insn (high[0], low[0]);
12599 emit_insn ((mode == DImode
12600 ? gen_ashrsi3
12601 : gen_ashrdi3) (high[0], high[0],
12602 GEN_INT (single_width - 1)));
12603 if (count > single_width)
12604 emit_insn ((mode == DImode
12605 ? gen_ashrsi3
12606 : gen_ashrdi3) (low[0], low[0],
12607 GEN_INT (count - single_width)));
12608 }
12609 else
12610 {
12611 if (!rtx_equal_p (operands[0], operands[1]))
12612 emit_move_insn (operands[0], operands[1]);
12613 emit_insn ((mode == DImode
12614 ? gen_x86_shrd_1
12615 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
12616 emit_insn ((mode == DImode
12617 ? gen_ashrsi3
12618 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
12619 }
12620 }
12621 else
12622 {
12623 if (!rtx_equal_p (operands[0], operands[1]))
12624 emit_move_insn (operands[0], operands[1]);
12625
12626 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12627
12628 emit_insn ((mode == DImode
12629 ? gen_x86_shrd_1
12630 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
12631 emit_insn ((mode == DImode
12632 ? gen_ashrsi3
12633 : gen_ashrdi3) (high[0], high[0], operands[2]));
12634
12635 if (TARGET_CMOVE && scratch)
12636 {
12637 emit_move_insn (scratch, high[0]);
12638 emit_insn ((mode == DImode
12639 ? gen_ashrsi3
12640 : gen_ashrdi3) (scratch, scratch,
12641 GEN_INT (single_width - 1)));
12642 emit_insn ((mode == DImode
12643 ? gen_x86_shift_adj_1
12644 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
12645 scratch));
12646 }
12647 else
12648 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
12649 }
12650 }
12651
12652 void
12653 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
12654 {
12655 rtx low[2], high[2];
12656 int count;
12657 const int single_width = mode == DImode ? 32 : 64;
12658
12659 if (GET_CODE (operands[2]) == CONST_INT)
12660 {
12661 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12662 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12663
12664 if (count >= single_width)
12665 {
12666 emit_move_insn (low[0], high[1]);
12667 ix86_expand_clear (high[0]);
12668
12669 if (count > single_width)
12670 emit_insn ((mode == DImode
12671 ? gen_lshrsi3
12672 : gen_lshrdi3) (low[0], low[0],
12673 GEN_INT (count - single_width)));
12674 }
12675 else
12676 {
12677 if (!rtx_equal_p (operands[0], operands[1]))
12678 emit_move_insn (operands[0], operands[1]);
12679 emit_insn ((mode == DImode
12680 ? gen_x86_shrd_1
12681 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
12682 emit_insn ((mode == DImode
12683 ? gen_lshrsi3
12684 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
12685 }
12686 }
12687 else
12688 {
12689 if (!rtx_equal_p (operands[0], operands[1]))
12690 emit_move_insn (operands[0], operands[1]);
12691
12692 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12693
12694 emit_insn ((mode == DImode
12695 ? gen_x86_shrd_1
12696 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
12697 emit_insn ((mode == DImode
12698 ? gen_lshrsi3
12699 : gen_lshrdi3) (high[0], high[0], operands[2]));
12700
12701 /* Heh. By reversing the arguments, we can reuse this pattern. */
12702 if (TARGET_CMOVE && scratch)
12703 {
12704 ix86_expand_clear (scratch);
12705 emit_insn ((mode == DImode
12706 ? gen_x86_shift_adj_1
12707 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
12708 scratch));
12709 }
12710 else
12711 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
12712 }
12713 }
12714
12715 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
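/* Illustrative note: PROB is expressed on the REG_BR_PROB_BASE scale, so e.g.
   predict_jump (REG_BR_PROB_BASE * 90 / 100) attaches a REG_BR_PROB note
   saying the branch is expected to be taken about 90% of the time.  */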
12716 static void
12717 predict_jump (int prob)
12718 {
12719 rtx insn = get_last_insn ();
12720 gcc_assert (GET_CODE (insn) == JUMP_INSN);
12721 REG_NOTES (insn)
12722 = gen_rtx_EXPR_LIST (REG_BR_PROB,
12723 GEN_INT (prob),
12724 REG_NOTES (insn));
12725 }
12726
12727 /* Helper function for the string operations below. Test VARIABLE whether
12728 it is aligned to VALUE bytes. If so, jump to the returned label. */
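/* A rough illustration (not the exact RTL): ix86_expand_aligntest (ptr, 2, false)
   emits the equivalent of

       tmp = ptr & 2;  if (tmp == 0) goto label;

   and returns LABEL, so whatever the caller emits before emit_label (label)
   is executed only when (ptr & 2) != 0.  */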
12729 static rtx
12730 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
12731 {
12732 rtx label = gen_label_rtx ();
12733 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
12734 if (GET_MODE (variable) == DImode)
12735 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
12736 else
12737 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
12738 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
12739 1, label);
12740 if (epilogue)
12741 predict_jump (REG_BR_PROB_BASE * 50 / 100);
12742 else
12743 predict_jump (REG_BR_PROB_BASE * 90 / 100);
12744 return label;
12745 }
12746
12747 /* Subtract VALUE from COUNTREG. */
12748 static void
12749 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
12750 {
12751 if (GET_MODE (countreg) == DImode)
12752 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
12753 else
12754 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
12755 }
12756
12757 /* Zero extend possibly SImode EXP to Pmode register. */
12758 rtx
12759 ix86_zero_extend_to_Pmode (rtx exp)
12760 {
12761 rtx r;
12762 if (GET_MODE (exp) == VOIDmode)
12763 return force_reg (Pmode, exp);
12764 if (GET_MODE (exp) == Pmode)
12765 return copy_to_mode_reg (Pmode, exp);
12766 r = gen_reg_rtx (Pmode);
12767 emit_insn (gen_zero_extendsidi2 (r, exp));
12768 return r;
12769 }
12770
12771 /* Divide COUNTREG by SCALE (a power of two). */
12772 static rtx
12773 scale_counter (rtx countreg, int scale)
12774 {
12775 rtx sc;
12776 rtx piece_size_mask;
12777
12778 if (scale == 1)
12779 return countreg;
12780 if (GET_CODE (countreg) == CONST_INT)
12781 return GEN_INT (INTVAL (countreg) / scale);
12782 gcc_assert (REG_P (countreg));
12783
12784 piece_size_mask = GEN_INT (scale - 1);
12785 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
12786 GEN_INT (exact_log2 (scale)),
12787 NULL, 1, OPTAB_DIRECT);
12788 return sc;
12789 }
12790
12791 /* When SRCPTR is non-NULL, output a simple loop to move the memory
12792 pointed to by SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times;
12793 the overall size is COUNT bytes. When SRCPTR is NULL, output the
12794 equivalent loop to set memory to VALUE (assumed to be in MODE).
12795
12796 The size is rounded down to a whole number of chunks moved at once.
12797 SRCMEM and DESTMEM provide the MEM rtx to feed proper aliasing info. */
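/* A rough sketch of the emitted code for the copy case (register names are
   illustrative only):

       size = count & ~(UNROLL * GET_MODE_SIZE (MODE) - 1);
       iter = 0;
     top:
       copy UNROLL chunks of MODE from srcptr + iter to destptr + iter;
       iter += UNROLL * GET_MODE_SIZE (MODE);
       if (iter < size) goto top;
       destptr += iter;  srcptr += iter;
     out:
  */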
12798
12799
12800 static void
12801 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
12802 rtx destptr, rtx srcptr, rtx value,
12803 rtx count, enum machine_mode mode, int unroll,
12804 int expected_size)
12805 {
12806 rtx out_label, top_label, iter, tmp;
12807 enum machine_mode iter_mode;
12808 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
12809 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
12810 rtx size;
12811 rtx x_addr;
12812 rtx y_addr;
12813 int i;
12814
12815 iter_mode = GET_MODE (count);
12816 if (iter_mode == VOIDmode)
12817 iter_mode = word_mode;
12818
12819 top_label = gen_label_rtx ();
12820 out_label = gen_label_rtx ();
12821 iter = gen_reg_rtx (iter_mode);
12822
12823 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
12824 NULL, 1, OPTAB_DIRECT);
12825 /* Those two should combine. */
12826 if (piece_size == const1_rtx)
12827 {
12828 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
12829 true, out_label);
12830 predict_jump (REG_BR_PROB_BASE * 10 / 100);
12831 }
12832 emit_move_insn (iter, const0_rtx);
12833
12834 emit_label (top_label);
12835
12836 tmp = convert_modes (Pmode, iter_mode, iter, true);
12837 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
12838 destmem = change_address (destmem, mode, x_addr);
12839
12840 if (srcmem)
12841 {
12842 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
12843 srcmem = change_address (srcmem, mode, y_addr);
12844
12845 /* When unrolling for chips that reorder memory reads and writes,
12846 we can save registers by using a single temporary.
12847 Also, using 4 temporaries is overkill in 32-bit mode. */
12848 if (!TARGET_64BIT && 0)
12849 {
12850 for (i = 0; i < unroll; i++)
12851 {
12852 if (i)
12853 {
12854 destmem =
12855 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12856 srcmem =
12857 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
12858 }
12859 emit_move_insn (destmem, srcmem);
12860 }
12861 }
12862 else
12863 {
12864 rtx tmpreg[4];
12865 gcc_assert (unroll <= 4);
12866 for (i = 0; i < unroll; i++)
12867 {
12868 tmpreg[i] = gen_reg_rtx (mode);
12869 if (i)
12870 {
12871 srcmem =
12872 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
12873 }
12874 emit_move_insn (tmpreg[i], srcmem);
12875 }
12876 for (i = 0; i < unroll; i++)
12877 {
12878 if (i)
12879 {
12880 destmem =
12881 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12882 }
12883 emit_move_insn (destmem, tmpreg[i]);
12884 }
12885 }
12886 }
12887 else
12888 for (i = 0; i < unroll; i++)
12889 {
12890 if (i)
12891 destmem =
12892 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12893 emit_move_insn (destmem, value);
12894 }
12895
12896 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
12897 true, OPTAB_LIB_WIDEN);
12898 if (tmp != iter)
12899 emit_move_insn (iter, tmp);
12900
12901 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
12902 true, top_label);
12903 if (expected_size != -1)
12904 {
12905 expected_size /= GET_MODE_SIZE (mode) * unroll;
12906 if (expected_size == 0)
12907 predict_jump (0);
12908 else if (expected_size > REG_BR_PROB_BASE)
12909 predict_jump (REG_BR_PROB_BASE - 1);
12910 else
12911 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
12912 }
12913 else
12914 predict_jump (REG_BR_PROB_BASE * 80 / 100);
12915 iter = ix86_zero_extend_to_Pmode (iter);
12916 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
12917 true, OPTAB_LIB_WIDEN);
12918 if (tmp != destptr)
12919 emit_move_insn (destptr, tmp);
12920 if (srcptr)
12921 {
12922 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
12923 true, OPTAB_LIB_WIDEN);
12924 if (tmp != srcptr)
12925 emit_move_insn (srcptr, tmp);
12926 }
12927 emit_label (out_label);
12928 }
12929
12930 /* Output "rep; mov" instruction.
12931 Arguments have the same meaning as for the previous function. */
12932 static void
12933 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
12934 rtx destptr, rtx srcptr,
12935 rtx count,
12936 enum machine_mode mode)
12937 {
12938 rtx destexp;
12939 rtx srcexp;
12940 rtx countreg;
12941
12942 /* If the size is known, it is shorter to use rep movs. */
12943 if (mode == QImode && GET_CODE (count) == CONST_INT
12944 && !(INTVAL (count) & 3))
12945 mode = SImode;
12946
12947 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
12948 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
12949 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
12950 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
12951 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
12952 if (mode != QImode)
12953 {
12954 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12955 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
12956 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
12957 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
12958 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
12959 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
12960 }
12961 else
12962 {
12963 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
12964 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
12965 }
12966 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
12967 destexp, srcexp));
12968 }
12969
12970 /* Output "rep; stos" instruction.
12971 Arguments have the same meaning as for the previous function. */
12972 static void
12973 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
12974 rtx count,
12975 enum machine_mode mode)
12976 {
12977 rtx destexp;
12978 rtx countreg;
12979
12980 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
12981 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
12982 value = force_reg (mode, gen_lowpart (mode, value));
12983 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
12984 if (mode != QImode)
12985 {
12986 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12987 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
12988 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
12989 }
12990 else
12991 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
12992 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
12993 }
12994
12995 static void
12996 emit_strmov (rtx destmem, rtx srcmem,
12997 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
12998 {
12999 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
13000 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
13001 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13002 }
13003
13004 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
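/* For a constant COUNT the epilogue just tests the low bits of the count;
   e.g. with countval == 11 (binary 1011) and max_size == 16 on a 64-bit
   target this emits one 8-byte, one 2-byte and one 1-byte move.
   Illustrative example only.  */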
13005 static void
13006 expand_movmem_epilogue (rtx destmem, rtx srcmem,
13007 rtx destptr, rtx srcptr, rtx count, int max_size)
13008 {
13009 rtx src, dest;
13010 if (GET_CODE (count) == CONST_INT)
13011 {
13012 HOST_WIDE_INT countval = INTVAL (count);
13013 int offset = 0;
13014
13015 if ((countval & 0x10) && max_size > 16)
13016 {
13017 if (TARGET_64BIT)
13018 {
13019 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13020 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
13021 }
13022 else
13023 gcc_unreachable ();
13024 offset += 16;
13025 }
13026 if ((countval & 0x08) && max_size > 8)
13027 {
13028 if (TARGET_64BIT)
13029 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13030 else
13031 {
13032 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13033 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 4);
13034 }
13035 offset += 8;
13036 }
13037 if ((countval & 0x04) && max_size > 4)
13038 {
13039 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
13040 offset += 4;
13041 }
13042 if ((countval & 0x02) && max_size > 2)
13043 {
13044 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
13045 offset += 2;
13046 }
13047 if ((countval & 0x01) && max_size > 1)
13048 {
13049 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
13050 offset += 1;
13051 }
13052 return;
13053 }
13054 if (max_size > 8)
13055 {
13056 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
13057 count, 1, OPTAB_DIRECT);
13058 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
13059 count, QImode, 1, 4);
13060 return;
13061 }
13062
13063 /* When single-insn stringops are available, we can cheaply advance the
13064 dest and src pointers. Otherwise we save code size by maintaining an
13065 offset register (zero is readily available from the preceding rep
13066 operation) and using x86 addressing modes. */
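/* In the non-stringop path below the moves use base + offset addressing,
   roughly

       mem[destptr + offset] = mem[srcptr + offset];  offset += chunk;

   so only the OFFSET register is updated.  Illustrative pseudo-code.  */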
13067 if (TARGET_SINGLE_STRINGOP)
13068 {
13069 if (max_size > 4)
13070 {
13071 rtx label = ix86_expand_aligntest (count, 4, true);
13072 src = change_address (srcmem, SImode, srcptr);
13073 dest = change_address (destmem, SImode, destptr);
13074 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13075 emit_label (label);
13076 LABEL_NUSES (label) = 1;
13077 }
13078 if (max_size > 2)
13079 {
13080 rtx label = ix86_expand_aligntest (count, 2, true);
13081 src = change_address (srcmem, HImode, srcptr);
13082 dest = change_address (destmem, HImode, destptr);
13083 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13084 emit_label (label);
13085 LABEL_NUSES (label) = 1;
13086 }
13087 if (max_size > 1)
13088 {
13089 rtx label = ix86_expand_aligntest (count, 1, true);
13090 src = change_address (srcmem, QImode, srcptr);
13091 dest = change_address (destmem, QImode, destptr);
13092 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13093 emit_label (label);
13094 LABEL_NUSES (label) = 1;
13095 }
13096 }
13097 else
13098 {
13099 rtx offset = force_reg (Pmode, const0_rtx);
13100 rtx tmp;
13101
13102 if (max_size > 4)
13103 {
13104 rtx label = ix86_expand_aligntest (count, 4, true);
13105 src = change_address (srcmem, SImode, srcptr);
13106 dest = change_address (destmem, SImode, destptr);
13107 emit_move_insn (dest, src);
13108 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
13109 true, OPTAB_LIB_WIDEN);
13110 if (tmp != offset)
13111 emit_move_insn (offset, tmp);
13112 emit_label (label);
13113 LABEL_NUSES (label) = 1;
13114 }
13115 if (max_size > 2)
13116 {
13117 rtx label = ix86_expand_aligntest (count, 2, true);
13118 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
13119 src = change_address (srcmem, HImode, tmp);
13120 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
13121 dest = change_address (destmem, HImode, tmp);
13122 emit_move_insn (dest, src);
13123 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
13124 true, OPTAB_LIB_WIDEN);
13125 if (tmp != offset)
13126 emit_move_insn (offset, tmp);
13127 emit_label (label);
13128 LABEL_NUSES (label) = 1;
13129 }
13130 if (max_size > 1)
13131 {
13132 rtx label = ix86_expand_aligntest (count, 1, true);
13133 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
13134 src = change_address (srcmem, QImode, tmp);
13135 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
13136 dest = change_address (destmem, QImode, tmp);
13137 emit_move_insn (dest, src);
13138 emit_label (label);
13139 LABEL_NUSES (label) = 1;
13140 }
13141 }
13142 }
13143
13144 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
13145 static void
13146 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
13147 rtx count, int max_size)
13148 {
13149 count =
13150 expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
13151 count, 1, OPTAB_DIRECT);
13152 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
13153 gen_lowpart (QImode, value), count, QImode,
13154 1, max_size / 2);
13155 }
13156
13157 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
13158 static void
13159 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
13160 {
13161 rtx dest;
13162 if (GET_CODE (count) == CONST_INT)
13163 {
13164 HOST_WIDE_INT countval = INTVAL (count);
13165 int offset = 0;
13166
13167 if ((countval & 0x10) && max_size > 16)
13168 {
13169 if (TARGET_64BIT)
13170 {
13171 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
13172 emit_insn (gen_strset (destptr, dest, value));
13173 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
13174 emit_insn (gen_strset (destptr, dest, value));
13175 }
13176 else
13177 gcc_unreachable ();
13178 offset += 16;
13179 }
13180 if ((countval & 0x08) && max_size > 8)
13181 {
13182 if (TARGET_64BIT)
13183 {
13184 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
13185 emit_insn (gen_strset (destptr, dest, value));
13186 }
13187 else
13188 {
13189 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
13190 emit_insn (gen_strset (destptr, dest, value));
13191 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
13192 emit_insn (gen_strset (destptr, dest, value));
13193 }
13194 offset += 8;
13195 }
13196 if ((countval & 0x04) && max_size > 4)
13197 {
13198 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
13199 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
13200 offset += 4;
13201 }
13202 if ((countval & 0x02) && max_size > 2)
13203 {
13204 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
13205 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
13206 offset += 2;
13207 }
13208 if ((countval & 0x01) && max_size > 1)
13209 {
13210 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
13211 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
13212 offset += 1;
13213 }
13214 return;
13215 }
13216 if (max_size > 32)
13217 {
13218 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
13219 return;
13220 }
13221 if (max_size > 16)
13222 {
13223 rtx label = ix86_expand_aligntest (count, 16, true);
13224 if (TARGET_64BIT)
13225 {
13226 dest = change_address (destmem, DImode, destptr);
13227 emit_insn (gen_strset (destptr, dest, value));
13228 emit_insn (gen_strset (destptr, dest, value));
13229 }
13230 else
13231 {
13232 dest = change_address (destmem, SImode, destptr);
13233 emit_insn (gen_strset (destptr, dest, value));
13234 emit_insn (gen_strset (destptr, dest, value));
13235 emit_insn (gen_strset (destptr, dest, value));
13236 emit_insn (gen_strset (destptr, dest, value));
13237 }
13238 emit_label (label);
13239 LABEL_NUSES (label) = 1;
13240 }
13241 if (max_size > 8)
13242 {
13243 rtx label = ix86_expand_aligntest (count, 8, true);
13244 if (TARGET_64BIT)
13245 {
13246 dest = change_address (destmem, DImode, destptr);
13247 emit_insn (gen_strset (destptr, dest, value));
13248 }
13249 else
13250 {
13251 dest = change_address (destmem, SImode, destptr);
13252 emit_insn (gen_strset (destptr, dest, value));
13253 emit_insn (gen_strset (destptr, dest, value));
13254 }
13255 emit_label (label);
13256 LABEL_NUSES (label) = 1;
13257 }
13258 if (max_size > 4)
13259 {
13260 rtx label = ix86_expand_aligntest (count, 4, true);
13261 dest = change_address (destmem, SImode, destptr);
13262 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
13263 emit_label (label);
13264 LABEL_NUSES (label) = 1;
13265 }
13266 if (max_size > 2)
13267 {
13268 rtx label = ix86_expand_aligntest (count, 2, true);
13269 dest = change_address (destmem, HImode, destptr);
13270 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
13271 emit_label (label);
13272 LABEL_NUSES (label) = 1;
13273 }
13274 if (max_size > 1)
13275 {
13276 rtx label = ix86_expand_aligntest (count, 1, true);
13277 dest = change_address (destmem, QImode, destptr);
13278 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
13279 emit_label (label);
13280 LABEL_NUSES (label) = 1;
13281 }
13282 }
13283
13284 /* Copy enough bytes from SRC to DEST to align DEST, known to be aligned
13285 by ALIGN, to DESIRED_ALIGNMENT. */
13286 static void
13287 expand_movmem_prologue (rtx destmem, rtx srcmem,
13288 rtx destptr, rtx srcptr, rtx count,
13289 int align, int desired_alignment)
13290 {
13291 if (align <= 1 && desired_alignment > 1)
13292 {
13293 rtx label = ix86_expand_aligntest (destptr, 1, false);
13294 srcmem = change_address (srcmem, QImode, srcptr);
13295 destmem = change_address (destmem, QImode, destptr);
13296 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13297 ix86_adjust_counter (count, 1);
13298 emit_label (label);
13299 LABEL_NUSES (label) = 1;
13300 }
13301 if (align <= 2 && desired_alignment > 2)
13302 {
13303 rtx label = ix86_expand_aligntest (destptr, 2, false);
13304 srcmem = change_address (srcmem, HImode, srcptr);
13305 destmem = change_address (destmem, HImode, destptr);
13306 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13307 ix86_adjust_counter (count, 2);
13308 emit_label (label);
13309 LABEL_NUSES (label) = 1;
13310 }
13311 if (align <= 4 && desired_alignment > 4)
13312 {
13313 rtx label = ix86_expand_aligntest (destptr, 4, false);
13314 srcmem = change_address (srcmem, SImode, srcptr);
13315 destmem = change_address (destmem, SImode, destptr);
13316 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13317 ix86_adjust_counter (count, 4);
13318 emit_label (label);
13319 LABEL_NUSES (label) = 1;
13320 }
13321 gcc_assert (desired_alignment <= 8);
13322 }
13323
13324 /* Set enough bytes of DEST to align DEST, known to be aligned by ALIGN,
13325 to DESIRED_ALIGNMENT. */
13326 static void
13327 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
13328 int align, int desired_alignment)
13329 {
13330 if (align <= 1 && desired_alignment > 1)
13331 {
13332 rtx label = ix86_expand_aligntest (destptr, 1, false);
13333 destmem = change_address (destmem, QImode, destptr);
13334 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
13335 ix86_adjust_counter (count, 1);
13336 emit_label (label);
13337 LABEL_NUSES (label) = 1;
13338 }
13339 if (align <= 2 && desired_alignment > 2)
13340 {
13341 rtx label = ix86_expand_aligntest (destptr, 2, false);
13342 destmem = change_address (destmem, HImode, destptr);
13343 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
13344 ix86_adjust_counter (count, 2);
13345 emit_label (label);
13346 LABEL_NUSES (label) = 1;
13347 }
13348 if (align <= 4 && desired_alignment > 4)
13349 {
13350 rtx label = ix86_expand_aligntest (destptr, 4, false);
13351 destmem = change_address (destmem, SImode, destptr);
13352 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
13353 ix86_adjust_counter (count, 4);
13354 emit_label (label);
13355 LABEL_NUSES (label) = 1;
13356 }
13357 gcc_assert (desired_alignment <= 8);
13358 }
13359
13360 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
13361 static enum stringop_alg
13362 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
13363 int *dynamic_check)
13364 {
13365 const struct stringop_algs * algs;
13366
13367 *dynamic_check = -1;
13368 if (memset)
13369 algs = &ix86_cost->memset[TARGET_64BIT != 0];
13370 else
13371 algs = &ix86_cost->memcpy[TARGET_64BIT != 0];
13372 if (stringop_alg != no_stringop)
13373 return stringop_alg;
13374 /* rep; movq or rep; movl is the smallest variant. */
13375 else if (optimize_size)
13376 {
13377 if (!count || (count & 3))
13378 return rep_prefix_1_byte;
13379 else
13380 return rep_prefix_4_byte;
13381 }
13382 /* Very tiny blocks are best handled via the loop; REP is expensive to
13383 set up. */
13384 else if (expected_size != -1 && expected_size < 4)
13385 return loop_1_byte;
13386 else if (expected_size != -1)
13387 {
13388 unsigned int i;
13389 enum stringop_alg alg = libcall;
13390 for (i = 0; i < NAX_STRINGOP_ALGS; i++)
13391 {
13392 gcc_assert (algs->size[i].max);
13393 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
13394 {
13395 if (algs->size[i].alg != libcall)
13396 alg = algs->size[i].alg;
13397 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking the
13398 last non-libcall inline algorithm. */
13399 if (TARGET_INLINE_ALL_STRINGOPS)
13400 {
13401 /* When the current size is best copied by a libcall, but we
13402 are still forced to inline, run the heuristic below that
13403 will pick code for medium-sized blocks. */
13404 if (alg != libcall)
13405 return alg;
13406 break;
13407 }
13408 else
13409 return algs->size[i].alg;
13410 }
13411 }
13412 gcc_assert (TARGET_INLINE_ALL_STRINGOPS);
13413 }
13414 /* When asked to inline the call anyway, try to pick a meaningful choice.
13415 We look for the maximal size of a block that is faster to copy by hand
13416 and take blocks of at most that size, guessing that the average size
13417 will be roughly half of the block.
13418
13419 If this turns out to be bad, we might simply specify the preferred
13420 choice in ix86_costs. */
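/* Hypothetical example (numbers made up for illustration): if the cost
   table lists {{256, rep_prefix_4_byte}, {-1, libcall}} and unknown_size
   is libcall, MAX becomes 256 and we recurse with expected_size == 128,
   picking the algorithm preferred for medium-sized blocks.  */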
13421 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
13422 && algs->unknown_size == libcall)
13423 {
13424 int max = -1;
13425 enum stringop_alg alg;
13426 int i;
13427
13428 for (i = 0; i < NAX_STRINGOP_ALGS; i++)
13429 if (algs->size[i].alg != libcall && algs->size[i].alg)
13430 max = algs->size[i].max;
13431 if (max == -1)
13432 max = 4096;
13433 alg = decide_alg (count, max / 2, memset, dynamic_check);
13434 gcc_assert (*dynamic_check == -1);
13435 gcc_assert (alg != libcall);
13436 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
13437 *dynamic_check = max;
13438 return alg;
13439 }
13440 return algs->unknown_size;
13441 }
13442
13443 /* Decide on alignment. We know that the operand is already aligned to ALIGN
13444 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
13445 static int
13446 decide_alignment (int align,
13447 enum stringop_alg alg,
13448 int expected_size)
13449 {
13450 int desired_align = 0;
13451 switch (alg)
13452 {
13453 case no_stringop:
13454 gcc_unreachable ();
13455 case loop:
13456 case unrolled_loop:
13457 desired_align = GET_MODE_SIZE (Pmode);
13458 break;
13459 case rep_prefix_8_byte:
13460 desired_align = 8;
13461 break;
13462 case rep_prefix_4_byte:
13463 /* PentiumPro has special logic triggering for 8-byte aligned blocks,
13464 copying a whole cache line at once. */
13465 if (TARGET_PENTIUMPRO)
13466 desired_align = 8;
13467 else
13468 desired_align = 4;
13469 break;
13470 case rep_prefix_1_byte:
13471 /* PentiumPro has special logic triggering for 8-byte aligned blocks,
13472 copying a whole cache line at once. */
13473 if (TARGET_PENTIUMPRO)
13474 desired_align = 8;
13475 else
13476 desired_align = 1;
13477 break;
13478 case loop_1_byte:
13479 desired_align = 1;
13480 break;
13481 case libcall:
13482 return 0;
13483 }
13484
13485 if (optimize_size)
13486 desired_align = 1;
13487 if (desired_align < align)
13488 desired_align = align;
13489 if (expected_size != -1 && expected_size < 4)
13490 desired_align = align;
13491 return desired_align;
13492 }
13493
13494 /* Expand a string move (memcpy) operation. Use i386 string operations
13495 when profitable. ix86_expand_setmem contains similar code. */
13496 int
13497 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
13498 rtx expected_align_exp, rtx expected_size_exp)
13499 {
13500 rtx destreg;
13501 rtx srcreg;
13502 rtx label = NULL;
13503 rtx tmp;
13504 rtx jump_around_label = NULL;
13505 HOST_WIDE_INT align = 1;
13506 unsigned HOST_WIDE_INT count = 0;
13507 HOST_WIDE_INT expected_size = -1;
13508 int size_needed = 0;
13509 int desired_align = 0;
13510 enum stringop_alg alg;
13511 int dynamic_check;
13512
13513 if (GET_CODE (align_exp) == CONST_INT)
13514 align = INTVAL (align_exp);
13515 /* i386 can do misaligned access at a reasonably increased cost. */
13516 if (GET_CODE (expected_align_exp) == CONST_INT
13517 && INTVAL (expected_align_exp) > align)
13518 align = INTVAL (expected_align_exp);
13519 if (GET_CODE (count_exp) == CONST_INT)
13520 count = expected_size = INTVAL (count_exp);
13521 if (GET_CODE (expected_size_exp) == CONST_INT && count == 0)
13522 {
13523 expected_size = INTVAL (expected_size_exp);
13524 }
13525
13526 alg = decide_alg (count, expected_size, false, &dynamic_check);
13527 desired_align = decide_alignment (align, alg, expected_size);
13528
13529 if (!TARGET_ALIGN_STRINGOPS)
13530 align = desired_align;
13531
13532 if (alg == libcall)
13533 return 0;
13534 gcc_assert (alg != no_stringop);
13535 if (!count)
13536 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
13537 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
13538 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
13539 switch (alg)
13540 {
13541 case libcall:
13542 case no_stringop:
13543 gcc_unreachable ();
13544 case loop:
13545 size_needed = GET_MODE_SIZE (Pmode);
13546 break;
13547 case unrolled_loop:
13548 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
13549 break;
13550 case rep_prefix_8_byte:
13551 size_needed = 8;
13552 break;
13553 case rep_prefix_4_byte:
13554 size_needed = 4;
13555 break;
13556 case rep_prefix_1_byte:
13557 case loop_1_byte:
13558 size_needed = 1;
13559 break;
13560 }
13561
13562 /* The alignment code needs the count to be in a register. */
13563 if (GET_CODE (count_exp) == CONST_INT && desired_align > align)
13564 {
13565 enum machine_mode mode = SImode;
13566 if (TARGET_64BIT && (count & ~0xffffffff))
13567 mode = DImode;
13568 count_exp = force_reg (mode, count_exp);
13569 }
13570 gcc_assert (desired_align >= 1 && align >= 1);
13571 /* Ensure that the alignment prologue won't copy past the end of the block. */
13572 if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
13573 && !count)
13574 {
13575 int size = MAX (size_needed - 1, desired_align - align);
13576
13577 label = gen_label_rtx ();
13578 emit_cmp_and_jump_insns (count_exp,
13579 GEN_INT (size),
13580 LEU, 0, GET_MODE (count_exp), 1, label);
13581 if (expected_size == -1 || expected_size < size)
13582 predict_jump (REG_BR_PROB_BASE * 60 / 100);
13583 else
13584 predict_jump (REG_BR_PROB_BASE * 20 / 100);
13585 }
13586 /* Emit code to decide at runtime whether a library call or the inline
13587 variant should be used. */
13588 if (dynamic_check != -1)
13589 {
13590 rtx hot_label = gen_label_rtx ();
13591 jump_around_label = gen_label_rtx ();
13592 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
13593 LEU, 0, GET_MODE (count_exp), 1, hot_label);
13594 predict_jump (REG_BR_PROB_BASE * 90 / 100);
13595 emit_block_move_via_libcall (dst, src, count_exp, false);
13596 emit_jump (jump_around_label);
13597 emit_label (hot_label);
13598 }
13599
13600
13601 /* Alignment prologue. */
13602 if (desired_align > align)
13603 {
13604 /* Except for the first move in the epilogue, we no longer know
13605 the constant offset in the aliasing info. It doesn't seem worth
13606 the pain to maintain it for the first move, so throw away
13607 the info early. */
13608 src = change_address (src, BLKmode, srcreg);
13609 dst = change_address (dst, BLKmode, destreg);
13610 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
13611 desired_align);
13612 }
13613 if (label && size_needed == 1)
13614 {
13615 emit_label (label);
13616 LABEL_NUSES (label) = 1;
13617 label = NULL;
13618 }
13619
13620 /* Main body. */
13621 switch (alg)
13622 {
13623 case libcall:
13624 case no_stringop:
13625 gcc_unreachable ();
13626 case loop_1_byte:
13627 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13628 count_exp, QImode, 1, expected_size);
13629 break;
13630 case loop:
13631 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13632 count_exp, Pmode, 1, expected_size);
13633 break;
13634 case unrolled_loop:
13635 /* Unroll only by a factor of 2 in 32-bit mode, since we don't have
13636 enough registers for 4 temporaries anyway. */
13637 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13638 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
13639 expected_size);
13640 break;
13641 case rep_prefix_8_byte:
13642 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13643 DImode);
13644 break;
13645 case rep_prefix_4_byte:
13646 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13647 SImode);
13648 break;
13649 case rep_prefix_1_byte:
13650 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13651 QImode);
13652 break;
13653 }
13654 /* Properly adjust the offsets of the src and dest memory for aliasing. */
13655 if (GET_CODE (count_exp) == CONST_INT)
13656 {
13657 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
13658 (count / size_needed) * size_needed);
13659 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
13660 (count / size_needed) * size_needed);
13661 }
13662 else
13663 {
13664 src = change_address (src, BLKmode, srcreg);
13665 dst = change_address (dst, BLKmode, destreg);
13666 }
13667
13668 /* Epilogue to copy the remaining bytes. */
13669 if (label)
13670 {
13671 if (size_needed < desired_align - align)
13672 {
13673 tmp =
13674 expand_simple_binop (GET_MODE (count_exp), AND, count_exp,
13675 GEN_INT (size_needed - 1), count_exp, 1,
13676 OPTAB_DIRECT);
13677 size_needed = desired_align - align + 1;
13678 if (tmp != count_exp)
13679 emit_move_insn (count_exp, tmp);
13680 }
13681 emit_label (label);
13682 LABEL_NUSES (label) = 1;
13683 }
13684 if (count_exp != const0_rtx && size_needed > 1)
13685 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
13686 size_needed);
13687 if (jump_around_label)
13688 emit_label (jump_around_label);
13689 return 1;
13690 }
13691
13692 /* Helper function for memset. For a QImode value 0xXY produce
13693 0xXYXYXYXY of the width specified by MODE. This is essentially
13694 a * 0x10101010, but we can do slightly better than
13695 synth_mult by unwinding the sequence by hand on CPUs with
13696 a slow multiply. */
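/* For example, promoting the QImode constant 0xAB gives 0xABABABAB in
   SImode and 0xABABABABABABABAB in DImode.  For a non-constant value the
   same effect is obtained either by multiplying by 0x01010101
   (resp. 0x0101010101010101) or by the shift-and-or sequence below,
   whichever the cost model prefers.  */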
13697 static rtx
13698 promote_duplicated_reg (enum machine_mode mode, rtx val)
13699 {
13700 enum machine_mode valmode = GET_MODE (val);
13701 rtx tmp;
13702 int nops = mode == DImode ? 3 : 2;
13703
13704 gcc_assert (mode == SImode || mode == DImode);
13705 if (val == const0_rtx)
13706 return copy_to_mode_reg (mode, const0_rtx);
13707 if (GET_CODE (val) == CONST_INT)
13708 {
13709 HOST_WIDE_INT v = INTVAL (val) & 255;
13710
13711 v |= v << 8;
13712 v |= v << 16;
13713 if (mode == DImode)
13714 v |= (v << 16) << 16;
13715 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
13716 }
13717
13718 if (valmode == VOIDmode)
13719 valmode = QImode;
13720 if (valmode != QImode)
13721 val = gen_lowpart (QImode, val);
13722 if (mode == QImode)
13723 return val;
13724 if (!TARGET_PARTIAL_REG_STALL)
13725 nops--;
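/* Use the multiply variant when the cost model says a multiply by
   0x01010101 is no more expensive than the shift-and-or sequence
   emitted in the else branch below.  */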
13726 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
13727 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
13728 <= (ix86_cost->shift_const + ix86_cost->add) * nops
13729 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
13730 {
13731 rtx reg = convert_modes (mode, QImode, val, true);
13732 tmp = promote_duplicated_reg (mode, const1_rtx);
13733 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
13734 OPTAB_DIRECT);
13735 }
13736 else
13737 {
13738 rtx reg = convert_modes (mode, QImode, val, true);
13739
13740 if (!TARGET_PARTIAL_REG_STALL)
13741 if (mode == SImode)
13742 emit_insn (gen_movsi_insv_1 (reg, reg));
13743 else
13744 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
13745 else
13746 {
13747 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
13748 NULL, 1, OPTAB_DIRECT);
13749 reg =
13750 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13751 }
13752 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
13753 NULL, 1, OPTAB_DIRECT);
13754 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13755 if (mode == SImode)
13756 return reg;
13757 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
13758 NULL, 1, OPTAB_DIRECT);
13759 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13760 return reg;
13761 }
13762 }
13763
13764 /* Expand a string set (memset / bzero) operation. Use i386 string operations
13765 when profitable. ix86_expand_movmem contains similar code. */
13766 int
13767 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
13768 rtx expected_align_exp, rtx expected_size_exp)
13769 {
13770 rtx destreg;
13771 rtx label = NULL;
13772 rtx tmp;
13773 rtx jump_around_label = NULL;
13774 HOST_WIDE_INT align = 1;
13775 unsigned HOST_WIDE_INT count = 0;
13776 HOST_WIDE_INT expected_size = -1;
13777 int size_needed = 0;
13778 int desired_align = 0;
13779 enum stringop_alg alg;
13780 rtx promoted_val = val_exp;
13781 bool force_loopy_epilogue = false;
13782 int dynamic_check;
13783
13784 if (GET_CODE (align_exp) == CONST_INT)
13785 align = INTVAL (align_exp);
13786 /* i386 can do misaligned access at a reasonably increased cost. */
13787 if (GET_CODE (expected_align_exp) == CONST_INT
13788 && INTVAL (expected_align_exp) > align)
13789 align = INTVAL (expected_align_exp);
13790 if (GET_CODE (count_exp) == CONST_INT)
13791 count = expected_size = INTVAL (count_exp);
13792 if (GET_CODE (expected_size_exp) == CONST_INT && count == 0)
13793 expected_size = INTVAL (expected_size_exp);
13794
13795 alg = decide_alg (count, expected_size, true, &dynamic_check);
13796 desired_align = decide_alignment (align, alg, expected_size);
13797
13798 if (!TARGET_ALIGN_STRINGOPS)
13799 align = desired_align;
13800
13801 if (alg == libcall)
13802 return 0;
13803 gcc_assert (alg != no_stringop);
13804 if (!count)
13805 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
13806 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
13807 switch (alg)
13808 {
13809 case libcall:
13810 case no_stringop:
13811 gcc_unreachable ();
13812 case loop:
13813 size_needed = GET_MODE_SIZE (Pmode);
13814 break;
13815 case unrolled_loop:
13816 size_needed = GET_MODE_SIZE (Pmode) * 4;
13817 break;
13818 case rep_prefix_8_byte:
13819 size_needed = 8;
13820 break;
13821 case rep_prefix_4_byte:
13822 size_needed = 4;
13823 break;
13824 case rep_prefix_1_byte:
13825 case loop_1_byte:
13826 size_needed = 1;
13827 break;
13828 }
13829 /* The alignment code needs the count to be in a register. */
13830 if (GET_CODE (count_exp) == CONST_INT && desired_align > align)
13831 {
13832 enum machine_mode mode = SImode;
13833 if (TARGET_64BIT && (count & ~0xffffffff))
13834 mode = DImode;
13835 count_exp = force_reg (mode, count_exp);
13836 }
13837 /* Ensure that the alignment prologue won't copy past the end of the block. */
13838 if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
13839 && !count)
13840 {
13841 int size = MAX (size_needed - 1, desired_align - align);
13842 /* To improve the performance of small blocks, we jump around the
13843 promoting code, so we need to use QImode accesses in the epilogue. */
13844 if (GET_CODE (val_exp) != CONST_INT && size_needed > 1)
13845 force_loopy_epilogue = true;
13846 label = gen_label_rtx ();
13847 emit_cmp_and_jump_insns (count_exp,
13848 GEN_INT (size),
13849 LEU, 0, GET_MODE (count_exp), 1, label);
13850 if (expected_size == -1 || expected_size <= size)
13851 predict_jump (REG_BR_PROB_BASE * 60 / 100);
13852 else
13853 predict_jump (REG_BR_PROB_BASE * 20 / 100);
13854 }
13855 if (dynamic_check != -1)
13856 {
13857 rtx hot_label = gen_label_rtx ();
13858 jump_around_label = gen_label_rtx ();
13859 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
13860 LEU, 0, GET_MODE (count_exp), 1, hot_label);
13861 predict_jump (REG_BR_PROB_BASE * 90 / 100);
13862 set_storage_via_libcall (dst, count_exp, val_exp, false);
13863 emit_jump (jump_around_label);
13864 emit_label (hot_label);
13865 }
13866 if (TARGET_64BIT
13867 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
13868 promoted_val = promote_duplicated_reg (DImode, val_exp);
13869 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
13870 promoted_val = promote_duplicated_reg (SImode, val_exp);
13871 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
13872 promoted_val = promote_duplicated_reg (HImode, val_exp);
13873 else
13874 promoted_val = val_exp;
13875 gcc_assert (desired_align >= 1 && align >= 1);
13876 if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
13877 && !count && !label)
13878 {
13879 int size = MAX (size_needed - 1, desired_align - align);
13880
13881 label = gen_label_rtx ();
13882 emit_cmp_and_jump_insns (count_exp,
13883 GEN_INT (size),
13884 LEU, 0, GET_MODE (count_exp), 1, label);
13885 if (expected_size == -1 || expected_size <= size)
13886 predict_jump (REG_BR_PROB_BASE * 60 / 100);
13887 else
13888 predict_jump (REG_BR_PROB_BASE * 20 / 100);
13889 }
13890 if (desired_align > align)
13891 {
13892 /* Except for the first move in the epilogue, we no longer know
13893 the constant offset in the aliasing info. It doesn't seem worth
13894 the pain to maintain it for the first move, so throw away
13895 the info early. */
13896 dst = change_address (dst, BLKmode, destreg);
13897 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
13898 desired_align);
13899 }
13900 if (label && size_needed == 1)
13901 {
13902 emit_label (label);
13903 LABEL_NUSES (label) = 1;
13904 label = NULL;
13905 }
13906 switch (alg)
13907 {
13908 case libcall:
13909 case no_stringop:
13910 gcc_unreachable ();
13911 case loop_1_byte:
13912 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
13913 count_exp, QImode, 1, expected_size);
13914 break;
13915 case loop:
13916 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
13917 count_exp, Pmode, 1, expected_size);
13918 break;
13919 case unrolled_loop:
13920 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
13921 count_exp, Pmode, 4, expected_size);
13922 break;
13923 case rep_prefix_8_byte:
13924 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
13925 DImode);
13926 break;
13927 case rep_prefix_4_byte:
13928 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
13929 SImode);
13930 break;
13931 case rep_prefix_1_byte:
13932 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
13933 QImode);
13934 break;
13935 }
13936 /* Properly adjust the offset of the dest memory for aliasing. */
13937 if (GET_CODE (count_exp) == CONST_INT)
13938 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
13939 (count / size_needed) * size_needed);
13940 else
13941 dst = change_address (dst, BLKmode, destreg);
13942
13943 if (label)
13944 {
13945 if (size_needed < desired_align - align)
13946 {
13947 tmp =
13948 expand_simple_binop (GET_MODE (count_exp), AND, count_exp,
13949 GEN_INT (size_needed - 1), count_exp, 1,
13950 OPTAB_DIRECT);
13951 size_needed = desired_align - align + 1;
13952 if (tmp != count_exp)
13953 emit_move_insn (count_exp, tmp);
13954 }
13955 emit_label (label);
13956 LABEL_NUSES (label) = 1;
13957 }
13958 if (count_exp != const0_rtx && size_needed > 1)
13959 {
13960 if (force_loopy_epilogue)
13961 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
13962 size_needed);
13963 else
13964 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
13965 size_needed);
13966 }
13967 if (jump_around_label)
13968 emit_label (jump_around_label);
13969 return 1;
13970 }
13971
13972 /* Expand strlen. */
13973 int
13974 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
13975 {
13976 rtx addr, scratch1, scratch2, scratch3, scratch4;
13977
13978 /* The generic case of the strlen expander is long. Avoid expanding
13979 it unless TARGET_INLINE_ALL_STRINGOPS. */
13980
13981 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
13982 && !TARGET_INLINE_ALL_STRINGOPS
13983 && !optimize_size
13984 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
13985 return 0;
13986
13987 addr = force_reg (Pmode, XEXP (src, 0));
13988 scratch1 = gen_reg_rtx (Pmode);
13989
13990 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
13991 && !optimize_size)
13992 {
13993 /* Well, it seems that some optimizer does not combine a call like
13994 foo(strlen(bar), strlen(bar));
13995 when the move and the subtraction are done here. It does calculate
13996 the length just once when these instructions are done inside
13997 output_strlen_unroll(). But since &bar[strlen(bar)] is
13998 often used and we use one fewer register for the lifetime of
13999 output_strlen_unroll(), this is better. */
14000
14001 emit_move_insn (out, addr);
14002
14003 ix86_expand_strlensi_unroll_1 (out, src, align);
14004
14005 /* strlensi_unroll_1 returns the address of the zero at the end of
14006 the string, like memchr(), so compute the length by subtracting
14007 the start address. */
14008 if (TARGET_64BIT)
14009 emit_insn (gen_subdi3 (out, out, addr));
14010 else
14011 emit_insn (gen_subsi3 (out, out, addr));
14012 }
14013 else
14014 {
14015 rtx unspec;
14016 scratch2 = gen_reg_rtx (Pmode);
14017 scratch3 = gen_reg_rtx (Pmode);
14018 scratch4 = force_reg (Pmode, constm1_rtx);
14019
14020 emit_move_insn (scratch3, addr);
14021 eoschar = force_reg (QImode, eoschar);
14022
14023 src = replace_equiv_address_nv (src, scratch3);
14024
14025 /* If .md starts supporting :P, this can be done in .md. */
14026 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
14027 scratch4), UNSPEC_SCAS);
14028 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
14029 if (TARGET_64BIT)
14030 {
14031 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
14032 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
14033 }
14034 else
14035 {
14036 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
14037 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
14038 }
14039 }
14040 return 1;
14041 }
14042
14043 /* Expand the appropriate insns for doing strlen if not just doing
14044 repnz; scasb
14045
14046 out = result, initialized with the start address
14047 align_rtx = alignment of the address.
14048 scratch = scratch register, initialized with the start address when
14049 not aligned, otherwise undefined
14050
14051 This is just the body. It needs the initializations mentioned above and
14052 some address computing at the end. These things are done in i386.md. */
14053
14054 static void
14055 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
14056 {
14057 int align;
14058 rtx tmp;
14059 rtx align_2_label = NULL_RTX;
14060 rtx align_3_label = NULL_RTX;
14061 rtx align_4_label = gen_label_rtx ();
14062 rtx end_0_label = gen_label_rtx ();
14063 rtx mem;
14064 rtx tmpreg = gen_reg_rtx (SImode);
14065 rtx scratch = gen_reg_rtx (SImode);
14066 rtx cmp;
14067
14068 align = 0;
14069 if (GET_CODE (align_rtx) == CONST_INT)
14070 align = INTVAL (align_rtx);
14071
14072 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
14073
14074 /* Is there a known alignment and is it less than 4? */
14075 if (align < 4)
14076 {
14077 rtx scratch1 = gen_reg_rtx (Pmode);
14078 emit_move_insn (scratch1, out);
14079 /* Is there a known alignment and is it not 2? */
14080 if (align != 2)
14081 {
14082 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
14083 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
14084
14085 /* Leave just the 3 lower bits. */
14086 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
14087 NULL_RTX, 0, OPTAB_WIDEN);
14088
14089 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
14090 Pmode, 1, align_4_label);
14091 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
14092 Pmode, 1, align_2_label);
14093 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
14094 Pmode, 1, align_3_label);
14095 }
14096 else
14097 {
14098 /* Since the alignment is 2, we have to check 2 or 0 bytes;
14099 check whether it is aligned to a 4-byte boundary. */
14100
14101 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
14102 NULL_RTX, 0, OPTAB_WIDEN);
14103
14104 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
14105 Pmode, 1, align_4_label);
14106 }
14107
14108 mem = change_address (src, QImode, out);
14109
14110 /* Now compare the bytes. */
14111
14112 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
14113 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
14114 QImode, 1, end_0_label);
14115
14116 /* Increment the address. */
14117 if (TARGET_64BIT)
14118 emit_insn (gen_adddi3 (out, out, const1_rtx));
14119 else
14120 emit_insn (gen_addsi3 (out, out, const1_rtx));
14121
14122 /* Not needed with an alignment of 2 */
14123 if (align != 2)
14124 {
14125 emit_label (align_2_label);
14126
14127 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
14128 end_0_label);
14129
14130 if (TARGET_64BIT)
14131 emit_insn (gen_adddi3 (out, out, const1_rtx));
14132 else
14133 emit_insn (gen_addsi3 (out, out, const1_rtx));
14134
14135 emit_label (align_3_label);
14136 }
14137
14138 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
14139 end_0_label);
14140
14141 if (TARGET_64BIT)
14142 emit_insn (gen_adddi3 (out, out, const1_rtx));
14143 else
14144 emit_insn (gen_addsi3 (out, out, const1_rtx));
14145 }
14146
14147 /* Generate a loop to check 4 bytes at a time. It is not a good idea
14148 to align this loop; it only makes the program larger and does not
14149 help to speed it up. */
14150 emit_label (align_4_label);
14151
14152 mem = change_address (src, SImode, out);
14153 emit_move_insn (scratch, mem);
14154 if (TARGET_64BIT)
14155 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
14156 else
14157 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
14158
14159 /* This formula yields a nonzero result iff one of the bytes is zero.
14160 This saves three branches inside the loop and many cycles. */
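/* This is the classic "has a zero byte" test:
   (x - 0x01010101) & ~x & 0x80808080 is nonzero iff some byte of x is
   zero.  Worked example: for x = 0x11002233,
       x - 0x01010101 = 0x0FFF2132,  ~x = 0xEEFFDDCC,
   and after both ANDs only 0x00800000 remains, flagging the zero byte.  */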
14161
14162 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
14163 emit_insn (gen_one_cmplsi2 (scratch, scratch));
14164 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
14165 emit_insn (gen_andsi3 (tmpreg, tmpreg,
14166 gen_int_mode (0x80808080, SImode)));
14167 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
14168 align_4_label);
14169
14170 if (TARGET_CMOVE)
14171 {
14172 rtx reg = gen_reg_rtx (SImode);
14173 rtx reg2 = gen_reg_rtx (Pmode);
14174 emit_move_insn (reg, tmpreg);
14175 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
14176
14177 /* If zero is not in the first two bytes, move two bytes forward. */
14178 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
14179 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14180 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
14181 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
14182 gen_rtx_IF_THEN_ELSE (SImode, tmp,
14183 reg,
14184 tmpreg)));
14185 /* Emit lea manually to avoid clobbering of flags. */
14186 emit_insn (gen_rtx_SET (SImode, reg2,
14187 gen_rtx_PLUS (Pmode, out, const2_rtx)));
14188
14189 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14190 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
14191 emit_insn (gen_rtx_SET (VOIDmode, out,
14192 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
14193 reg2,
14194 out)));
14195
14196 }
14197 else
14198 {
14199 rtx end_2_label = gen_label_rtx ();
14200 /* Is zero in the first two bytes? */
14201
14202 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
14203 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14204 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
14205 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
14206 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
14207 pc_rtx);
14208 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
14209 JUMP_LABEL (tmp) = end_2_label;
14210
14211 /* Not in the first two. Move two bytes forward. */
14212 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
14213 if (TARGET_64BIT)
14214 emit_insn (gen_adddi3 (out, out, const2_rtx));
14215 else
14216 emit_insn (gen_addsi3 (out, out, const2_rtx));
14217
14218 emit_label (end_2_label);
14219
14220 }
14221
14222 /* Avoid branch in fixing the byte. */
14223 tmpreg = gen_lowpart (QImode, tmpreg);
14224 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
14225 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
14226 if (TARGET_64BIT)
14227 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
14228 else
14229 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
14230
14231 emit_label (end_0_label);
14232 }
14233
14234 void
14235 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
14236 rtx callarg2 ATTRIBUTE_UNUSED,
14237 rtx pop, int sibcall)
14238 {
14239 rtx use = NULL, call;
14240
14241 if (pop == const0_rtx)
14242 pop = NULL;
14243 gcc_assert (!TARGET_64BIT || !pop);
14244
14245 if (TARGET_MACHO && !TARGET_64BIT)
14246 {
14247 #if TARGET_MACHO
14248 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
14249 fnaddr = machopic_indirect_call_target (fnaddr);
14250 #endif
14251 }
14252 else
14253 {
14254 /* Static functions and indirect calls don't need the pic register. */
14255 if (! TARGET_64BIT && flag_pic
14256 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
14257 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
14258 use_reg (&use, pic_offset_table_rtx);
14259 }
14260
14261 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
14262 {
14263 rtx al = gen_rtx_REG (QImode, 0);
14264 emit_move_insn (al, callarg2);
14265 use_reg (&use, al);
14266 }
14267
14268 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
14269 {
14270 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
14271 fnaddr = gen_rtx_MEM (QImode, fnaddr);
14272 }
14273 if (sibcall && TARGET_64BIT
14274 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
14275 {
14276 rtx addr;
14277 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
14278 fnaddr = gen_rtx_REG (Pmode, R11_REG);
14279 emit_move_insn (fnaddr, addr);
14280 fnaddr = gen_rtx_MEM (QImode, fnaddr);
14281 }
14282
14283 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
14284 if (retval)
14285 call = gen_rtx_SET (VOIDmode, retval, call);
14286 if (pop)
14287 {
14288 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
14289 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
14290 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
14291 }
14292
14293 call = emit_call_insn (call);
14294 if (use)
14295 CALL_INSN_FUNCTION_USAGE (call) = use;
14296 }
14297
14298 \f
14299 /* Clear stack slot assignments remembered from previous functions.
14300 This is called from INIT_EXPANDERS once before RTL is emitted for each
14301 function. */
14302
14303 static struct machine_function *
14304 ix86_init_machine_status (void)
14305 {
14306 struct machine_function *f;
14307
14308 f = ggc_alloc_cleared (sizeof (struct machine_function));
14309 f->use_fast_prologue_epilogue_nregs = -1;
14310 f->tls_descriptor_call_expanded_p = 0;
14311
14312 return f;
14313 }
14314
14315 /* Return a MEM corresponding to a stack slot with mode MODE.
14316 Allocate a new slot if necessary.
14317
14318 The RTL for a function can have several slots available: N is
14319 which slot to use. */
14320
14321 rtx
14322 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
14323 {
14324 struct stack_local_entry *s;
14325
14326 gcc_assert (n < MAX_386_STACK_LOCALS);
14327
14328 for (s = ix86_stack_locals; s; s = s->next)
14329 if (s->mode == mode && s->n == n)
14330 return copy_rtx (s->rtl);
14331
14332 s = (struct stack_local_entry *)
14333 ggc_alloc (sizeof (struct stack_local_entry));
14334 s->n = n;
14335 s->mode = mode;
14336 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
14337
14338 s->next = ix86_stack_locals;
14339 ix86_stack_locals = s;
14340 return s->rtl;
14341 }
14342
14343 /* Construct the SYMBOL_REF for the tls_get_addr function. */
14344
14345 static GTY(()) rtx ix86_tls_symbol;
14346 rtx
14347 ix86_tls_get_addr (void)
14348 {
14349
14350 if (!ix86_tls_symbol)
14351 {
14352 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
14353 (TARGET_ANY_GNU_TLS
14354 && !TARGET_64BIT)
14355 ? "___tls_get_addr"
14356 : "__tls_get_addr");
14357 }
14358
14359 return ix86_tls_symbol;
14360 }
14361
14362 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
14363
14364 static GTY(()) rtx ix86_tls_module_base_symbol;
14365 rtx
14366 ix86_tls_module_base (void)
14367 {
14368
14369 if (!ix86_tls_module_base_symbol)
14370 {
14371 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
14372 "_TLS_MODULE_BASE_");
14373 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
14374 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
14375 }
14376
14377 return ix86_tls_module_base_symbol;
14378 }
14379 \f
14380 /* Calculate the length of the memory address in the instruction
14381 encoding. Does not include the one-byte modrm, opcode, or prefix. */
14382
14383 int
14384 memory_address_length (rtx addr)
14385 {
14386 struct ix86_address parts;
14387 rtx base, index, disp;
14388 int len;
14389 int ok;
14390
14391 if (GET_CODE (addr) == PRE_DEC
14392 || GET_CODE (addr) == POST_INC
14393 || GET_CODE (addr) == PRE_MODIFY
14394 || GET_CODE (addr) == POST_MODIFY)
14395 return 0;
14396
14397 ok = ix86_decompose_address (addr, &parts);
14398 gcc_assert (ok);
14399
14400 if (parts.base && GET_CODE (parts.base) == SUBREG)
14401 parts.base = SUBREG_REG (parts.base);
14402 if (parts.index && GET_CODE (parts.index) == SUBREG)
14403 parts.index = SUBREG_REG (parts.index);
14404
14405 base = parts.base;
14406 index = parts.index;
14407 disp = parts.disp;
14408 len = 0;
14409
14410 /* Rule of thumb:
14411 - esp as the base always wants an index,
14412 - ebp as the base always wants a displacement. */
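/* Worked examples (illustration only, 32-bit addressing): under these
   rules (%eax) costs 0 extra bytes, (%esp) costs 1 (a SIB byte),
   (%ebp) costs 1 (a zero disp8), 8(%ebx) costs 1 (a disp8), a bare
   symbol or other disp32 costs 4, and 8(%esp) costs 2 (SIB byte plus
   disp8). */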
14413
14414 /* Register Indirect. */
14415 if (base && !index && !disp)
14416 {
14417 /* esp (for its index) and ebp (for its displacement) need
14418 the two-byte modrm form. */
14419 if (addr == stack_pointer_rtx
14420 || addr == arg_pointer_rtx
14421 || addr == frame_pointer_rtx
14422 || addr == hard_frame_pointer_rtx)
14423 len = 1;
14424 }
14425
14426 /* Direct Addressing. */
14427 else if (disp && !base && !index)
14428 len = 4;
14429
14430 else
14431 {
14432 /* Find the length of the displacement constant. */
14433 if (disp)
14434 {
14435 if (base && satisfies_constraint_K (disp))
14436 len = 1;
14437 else
14438 len = 4;
14439 }
14440 /* ebp always wants a displacement. */
14441 else if (base == hard_frame_pointer_rtx)
14442 len = 1;
14443
14444 /* An index requires the two-byte modrm form.... */
14445 if (index
14446 /* ...like esp, which always wants an index. */
14447 || base == stack_pointer_rtx
14448 || base == arg_pointer_rtx
14449 || base == frame_pointer_rtx)
14450 len += 1;
14451 }
14452
14453 return len;
14454 }
14455
14456 /* Compute default value for "length_immediate" attribute. When SHORTFORM
14457 is set, expect that the insn has an 8-bit immediate alternative. */
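/* For illustration (not part of the original comment): most ALU
   instructions have a sign-extended 8-bit immediate form, so with
   SHORTFORM set "addl $8, %eax" gets length_immediate 1, while
   "addl $1000, %eax" still needs the full 4-byte immediate. */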
14458 int
14459 ix86_attr_length_immediate_default (rtx insn, int shortform)
14460 {
14461 int len = 0;
14462 int i;
14463 extract_insn_cached (insn);
14464 for (i = recog_data.n_operands - 1; i >= 0; --i)
14465 if (CONSTANT_P (recog_data.operand[i]))
14466 {
14467 gcc_assert (!len);
14468 if (shortform && satisfies_constraint_K (recog_data.operand[i]))
14469 len = 1;
14470 else
14471 {
14472 switch (get_attr_mode (insn))
14473 {
14474 case MODE_QI:
14475 len += 1;
14476 break;
14477 case MODE_HI:
14478 len += 2;
14479 break;
14480 case MODE_SI:
14481 len += 4;
14482 break;
14483 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
14484 case MODE_DI:
14485 len += 4;
14486 break;
14487 default:
14488 fatal_insn ("unknown insn mode", insn);
14489 }
14490 }
14491 }
14492 return len;
14493 }
14494 /* Compute default value for "length_address" attribute. */
14495 int
14496 ix86_attr_length_address_default (rtx insn)
14497 {
14498 int i;
14499
14500 if (get_attr_type (insn) == TYPE_LEA)
14501 {
14502 rtx set = PATTERN (insn);
14503
14504 if (GET_CODE (set) == PARALLEL)
14505 set = XVECEXP (set, 0, 0);
14506
14507 gcc_assert (GET_CODE (set) == SET);
14508
14509 return memory_address_length (SET_SRC (set));
14510 }
14511
14512 extract_insn_cached (insn);
14513 for (i = recog_data.n_operands - 1; i >= 0; --i)
14514 if (GET_CODE (recog_data.operand[i]) == MEM)
14515 {
14516 return memory_address_length (XEXP (recog_data.operand[i], 0));
14518 }
14519 return 0;
14520 }
14521 \f
14522 /* Return the maximum number of instructions a cpu can issue. */
14523
14524 static int
14525 ix86_issue_rate (void)
14526 {
14527 switch (ix86_tune)
14528 {
14529 case PROCESSOR_PENTIUM:
14530 case PROCESSOR_K6:
14531 return 2;
14532
14533 case PROCESSOR_PENTIUMPRO:
14534 case PROCESSOR_PENTIUM4:
14535 case PROCESSOR_ATHLON:
14536 case PROCESSOR_K8:
14537 case PROCESSOR_NOCONA:
14538 case PROCESSOR_GENERIC32:
14539 case PROCESSOR_GENERIC64:
14540 return 3;
14541
14542 case PROCESSOR_CORE2:
14543 return 4;
14544
14545 default:
14546 return 1;
14547 }
14548 }
14549
14550 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
14551 by DEP_INSN and nothing set by DEP_INSN. */
14552
14553 static int
14554 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
14555 {
14556 rtx set, set2;
14557
14558 /* Simplify the test for uninteresting insns. */
14559 if (insn_type != TYPE_SETCC
14560 && insn_type != TYPE_ICMOV
14561 && insn_type != TYPE_FCMOV
14562 && insn_type != TYPE_IBR)
14563 return 0;
14564
14565 if ((set = single_set (dep_insn)) != 0)
14566 {
14567 set = SET_DEST (set);
14568 set2 = NULL_RTX;
14569 }
14570 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
14571 && XVECLEN (PATTERN (dep_insn), 0) == 2
14572 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
14573 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
14574 {
14575 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
14576 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
14577 }
14578 else
14579 return 0;
14580
14581 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
14582 return 0;
14583
14584 /* This test is true if the dependent insn reads the flags but
14585 not any other potentially set register. */
14586 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
14587 return 0;
14588
14589 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
14590 return 0;
14591
14592 return 1;
14593 }
14594
14595 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
14596 address with operands set by DEP_INSN. */
14597
14598 static int
14599 ix86_agi_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
14600 {
14601 rtx addr;
14602
14603 if (insn_type == TYPE_LEA
14604 && TARGET_PENTIUM)
14605 {
14606 addr = PATTERN (insn);
14607
14608 if (GET_CODE (addr) == PARALLEL)
14609 addr = XVECEXP (addr, 0, 0);
14610
14611 gcc_assert (GET_CODE (addr) == SET);
14612
14613 addr = SET_SRC (addr);
14614 }
14615 else
14616 {
14617 int i;
14618 extract_insn_cached (insn);
14619 for (i = recog_data.n_operands - 1; i >= 0; --i)
14620 if (GET_CODE (recog_data.operand[i]) == MEM)
14621 {
14622 addr = XEXP (recog_data.operand[i], 0);
14623 goto found;
14624 }
14625 return 0;
14626 found:;
14627 }
14628
14629 return modified_in_p (addr, dep_insn);
14630 }
14631
14632 static int
14633 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
14634 {
14635 enum attr_type insn_type, dep_insn_type;
14636 enum attr_memory memory;
14637 rtx set, set2;
14638 int dep_insn_code_number;
14639
14640 /* Anti and output dependencies have zero cost on all CPUs. */
14641 if (REG_NOTE_KIND (link) != 0)
14642 return 0;
14643
14644 dep_insn_code_number = recog_memoized (dep_insn);
14645
14646 /* If we can't recognize the insns, we can't really do anything. */
14647 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
14648 return cost;
14649
14650 insn_type = get_attr_type (insn);
14651 dep_insn_type = get_attr_type (dep_insn);
14652
14653 switch (ix86_tune)
14654 {
14655 case PROCESSOR_PENTIUM:
14656 /* Address Generation Interlock adds a cycle of latency. */
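/* For example (illustration only): a load such as "movl (%ebx), %eax"
   issued right after "addl $4, %ebx" pays this extra cycle, because
   the address generation stage needs the register the previous insn
   has only just written. */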
14657 if (ix86_agi_dependent (insn, dep_insn, insn_type))
14658 cost += 1;
14659
14660 /* ??? Compares pair with jump/setcc. */
14661 if (ix86_flags_dependent (insn, dep_insn, insn_type))
14662 cost = 0;
14663
14664 /* Floating point stores require the value to be ready one cycle earlier. */
14665 if (insn_type == TYPE_FMOV
14666 && get_attr_memory (insn) == MEMORY_STORE
14667 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14668 cost += 1;
14669 break;
14670
14671 case PROCESSOR_PENTIUMPRO:
14672 memory = get_attr_memory (insn);
14673
14674 /* INT->FP conversion is expensive. */
14675 if (get_attr_fp_int_src (dep_insn))
14676 cost += 5;
14677
14678 /* There is one cycle extra latency between an FP op and a store. */
14679 if (insn_type == TYPE_FMOV
14680 && (set = single_set (dep_insn)) != NULL_RTX
14681 && (set2 = single_set (insn)) != NULL_RTX
14682 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
14683 && GET_CODE (SET_DEST (set2)) == MEM)
14684 cost += 1;
14685
14686 /* Show the ability of the reorder buffer to hide the latency of a load
14687 by executing it in parallel with the previous instruction, when the
14688 previous instruction is not needed to compute the address. */
14689 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14690 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14691 {
14692 /* Claim moves to take one cycle, as the core can issue one load
14693 at a time and the next load can start a cycle later. */
14694 if (dep_insn_type == TYPE_IMOV
14695 || dep_insn_type == TYPE_FMOV)
14696 cost = 1;
14697 else if (cost > 1)
14698 cost--;
14699 }
14700 break;
14701
14702 case PROCESSOR_K6:
14703 memory = get_attr_memory (insn);
14704
14705 /* The esp dependency is resolved before the instruction is really
14706 finished. */
14707 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
14708 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
14709 return 1;
14710
14711 /* INT->FP conversion is expensive. */
14712 if (get_attr_fp_int_src (dep_insn))
14713 cost += 5;
14714
14715 /* Show the ability of the reorder buffer to hide the latency of a load
14716 by executing it in parallel with the previous instruction, when the
14717 previous instruction is not needed to compute the address. */
14718 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14719 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14720 {
14721 /* Claim moves to take one cycle, as the core can issue one load
14722 at a time and the next load can start a cycle later. */
14723 if (dep_insn_type == TYPE_IMOV
14724 || dep_insn_type == TYPE_FMOV)
14725 cost = 1;
14726 else if (cost > 2)
14727 cost -= 2;
14728 else
14729 cost = 1;
14730 }
14731 break;
14732
14733 case PROCESSOR_ATHLON:
14734 case PROCESSOR_K8:
14735 case PROCESSOR_GENERIC32:
14736 case PROCESSOR_GENERIC64:
14737 memory = get_attr_memory (insn);
14738
14739 /* Show the ability of the reorder buffer to hide the latency of a load
14740 by executing it in parallel with the previous instruction, when the
14741 previous instruction is not needed to compute the address. */
14742 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14743 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14744 {
14745 enum attr_unit unit = get_attr_unit (insn);
14746 int loadcost = 3;
14747
14748 /* Because of the difference between the length of integer and
14749 floating unit pipeline preparation stages, the memory operands
14750 for floating point are cheaper.
14751
14752 ??? For Athlon the difference is most probably 2. */
14753 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
14754 loadcost = 3;
14755 else
14756 loadcost = TARGET_ATHLON ? 2 : 0;
14757
14758 if (cost >= loadcost)
14759 cost -= loadcost;
14760 else
14761 cost = 0;
14762 }
14763
14764 default:
14765 break;
14766 }
14767
14768 return cost;
14769 }
14770
14771 /* How many alternative schedules to try. This should be as wide as the
14772 scheduling freedom in the DFA, but no wider. Making this value too
14773 large results in extra work for the scheduler. */
14774
14775 static int
14776 ia32_multipass_dfa_lookahead (void)
14777 {
14778 if (ix86_tune == PROCESSOR_PENTIUM)
14779 return 2;
14780
14781 if (ix86_tune == PROCESSOR_PENTIUMPRO
14782 || ix86_tune == PROCESSOR_K6)
14783 return 1;
14784
14785 else
14786 return 0;
14787 }
14788
14789 \f
14790 /* Compute the alignment given to a constant that is being placed in memory.
14791 EXP is the constant and ALIGN is the alignment that the object would
14792 ordinarily have.
14793 The value of this function is used instead of that alignment to align
14794 the object. */
14795
14796 int
14797 ix86_constant_alignment (tree exp, int align)
14798 {
14799 if (TREE_CODE (exp) == REAL_CST)
14800 {
14801 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
14802 return 64;
14803 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
14804 return 128;
14805 }
14806 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
14807 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
14808 return BITS_PER_WORD;
14809
14810 return align;
14811 }
14812
14813 /* Compute the alignment for a static variable.
14814 TYPE is the data type, and ALIGN is the alignment that
14815 the object would ordinarily have. The value of this function is used
14816 instead of that alignment to align the object. */
14817
14818 int
14819 ix86_data_alignment (tree type, int align)
14820 {
14821 int max_align = optimize_size ? BITS_PER_WORD : 256;
14822
14823 if (AGGREGATE_TYPE_P (type)
14824 && TYPE_SIZE (type)
14825 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
14826 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
14827 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
14828 && align < max_align)
14829 align = max_align;
14830
14831 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
14832 to a 16-byte boundary. */
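/* For example (illustration only): a file-scope "char buf[20]" (20
   bytes, ordinarily byte-aligned) is given 128-bit (16-byte) alignment
   here, so block moves and vector code can use aligned 16-byte
   accesses. */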
14833 if (TARGET_64BIT)
14834 {
14835 if (AGGREGATE_TYPE_P (type)
14836 && TYPE_SIZE (type)
14837 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
14838 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
14839 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
14840 return 128;
14841 }
14842
14843 if (TREE_CODE (type) == ARRAY_TYPE)
14844 {
14845 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
14846 return 64;
14847 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
14848 return 128;
14849 }
14850 else if (TREE_CODE (type) == COMPLEX_TYPE)
14851 {
14852
14853 if (TYPE_MODE (type) == DCmode && align < 64)
14854 return 64;
14855 if (TYPE_MODE (type) == XCmode && align < 128)
14856 return 128;
14857 }
14858 else if ((TREE_CODE (type) == RECORD_TYPE
14859 || TREE_CODE (type) == UNION_TYPE
14860 || TREE_CODE (type) == QUAL_UNION_TYPE)
14861 && TYPE_FIELDS (type))
14862 {
14863 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
14864 return 64;
14865 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
14866 return 128;
14867 }
14868 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
14869 || TREE_CODE (type) == INTEGER_TYPE)
14870 {
14871 if (TYPE_MODE (type) == DFmode && align < 64)
14872 return 64;
14873 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
14874 return 128;
14875 }
14876
14877 return align;
14878 }
14879
14880 /* Compute the alignment for a local variable.
14881 TYPE is the data type, and ALIGN is the alignment that
14882 the object would ordinarily have. The value of this function is used
14883 instead of that alignment to align the object. */
14884
14885 int
14886 ix86_local_alignment (tree type, int align)
14887 {
14888 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
14889 to a 16-byte boundary. */
14890 if (TARGET_64BIT)
14891 {
14892 if (AGGREGATE_TYPE_P (type)
14893 && TYPE_SIZE (type)
14894 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
14895 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
14896 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
14897 return 128;
14898 }
14899 if (TREE_CODE (type) == ARRAY_TYPE)
14900 {
14901 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
14902 return 64;
14903 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
14904 return 128;
14905 }
14906 else if (TREE_CODE (type) == COMPLEX_TYPE)
14907 {
14908 if (TYPE_MODE (type) == DCmode && align < 64)
14909 return 64;
14910 if (TYPE_MODE (type) == XCmode && align < 128)
14911 return 128;
14912 }
14913 else if ((TREE_CODE (type) == RECORD_TYPE
14914 || TREE_CODE (type) == UNION_TYPE
14915 || TREE_CODE (type) == QUAL_UNION_TYPE)
14916 && TYPE_FIELDS (type))
14917 {
14918 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
14919 return 64;
14920 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
14921 return 128;
14922 }
14923 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
14924 || TREE_CODE (type) == INTEGER_TYPE)
14925 {
14926
14927 if (TYPE_MODE (type) == DFmode && align < 64)
14928 return 64;
14929 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
14930 return 128;
14931 }
14932 return align;
14933 }
14934 \f
14935 /* Emit RTL insns to initialize the variable parts of a trampoline.
14936 FNADDR is an RTX for the address of the function's pure code.
14937 CXT is an RTX for the static chain value for the function. */
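/* For reference (an illustrative sketch, not verbatim from the
   sources), the bytes emitted below decode roughly as:

     32-bit:  b9 <cxt>           movl   $cxt, %ecx
              e9 <disp>          jmp    fnaddr          (pc-relative)

     64-bit:  41 bb <fnaddr32>   movl   $fnaddr, %r11d  (or 49 bb
                                 <fnaddr64> movabs when it does not fit)
              49 ba <cxt>        movabs $cxt, %r10
              49 ff e3           jmp    *%r11

   %ecx and %r10 being the static chain registers of the two ABIs. */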
14938 void
14939 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
14940 {
14941 if (!TARGET_64BIT)
14942 {
14943 /* Compute offset from the end of the jmp to the target function. */
14944 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
14945 plus_constant (tramp, 10),
14946 NULL_RTX, 1, OPTAB_DIRECT);
14947 emit_move_insn (gen_rtx_MEM (QImode, tramp),
14948 gen_int_mode (0xb9, QImode));
14949 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
14950 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
14951 gen_int_mode (0xe9, QImode));
14952 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
14953 }
14954 else
14955 {
14956 int offset = 0;
14957 /* Try to load the address using the shorter movl instead of movabs.
14958 We may want to support movq for kernel mode, but the kernel does not use
14959 trampolines at the moment. */
14960 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
14961 {
14962 fnaddr = copy_to_mode_reg (DImode, fnaddr);
14963 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
14964 gen_int_mode (0xbb41, HImode));
14965 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
14966 gen_lowpart (SImode, fnaddr));
14967 offset += 6;
14968 }
14969 else
14970 {
14971 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
14972 gen_int_mode (0xbb49, HImode));
14973 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
14974 fnaddr);
14975 offset += 10;
14976 }
14977 /* Load static chain using movabs to r10. */
14978 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
14979 gen_int_mode (0xba49, HImode));
14980 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
14981 cxt);
14982 offset += 10;
14983 /* Jump to the target address in r11. */
14984 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
14985 gen_int_mode (0xff49, HImode));
14986 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
14987 gen_int_mode (0xe3, QImode));
14988 offset += 3;
14989 gcc_assert (offset <= TRAMPOLINE_SIZE);
14990 }
14991
14992 #ifdef ENABLE_EXECUTE_STACK
14993 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
14994 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
14995 #endif
14996 }
14997 \f
14998 /* Codes for all the SSE/MMX builtins. */
14999 enum ix86_builtins
15000 {
15001 IX86_BUILTIN_ADDPS,
15002 IX86_BUILTIN_ADDSS,
15003 IX86_BUILTIN_DIVPS,
15004 IX86_BUILTIN_DIVSS,
15005 IX86_BUILTIN_MULPS,
15006 IX86_BUILTIN_MULSS,
15007 IX86_BUILTIN_SUBPS,
15008 IX86_BUILTIN_SUBSS,
15009
15010 IX86_BUILTIN_CMPEQPS,
15011 IX86_BUILTIN_CMPLTPS,
15012 IX86_BUILTIN_CMPLEPS,
15013 IX86_BUILTIN_CMPGTPS,
15014 IX86_BUILTIN_CMPGEPS,
15015 IX86_BUILTIN_CMPNEQPS,
15016 IX86_BUILTIN_CMPNLTPS,
15017 IX86_BUILTIN_CMPNLEPS,
15018 IX86_BUILTIN_CMPNGTPS,
15019 IX86_BUILTIN_CMPNGEPS,
15020 IX86_BUILTIN_CMPORDPS,
15021 IX86_BUILTIN_CMPUNORDPS,
15022 IX86_BUILTIN_CMPEQSS,
15023 IX86_BUILTIN_CMPLTSS,
15024 IX86_BUILTIN_CMPLESS,
15025 IX86_BUILTIN_CMPNEQSS,
15026 IX86_BUILTIN_CMPNLTSS,
15027 IX86_BUILTIN_CMPNLESS,
15028 IX86_BUILTIN_CMPNGTSS,
15029 IX86_BUILTIN_CMPNGESS,
15030 IX86_BUILTIN_CMPORDSS,
15031 IX86_BUILTIN_CMPUNORDSS,
15032
15033 IX86_BUILTIN_COMIEQSS,
15034 IX86_BUILTIN_COMILTSS,
15035 IX86_BUILTIN_COMILESS,
15036 IX86_BUILTIN_COMIGTSS,
15037 IX86_BUILTIN_COMIGESS,
15038 IX86_BUILTIN_COMINEQSS,
15039 IX86_BUILTIN_UCOMIEQSS,
15040 IX86_BUILTIN_UCOMILTSS,
15041 IX86_BUILTIN_UCOMILESS,
15042 IX86_BUILTIN_UCOMIGTSS,
15043 IX86_BUILTIN_UCOMIGESS,
15044 IX86_BUILTIN_UCOMINEQSS,
15045
15046 IX86_BUILTIN_CVTPI2PS,
15047 IX86_BUILTIN_CVTPS2PI,
15048 IX86_BUILTIN_CVTSI2SS,
15049 IX86_BUILTIN_CVTSI642SS,
15050 IX86_BUILTIN_CVTSS2SI,
15051 IX86_BUILTIN_CVTSS2SI64,
15052 IX86_BUILTIN_CVTTPS2PI,
15053 IX86_BUILTIN_CVTTSS2SI,
15054 IX86_BUILTIN_CVTTSS2SI64,
15055
15056 IX86_BUILTIN_MAXPS,
15057 IX86_BUILTIN_MAXSS,
15058 IX86_BUILTIN_MINPS,
15059 IX86_BUILTIN_MINSS,
15060
15061 IX86_BUILTIN_LOADUPS,
15062 IX86_BUILTIN_STOREUPS,
15063 IX86_BUILTIN_MOVSS,
15064
15065 IX86_BUILTIN_MOVHLPS,
15066 IX86_BUILTIN_MOVLHPS,
15067 IX86_BUILTIN_LOADHPS,
15068 IX86_BUILTIN_LOADLPS,
15069 IX86_BUILTIN_STOREHPS,
15070 IX86_BUILTIN_STORELPS,
15071
15072 IX86_BUILTIN_MASKMOVQ,
15073 IX86_BUILTIN_MOVMSKPS,
15074 IX86_BUILTIN_PMOVMSKB,
15075
15076 IX86_BUILTIN_MOVNTPS,
15077 IX86_BUILTIN_MOVNTQ,
15078
15079 IX86_BUILTIN_LOADDQU,
15080 IX86_BUILTIN_STOREDQU,
15081
15082 IX86_BUILTIN_PACKSSWB,
15083 IX86_BUILTIN_PACKSSDW,
15084 IX86_BUILTIN_PACKUSWB,
15085
15086 IX86_BUILTIN_PADDB,
15087 IX86_BUILTIN_PADDW,
15088 IX86_BUILTIN_PADDD,
15089 IX86_BUILTIN_PADDQ,
15090 IX86_BUILTIN_PADDSB,
15091 IX86_BUILTIN_PADDSW,
15092 IX86_BUILTIN_PADDUSB,
15093 IX86_BUILTIN_PADDUSW,
15094 IX86_BUILTIN_PSUBB,
15095 IX86_BUILTIN_PSUBW,
15096 IX86_BUILTIN_PSUBD,
15097 IX86_BUILTIN_PSUBQ,
15098 IX86_BUILTIN_PSUBSB,
15099 IX86_BUILTIN_PSUBSW,
15100 IX86_BUILTIN_PSUBUSB,
15101 IX86_BUILTIN_PSUBUSW,
15102
15103 IX86_BUILTIN_PAND,
15104 IX86_BUILTIN_PANDN,
15105 IX86_BUILTIN_POR,
15106 IX86_BUILTIN_PXOR,
15107
15108 IX86_BUILTIN_PAVGB,
15109 IX86_BUILTIN_PAVGW,
15110
15111 IX86_BUILTIN_PCMPEQB,
15112 IX86_BUILTIN_PCMPEQW,
15113 IX86_BUILTIN_PCMPEQD,
15114 IX86_BUILTIN_PCMPGTB,
15115 IX86_BUILTIN_PCMPGTW,
15116 IX86_BUILTIN_PCMPGTD,
15117
15118 IX86_BUILTIN_PMADDWD,
15119
15120 IX86_BUILTIN_PMAXSW,
15121 IX86_BUILTIN_PMAXUB,
15122 IX86_BUILTIN_PMINSW,
15123 IX86_BUILTIN_PMINUB,
15124
15125 IX86_BUILTIN_PMULHUW,
15126 IX86_BUILTIN_PMULHW,
15127 IX86_BUILTIN_PMULLW,
15128
15129 IX86_BUILTIN_PSADBW,
15130 IX86_BUILTIN_PSHUFW,
15131
15132 IX86_BUILTIN_PSLLW,
15133 IX86_BUILTIN_PSLLD,
15134 IX86_BUILTIN_PSLLQ,
15135 IX86_BUILTIN_PSRAW,
15136 IX86_BUILTIN_PSRAD,
15137 IX86_BUILTIN_PSRLW,
15138 IX86_BUILTIN_PSRLD,
15139 IX86_BUILTIN_PSRLQ,
15140 IX86_BUILTIN_PSLLWI,
15141 IX86_BUILTIN_PSLLDI,
15142 IX86_BUILTIN_PSLLQI,
15143 IX86_BUILTIN_PSRAWI,
15144 IX86_BUILTIN_PSRADI,
15145 IX86_BUILTIN_PSRLWI,
15146 IX86_BUILTIN_PSRLDI,
15147 IX86_BUILTIN_PSRLQI,
15148
15149 IX86_BUILTIN_PUNPCKHBW,
15150 IX86_BUILTIN_PUNPCKHWD,
15151 IX86_BUILTIN_PUNPCKHDQ,
15152 IX86_BUILTIN_PUNPCKLBW,
15153 IX86_BUILTIN_PUNPCKLWD,
15154 IX86_BUILTIN_PUNPCKLDQ,
15155
15156 IX86_BUILTIN_SHUFPS,
15157
15158 IX86_BUILTIN_RCPPS,
15159 IX86_BUILTIN_RCPSS,
15160 IX86_BUILTIN_RSQRTPS,
15161 IX86_BUILTIN_RSQRTSS,
15162 IX86_BUILTIN_SQRTPS,
15163 IX86_BUILTIN_SQRTSS,
15164
15165 IX86_BUILTIN_UNPCKHPS,
15166 IX86_BUILTIN_UNPCKLPS,
15167
15168 IX86_BUILTIN_ANDPS,
15169 IX86_BUILTIN_ANDNPS,
15170 IX86_BUILTIN_ORPS,
15171 IX86_BUILTIN_XORPS,
15172
15173 IX86_BUILTIN_EMMS,
15174 IX86_BUILTIN_LDMXCSR,
15175 IX86_BUILTIN_STMXCSR,
15176 IX86_BUILTIN_SFENCE,
15177
15178 /* 3DNow! Original */
15179 IX86_BUILTIN_FEMMS,
15180 IX86_BUILTIN_PAVGUSB,
15181 IX86_BUILTIN_PF2ID,
15182 IX86_BUILTIN_PFACC,
15183 IX86_BUILTIN_PFADD,
15184 IX86_BUILTIN_PFCMPEQ,
15185 IX86_BUILTIN_PFCMPGE,
15186 IX86_BUILTIN_PFCMPGT,
15187 IX86_BUILTIN_PFMAX,
15188 IX86_BUILTIN_PFMIN,
15189 IX86_BUILTIN_PFMUL,
15190 IX86_BUILTIN_PFRCP,
15191 IX86_BUILTIN_PFRCPIT1,
15192 IX86_BUILTIN_PFRCPIT2,
15193 IX86_BUILTIN_PFRSQIT1,
15194 IX86_BUILTIN_PFRSQRT,
15195 IX86_BUILTIN_PFSUB,
15196 IX86_BUILTIN_PFSUBR,
15197 IX86_BUILTIN_PI2FD,
15198 IX86_BUILTIN_PMULHRW,
15199
15200 /* 3DNow! Athlon Extensions */
15201 IX86_BUILTIN_PF2IW,
15202 IX86_BUILTIN_PFNACC,
15203 IX86_BUILTIN_PFPNACC,
15204 IX86_BUILTIN_PI2FW,
15205 IX86_BUILTIN_PSWAPDSI,
15206 IX86_BUILTIN_PSWAPDSF,
15207
15208 /* SSE2 */
15209 IX86_BUILTIN_ADDPD,
15210 IX86_BUILTIN_ADDSD,
15211 IX86_BUILTIN_DIVPD,
15212 IX86_BUILTIN_DIVSD,
15213 IX86_BUILTIN_MULPD,
15214 IX86_BUILTIN_MULSD,
15215 IX86_BUILTIN_SUBPD,
15216 IX86_BUILTIN_SUBSD,
15217
15218 IX86_BUILTIN_CMPEQPD,
15219 IX86_BUILTIN_CMPLTPD,
15220 IX86_BUILTIN_CMPLEPD,
15221 IX86_BUILTIN_CMPGTPD,
15222 IX86_BUILTIN_CMPGEPD,
15223 IX86_BUILTIN_CMPNEQPD,
15224 IX86_BUILTIN_CMPNLTPD,
15225 IX86_BUILTIN_CMPNLEPD,
15226 IX86_BUILTIN_CMPNGTPD,
15227 IX86_BUILTIN_CMPNGEPD,
15228 IX86_BUILTIN_CMPORDPD,
15229 IX86_BUILTIN_CMPUNORDPD,
15230 IX86_BUILTIN_CMPNEPD,
15231 IX86_BUILTIN_CMPEQSD,
15232 IX86_BUILTIN_CMPLTSD,
15233 IX86_BUILTIN_CMPLESD,
15234 IX86_BUILTIN_CMPNEQSD,
15235 IX86_BUILTIN_CMPNLTSD,
15236 IX86_BUILTIN_CMPNLESD,
15237 IX86_BUILTIN_CMPORDSD,
15238 IX86_BUILTIN_CMPUNORDSD,
15239 IX86_BUILTIN_CMPNESD,
15240
15241 IX86_BUILTIN_COMIEQSD,
15242 IX86_BUILTIN_COMILTSD,
15243 IX86_BUILTIN_COMILESD,
15244 IX86_BUILTIN_COMIGTSD,
15245 IX86_BUILTIN_COMIGESD,
15246 IX86_BUILTIN_COMINEQSD,
15247 IX86_BUILTIN_UCOMIEQSD,
15248 IX86_BUILTIN_UCOMILTSD,
15249 IX86_BUILTIN_UCOMILESD,
15250 IX86_BUILTIN_UCOMIGTSD,
15251 IX86_BUILTIN_UCOMIGESD,
15252 IX86_BUILTIN_UCOMINEQSD,
15253
15254 IX86_BUILTIN_MAXPD,
15255 IX86_BUILTIN_MAXSD,
15256 IX86_BUILTIN_MINPD,
15257 IX86_BUILTIN_MINSD,
15258
15259 IX86_BUILTIN_ANDPD,
15260 IX86_BUILTIN_ANDNPD,
15261 IX86_BUILTIN_ORPD,
15262 IX86_BUILTIN_XORPD,
15263
15264 IX86_BUILTIN_SQRTPD,
15265 IX86_BUILTIN_SQRTSD,
15266
15267 IX86_BUILTIN_UNPCKHPD,
15268 IX86_BUILTIN_UNPCKLPD,
15269
15270 IX86_BUILTIN_SHUFPD,
15271
15272 IX86_BUILTIN_LOADUPD,
15273 IX86_BUILTIN_STOREUPD,
15274 IX86_BUILTIN_MOVSD,
15275
15276 IX86_BUILTIN_LOADHPD,
15277 IX86_BUILTIN_LOADLPD,
15278
15279 IX86_BUILTIN_CVTDQ2PD,
15280 IX86_BUILTIN_CVTDQ2PS,
15281
15282 IX86_BUILTIN_CVTPD2DQ,
15283 IX86_BUILTIN_CVTPD2PI,
15284 IX86_BUILTIN_CVTPD2PS,
15285 IX86_BUILTIN_CVTTPD2DQ,
15286 IX86_BUILTIN_CVTTPD2PI,
15287
15288 IX86_BUILTIN_CVTPI2PD,
15289 IX86_BUILTIN_CVTSI2SD,
15290 IX86_BUILTIN_CVTSI642SD,
15291
15292 IX86_BUILTIN_CVTSD2SI,
15293 IX86_BUILTIN_CVTSD2SI64,
15294 IX86_BUILTIN_CVTSD2SS,
15295 IX86_BUILTIN_CVTSS2SD,
15296 IX86_BUILTIN_CVTTSD2SI,
15297 IX86_BUILTIN_CVTTSD2SI64,
15298
15299 IX86_BUILTIN_CVTPS2DQ,
15300 IX86_BUILTIN_CVTPS2PD,
15301 IX86_BUILTIN_CVTTPS2DQ,
15302
15303 IX86_BUILTIN_MOVNTI,
15304 IX86_BUILTIN_MOVNTPD,
15305 IX86_BUILTIN_MOVNTDQ,
15306
15307 /* SSE2 MMX */
15308 IX86_BUILTIN_MASKMOVDQU,
15309 IX86_BUILTIN_MOVMSKPD,
15310 IX86_BUILTIN_PMOVMSKB128,
15311
15312 IX86_BUILTIN_PACKSSWB128,
15313 IX86_BUILTIN_PACKSSDW128,
15314 IX86_BUILTIN_PACKUSWB128,
15315
15316 IX86_BUILTIN_PADDB128,
15317 IX86_BUILTIN_PADDW128,
15318 IX86_BUILTIN_PADDD128,
15319 IX86_BUILTIN_PADDQ128,
15320 IX86_BUILTIN_PADDSB128,
15321 IX86_BUILTIN_PADDSW128,
15322 IX86_BUILTIN_PADDUSB128,
15323 IX86_BUILTIN_PADDUSW128,
15324 IX86_BUILTIN_PSUBB128,
15325 IX86_BUILTIN_PSUBW128,
15326 IX86_BUILTIN_PSUBD128,
15327 IX86_BUILTIN_PSUBQ128,
15328 IX86_BUILTIN_PSUBSB128,
15329 IX86_BUILTIN_PSUBSW128,
15330 IX86_BUILTIN_PSUBUSB128,
15331 IX86_BUILTIN_PSUBUSW128,
15332
15333 IX86_BUILTIN_PAND128,
15334 IX86_BUILTIN_PANDN128,
15335 IX86_BUILTIN_POR128,
15336 IX86_BUILTIN_PXOR128,
15337
15338 IX86_BUILTIN_PAVGB128,
15339 IX86_BUILTIN_PAVGW128,
15340
15341 IX86_BUILTIN_PCMPEQB128,
15342 IX86_BUILTIN_PCMPEQW128,
15343 IX86_BUILTIN_PCMPEQD128,
15344 IX86_BUILTIN_PCMPGTB128,
15345 IX86_BUILTIN_PCMPGTW128,
15346 IX86_BUILTIN_PCMPGTD128,
15347
15348 IX86_BUILTIN_PMADDWD128,
15349
15350 IX86_BUILTIN_PMAXSW128,
15351 IX86_BUILTIN_PMAXUB128,
15352 IX86_BUILTIN_PMINSW128,
15353 IX86_BUILTIN_PMINUB128,
15354
15355 IX86_BUILTIN_PMULUDQ,
15356 IX86_BUILTIN_PMULUDQ128,
15357 IX86_BUILTIN_PMULHUW128,
15358 IX86_BUILTIN_PMULHW128,
15359 IX86_BUILTIN_PMULLW128,
15360
15361 IX86_BUILTIN_PSADBW128,
15362 IX86_BUILTIN_PSHUFHW,
15363 IX86_BUILTIN_PSHUFLW,
15364 IX86_BUILTIN_PSHUFD,
15365
15366 IX86_BUILTIN_PSLLW128,
15367 IX86_BUILTIN_PSLLD128,
15368 IX86_BUILTIN_PSLLQ128,
15369 IX86_BUILTIN_PSRAW128,
15370 IX86_BUILTIN_PSRAD128,
15371 IX86_BUILTIN_PSRLW128,
15372 IX86_BUILTIN_PSRLD128,
15373 IX86_BUILTIN_PSRLQ128,
15374 IX86_BUILTIN_PSLLDQI128,
15375 IX86_BUILTIN_PSLLWI128,
15376 IX86_BUILTIN_PSLLDI128,
15377 IX86_BUILTIN_PSLLQI128,
15378 IX86_BUILTIN_PSRAWI128,
15379 IX86_BUILTIN_PSRADI128,
15380 IX86_BUILTIN_PSRLDQI128,
15381 IX86_BUILTIN_PSRLWI128,
15382 IX86_BUILTIN_PSRLDI128,
15383 IX86_BUILTIN_PSRLQI128,
15384
15385 IX86_BUILTIN_PUNPCKHBW128,
15386 IX86_BUILTIN_PUNPCKHWD128,
15387 IX86_BUILTIN_PUNPCKHDQ128,
15388 IX86_BUILTIN_PUNPCKHQDQ128,
15389 IX86_BUILTIN_PUNPCKLBW128,
15390 IX86_BUILTIN_PUNPCKLWD128,
15391 IX86_BUILTIN_PUNPCKLDQ128,
15392 IX86_BUILTIN_PUNPCKLQDQ128,
15393
15394 IX86_BUILTIN_CLFLUSH,
15395 IX86_BUILTIN_MFENCE,
15396 IX86_BUILTIN_LFENCE,
15397
15398 /* Prescott New Instructions. */
15399 IX86_BUILTIN_ADDSUBPS,
15400 IX86_BUILTIN_HADDPS,
15401 IX86_BUILTIN_HSUBPS,
15402 IX86_BUILTIN_MOVSHDUP,
15403 IX86_BUILTIN_MOVSLDUP,
15404 IX86_BUILTIN_ADDSUBPD,
15405 IX86_BUILTIN_HADDPD,
15406 IX86_BUILTIN_HSUBPD,
15407 IX86_BUILTIN_LDDQU,
15408
15409 IX86_BUILTIN_MONITOR,
15410 IX86_BUILTIN_MWAIT,
15411
15412 /* SSSE3. */
15413 IX86_BUILTIN_PHADDW,
15414 IX86_BUILTIN_PHADDD,
15415 IX86_BUILTIN_PHADDSW,
15416 IX86_BUILTIN_PHSUBW,
15417 IX86_BUILTIN_PHSUBD,
15418 IX86_BUILTIN_PHSUBSW,
15419 IX86_BUILTIN_PMADDUBSW,
15420 IX86_BUILTIN_PMULHRSW,
15421 IX86_BUILTIN_PSHUFB,
15422 IX86_BUILTIN_PSIGNB,
15423 IX86_BUILTIN_PSIGNW,
15424 IX86_BUILTIN_PSIGND,
15425 IX86_BUILTIN_PALIGNR,
15426 IX86_BUILTIN_PABSB,
15427 IX86_BUILTIN_PABSW,
15428 IX86_BUILTIN_PABSD,
15429
15430 IX86_BUILTIN_PHADDW128,
15431 IX86_BUILTIN_PHADDD128,
15432 IX86_BUILTIN_PHADDSW128,
15433 IX86_BUILTIN_PHSUBW128,
15434 IX86_BUILTIN_PHSUBD128,
15435 IX86_BUILTIN_PHSUBSW128,
15436 IX86_BUILTIN_PMADDUBSW128,
15437 IX86_BUILTIN_PMULHRSW128,
15438 IX86_BUILTIN_PSHUFB128,
15439 IX86_BUILTIN_PSIGNB128,
15440 IX86_BUILTIN_PSIGNW128,
15441 IX86_BUILTIN_PSIGND128,
15442 IX86_BUILTIN_PALIGNR128,
15443 IX86_BUILTIN_PABSB128,
15444 IX86_BUILTIN_PABSW128,
15445 IX86_BUILTIN_PABSD128,
15446
15447 IX86_BUILTIN_VEC_INIT_V2SI,
15448 IX86_BUILTIN_VEC_INIT_V4HI,
15449 IX86_BUILTIN_VEC_INIT_V8QI,
15450 IX86_BUILTIN_VEC_EXT_V2DF,
15451 IX86_BUILTIN_VEC_EXT_V2DI,
15452 IX86_BUILTIN_VEC_EXT_V4SF,
15453 IX86_BUILTIN_VEC_EXT_V4SI,
15454 IX86_BUILTIN_VEC_EXT_V8HI,
15455 IX86_BUILTIN_VEC_EXT_V2SI,
15456 IX86_BUILTIN_VEC_EXT_V4HI,
15457 IX86_BUILTIN_VEC_SET_V8HI,
15458 IX86_BUILTIN_VEC_SET_V4HI,
15459
15460 IX86_BUILTIN_MAX
15461 };
15462
15463 /* Table for the ix86 builtin decls. */
15464 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
15465
15466 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Do so
15467 * only if the target_flags include one of MASK. Stores the function decl
15468 * in the ix86_builtins array.
15469 * Returns the function decl, or NULL_TREE if the builtin was not added. */
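/* Illustrative use (the builtin name, code and function type below are
 * hypothetical, not ones registered by this file):
 *
 *   def_builtin (MASK_SSE2, "__builtin_ia32_example",
 *                example_ftype, IX86_BUILTIN_EXAMPLE);
 *
 * creates the decl only when SSE2 is enabled in target_flags; a mask
 * that includes MASK_64BIT additionally requires TARGET_64BIT. */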
15470
15471 static inline tree
15472 def_builtin (int mask, const char *name, tree type, enum ix86_builtins code)
15473 {
15474 tree decl = NULL_TREE;
15475
15476 if (mask & target_flags
15477 && (!(mask & MASK_64BIT) || TARGET_64BIT))
15478 {
15479 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
15480 NULL, NULL_TREE);
15481 ix86_builtins[(int) code] = decl;
15482 }
15483
15484 return decl;
15485 }
15486
15487 /* Like def_builtin, but also marks the function decl "const". */
15488
15489 static inline tree
15490 def_builtin_const (int mask, const char *name, tree type,
15491 enum ix86_builtins code)
15492 {
15493 tree decl = def_builtin (mask, name, type, code);
15494 if (decl)
15495 TREE_READONLY (decl) = 1;
15496 return decl;
15497 }
15498
15499 /* Bits for builtin_description.flag. */
15500
15501 /* Set when we don't support the comparison natively, and should
15502 swap the comparison operands in order to support it. */
15503 #define BUILTIN_DESC_SWAP_OPERANDS 1
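/* For example, there is no separate "compare greater" pattern used here:
   __builtin_ia32_cmpgtps is described below with comparison code LT plus
   BUILTIN_DESC_SWAP_OPERANDS, so it is expanded as CMPLTPS with the two
   operands exchanged. */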
15504
15505 struct builtin_description
15506 {
15507 const unsigned int mask;
15508 const enum insn_code icode;
15509 const char *const name;
15510 const enum ix86_builtins code;
15511 const enum rtx_code comparison;
15512 const unsigned int flag;
15513 };
15514
15515 static const struct builtin_description bdesc_comi[] =
15516 {
15517 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
15518 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
15519 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
15520 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
15521 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
15522 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
15523 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
15524 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
15525 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
15526 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
15527 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
15528 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
15529 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
15530 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
15531 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
15532 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
15533 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
15534 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
15535 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
15536 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
15537 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
15538 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
15539 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
15540 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
15541 };
15542
15543 static const struct builtin_description bdesc_2arg[] =
15544 {
15545 /* SSE */
15546 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
15547 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
15548 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
15549 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
15550 { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
15551 { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
15552 { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
15553 { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
15554
15555 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
15556 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
15557 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
15558 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
15559 BUILTIN_DESC_SWAP_OPERANDS },
15560 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
15561 BUILTIN_DESC_SWAP_OPERANDS },
15562 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
15563 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
15564 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
15565 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
15566 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
15567 BUILTIN_DESC_SWAP_OPERANDS },
15568 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
15569 BUILTIN_DESC_SWAP_OPERANDS },
15570 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
15571 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
15572 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
15573 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
15574 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
15575 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
15576 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
15577 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
15578 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
15579 BUILTIN_DESC_SWAP_OPERANDS },
15580 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
15581 BUILTIN_DESC_SWAP_OPERANDS },
15582 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
15583
15584 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
15585 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
15586 { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
15587 { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
15588
15589 { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
15590 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
15591 { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
15592 { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
15593
15594 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
15595 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
15596 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
15597 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
15598 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
15599
15600 /* MMX */
15601 { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
15602 { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
15603 { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
15604 { MASK_SSE2, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
15605 { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
15606 { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
15607 { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
15608 { MASK_SSE2, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
15609
15610 { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
15611 { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
15612 { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
15613 { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
15614 { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
15615 { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
15616 { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
15617 { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
15618
15619 { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
15620 { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
15621 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
15622
15623 { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
15624 { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
15625 { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
15626 { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
15627
15628 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
15629 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
15630
15631 { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
15632 { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
15633 { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
15634 { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
15635 { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
15636 { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
15637
15638 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
15639 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
15640 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
15641 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
15642
15643 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
15644 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
15645 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
15646 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
15647 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
15648 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
15649
15650 /* Special. */
15651 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
15652 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
15653 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
15654
15655 { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
15656 { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
15657 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
15658
15659 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
15660 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
15661 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
15662 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
15663 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
15664 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
15665
15666 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
15667 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
15668 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
15669 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
15670 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
15671 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
15672
15673 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
15674 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
15675 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
15676 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
15677
15678 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
15679 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
15680
15681 /* SSE2 */
15682 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
15683 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
15684 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
15685 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
15686 { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
15687 { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
15688 { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
15689 { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
15690
15691 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
15692 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
15693 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
15694 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
15695 BUILTIN_DESC_SWAP_OPERANDS },
15696 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
15697 BUILTIN_DESC_SWAP_OPERANDS },
15698 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
15699 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
15700 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
15701 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
15702 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
15703 BUILTIN_DESC_SWAP_OPERANDS },
15704 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
15705 BUILTIN_DESC_SWAP_OPERANDS },
15706 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
15707 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
15708 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
15709 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
15710 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
15711 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
15712 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
15713 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
15714 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
15715
15716 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
15717 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
15718 { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
15719 { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
15720
15721 { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
15722 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
15723 { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
15724 { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
15725
15726 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
15727 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
15728 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
15729
15730 /* SSE2 MMX */
15731 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
15732 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
15733 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
15734 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
15735 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
15736 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
15737 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
15738 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
15739
15740 { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
15741 { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
15742 { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
15743 { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
15744 { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
15745 { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
15746 { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
15747 { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
15748
15749 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
15750 { MASK_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
15751
15752 { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
15753 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
15754 { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
15755 { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
15756
15757 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
15758 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
15759
15760 { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
15761 { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
15762 { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
15763 { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
15764 { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
15765 { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
15766
15767 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
15768 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
15769 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
15770 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
15771
15772 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
15773 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
15774 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
15775 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
15776 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
15777 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
15778 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
15779 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
15780
15781 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
15782 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
15783 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
15784
15785 { MASK_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
15786 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
15787
15788 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
15789 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
15790
15791 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
15792 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
15793 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
15794
15795 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
15796 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
15797 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
15798
15799 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
15800 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
15801
15802 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
15803
15804 { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
15805 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
15806 { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
15807 { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
15808
15809 /* SSE3 MMX */
15810 { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
15811 { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
15812 { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
15813 { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
15814 { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
15815 { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 },
15816
15817 /* SSSE3 */
15818 { MASK_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, 0, 0 },
15819 { MASK_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, 0, 0 },
15820 { MASK_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, 0, 0 },
15821 { MASK_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, 0, 0 },
15822 { MASK_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, 0, 0 },
15823 { MASK_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, 0, 0 },
15824 { MASK_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, 0, 0 },
15825 { MASK_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, 0, 0 },
15826 { MASK_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, 0, 0 },
15827 { MASK_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, 0, 0 },
15828 { MASK_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, 0, 0 },
15829 { MASK_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, 0, 0 },
15830 { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv8hi3, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, 0, 0 },
15831 { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv4hi3, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, 0, 0 },
15832 { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, 0, 0 },
15833 { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, 0, 0 },
15834 { MASK_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, 0, 0 },
15835 { MASK_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, 0, 0 },
15836 { MASK_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, 0, 0 },
15837 { MASK_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, 0, 0 },
15838 { MASK_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, 0, 0 },
15839 { MASK_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, 0, 0 },
15840 { MASK_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, 0, 0 },
15841 { MASK_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, 0, 0 }
15842 };
15843
15844 static const struct builtin_description bdesc_1arg[] =
15845 {
15846 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
15847 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
15848
15849 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
15850 { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
15851 { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
15852
15853 { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
15854 { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
15855 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
15856 { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
15857 { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
15858 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
15859
15860 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
15861 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
15862
15863 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
15864
15865 { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
15866 { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
15867
15868 { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
15869 { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
15870 { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
15871 { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
15872 { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
15873
15874 { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
15875
15876 { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
15877 { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
15878 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
15879 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
15880
15881 { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
15882 { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
15883 { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
15884
15885 /* SSE3 */
15886 { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
15887 { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
15888
15889 /* SSSE3 */
15890 { MASK_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, 0, 0 },
15891 { MASK_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, 0, 0 },
15892 { MASK_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, 0, 0 },
15893 { MASK_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, 0, 0 },
15894 { MASK_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, 0, 0 },
15895 { MASK_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, 0, 0 },
15896 };
15897
15898 static void
15899 ix86_init_builtins (void)
15900 {
15901 if (TARGET_MMX)
15902 ix86_init_mmx_sse_builtins ();
15903 }
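
/* A note for readers of the tables above: each bdesc_* entry is gated by its
   mask field, so an entry such as

     { MASK_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128",
       IX86_BUILTIN_PABSD128, 0, 0 }

   is only registered as a user-visible builtin when the corresponding ISA
   flag (-mssse3 here) is enabled; def_builtin skips entries whose mask bits
   are not set in target_flags, and MASK_64BIT entries additionally require
   TARGET_64BIT.  */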
15904
15905 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
15906 is zero. Otherwise, if TARGET_SSE is not set, only the MMX
15907 builtins are defined. */
15908 static void
15909 ix86_init_mmx_sse_builtins (void)
15910 {
15911 const struct builtin_description * d;
15912 size_t i;
15913
15914 tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
15915 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
15916 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
15917 tree V2DI_type_node
15918 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
15919 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
15920 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
15921 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
15922 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
15923 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
15924 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
15925
15926 tree pchar_type_node = build_pointer_type (char_type_node);
15927 tree pcchar_type_node = build_pointer_type (
15928 build_type_variant (char_type_node, 1, 0));
15929 tree pfloat_type_node = build_pointer_type (float_type_node);
15930 tree pcfloat_type_node = build_pointer_type (
15931 build_type_variant (float_type_node, 1, 0));
15932 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
15933 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
15934 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
15935
15936 /* Comparisons. */
15937 tree int_ftype_v4sf_v4sf
15938 = build_function_type_list (integer_type_node,
15939 V4SF_type_node, V4SF_type_node, NULL_TREE);
15940 tree v4si_ftype_v4sf_v4sf
15941 = build_function_type_list (V4SI_type_node,
15942 V4SF_type_node, V4SF_type_node, NULL_TREE);
15943 /* MMX/SSE/integer conversions. */
15944 tree int_ftype_v4sf
15945 = build_function_type_list (integer_type_node,
15946 V4SF_type_node, NULL_TREE);
15947 tree int64_ftype_v4sf
15948 = build_function_type_list (long_long_integer_type_node,
15949 V4SF_type_node, NULL_TREE);
15950 tree int_ftype_v8qi
15951 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
15952 tree v4sf_ftype_v4sf_int
15953 = build_function_type_list (V4SF_type_node,
15954 V4SF_type_node, integer_type_node, NULL_TREE);
15955 tree v4sf_ftype_v4sf_int64
15956 = build_function_type_list (V4SF_type_node,
15957 V4SF_type_node, long_long_integer_type_node,
15958 NULL_TREE);
15959 tree v4sf_ftype_v4sf_v2si
15960 = build_function_type_list (V4SF_type_node,
15961 V4SF_type_node, V2SI_type_node, NULL_TREE);
15962
15963 /* Miscellaneous. */
15964 tree v8qi_ftype_v4hi_v4hi
15965 = build_function_type_list (V8QI_type_node,
15966 V4HI_type_node, V4HI_type_node, NULL_TREE);
15967 tree v4hi_ftype_v2si_v2si
15968 = build_function_type_list (V4HI_type_node,
15969 V2SI_type_node, V2SI_type_node, NULL_TREE);
15970 tree v4sf_ftype_v4sf_v4sf_int
15971 = build_function_type_list (V4SF_type_node,
15972 V4SF_type_node, V4SF_type_node,
15973 integer_type_node, NULL_TREE);
15974 tree v2si_ftype_v4hi_v4hi
15975 = build_function_type_list (V2SI_type_node,
15976 V4HI_type_node, V4HI_type_node, NULL_TREE);
15977 tree v4hi_ftype_v4hi_int
15978 = build_function_type_list (V4HI_type_node,
15979 V4HI_type_node, integer_type_node, NULL_TREE);
15980 tree v4hi_ftype_v4hi_di
15981 = build_function_type_list (V4HI_type_node,
15982 V4HI_type_node, long_long_unsigned_type_node,
15983 NULL_TREE);
15984 tree v2si_ftype_v2si_di
15985 = build_function_type_list (V2SI_type_node,
15986 V2SI_type_node, long_long_unsigned_type_node,
15987 NULL_TREE);
15988 tree void_ftype_void
15989 = build_function_type (void_type_node, void_list_node);
15990 tree void_ftype_unsigned
15991 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
15992 tree void_ftype_unsigned_unsigned
15993 = build_function_type_list (void_type_node, unsigned_type_node,
15994 unsigned_type_node, NULL_TREE);
15995 tree void_ftype_pcvoid_unsigned_unsigned
15996 = build_function_type_list (void_type_node, const_ptr_type_node,
15997 unsigned_type_node, unsigned_type_node,
15998 NULL_TREE);
15999 tree unsigned_ftype_void
16000 = build_function_type (unsigned_type_node, void_list_node);
16001 tree v2si_ftype_v4sf
16002 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
16003 /* Loads/stores. */
16004 tree void_ftype_v8qi_v8qi_pchar
16005 = build_function_type_list (void_type_node,
16006 V8QI_type_node, V8QI_type_node,
16007 pchar_type_node, NULL_TREE);
16008 tree v4sf_ftype_pcfloat
16009 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
16010 /* @@@ the type is bogus */
16011 tree v4sf_ftype_v4sf_pv2si
16012 = build_function_type_list (V4SF_type_node,
16013 V4SF_type_node, pv2si_type_node, NULL_TREE);
16014 tree void_ftype_pv2si_v4sf
16015 = build_function_type_list (void_type_node,
16016 pv2si_type_node, V4SF_type_node, NULL_TREE);
16017 tree void_ftype_pfloat_v4sf
16018 = build_function_type_list (void_type_node,
16019 pfloat_type_node, V4SF_type_node, NULL_TREE);
16020 tree void_ftype_pdi_di
16021 = build_function_type_list (void_type_node,
16022 pdi_type_node, long_long_unsigned_type_node,
16023 NULL_TREE);
16024 tree void_ftype_pv2di_v2di
16025 = build_function_type_list (void_type_node,
16026 pv2di_type_node, V2DI_type_node, NULL_TREE);
16027 /* Normal vector unops. */
16028 tree v4sf_ftype_v4sf
16029 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16030 tree v16qi_ftype_v16qi
16031 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16032 tree v8hi_ftype_v8hi
16033 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16034 tree v4si_ftype_v4si
16035 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16036 tree v8qi_ftype_v8qi
16037 = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
16038 tree v4hi_ftype_v4hi
16039 = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
16040
16041 /* Normal vector binops. */
16042 tree v4sf_ftype_v4sf_v4sf
16043 = build_function_type_list (V4SF_type_node,
16044 V4SF_type_node, V4SF_type_node, NULL_TREE);
16045 tree v8qi_ftype_v8qi_v8qi
16046 = build_function_type_list (V8QI_type_node,
16047 V8QI_type_node, V8QI_type_node, NULL_TREE);
16048 tree v4hi_ftype_v4hi_v4hi
16049 = build_function_type_list (V4HI_type_node,
16050 V4HI_type_node, V4HI_type_node, NULL_TREE);
16051 tree v2si_ftype_v2si_v2si
16052 = build_function_type_list (V2SI_type_node,
16053 V2SI_type_node, V2SI_type_node, NULL_TREE);
16054 tree di_ftype_di_di
16055 = build_function_type_list (long_long_unsigned_type_node,
16056 long_long_unsigned_type_node,
16057 long_long_unsigned_type_node, NULL_TREE);
16058
16059 tree di_ftype_di_di_int
16060 = build_function_type_list (long_long_unsigned_type_node,
16061 long_long_unsigned_type_node,
16062 long_long_unsigned_type_node,
16063 integer_type_node, NULL_TREE);
16064
16065 tree v2si_ftype_v2sf
16066 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
16067 tree v2sf_ftype_v2si
16068 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
16069 tree v2si_ftype_v2si
16070 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
16071 tree v2sf_ftype_v2sf
16072 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
16073 tree v2sf_ftype_v2sf_v2sf
16074 = build_function_type_list (V2SF_type_node,
16075 V2SF_type_node, V2SF_type_node, NULL_TREE);
16076 tree v2si_ftype_v2sf_v2sf
16077 = build_function_type_list (V2SI_type_node,
16078 V2SF_type_node, V2SF_type_node, NULL_TREE);
16079 tree pint_type_node = build_pointer_type (integer_type_node);
16080 tree pdouble_type_node = build_pointer_type (double_type_node);
16081 tree pcdouble_type_node = build_pointer_type (
16082 build_type_variant (double_type_node, 1, 0));
16083 tree int_ftype_v2df_v2df
16084 = build_function_type_list (integer_type_node,
16085 V2DF_type_node, V2DF_type_node, NULL_TREE);
16086
16087 tree void_ftype_pcvoid
16088 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
16089 tree v4sf_ftype_v4si
16090 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
16091 tree v4si_ftype_v4sf
16092 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
16093 tree v2df_ftype_v4si
16094 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
16095 tree v4si_ftype_v2df
16096 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
16097 tree v2si_ftype_v2df
16098 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
16099 tree v4sf_ftype_v2df
16100 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
16101 tree v2df_ftype_v2si
16102 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
16103 tree v2df_ftype_v4sf
16104 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
16105 tree int_ftype_v2df
16106 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
16107 tree int64_ftype_v2df
16108 = build_function_type_list (long_long_integer_type_node,
16109 V2DF_type_node, NULL_TREE);
16110 tree v2df_ftype_v2df_int
16111 = build_function_type_list (V2DF_type_node,
16112 V2DF_type_node, integer_type_node, NULL_TREE);
16113 tree v2df_ftype_v2df_int64
16114 = build_function_type_list (V2DF_type_node,
16115 V2DF_type_node, long_long_integer_type_node,
16116 NULL_TREE);
16117 tree v4sf_ftype_v4sf_v2df
16118 = build_function_type_list (V4SF_type_node,
16119 V4SF_type_node, V2DF_type_node, NULL_TREE);
16120 tree v2df_ftype_v2df_v4sf
16121 = build_function_type_list (V2DF_type_node,
16122 V2DF_type_node, V4SF_type_node, NULL_TREE);
16123 tree v2df_ftype_v2df_v2df_int
16124 = build_function_type_list (V2DF_type_node,
16125 V2DF_type_node, V2DF_type_node,
16126 integer_type_node,
16127 NULL_TREE);
16128 tree v2df_ftype_v2df_pcdouble
16129 = build_function_type_list (V2DF_type_node,
16130 V2DF_type_node, pcdouble_type_node, NULL_TREE);
16131 tree void_ftype_pdouble_v2df
16132 = build_function_type_list (void_type_node,
16133 pdouble_type_node, V2DF_type_node, NULL_TREE);
16134 tree void_ftype_pint_int
16135 = build_function_type_list (void_type_node,
16136 pint_type_node, integer_type_node, NULL_TREE);
16137 tree void_ftype_v16qi_v16qi_pchar
16138 = build_function_type_list (void_type_node,
16139 V16QI_type_node, V16QI_type_node,
16140 pchar_type_node, NULL_TREE);
16141 tree v2df_ftype_pcdouble
16142 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
16143 tree v2df_ftype_v2df_v2df
16144 = build_function_type_list (V2DF_type_node,
16145 V2DF_type_node, V2DF_type_node, NULL_TREE);
16146 tree v16qi_ftype_v16qi_v16qi
16147 = build_function_type_list (V16QI_type_node,
16148 V16QI_type_node, V16QI_type_node, NULL_TREE);
16149 tree v8hi_ftype_v8hi_v8hi
16150 = build_function_type_list (V8HI_type_node,
16151 V8HI_type_node, V8HI_type_node, NULL_TREE);
16152 tree v4si_ftype_v4si_v4si
16153 = build_function_type_list (V4SI_type_node,
16154 V4SI_type_node, V4SI_type_node, NULL_TREE);
16155 tree v2di_ftype_v2di_v2di
16156 = build_function_type_list (V2DI_type_node,
16157 V2DI_type_node, V2DI_type_node, NULL_TREE);
16158 tree v2di_ftype_v2df_v2df
16159 = build_function_type_list (V2DI_type_node,
16160 V2DF_type_node, V2DF_type_node, NULL_TREE);
16161 tree v2df_ftype_v2df
16162 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16163 tree v2di_ftype_v2di_int
16164 = build_function_type_list (V2DI_type_node,
16165 V2DI_type_node, integer_type_node, NULL_TREE);
16166 tree v2di_ftype_v2di_v2di_int
16167 = build_function_type_list (V2DI_type_node, V2DI_type_node,
16168 V2DI_type_node, integer_type_node, NULL_TREE);
16169 tree v4si_ftype_v4si_int
16170 = build_function_type_list (V4SI_type_node,
16171 V4SI_type_node, integer_type_node, NULL_TREE);
16172 tree v8hi_ftype_v8hi_int
16173 = build_function_type_list (V8HI_type_node,
16174 V8HI_type_node, integer_type_node, NULL_TREE);
16175 tree v8hi_ftype_v8hi_v2di
16176 = build_function_type_list (V8HI_type_node,
16177 V8HI_type_node, V2DI_type_node, NULL_TREE);
16178 tree v4si_ftype_v4si_v2di
16179 = build_function_type_list (V4SI_type_node,
16180 V4SI_type_node, V2DI_type_node, NULL_TREE);
16181 tree v4si_ftype_v8hi_v8hi
16182 = build_function_type_list (V4SI_type_node,
16183 V8HI_type_node, V8HI_type_node, NULL_TREE);
16184 tree di_ftype_v8qi_v8qi
16185 = build_function_type_list (long_long_unsigned_type_node,
16186 V8QI_type_node, V8QI_type_node, NULL_TREE);
16187 tree di_ftype_v2si_v2si
16188 = build_function_type_list (long_long_unsigned_type_node,
16189 V2SI_type_node, V2SI_type_node, NULL_TREE);
16190 tree v2di_ftype_v16qi_v16qi
16191 = build_function_type_list (V2DI_type_node,
16192 V16QI_type_node, V16QI_type_node, NULL_TREE);
16193 tree v2di_ftype_v4si_v4si
16194 = build_function_type_list (V2DI_type_node,
16195 V4SI_type_node, V4SI_type_node, NULL_TREE);
16196 tree int_ftype_v16qi
16197 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
16198 tree v16qi_ftype_pcchar
16199 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
16200 tree void_ftype_pchar_v16qi
16201 = build_function_type_list (void_type_node,
16202 pchar_type_node, V16QI_type_node, NULL_TREE);
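
/* Naming convention for the nodes above: a name such as v4sf_ftype_v4sf_int
   describes the function type "V4SF f (V4SF, int)"; build_function_type_list
   takes the return type first, then the argument types, terminated by
   NULL_TREE.  */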
16203
16204 tree float80_type;
16205 tree float128_type;
16206 tree ftype;
16207
16208 /* The __float80 type. */
16209 if (TYPE_MODE (long_double_type_node) == XFmode)
16210 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
16211 "__float80");
16212 else
16213 {
16214 /* long double is not XFmode here, so lay out a separate 80-bit type. */
16215 float80_type = make_node (REAL_TYPE);
16216 TYPE_PRECISION (float80_type) = 80;
16217 layout_type (float80_type);
16218 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
16219 }
16220
16221 if (TARGET_64BIT)
16222 {
16223 float128_type = make_node (REAL_TYPE);
16224 TYPE_PRECISION (float128_type) = 128;
16225 layout_type (float128_type);
16226 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
16227 }
16228
16229 /* Add all builtins that are more or less simple operations on two
16230 operands. */
16231 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16232 {
16233 /* Use one of the operands; the target can have a different mode for
16234 mask-generating compares. */
16235 enum machine_mode mode;
16236 tree type;
16237
16238 if (d->name == 0)
16239 continue;
16240 mode = insn_data[d->icode].operand[1].mode;
16241
16242 switch (mode)
16243 {
16244 case V16QImode:
16245 type = v16qi_ftype_v16qi_v16qi;
16246 break;
16247 case V8HImode:
16248 type = v8hi_ftype_v8hi_v8hi;
16249 break;
16250 case V4SImode:
16251 type = v4si_ftype_v4si_v4si;
16252 break;
16253 case V2DImode:
16254 type = v2di_ftype_v2di_v2di;
16255 break;
16256 case V2DFmode:
16257 type = v2df_ftype_v2df_v2df;
16258 break;
16259 case V4SFmode:
16260 type = v4sf_ftype_v4sf_v4sf;
16261 break;
16262 case V8QImode:
16263 type = v8qi_ftype_v8qi_v8qi;
16264 break;
16265 case V4HImode:
16266 type = v4hi_ftype_v4hi_v4hi;
16267 break;
16268 case V2SImode:
16269 type = v2si_ftype_v2si_v2si;
16270 break;
16271 case DImode:
16272 type = di_ftype_di_di;
16273 break;
16274
16275 default:
16276 gcc_unreachable ();
16277 }
16278
16279 /* Override for comparisons. */
16280 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
16281 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
16282 type = v4si_ftype_v4sf_v4sf;
16283
16284 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
16285 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
16286 type = v2di_ftype_v2df_v2df;
16287
16288 def_builtin (d->mask, d->name, type, d->code);
16289 }
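
/* The override above reflects that the SSE/SSE2 mask-compare patterns
   produce an all-ones/all-zeros integer mask per element rather than a
   float vector, so a V4SF compare builtin (for example
   __builtin_ia32_cmpeqps, assuming it is one of the
   CODE_FOR_sse_maskcmpv4sf3 entries in bdesc_2arg) is registered as
   v4si_ftype_v4sf_v4sf instead of v4sf_ftype_v4sf_v4sf.  */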
16290
16291 /* Add all builtins that are more or less simple operations on 1 operand. */
16292 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16293 {
16294 enum machine_mode mode;
16295 tree type;
16296
16297 if (d->name == 0)
16298 continue;
16299 mode = insn_data[d->icode].operand[1].mode;
16300
16301 switch (mode)
16302 {
16303 case V16QImode:
16304 type = v16qi_ftype_v16qi;
16305 break;
16306 case V8HImode:
16307 type = v8hi_ftype_v8hi;
16308 break;
16309 case V4SImode:
16310 type = v4si_ftype_v4si;
16311 break;
16312 case V2DFmode:
16313 type = v2df_ftype_v2df;
16314 break;
16315 case V4SFmode:
16316 type = v4sf_ftype_v4sf;
16317 break;
16318 case V8QImode:
16319 type = v8qi_ftype_v8qi;
16320 break;
16321 case V4HImode:
16322 type = v4hi_ftype_v4hi;
16323 break;
16324 case V2SImode:
16325 type = v2si_ftype_v2si;
16326 break;
16327
16328 default:
16329 gcc_unreachable ();
16330 }
16331
16332 def_builtin (d->mask, d->name, type, d->code);
16333 }
16334
16335 /* Add the remaining MMX insns with somewhat more complicated types. */
16336 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
16337 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
16338 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
16339 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
16340
16341 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
16342 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
16343 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
16344
16345 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
16346 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
16347
16348 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
16349 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
16350
16351 /* comi/ucomi insns. */
16352 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
16353 if (d->mask == MASK_SSE2)
16354 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
16355 else
16356 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
16357
16358 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
16359 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
16360 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
16361
16362 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
16363 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
16364 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
16365 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
16366 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
16367 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
16368 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
16369 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
16370 def_builtin_const (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
16371 def_builtin_const (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
16372 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
16373
16374 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
16375
16376 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
16377 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
16378
16379 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
16380 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
16381 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
16382 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
16383
16384 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
16385 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
16386 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
16387 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
16388
16389 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
16390
16391 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
16392
16393 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
16394 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
16395 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
16396 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
16397 def_builtin_const (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
16398 def_builtin_const (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
16399
16400 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
16401
16402 /* Original 3DNow! */
16403 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
16404 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
16405 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
16406 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
16407 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
16408 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
16409 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
16410 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
16411 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
16412 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
16413 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
16414 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
16415 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
16416 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
16417 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
16418 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
16419 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
16420 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
16421 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
16422 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
16423
16424 /* 3DNow! extension as used in the Athlon CPU. */
16425 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
16426 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
16427 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
16428 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
16429 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
16430 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
16431
16432 /* SSE2 */
16433 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
16434
16435 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
16436 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
16437
16438 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
16439 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
16440
16441 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
16442 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
16443 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
16444 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
16445 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
16446
16447 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
16448 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
16449 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
16450 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
16451
16452 def_builtin_const (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
16453 def_builtin_const (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
16454
16455 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
16456
16457 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
16458 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
16459
16460 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
16461 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
16462 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
16463 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
16464 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
16465
16466 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
16467
16468 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
16469 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
16470 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
16471 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
16472
16473 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
16474 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
16475 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
16476
16477 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
16478 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
16479 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
16480 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
16481
16482 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
16483 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
16484 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
16485
16486 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
16487 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
16488
16489 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
16490 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
16491
16492 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
16493 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
16494 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
16495
16496 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
16497 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
16498 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
16499
16500 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
16501 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
16502
16503 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
16504 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
16505 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
16506 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
16507
16508 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
16509 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
16510 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
16511 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
16512
16513 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
16514 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
16515
16516 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
16517
16518 /* Prescott New Instructions. */
16519 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
16520 void_ftype_pcvoid_unsigned_unsigned,
16521 IX86_BUILTIN_MONITOR);
16522 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
16523 void_ftype_unsigned_unsigned,
16524 IX86_BUILTIN_MWAIT);
16525 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
16526 v4sf_ftype_v4sf,
16527 IX86_BUILTIN_MOVSHDUP);
16528 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
16529 v4sf_ftype_v4sf,
16530 IX86_BUILTIN_MOVSLDUP);
16531 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
16532 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
16533
16534 /* SSSE3. */
16535 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr128",
16536 v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PALIGNR128);
16537 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr", di_ftype_di_di_int,
16538 IX86_BUILTIN_PALIGNR);
16539
16540 /* Access to the vec_init patterns. */
16541 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
16542 integer_type_node, NULL_TREE);
16543 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
16544 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
16545
16546 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
16547 short_integer_type_node,
16548 short_integer_type_node,
16549 short_integer_type_node, NULL_TREE);
16550 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
16551 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
16552
16553 ftype = build_function_type_list (V8QI_type_node, char_type_node,
16554 char_type_node, char_type_node,
16555 char_type_node, char_type_node,
16556 char_type_node, char_type_node,
16557 char_type_node, NULL_TREE);
16558 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
16559 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
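
/* These vec_init builtins are the hooks the intrinsic headers use in place
   of MMX vector initializers; a rough usage sketch (the real wrappers, with
   their exact argument order and casts, live in mmintrin.h) is

     __m64 v = (__m64) __builtin_ia32_vec_init_v2si (a, b);  */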
16560
16561 /* Access to the vec_extract patterns. */
16562 ftype = build_function_type_list (double_type_node, V2DF_type_node,
16563 integer_type_node, NULL_TREE);
16564 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
16565 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
16566
16567 ftype = build_function_type_list (long_long_integer_type_node,
16568 V2DI_type_node, integer_type_node,
16569 NULL_TREE);
16570 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
16571 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
16572
16573 ftype = build_function_type_list (float_type_node, V4SF_type_node,
16574 integer_type_node, NULL_TREE);
16575 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
16576 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
16577
16578 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
16579 integer_type_node, NULL_TREE);
16580 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
16581 ftype, IX86_BUILTIN_VEC_EXT_V4SI);
16582
16583 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
16584 integer_type_node, NULL_TREE);
16585 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
16586 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
16587
16588 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
16589 integer_type_node, NULL_TREE);
16590 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
16591 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
16592
16593 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
16594 integer_type_node, NULL_TREE);
16595 def_builtin (MASK_MMX, "__builtin_ia32_vec_ext_v2si",
16596 ftype, IX86_BUILTIN_VEC_EXT_V2SI);
16597
16598 /* Access to the vec_set patterns. */
16599 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
16600 intHI_type_node,
16601 integer_type_node, NULL_TREE);
16602 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
16603 ftype, IX86_BUILTIN_VEC_SET_V8HI);
16604
16605 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
16606 intHI_type_node,
16607 integer_type_node, NULL_TREE);
16608 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
16609 ftype, IX86_BUILTIN_VEC_SET_V4HI);
16610 }
16611
16612 /* Errors in the source file can cause expand_expr to return const0_rtx
16613 where we expect a vector. To avoid crashing, substitute a zero vector
16614 constant (CONST0_RTX) of the expected mode. */
16615 static rtx
16616 safe_vector_operand (rtx x, enum machine_mode mode)
16617 {
16618 if (x == const0_rtx)
16619 x = CONST0_RTX (mode);
16620 return x;
16621 }
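
/* For example, if an error in the source left an operand as const0_rtx where
   a V4SF value was expected, the substitution above yields
   CONST0_RTX (V4SFmode), an all-zero CONST_VECTOR that can be forced into a
   vector register, unlike a bare const_int 0.  */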
16622
16623 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
16624
16625 static rtx
16626 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
16627 {
16628 rtx pat, xops[3];
16629 tree arg0 = TREE_VALUE (arglist);
16630 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16631 rtx op0 = expand_normal (arg0);
16632 rtx op1 = expand_normal (arg1);
16633 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16634 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16635 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
16636
16637 if (VECTOR_MODE_P (mode0))
16638 op0 = safe_vector_operand (op0, mode0);
16639 if (VECTOR_MODE_P (mode1))
16640 op1 = safe_vector_operand (op1, mode1);
16641
16642 if (optimize || !target
16643 || GET_MODE (target) != tmode
16644 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16645 target = gen_reg_rtx (tmode);
16646
16647 if (GET_MODE (op1) == SImode && mode1 == TImode)
16648 {
16649 rtx x = gen_reg_rtx (V4SImode);
16650 emit_insn (gen_sse2_loadd (x, op1));
16651 op1 = gen_lowpart (TImode, x);
16652 }
16653
16654 /* The expanded operands must already be in the modes the insn
16655 expects (constants may still carry VOIDmode). */
16656 gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
16657 && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));
16658
16659 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
16660 op0 = copy_to_mode_reg (mode0, op0);
16661 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
16662 op1 = copy_to_mode_reg (mode1, op1);
16663
16664 /* ??? Using ix86_fixup_binary_operands is problematic when
16665 we've got mismatched modes. Fake it. */
16666
16667 xops[0] = target;
16668 xops[1] = op0;
16669 xops[2] = op1;
16670
16671 if (tmode == mode0 && tmode == mode1)
16672 {
16673 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
16674 op0 = xops[1];
16675 op1 = xops[2];
16676 }
16677 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
16678 {
16679 op0 = force_reg (mode0, op0);
16680 op1 = force_reg (mode1, op1);
16681 target = gen_reg_rtx (tmode);
16682 }
16683
16684 pat = GEN_FCN (icode) (target, op0, op1);
16685 if (! pat)
16686 return 0;
16687 emit_insn (pat);
16688 return target;
16689 }
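
/* A rough sketch of the common case above: for a plain two-operand builtin
   such as __builtin_ia32_addps, tmode, mode0 and mode1 are all V4SFmode, so
   the ix86_fixup_binary_operands path is taken and the expansion is
   essentially

     target = gen_reg_rtx (V4SFmode);
     emit_insn (GEN_FCN (icode) (target, op0, op1));

   The SImode/TImode special case covers builtins whose insn pattern wants a
   TImode operand while the user passes a plain int.  */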
16690
16691 /* Subroutine of ix86_expand_builtin to take care of stores. */
16692
16693 static rtx
16694 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
16695 {
16696 rtx pat;
16697 tree arg0 = TREE_VALUE (arglist);
16698 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16699 rtx op0 = expand_normal (arg0);
16700 rtx op1 = expand_normal (arg1);
16701 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
16702 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
16703
16704 if (VECTOR_MODE_P (mode1))
16705 op1 = safe_vector_operand (op1, mode1);
16706
16707 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
16708 op1 = copy_to_mode_reg (mode1, op1);
16709
16710 pat = GEN_FCN (icode) (op0, op1);
16711 if (pat)
16712 emit_insn (pat);
16713 return 0;
16714 }
16715
16716 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
16717
16718 static rtx
16719 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
16720 rtx target, int do_load)
16721 {
16722 rtx pat;
16723 tree arg0 = TREE_VALUE (arglist);
16724 rtx op0 = expand_normal (arg0);
16725 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16726 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16727
16728 if (optimize || !target
16729 || GET_MODE (target) != tmode
16730 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16731 target = gen_reg_rtx (tmode);
16732 if (do_load)
16733 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
16734 else
16735 {
16736 if (VECTOR_MODE_P (mode0))
16737 op0 = safe_vector_operand (op0, mode0);
16738
16739 if ((optimize && !register_operand (op0, mode0))
16740 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16741 op0 = copy_to_mode_reg (mode0, op0);
16742 }
16743
16744 pat = GEN_FCN (icode) (target, op0);
16745 if (! pat)
16746 return 0;
16747 emit_insn (pat);
16748 return target;
16749 }
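
/* DO_LOAD distinguishes value unops from load builtins: when it is nonzero
   the single argument is a pointer and is wrapped in a MEM, as in the
   IX86_BUILTIN_LOADUPS case in ix86_expand_builtin below, which passes
   do_load == 1 for CODE_FOR_sse_movups.  */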
16750
16751 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
16752 sqrtss, rsqrtss, rcpss. */
16753
16754 static rtx
16755 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
16756 {
16757 rtx pat;
16758 tree arg0 = TREE_VALUE (arglist);
16759 rtx op1, op0 = expand_normal (arg0);
16760 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16761 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16762
16763 if (optimize || !target
16764 || GET_MODE (target) != tmode
16765 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16766 target = gen_reg_rtx (tmode);
16767
16768 if (VECTOR_MODE_P (mode0))
16769 op0 = safe_vector_operand (op0, mode0);
16770
16771 if ((optimize && !register_operand (op0, mode0))
16772 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16773 op0 = copy_to_mode_reg (mode0, op0);
16774
16775 op1 = op0;
16776 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
16777 op1 = copy_to_mode_reg (mode0, op1);
16778
16779 pat = GEN_FCN (icode) (target, op0, op1);
16780 if (! pat)
16781 return 0;
16782 emit_insn (pat);
16783 return target;
16784 }
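
/* The duplication of op0 into op1 above matches the scalar vm* patterns,
   which take two vector inputs: one supplies the element being operated on
   and the other supplies the untouched upper elements.  Passing the same
   register twice gives the usual sqrtss/rsqrtss/rcpss semantics, with the
   upper three floats copied from the input (a description of the intended
   behavior, not of the patterns' exact RTL).  */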
16785
16786 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
16787
16788 static rtx
16789 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
16790 rtx target)
16791 {
16792 rtx pat;
16793 tree arg0 = TREE_VALUE (arglist);
16794 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16795 rtx op0 = expand_normal (arg0);
16796 rtx op1 = expand_normal (arg1);
16797 rtx op2;
16798 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
16799 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
16800 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
16801 enum rtx_code comparison = d->comparison;
16802
16803 if (VECTOR_MODE_P (mode0))
16804 op0 = safe_vector_operand (op0, mode0);
16805 if (VECTOR_MODE_P (mode1))
16806 op1 = safe_vector_operand (op1, mode1);
16807
16808 /* Swap operands if we have a comparison that isn't available in
16809 hardware. */
16810 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
16811 {
16812 rtx tmp = gen_reg_rtx (mode1);
16813 emit_move_insn (tmp, op1);
16814 op1 = op0;
16815 op0 = tmp;
16816 }
16817
16818 if (optimize || !target
16819 || GET_MODE (target) != tmode
16820 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
16821 target = gen_reg_rtx (tmode);
16822
16823 if ((optimize && !register_operand (op0, mode0))
16824 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
16825 op0 = copy_to_mode_reg (mode0, op0);
16826 if ((optimize && !register_operand (op1, mode1))
16827 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
16828 op1 = copy_to_mode_reg (mode1, op1);
16829
16830 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
16831 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
16832 if (! pat)
16833 return 0;
16834 emit_insn (pat);
16835 return target;
16836 }
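
/* BUILTIN_DESC_SWAP_OPERANDS is how comparisons with no direct hardware
   encoding are represented: a "greater than" packed compare, for instance,
   is assumed to be listed in bdesc_2arg with comparison LT plus the swap
   flag, so that exchanging op0 and op1 above turns it into the LT form that
   cmpps/cmppd actually provide.  */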
16837
16838 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
16839
16840 static rtx
16841 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
16842 rtx target)
16843 {
16844 rtx pat;
16845 tree arg0 = TREE_VALUE (arglist);
16846 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16847 rtx op0 = expand_normal (arg0);
16848 rtx op1 = expand_normal (arg1);
16849 rtx op2;
16850 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
16851 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
16852 enum rtx_code comparison = d->comparison;
16853
16854 if (VECTOR_MODE_P (mode0))
16855 op0 = safe_vector_operand (op0, mode0);
16856 if (VECTOR_MODE_P (mode1))
16857 op1 = safe_vector_operand (op1, mode1);
16858
16859 /* Swap operands if we have a comparison that isn't available in
16860 hardware. */
16861 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
16862 {
16863 rtx tmp = op1;
16864 op1 = op0;
16865 op0 = tmp;
16866 }
16867
16868 target = gen_reg_rtx (SImode);
16869 emit_move_insn (target, const0_rtx);
16870 target = gen_rtx_SUBREG (QImode, target, 0);
16871
16872 if ((optimize && !register_operand (op0, mode0))
16873 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
16874 op0 = copy_to_mode_reg (mode0, op0);
16875 if ((optimize && !register_operand (op1, mode1))
16876 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
16877 op1 = copy_to_mode_reg (mode1, op1);
16878
16879 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
16880 pat = GEN_FCN (d->icode) (op0, op1);
16881 if (! pat)
16882 return 0;
16883 emit_insn (pat);
16884 emit_insn (gen_rtx_SET (VOIDmode,
16885 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
16886 gen_rtx_fmt_ee (comparison, QImode,
16887 SET_DEST (pat),
16888 const0_rtx)));
16889
16890 return SUBREG_REG (target);
16891 }
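
/* What the sequence above does: emit the comi/ucomi compare, zero an SImode
   pseudo, then set only its low QImode part from the flags comparison
   (the setcc-style RTX built from SET_DEST (pat) and const0_rtx), so the
   builtin yields a 0/1 int; zeroing the full register before the byte write
   is presumably done to avoid a partial-register stall.  */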
16892
16893 /* Return the integer constant in ARG. Constrain it to be in the range
16894 of the subparts of VEC_TYPE; issue an error if not. */
16895
16896 static int
16897 get_element_number (tree vec_type, tree arg)
16898 {
16899 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
16900
16901 if (!host_integerp (arg, 1)
16902 || (elt = tree_low_cst (arg, 1), elt > max))
16903 {
16904 error ("selector must be an integer constant in the range 0..%wi", max);
16905 return 0;
16906 }
16907
16908 return elt;
16909 }
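
/* For example, a V4SF vector type has TYPE_VECTOR_SUBPARTS equal to 4, so a
   call like __builtin_ia32_vec_ext_v4sf (x, 7) is diagnosed with the error
   above and element 0 is used instead, since the function returns 0 on
   error.  */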
16910
16911 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
16912 ix86_expand_vector_init. We DO have language-level syntax for this, in
16913 the form of (type){ init-list }. Except that since we can't place emms
16914 instructions from inside the compiler, we can't allow the use of MMX
16915 registers unless the user explicitly asks for it. So we do *not* define
16916 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
16917 we have builtins, invoked by mmintrin.h, that give us license to emit
16918 these sorts of instructions. */
16919
16920 static rtx
16921 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
16922 {
16923 enum machine_mode tmode = TYPE_MODE (type);
16924 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
16925 int i, n_elt = GET_MODE_NUNITS (tmode);
16926 rtvec v = rtvec_alloc (n_elt);
16927
16928 gcc_assert (VECTOR_MODE_P (tmode));
16929
16930 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
16931 {
16932 rtx x = expand_normal (TREE_VALUE (arglist));
16933 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
16934 }
16935
16936 gcc_assert (arglist == NULL);
16937
16938 if (!target || !register_operand (target, tmode))
16939 target = gen_reg_rtx (tmode);
16940
16941 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
16942 return target;
16943 }
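
/* The intrinsic headers wrap these builtins so that constructors in the
   _mm_set_* family can expand through ix86_expand_vector_init; a rough,
   assumed wrapper shape (see mmintrin.h for the real definitions) is

     return (__m64) __builtin_ia32_vec_init_v2si (lo, hi);

   where the argument order and cast are illustrative only.  */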
16944
16945 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
16946 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
16947 had a language-level syntax for referencing vector elements. */
16948
16949 static rtx
16950 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
16951 {
16952 enum machine_mode tmode, mode0;
16953 tree arg0, arg1;
16954 int elt;
16955 rtx op0;
16956
16957 arg0 = TREE_VALUE (arglist);
16958 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16959
16960 op0 = expand_normal (arg0);
16961 elt = get_element_number (TREE_TYPE (arg0), arg1);
16962
16963 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
16964 mode0 = TYPE_MODE (TREE_TYPE (arg0));
16965 gcc_assert (VECTOR_MODE_P (mode0));
16966
16967 op0 = force_reg (mode0, op0);
16968
16969 if (optimize || !target || !register_operand (target, tmode))
16970 target = gen_reg_rtx (tmode);
16971
16972 ix86_expand_vector_extract (true, target, op0, elt);
16973
16974 return target;
16975 }
16976
16977 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
16978 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
16979 a language-level syntax for referencing vector elements. */
16980
16981 static rtx
16982 ix86_expand_vec_set_builtin (tree arglist)
16983 {
16984 enum machine_mode tmode, mode1;
16985 tree arg0, arg1, arg2;
16986 int elt;
16987 rtx op0, op1;
16988
16989 arg0 = TREE_VALUE (arglist);
16990 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16991 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
16992
16993 tmode = TYPE_MODE (TREE_TYPE (arg0));
16994 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
16995 gcc_assert (VECTOR_MODE_P (tmode));
16996
16997 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
16998 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
16999 elt = get_element_number (TREE_TYPE (arg0), arg2);
17000
17001 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
17002 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
17003
17004 op0 = force_reg (tmode, op0);
17005 op1 = force_reg (mode1, op1);
17006
17007 ix86_expand_vector_set (true, op0, op1, elt);
17008
17009 return op0;
17010 }
17011
17012 /* Expand an expression EXP that calls a built-in function,
17013 with result going to TARGET if that's convenient
17014 (and in mode MODE if that's convenient).
17015 SUBTARGET may be used as the target for computing one of EXP's operands.
17016 IGNORE is nonzero if the value is to be ignored. */
17017
17018 static rtx
17019 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
17020 enum machine_mode mode ATTRIBUTE_UNUSED,
17021 int ignore ATTRIBUTE_UNUSED)
17022 {
17023 const struct builtin_description *d;
17024 size_t i;
17025 enum insn_code icode;
17026 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
17027 tree arglist = TREE_OPERAND (exp, 1);
17028 tree arg0, arg1, arg2;
17029 rtx op0, op1, op2, pat;
17030 enum machine_mode tmode, mode0, mode1, mode2, mode3;
17031 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
17032
17033 switch (fcode)
17034 {
17035 case IX86_BUILTIN_EMMS:
17036 emit_insn (gen_mmx_emms ());
17037 return 0;
17038
17039 case IX86_BUILTIN_SFENCE:
17040 emit_insn (gen_sse_sfence ());
17041 return 0;
17042
17043 case IX86_BUILTIN_MASKMOVQ:
17044 case IX86_BUILTIN_MASKMOVDQU:
17045 icode = (fcode == IX86_BUILTIN_MASKMOVQ
17046 ? CODE_FOR_mmx_maskmovq
17047 : CODE_FOR_sse2_maskmovdqu);
17048 /* Note the arg order is different from the operand order. */
17049 arg1 = TREE_VALUE (arglist);
17050 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
17051 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17052 op0 = expand_normal (arg0);
17053 op1 = expand_normal (arg1);
17054 op2 = expand_normal (arg2);
17055 mode0 = insn_data[icode].operand[0].mode;
17056 mode1 = insn_data[icode].operand[1].mode;
17057 mode2 = insn_data[icode].operand[2].mode;
17058
17059 op0 = force_reg (Pmode, op0);
17060 op0 = gen_rtx_MEM (mode1, op0);
17061
17062 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
17063 op0 = copy_to_mode_reg (mode0, op0);
17064 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
17065 op1 = copy_to_mode_reg (mode1, op1);
17066 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
17067 op2 = copy_to_mode_reg (mode2, op2);
17068 pat = GEN_FCN (icode) (op0, op1, op2);
17069 if (! pat)
17070 return 0;
17071 emit_insn (pat);
17072 return 0;
17073
17074 case IX86_BUILTIN_SQRTSS:
17075 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
17076 case IX86_BUILTIN_RSQRTSS:
17077 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
17078 case IX86_BUILTIN_RCPSS:
17079 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
17080
17081 case IX86_BUILTIN_LOADUPS:
17082 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
17083
17084 case IX86_BUILTIN_STOREUPS:
17085 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
17086
17087 case IX86_BUILTIN_LOADHPS:
17088 case IX86_BUILTIN_LOADLPS:
17089 case IX86_BUILTIN_LOADHPD:
17090 case IX86_BUILTIN_LOADLPD:
17091 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
17092 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
17093 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
17094 : CODE_FOR_sse2_loadlpd);
17095 arg0 = TREE_VALUE (arglist);
17096 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17097 op0 = expand_normal (arg0);
17098 op1 = expand_normal (arg1);
17099 tmode = insn_data[icode].operand[0].mode;
17100 mode0 = insn_data[icode].operand[1].mode;
17101 mode1 = insn_data[icode].operand[2].mode;
17102
17103 op0 = force_reg (mode0, op0);
17104 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
17105 if (optimize || target == 0
17106 || GET_MODE (target) != tmode
17107 || !register_operand (target, tmode))
17108 target = gen_reg_rtx (tmode);
17109 pat = GEN_FCN (icode) (target, op0, op1);
17110 if (! pat)
17111 return 0;
17112 emit_insn (pat);
17113 return target;
17114
17115 case IX86_BUILTIN_STOREHPS:
17116 case IX86_BUILTIN_STORELPS:
17117 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
17118 : CODE_FOR_sse_storelps);
17119 arg0 = TREE_VALUE (arglist);
17120 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17121 op0 = expand_normal (arg0);
17122 op1 = expand_normal (arg1);
17123 mode0 = insn_data[icode].operand[0].mode;
17124 mode1 = insn_data[icode].operand[1].mode;
17125
17126 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
17127 op1 = force_reg (mode1, op1);
17128
17129 pat = GEN_FCN (icode) (op0, op1);
17130 if (! pat)
17131 return 0;
17132 emit_insn (pat);
17133 return const0_rtx;
17134
17135 case IX86_BUILTIN_MOVNTPS:
17136 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
17137 case IX86_BUILTIN_MOVNTQ:
17138 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
17139
17140 case IX86_BUILTIN_LDMXCSR:
17141 op0 = expand_normal (TREE_VALUE (arglist));
17142 target = assign_386_stack_local (SImode, SLOT_TEMP);
17143 emit_move_insn (target, op0);
17144 emit_insn (gen_sse_ldmxcsr (target));
17145 return 0;
17146
17147 case IX86_BUILTIN_STMXCSR:
17148 target = assign_386_stack_local (SImode, SLOT_TEMP);
17149 emit_insn (gen_sse_stmxcsr (target));
17150 return copy_to_mode_reg (SImode, target);
17151
17152 case IX86_BUILTIN_SHUFPS:
17153 case IX86_BUILTIN_SHUFPD:
17154 icode = (fcode == IX86_BUILTIN_SHUFPS
17155 ? CODE_FOR_sse_shufps
17156 : CODE_FOR_sse2_shufpd);
17157 arg0 = TREE_VALUE (arglist);
17158 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17159 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17160 op0 = expand_normal (arg0);
17161 op1 = expand_normal (arg1);
17162 op2 = expand_normal (arg2);
17163 tmode = insn_data[icode].operand[0].mode;
17164 mode0 = insn_data[icode].operand[1].mode;
17165 mode1 = insn_data[icode].operand[2].mode;
17166 mode2 = insn_data[icode].operand[3].mode;
17167
17168 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
17169 op0 = copy_to_mode_reg (mode0, op0);
17170 if ((optimize && !register_operand (op1, mode1))
17171 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
17172 op1 = copy_to_mode_reg (mode1, op1);
17173 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
17174 {
17175 /* @@@ better error message */
17176 error ("mask must be an immediate");
17177 return gen_reg_rtx (tmode);
17178 }
17179 if (optimize || target == 0
17180 || GET_MODE (target) != tmode
17181 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17182 target = gen_reg_rtx (tmode);
17183 pat = GEN_FCN (icode) (target, op0, op1, op2);
17184 if (! pat)
17185 return 0;
17186 emit_insn (pat);
17187 return target;
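      /* A minimal usage sketch, assuming the xmmintrin.h definitions: the
	 shuffle mask must fold to a compile-time constant, e.g.

	     __m128 r = _mm_shuffle_ps (a, b, _MM_SHUFFLE (3, 2, 1, 0));

	 A mask that is only known at run time fails the immediate predicate
	 above, so we emit the "mask must be an immediate" error and hand
	 back a fresh register merely to keep expansion going.  */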
17188
17189 case IX86_BUILTIN_PSHUFW:
17190 case IX86_BUILTIN_PSHUFD:
17191 case IX86_BUILTIN_PSHUFHW:
17192 case IX86_BUILTIN_PSHUFLW:
17193 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
17194 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
17195 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
17196 : CODE_FOR_mmx_pshufw);
17197 arg0 = TREE_VALUE (arglist);
17198 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17199 op0 = expand_normal (arg0);
17200 op1 = expand_normal (arg1);
17201 tmode = insn_data[icode].operand[0].mode;
17202 mode1 = insn_data[icode].operand[1].mode;
17203 mode2 = insn_data[icode].operand[2].mode;
17204
17205 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17206 op0 = copy_to_mode_reg (mode1, op0);
17207 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17208 {
17209 /* @@@ better error message */
17210 error ("mask must be an immediate");
17211 return const0_rtx;
17212 }
17213 if (target == 0
17214 || GET_MODE (target) != tmode
17215 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17216 target = gen_reg_rtx (tmode);
17217 pat = GEN_FCN (icode) (target, op0, op1);
17218 if (! pat)
17219 return 0;
17220 emit_insn (pat);
17221 return target;
17222
17223 case IX86_BUILTIN_PSLLDQI128:
17224 case IX86_BUILTIN_PSRLDQI128:
17225 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
17226 : CODE_FOR_sse2_lshrti3);
17227 arg0 = TREE_VALUE (arglist);
17228 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17229 op0 = expand_normal (arg0);
17230 op1 = expand_normal (arg1);
17231 tmode = insn_data[icode].operand[0].mode;
17232 mode1 = insn_data[icode].operand[1].mode;
17233 mode2 = insn_data[icode].operand[2].mode;
17234
17235 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17236 {
17237 op0 = copy_to_reg (op0);
17238 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
17239 }
17240 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17241 {
17242 error ("shift must be an immediate");
17243 return const0_rtx;
17244 }
17245 target = gen_reg_rtx (V2DImode);
17246 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
17247 if (! pat)
17248 return 0;
17249 emit_insn (pat);
17250 return target;
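      /* Usage sketch, assuming the emmintrin.h wrappers: the whole-register
	 byte shifts require a literal count, e.g.

	     __m128i r = _mm_slli_si128 (x, 4);

	 and _mm_srli_si128 likewise.  A variable count is rejected with
	 "shift must be an immediate".  The shift itself is done in TImode,
	 so the V2DI operand and result are accessed through the subregs
	 seen above.  */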
17251
17252 case IX86_BUILTIN_FEMMS:
17253 emit_insn (gen_mmx_femms ());
17254 return NULL_RTX;
17255
17256 case IX86_BUILTIN_PAVGUSB:
17257 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
17258
17259 case IX86_BUILTIN_PF2ID:
17260 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
17261
17262 case IX86_BUILTIN_PFACC:
17263 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
17264
17265 case IX86_BUILTIN_PFADD:
17266 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
17267
17268 case IX86_BUILTIN_PFCMPEQ:
17269 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
17270
17271 case IX86_BUILTIN_PFCMPGE:
17272 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
17273
17274 case IX86_BUILTIN_PFCMPGT:
17275 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
17276
17277 case IX86_BUILTIN_PFMAX:
17278 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
17279
17280 case IX86_BUILTIN_PFMIN:
17281 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
17282
17283 case IX86_BUILTIN_PFMUL:
17284 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
17285
17286 case IX86_BUILTIN_PFRCP:
17287 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
17288
17289 case IX86_BUILTIN_PFRCPIT1:
17290 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
17291
17292 case IX86_BUILTIN_PFRCPIT2:
17293 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
17294
17295 case IX86_BUILTIN_PFRSQIT1:
17296 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
17297
17298 case IX86_BUILTIN_PFRSQRT:
17299 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
17300
17301 case IX86_BUILTIN_PFSUB:
17302 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
17303
17304 case IX86_BUILTIN_PFSUBR:
17305 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
17306
17307 case IX86_BUILTIN_PI2FD:
17308 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
17309
17310 case IX86_BUILTIN_PMULHRW:
17311 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
17312
17313 case IX86_BUILTIN_PF2IW:
17314 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
17315
17316 case IX86_BUILTIN_PFNACC:
17317 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
17318
17319 case IX86_BUILTIN_PFPNACC:
17320 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
17321
17322 case IX86_BUILTIN_PI2FW:
17323 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
17324
17325 case IX86_BUILTIN_PSWAPDSI:
17326 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
17327
17328 case IX86_BUILTIN_PSWAPDSF:
17329 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
17330
17331 case IX86_BUILTIN_SQRTSD:
17332 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
17333 case IX86_BUILTIN_LOADUPD:
17334 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
17335 case IX86_BUILTIN_STOREUPD:
17336 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
17337
17338 case IX86_BUILTIN_MFENCE:
17339 emit_insn (gen_sse2_mfence ());
17340 return 0;
17341 case IX86_BUILTIN_LFENCE:
17342 emit_insn (gen_sse2_lfence ());
17343 return 0;
17344
17345 case IX86_BUILTIN_CLFLUSH:
17346 arg0 = TREE_VALUE (arglist);
17347 op0 = expand_normal (arg0);
17348 icode = CODE_FOR_sse2_clflush;
17349 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
17350 op0 = copy_to_mode_reg (Pmode, op0);
17351
17352 emit_insn (gen_sse2_clflush (op0));
17353 return 0;
17354
17355 case IX86_BUILTIN_MOVNTPD:
17356 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
17357 case IX86_BUILTIN_MOVNTDQ:
17358 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
17359 case IX86_BUILTIN_MOVNTI:
17360 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
17361
17362 case IX86_BUILTIN_LOADDQU:
17363 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
17364 case IX86_BUILTIN_STOREDQU:
17365 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
17366
17367 case IX86_BUILTIN_MONITOR:
17368 arg0 = TREE_VALUE (arglist);
17369 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17370 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17371 op0 = expand_normal (arg0);
17372 op1 = expand_normal (arg1);
17373 op2 = expand_normal (arg2);
17374 if (!REG_P (op0))
17375 op0 = copy_to_mode_reg (Pmode, op0);
17376 if (!REG_P (op1))
17377 op1 = copy_to_mode_reg (SImode, op1);
17378 if (!REG_P (op2))
17379 op2 = copy_to_mode_reg (SImode, op2);
17380 if (!TARGET_64BIT)
17381 emit_insn (gen_sse3_monitor (op0, op1, op2));
17382 else
17383 emit_insn (gen_sse3_monitor64 (op0, op1, op2));
17384 return 0;
17385
17386 case IX86_BUILTIN_MWAIT:
17387 arg0 = TREE_VALUE (arglist);
17388 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17389 op0 = expand_normal (arg0);
17390 op1 = expand_normal (arg1);
17391 if (!REG_P (op0))
17392 op0 = copy_to_mode_reg (SImode, op0);
17393 if (!REG_P (op1))
17394 op1 = copy_to_mode_reg (SImode, op1);
17395 emit_insn (gen_sse3_mwait (op0, op1));
17396 return 0;
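      /* Usage sketch, assuming the SSE3 wrappers in pmmintrin.h:

	     _mm_monitor (addr, 0, 0);
	     _mm_mwait (0, 0);

	 The address is forced into a Pmode register and the extension/hint
	 words into SImode registers before the monitor/mwait patterns are
	 emitted above.  */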
17397
17398 case IX86_BUILTIN_LDDQU:
17399 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
17400 target, 1);
17401
17402 case IX86_BUILTIN_PALIGNR:
17403 case IX86_BUILTIN_PALIGNR128:
17404 if (fcode == IX86_BUILTIN_PALIGNR)
17405 {
17406 icode = CODE_FOR_ssse3_palignrdi;
17407 mode = DImode;
17408 }
17409 else
17410 {
17411 icode = CODE_FOR_ssse3_palignrti;
17412 mode = V2DImode;
17413 }
17414 arg0 = TREE_VALUE (arglist);
17415 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17416 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17417 op0 = expand_normal (arg0);
17418 op1 = expand_normal (arg1);
17419 op2 = expand_normal (arg2);
17420 tmode = insn_data[icode].operand[0].mode;
17421 mode1 = insn_data[icode].operand[1].mode;
17422 mode2 = insn_data[icode].operand[2].mode;
17423 mode3 = insn_data[icode].operand[3].mode;
17424
17425 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17426 {
17427 op0 = copy_to_reg (op0);
17428 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
17429 }
17430 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17431 {
17432 op1 = copy_to_reg (op1);
17433 op1 = simplify_gen_subreg (mode2, op1, GET_MODE (op1), 0);
17434 }
17435 if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
17436 {
17437 error ("shift must be an immediate");
17438 return const0_rtx;
17439 }
17440 target = gen_reg_rtx (mode);
17441 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, mode, 0),
17442 op0, op1, op2);
17443 if (! pat)
17444 return 0;
17445 emit_insn (pat);
17446 return target;
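      /* Usage sketch, assuming the SSSE3 wrappers in tmmintrin.h: the
	 concatenate-and-shift count must be a literal, e.g.

	     __m128i r = _mm_alignr_epi8 (a, b, 5);

	 Anything else trips the "shift must be an immediate" error.  The MMX
	 variant goes through the DImode pattern and the 128-bit variant
	 through the TImode one, with operands moved through subregs.  */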
17447
17448 case IX86_BUILTIN_VEC_INIT_V2SI:
17449 case IX86_BUILTIN_VEC_INIT_V4HI:
17450 case IX86_BUILTIN_VEC_INIT_V8QI:
17451 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
17452
17453 case IX86_BUILTIN_VEC_EXT_V2DF:
17454 case IX86_BUILTIN_VEC_EXT_V2DI:
17455 case IX86_BUILTIN_VEC_EXT_V4SF:
17456 case IX86_BUILTIN_VEC_EXT_V4SI:
17457 case IX86_BUILTIN_VEC_EXT_V8HI:
17458 case IX86_BUILTIN_VEC_EXT_V2SI:
17459 case IX86_BUILTIN_VEC_EXT_V4HI:
17460 return ix86_expand_vec_ext_builtin (arglist, target);
17461
17462 case IX86_BUILTIN_VEC_SET_V8HI:
17463 case IX86_BUILTIN_VEC_SET_V4HI:
17464 return ix86_expand_vec_set_builtin (arglist);
17465
17466 default:
17467 break;
17468 }
17469
17470 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17471 if (d->code == fcode)
17472 {
17473 /* Compares are treated specially. */
17474 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
17475 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
17476 || d->icode == CODE_FOR_sse2_maskcmpv2df3
17477 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
17478 return ix86_expand_sse_compare (d, arglist, target);
17479
17480 return ix86_expand_binop_builtin (d->icode, arglist, target);
17481 }
17482
17483 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17484 if (d->code == fcode)
17485 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
17486
17487 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
17488 if (d->code == fcode)
17489 return ix86_expand_sse_comi (d, arglist, target);
17490
17491 gcc_unreachable ();
17492 }
17493
17494 /* Returns a function decl for a vectorized version of the builtin function
17495 with builtin function code FN and the result vector type TYPE, or NULL_TREE
17496 if it is not available. */
17497
17498 static tree
17499 ix86_builtin_vectorized_function (enum built_in_function fn, tree type)
17500 {
17501 enum machine_mode el_mode;
17502 int n;
17503
17504 if (TREE_CODE (type) != VECTOR_TYPE)
17505 return NULL_TREE;
17506
17507 el_mode = TYPE_MODE (TREE_TYPE (type));
17508 n = TYPE_VECTOR_SUBPARTS (type);
17509
17510 switch (fn)
17511 {
17512 case BUILT_IN_SQRT:
17513 if (el_mode == DFmode && n == 2)
17514 return ix86_builtins[IX86_BUILTIN_SQRTPD];
17515 return NULL_TREE;
17516
17517 case BUILT_IN_SQRTF:
17518 if (el_mode == SFmode && n == 4)
17519 return ix86_builtins[IX86_BUILTIN_SQRTPS];
17520 return NULL_TREE;
17521
17522 default:
17523 ;
17524 }
17525
17526 return NULL_TREE;
17527 }
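/* A hedged example of what the hook above enables: with -msse2 and the tree
   vectorizer active, a scalar loop such as

       void
       f (double *a, const double *b, int n)
       {
	 int i;
	 for (i = 0; i < n; i++)
	   a[i] = __builtin_sqrt (b[i]);
       }

   presumably queries this hook with FN == BUILT_IN_SQRT and a V2DF vector
   type; since the element mode is DFmode and there are two subparts, the
   decl for IX86_BUILTIN_SQRTPD is returned and the loop body can be turned
   into sqrtpd operations.  */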
17528
17529 /* Store OPERAND to memory after reload is completed. This means
17530 that we can't easily use assign_stack_local. */
17531 rtx
17532 ix86_force_to_memory (enum machine_mode mode, rtx operand)
17533 {
17534 rtx result;
17535
17536 gcc_assert (reload_completed);
17537 if (TARGET_RED_ZONE)
17538 {
17539 result = gen_rtx_MEM (mode,
17540 gen_rtx_PLUS (Pmode,
17541 stack_pointer_rtx,
17542 GEN_INT (-RED_ZONE_SIZE)));
17543 emit_move_insn (result, operand);
17544 }
17545 else if (!TARGET_RED_ZONE && TARGET_64BIT)
17546 {
17547 switch (mode)
17548 {
17549 case HImode:
17550 case SImode:
17551 operand = gen_lowpart (DImode, operand);
17552 /* FALLTHRU */
17553 case DImode:
17554 emit_insn (
17555 gen_rtx_SET (VOIDmode,
17556 gen_rtx_MEM (DImode,
17557 gen_rtx_PRE_DEC (DImode,
17558 stack_pointer_rtx)),
17559 operand));
17560 break;
17561 default:
17562 gcc_unreachable ();
17563 }
17564 result = gen_rtx_MEM (mode, stack_pointer_rtx);
17565 }
17566 else
17567 {
17568 switch (mode)
17569 {
17570 case DImode:
17571 {
17572 rtx operands[2];
17573 split_di (&operand, 1, operands, operands + 1);
17574 emit_insn (
17575 gen_rtx_SET (VOIDmode,
17576 gen_rtx_MEM (SImode,
17577 gen_rtx_PRE_DEC (Pmode,
17578 stack_pointer_rtx)),
17579 operands[1]));
17580 emit_insn (
17581 gen_rtx_SET (VOIDmode,
17582 gen_rtx_MEM (SImode,
17583 gen_rtx_PRE_DEC (Pmode,
17584 stack_pointer_rtx)),
17585 operands[0]));
17586 }
17587 break;
17588 case HImode:
17589 /* Store HImodes as SImodes. */
17590 operand = gen_lowpart (SImode, operand);
17591 /* FALLTHRU */
17592 case SImode:
17593 emit_insn (
17594 gen_rtx_SET (VOIDmode,
17595 gen_rtx_MEM (GET_MODE (operand),
17596 gen_rtx_PRE_DEC (SImode,
17597 stack_pointer_rtx)),
17598 operand));
17599 break;
17600 default:
17601 gcc_unreachable ();
17602 }
17603 result = gen_rtx_MEM (mode, stack_pointer_rtx);
17604 }
17605 return result;
17606 }
17607
17608 /* Free the stack slot used by a previous ix86_force_to_memory. */
17609 void
17610 ix86_free_from_memory (enum machine_mode mode)
17611 {
17612 if (!TARGET_RED_ZONE)
17613 {
17614 int size;
17615
17616 if (mode == DImode || TARGET_64BIT)
17617 size = 8;
17618 else
17619 size = 4;
17620 /* Use LEA to deallocate stack space. In peephole2 it will be converted
17621 to a pop or add instruction if registers are available. */
17622 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
17623 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
17624 GEN_INT (size))));
17625 }
17626 }
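/* A worked sketch of how the two helpers above pair up in 64-bit code
   without a red zone: ix86_force_to_memory pushes a DImode operand with a
   pre-decrement of the stack pointer and returns (mem:DI (reg sp)); once
   the value has been consumed, ix86_free_from_memory emits an lea adding
   the 8 bytes back.  With a red zone the value is simply stored below the
   stack pointer and nothing needs to be released.  */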
17627
17628 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
17629 QImode must go into class Q_REGS.
17630 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
17631 movdf to do mem-to-mem moves through integer regs. */
17632 enum reg_class
17633 ix86_preferred_reload_class (rtx x, enum reg_class class)
17634 {
17635 enum machine_mode mode = GET_MODE (x);
17636
17637 /* We're only allowed to return a subclass of CLASS. Many of the
17638 following checks fail for NO_REGS, so eliminate that early. */
17639 if (class == NO_REGS)
17640 return NO_REGS;
17641
17642 /* All classes can load zeros. */
17643 if (x == CONST0_RTX (mode))
17644 return class;
17645
17646 /* Force constants into memory if we are loading a (nonzero) constant into
17647 an MMX or SSE register. This is because there are no MMX/SSE instructions
17648 to load from a constant. */
17649 if (CONSTANT_P (x)
17650 && (MAYBE_MMX_CLASS_P (class) || MAYBE_SSE_CLASS_P (class)))
17651 return NO_REGS;
17652
17653 /* Prefer SSE regs only, if we can use them for math. */
17654 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
17655 return SSE_CLASS_P (class) ? class : NO_REGS;
17656
17657 /* Floating-point constants need more complex checks. */
17658 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
17659 {
17660 /* General regs can load everything. */
17661 if (reg_class_subset_p (class, GENERAL_REGS))
17662 return class;
17663
17664 /* Floats can load 0 and 1 plus some others. Note that we eliminated
17665 zero above. We only want to wind up preferring 80387 registers if
17666 we plan on doing computation with them. */
17667 if (TARGET_80387
17668 && standard_80387_constant_p (x))
17669 {
17670 /* Limit class to non-sse. */
17671 if (class == FLOAT_SSE_REGS)
17672 return FLOAT_REGS;
17673 if (class == FP_TOP_SSE_REGS)
17674 return FP_TOP_REG;
17675 if (class == FP_SECOND_SSE_REGS)
17676 return FP_SECOND_REG;
17677 if (class == FLOAT_INT_REGS || class == FLOAT_REGS)
17678 return class;
17679 }
17680
17681 return NO_REGS;
17682 }
17683
17684 /* Generally when we see PLUS here, it's the function invariant
17685 (plus soft-fp const_int), which can only be computed into general
17686 regs. */
17687 if (GET_CODE (x) == PLUS)
17688 return reg_class_subset_p (class, GENERAL_REGS) ? class : NO_REGS;
17689
17690 /* QImode constants are easy to load, but non-constant QImode data
17691 must go into Q_REGS. */
17692 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
17693 {
17694 if (reg_class_subset_p (class, Q_REGS))
17695 return class;
17696 if (reg_class_subset_p (Q_REGS, class))
17697 return Q_REGS;
17698 return NO_REGS;
17699 }
17700
17701 return class;
17702 }
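/* Illustrative cases for the function above (a sketch, not an exhaustive
   list): reloading CONST0_RTX keeps whatever class was asked for; a nonzero
   SFmode/DFmode CONST_DOUBLE headed for an MMX or SSE class yields NO_REGS,
   so the constant is spilled to the constant pool instead; and when the x87
   recognizes the constant (e.g. 1.0 via standard_80387_constant_p), the
   mixed SSE/x87 classes are narrowed to their non-SSE parts.  */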
17703
17704 /* Discourage putting floating-point values in SSE registers unless
17705 SSE math is being used, and likewise for the 387 registers. */
17706 enum reg_class
17707 ix86_preferred_output_reload_class (rtx x, enum reg_class class)
17708 {
17709 enum machine_mode mode = GET_MODE (x);
17710
17711 /* Restrict the output reload class to the register bank that we are doing
17712 math on. If we would like not to return a subset of CLASS, reject this
17713 alternative: if reload cannot do this, it will still use its choice. */
17714 mode = GET_MODE (x);
17715 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
17716 return MAYBE_SSE_CLASS_P (class) ? SSE_REGS : NO_REGS;
17717
17718 if (TARGET_80387 && SCALAR_FLOAT_MODE_P (mode))
17719 {
17720 if (class == FP_TOP_SSE_REGS)
17721 return FP_TOP_REG;
17722 else if (class == FP_SECOND_SSE_REGS)
17723 return FP_SECOND_REG;
17724 else
17725 return FLOAT_CLASS_P (class) ? class : NO_REGS;
17726 }
17727
17728 return class;
17729 }
17730
17731 /* If we are copying between general and FP registers, we need a memory
17732 location. The same is true for SSE and MMX registers.
17733
17734 The macro can't work reliably when one of the CLASSES is a class containing
17735 registers from multiple units (SSE, MMX, integer). We avoid this by never
17736 combining those units in a single alternative in the machine description.
17737 Ensure that this constraint holds to avoid unexpected surprises.
17738
17739 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
17740 enforce these sanity checks. */
17741
17742 int
17743 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
17744 enum machine_mode mode, int strict)
17745 {
17746 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
17747 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
17748 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
17749 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
17750 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
17751 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
17752 {
17753 gcc_assert (!strict);
17754 return true;
17755 }
17756
17757 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
17758 return true;
17759
17760 /* ??? This is a lie. We do have moves between mmx/general and between
17761 mmx/sse2. But by saying we need secondary memory we discourage the
17762 register allocator from using the mmx registers unless needed. */
17763 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
17764 return true;
17765
17766 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
17767 {
17768 /* SSE1 doesn't have any direct moves from other classes. */
17769 if (!TARGET_SSE2)
17770 return true;
17771
17772 /* If the target says that inter-unit moves are more expensive
17773 than moving through memory, then don't generate them. */
17774 if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
17775 return true;
17776
17777 /* Between SSE and general, we have moves no larger than word size. */
17778 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
17779 return true;
17780
17781 /* ??? For the cost of one register reformat penalty, we could use
17782 the same instructions to move SFmode and DFmode data, but the
17783 relevant move patterns don't support those alternatives. */
17784 if (mode == SFmode || mode == DFmode)
17785 return true;
17786 }
17787
17788 return false;
17789 }
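/* Example of the function above in action (a sketch): with -m32 and SSE2
   enabled, moving a DFmode value between SSE_REGS and GENERAL_REGS still
   returns true, both because DFmode is wider than the 4-byte word and
   because the SF/DF move patterns deliberately lack the inter-unit
   alternatives, so reload bounces the value through a stack slot.  */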
17790
17791 /* Return true if the registers in CLASS cannot represent the change from
17792 modes FROM to TO. */
17793
17794 bool
17795 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
17796 enum reg_class class)
17797 {
17798 if (from == to)
17799 return false;
17800
17801 /* x87 registers can't do subreg at all, as all values are reformatted
17802 to extended precision. */
17803 if (MAYBE_FLOAT_CLASS_P (class))
17804 return true;
17805
17806 if (MAYBE_SSE_CLASS_P (class) || MAYBE_MMX_CLASS_P (class))
17807 {
17808 /* Vector registers do not support QI or HImode loads. If we don't
17809 disallow a change to these modes, reload will assume it's ok to
17810 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
17811 the vec_dupv4hi pattern. */
17812 if (GET_MODE_SIZE (from) < 4)
17813 return true;
17814
17815 /* Vector registers do not support subreg with nonzero offsets, which
17816 are otherwise valid for integer registers. Since we can't see
17817 whether we have a nonzero offset from here, prohibit all
17818 nonparadoxical subregs changing size. */
17819 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
17820 return true;
17821 }
17822
17823 return false;
17824 }
17825
17826 /* Return the cost of moving data from a register in class CLASS1 to
17827 one in class CLASS2.
17828
17829 It is not required that the cost always equal 2 when FROM is the same as TO;
17830 on some machines it is expensive to move between registers if they are not
17831 general registers. */
17832
17833 int
17834 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
17835 enum reg_class class2)
17836 {
17837 /* In case we require secondary memory, compute cost of the store followed
17838 by load. In order to avoid bad register allocation choices, we need
17839 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
17840
17841 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
17842 {
17843 int cost = 1;
17844
17845 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
17846 MEMORY_MOVE_COST (mode, class1, 1));
17847 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
17848 MEMORY_MOVE_COST (mode, class2, 1));
17849
17850 /* In the case of copying from a general purpose register we may emit
17851 multiple stores followed by a single load, causing a memory size
17852 mismatch stall. Count this as an arbitrarily high cost of 20. */
17853 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
17854 cost += 20;
17855
17856 /* In the case of FP/MMX moves, the registers actually overlap, and we
17857 have to switch modes in order to treat them differently. */
17858 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
17859 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
17860 cost += 20;
17861
17862 return cost;
17863 }
17864
17865 /* Moves between SSE/MMX and integer unit are expensive. */
17866 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
17867 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
17868 return ix86_cost->mmxsse_to_integer;
17869 if (MAYBE_FLOAT_CLASS_P (class1))
17870 return ix86_cost->fp_move;
17871 if (MAYBE_SSE_CLASS_P (class1))
17872 return ix86_cost->sse_move;
17873 if (MAYBE_MMX_CLASS_P (class1))
17874 return ix86_cost->mmx_move;
17875 return 2;
17876 }
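/* A worked example of the cost formula above, using made-up table values
   purely for illustration: if the symmetric MEMORY_MOVE_COSTs are 4 for
   CLASS1 and 6 for CLASS2 and secondary memory is needed, the move costs
   1 + 4 + 6 = 11; another 20 is added when CLASS1 needs more hard registers
   than CLASS2 (store/load size mismatch) or when an FP/MMX overlap forces a
   mode switch.  */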
17877
17878 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
17879
17880 bool
17881 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
17882 {
17883 /* Flags and only flags can only hold CCmode values. */
17884 if (CC_REGNO_P (regno))
17885 return GET_MODE_CLASS (mode) == MODE_CC;
17886 if (GET_MODE_CLASS (mode) == MODE_CC
17887 || GET_MODE_CLASS (mode) == MODE_RANDOM
17888 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
17889 return 0;
17890 if (FP_REGNO_P (regno))
17891 return VALID_FP_MODE_P (mode);
17892 if (SSE_REGNO_P (regno))
17893 {
17894 /* We implement the move patterns for all vector modes into and
17895 out of SSE registers, even when no operation instructions
17896 are available. */
17897 return (VALID_SSE_REG_MODE (mode)
17898 || VALID_SSE2_REG_MODE (mode)
17899 || VALID_MMX_REG_MODE (mode)
17900 || VALID_MMX_REG_MODE_3DNOW (mode));
17901 }
17902 if (MMX_REGNO_P (regno))
17903 {
17904 /* We implement the move patterns for 3DNOW modes even in MMX mode,
17905 so if the register is available at all, then we can move data of
17906 the given mode into or out of it. */
17907 return (VALID_MMX_REG_MODE (mode)
17908 || VALID_MMX_REG_MODE_3DNOW (mode));
17909 }
17910
17911 if (mode == QImode)
17912 {
17913 /* Take care with QImode values - they can be in non-QI regs,
17914 but then they do cause partial register stalls. */
17915 if (regno < 4 || TARGET_64BIT)
17916 return 1;
17917 if (!TARGET_PARTIAL_REG_STALL)
17918 return 1;
17919 return reload_in_progress || reload_completed;
17920 }
17921 /* We handle both integer and floats in the general purpose registers. */
17922 else if (VALID_INT_MODE_P (mode))
17923 return 1;
17924 else if (VALID_FP_MODE_P (mode))
17925 return 1;
17926 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
17927 on to use that value in smaller contexts, this can easily force a
17928 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
17929 supporting DImode, allow it. */
17930 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
17931 return 1;
17932
17933 return 0;
17934 }
17935
17936 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
17937 tieable integer mode. */
17938
17939 static bool
17940 ix86_tieable_integer_mode_p (enum machine_mode mode)
17941 {
17942 switch (mode)
17943 {
17944 case HImode:
17945 case SImode:
17946 return true;
17947
17948 case QImode:
17949 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
17950
17951 case DImode:
17952 return TARGET_64BIT;
17953
17954 default:
17955 return false;
17956 }
17957 }
17958
17959 /* Return true if MODE1 is accessible in a register that can hold MODE2
17960 without copying. That is, all register classes that can hold MODE2
17961 can also hold MODE1. */
17962
17963 bool
17964 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
17965 {
17966 if (mode1 == mode2)
17967 return true;
17968
17969 if (ix86_tieable_integer_mode_p (mode1)
17970 && ix86_tieable_integer_mode_p (mode2))
17971 return true;
17972
17973 /* MODE2 being XFmode implies fp stack or general regs, which means we
17974 can tie any smaller floating point modes to it. Note that we do not
17975 tie this with TFmode. */
17976 if (mode2 == XFmode)
17977 return mode1 == SFmode || mode1 == DFmode;
17978
17979 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
17980 that we can tie it with SFmode. */
17981 if (mode2 == DFmode)
17982 return mode1 == SFmode;
17983
17984 /* If MODE2 is only appropriate for an SSE register, then tie with
17985 any other mode acceptable to SSE registers. */
17986 if (GET_MODE_SIZE (mode2) >= 8
17987 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
17988 return ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1);
17989
17990 /* If MODE2 is appropriate for an MMX (or SSE) register, then tie
17991 with any other mode acceptable to MMX registers. */
17992 if (GET_MODE_SIZE (mode2) == 8
17993 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
17994 return ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1);
17995
17996 return false;
17997 }
17998
17999 /* Return the cost of moving data of mode M between a
18000 register and memory. A value of 2 is the default; this cost is
18001 relative to those in `REGISTER_MOVE_COST'.
18002
18003 If moving between registers and memory is more expensive than
18004 between two registers, you should define this macro to express the
18005 relative cost.
18006
18007 Also model the increased cost of moving QImode values in registers
18008 outside the Q_REGS classes.
18009 */
18010 int
18011 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
18012 {
18013 if (FLOAT_CLASS_P (class))
18014 {
18015 int index;
18016 switch (mode)
18017 {
18018 case SFmode:
18019 index = 0;
18020 break;
18021 case DFmode:
18022 index = 1;
18023 break;
18024 case XFmode:
18025 index = 2;
18026 break;
18027 default:
18028 return 100;
18029 }
18030 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
18031 }
18032 if (SSE_CLASS_P (class))
18033 {
18034 int index;
18035 switch (GET_MODE_SIZE (mode))
18036 {
18037 case 4:
18038 index = 0;
18039 break;
18040 case 8:
18041 index = 1;
18042 break;
18043 case 16:
18044 index = 2;
18045 break;
18046 default:
18047 return 100;
18048 }
18049 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
18050 }
18051 if (MMX_CLASS_P (class))
18052 {
18053 int index;
18054 switch (GET_MODE_SIZE (mode))
18055 {
18056 case 4:
18057 index = 0;
18058 break;
18059 case 8:
18060 index = 1;
18061 break;
18062 default:
18063 return 100;
18064 }
18065 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
18066 }
18067 switch (GET_MODE_SIZE (mode))
18068 {
18069 case 1:
18070 if (in)
18071 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
18072 : ix86_cost->movzbl_load);
18073 else
18074 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
18075 : ix86_cost->int_store[0] + 4);
18076 break;
18077 case 2:
18078 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
18079 default:
18080 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
18081 if (mode == TFmode)
18082 mode = XFmode;
18083 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
18084 * (((int) GET_MODE_SIZE (mode)
18085 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
18086 }
18087 }
18088
18089 /* Compute a (partial) cost for rtx X. Return true if the complete
18090 cost has been computed, and false if subexpressions should be
18091 scanned. In either case, *TOTAL contains the cost result. */
18092
18093 static bool
18094 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
18095 {
18096 enum machine_mode mode = GET_MODE (x);
18097
18098 switch (code)
18099 {
18100 case CONST_INT:
18101 case CONST:
18102 case LABEL_REF:
18103 case SYMBOL_REF:
18104 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
18105 *total = 3;
18106 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
18107 *total = 2;
18108 else if (flag_pic && SYMBOLIC_CONST (x)
18109 && (!TARGET_64BIT
18110 || (GET_CODE (x) != LABEL_REF
18111 && (GET_CODE (x) != SYMBOL_REF
18112 || !SYMBOL_REF_LOCAL_P (x)))))
18113 *total = 1;
18114 else
18115 *total = 0;
18116 return true;
18117
18118 case CONST_DOUBLE:
18119 if (mode == VOIDmode)
18120 *total = 0;
18121 else
18122 switch (standard_80387_constant_p (x))
18123 {
18124 case 1: /* 0.0 */
18125 *total = 1;
18126 break;
18127 default: /* Other constants */
18128 *total = 2;
18129 break;
18130 case 0:
18131 case -1:
18132 /* Start with (MEM (SYMBOL_REF)), since that's where
18133 it'll probably end up. Add a penalty for size. */
18134 *total = (COSTS_N_INSNS (1)
18135 + (flag_pic != 0 && !TARGET_64BIT)
18136 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
18137 break;
18138 }
18139 return true;
18140
18141 case ZERO_EXTEND:
18142 /* The zero extension is often completely free on x86_64, so make
18143 it as cheap as possible. */
18144 if (TARGET_64BIT && mode == DImode
18145 && GET_MODE (XEXP (x, 0)) == SImode)
18146 *total = 1;
18147 else if (TARGET_ZERO_EXTEND_WITH_AND)
18148 *total = ix86_cost->add;
18149 else
18150 *total = ix86_cost->movzx;
18151 return false;
18152
18153 case SIGN_EXTEND:
18154 *total = ix86_cost->movsx;
18155 return false;
18156
18157 case ASHIFT:
18158 if (GET_CODE (XEXP (x, 1)) == CONST_INT
18159 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
18160 {
18161 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
18162 if (value == 1)
18163 {
18164 *total = ix86_cost->add;
18165 return false;
18166 }
18167 if ((value == 2 || value == 3)
18168 && ix86_cost->lea <= ix86_cost->shift_const)
18169 {
18170 *total = ix86_cost->lea;
18171 return false;
18172 }
18173 }
18174 /* FALLTHRU */
18175
18176 case ROTATE:
18177 case ASHIFTRT:
18178 case LSHIFTRT:
18179 case ROTATERT:
18180 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
18181 {
18182 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18183 {
18184 if (INTVAL (XEXP (x, 1)) > 32)
18185 *total = ix86_cost->shift_const + COSTS_N_INSNS (2);
18186 else
18187 *total = ix86_cost->shift_const * 2;
18188 }
18189 else
18190 {
18191 if (GET_CODE (XEXP (x, 1)) == AND)
18192 *total = ix86_cost->shift_var * 2;
18193 else
18194 *total = ix86_cost->shift_var * 6 + COSTS_N_INSNS (2);
18195 }
18196 }
18197 else
18198 {
18199 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18200 *total = ix86_cost->shift_const;
18201 else
18202 *total = ix86_cost->shift_var;
18203 }
18204 return false;
18205
18206 case MULT:
18207 if (FLOAT_MODE_P (mode))
18208 {
18209 *total = ix86_cost->fmul;
18210 return false;
18211 }
18212 else
18213 {
18214 rtx op0 = XEXP (x, 0);
18215 rtx op1 = XEXP (x, 1);
18216 int nbits;
18217 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18218 {
18219 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
18220 for (nbits = 0; value != 0; value &= value - 1)
18221 nbits++;
18222 }
18223 else
18224 /* This is arbitrary. */
18225 nbits = 7;
18226
18227 /* Compute costs correctly for widening multiplication. */
18228 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
18229 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
18230 == GET_MODE_SIZE (mode))
18231 {
18232 int is_mulwiden = 0;
18233 enum machine_mode inner_mode = GET_MODE (op0);
18234
18235 if (GET_CODE (op0) == GET_CODE (op1))
18236 is_mulwiden = 1, op1 = XEXP (op1, 0);
18237 else if (GET_CODE (op1) == CONST_INT)
18238 {
18239 if (GET_CODE (op0) == SIGN_EXTEND)
18240 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
18241 == INTVAL (op1);
18242 else
18243 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
18244 }
18245
18246 if (is_mulwiden)
18247 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
18248 }
18249
18250 *total = (ix86_cost->mult_init[MODE_INDEX (mode)]
18251 + nbits * ix86_cost->mult_bit
18252 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code));
18253
18254 return true;
18255 }
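      /* A worked example of the multiply costing above: for
	 (mult:SI (reg) (const_int 10)) the popcount loop sees 10 = 0b1010,
	 so NBITS is 2 and the total is mult_init[SImode index]
	 + 2 * mult_bit plus the operand costs; a non-constant multiplier
	 falls back to the arbitrary NBITS of 7.  A widening multiply such as
	 (mult:DI (sign_extend:DI (reg:SI)) (sign_extend:DI (reg:SI))) is
	 costed as an SImode multiply, since MODE is rewritten above.  */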
18256
18257 case DIV:
18258 case UDIV:
18259 case MOD:
18260 case UMOD:
18261 if (FLOAT_MODE_P (mode))
18262 *total = ix86_cost->fdiv;
18263 else
18264 *total = ix86_cost->divide[MODE_INDEX (mode)];
18265 return false;
18266
18267 case PLUS:
18268 if (FLOAT_MODE_P (mode))
18269 *total = ix86_cost->fadd;
18270 else if (GET_MODE_CLASS (mode) == MODE_INT
18271 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
18272 {
18273 if (GET_CODE (XEXP (x, 0)) == PLUS
18274 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
18275 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
18276 && CONSTANT_P (XEXP (x, 1)))
18277 {
18278 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
18279 if (val == 2 || val == 4 || val == 8)
18280 {
18281 *total = ix86_cost->lea;
18282 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
18283 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
18284 outer_code);
18285 *total += rtx_cost (XEXP (x, 1), outer_code);
18286 return true;
18287 }
18288 }
18289 else if (GET_CODE (XEXP (x, 0)) == MULT
18290 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
18291 {
18292 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
18293 if (val == 2 || val == 4 || val == 8)
18294 {
18295 *total = ix86_cost->lea;
18296 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
18297 *total += rtx_cost (XEXP (x, 1), outer_code);
18298 return true;
18299 }
18300 }
18301 else if (GET_CODE (XEXP (x, 0)) == PLUS)
18302 {
18303 *total = ix86_cost->lea;
18304 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
18305 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
18306 *total += rtx_cost (XEXP (x, 1), outer_code);
18307 return true;
18308 }
18309 }
18310 /* FALLTHRU */
18311
18312 case MINUS:
18313 if (FLOAT_MODE_P (mode))
18314 {
18315 *total = ix86_cost->fadd;
18316 return false;
18317 }
18318 /* FALLTHRU */
18319
18320 case AND:
18321 case IOR:
18322 case XOR:
18323 if (!TARGET_64BIT && mode == DImode)
18324 {
18325 *total = (ix86_cost->add * 2
18326 + (rtx_cost (XEXP (x, 0), outer_code)
18327 << (GET_MODE (XEXP (x, 0)) != DImode))
18328 + (rtx_cost (XEXP (x, 1), outer_code)
18329 << (GET_MODE (XEXP (x, 1)) != DImode)));
18330 return true;
18331 }
18332 /* FALLTHRU */
18333
18334 case NEG:
18335 if (FLOAT_MODE_P (mode))
18336 {
18337 *total = ix86_cost->fchs;
18338 return false;
18339 }
18340 /* FALLTHRU */
18341
18342 case NOT:
18343 if (!TARGET_64BIT && mode == DImode)
18344 *total = ix86_cost->add * 2;
18345 else
18346 *total = ix86_cost->add;
18347 return false;
18348
18349 case COMPARE:
18350 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
18351 && XEXP (XEXP (x, 0), 1) == const1_rtx
18352 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
18353 && XEXP (x, 1) == const0_rtx)
18354 {
18355 /* This kind of construct is implemented using test[bwl].
18356 Treat it as if we had an AND. */
18357 *total = (ix86_cost->add
18358 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
18359 + rtx_cost (const1_rtx, outer_code));
18360 return true;
18361 }
18362 return false;
18363
18364 case FLOAT_EXTEND:
18365 if (!TARGET_SSE_MATH
18366 || mode == XFmode
18367 || (mode == DFmode && !TARGET_SSE2))
18368 *total = 0;
18369 return false;
18370
18371 case ABS:
18372 if (FLOAT_MODE_P (mode))
18373 *total = ix86_cost->fabs;
18374 return false;
18375
18376 case SQRT:
18377 if (FLOAT_MODE_P (mode))
18378 *total = ix86_cost->fsqrt;
18379 return false;
18380
18381 case UNSPEC:
18382 if (XINT (x, 1) == UNSPEC_TP)
18383 *total = 0;
18384 return false;
18385
18386 default:
18387 return false;
18388 }
18389 }
18390
18391 #if TARGET_MACHO
18392
18393 static int current_machopic_label_num;
18394
18395 /* Given a symbol name and its associated stub, write out the
18396 definition of the stub. */
18397
18398 void
18399 machopic_output_stub (FILE *file, const char *symb, const char *stub)
18400 {
18401 unsigned int length;
18402 char *binder_name, *symbol_name, lazy_ptr_name[32];
18403 int label = ++current_machopic_label_num;
18404
18405 /* For 64-bit we shouldn't get here. */
18406 gcc_assert (!TARGET_64BIT);
18407
18408 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
18409 symb = (*targetm.strip_name_encoding) (symb);
18410
18411 length = strlen (stub);
18412 binder_name = alloca (length + 32);
18413 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
18414
18415 length = strlen (symb);
18416 symbol_name = alloca (length + 32);
18417 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
18418
18419 sprintf (lazy_ptr_name, "L%d$lz", label);
18420
18421 if (MACHOPIC_PURE)
18422 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
18423 else
18424 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
18425
18426 fprintf (file, "%s:\n", stub);
18427 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
18428
18429 if (MACHOPIC_PURE)
18430 {
18431 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
18432 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
18433 fprintf (file, "\tjmp\t*%%edx\n");
18434 }
18435 else
18436 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
18437
18438 fprintf (file, "%s:\n", binder_name);
18439
18440 if (MACHOPIC_PURE)
18441 {
18442 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
18443 fprintf (file, "\tpushl\t%%eax\n");
18444 }
18445 else
18446 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
18447
18448 fprintf (file, "\tjmp\tdyld_stub_binding_helper\n");
18449
18450 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
18451 fprintf (file, "%s:\n", lazy_ptr_name);
18452 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
18453 fprintf (file, "\t.long %s\n", binder_name);
18454 }
18455
18456 void
18457 darwin_x86_file_end (void)
18458 {
18459 darwin_file_end ();
18460 ix86_file_end ();
18461 }
18462 #endif /* TARGET_MACHO */
18463
18464 /* Order the registers for register allocator. */
18465
18466 void
18467 x86_order_regs_for_local_alloc (void)
18468 {
18469 int pos = 0;
18470 int i;
18471
18472 /* First allocate the local general purpose registers. */
18473 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
18474 if (GENERAL_REGNO_P (i) && call_used_regs[i])
18475 reg_alloc_order [pos++] = i;
18476
18477 /* Global general purpose registers. */
18478 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
18479 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
18480 reg_alloc_order [pos++] = i;
18481
18482 /* x87 registers come first in case we are doing FP math
18483 using them. */
18484 if (!TARGET_SSE_MATH)
18485 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
18486 reg_alloc_order [pos++] = i;
18487
18488 /* SSE registers. */
18489 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
18490 reg_alloc_order [pos++] = i;
18491 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
18492 reg_alloc_order [pos++] = i;
18493
18494 /* x87 registers. */
18495 if (TARGET_SSE_MATH)
18496 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
18497 reg_alloc_order [pos++] = i;
18498
18499 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
18500 reg_alloc_order [pos++] = i;
18501
18502 /* Initialize the rest of the array, as we do not allocate some registers
18503 at all. */
18504 while (pos < FIRST_PSEUDO_REGISTER)
18505 reg_alloc_order [pos++] = 0;
18506 }
18507
18508 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
18509 struct attribute_spec.handler. */
18510 static tree
18511 ix86_handle_struct_attribute (tree *node, tree name,
18512 tree args ATTRIBUTE_UNUSED,
18513 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
18514 {
18515 tree *type = NULL;
18516 if (DECL_P (*node))
18517 {
18518 if (TREE_CODE (*node) == TYPE_DECL)
18519 type = &TREE_TYPE (*node);
18520 }
18521 else
18522 type = node;
18523
18524 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
18525 || TREE_CODE (*type) == UNION_TYPE)))
18526 {
18527 warning (OPT_Wattributes, "%qs attribute ignored",
18528 IDENTIFIER_POINTER (name));
18529 *no_add_attrs = true;
18530 }
18531
18532 else if ((is_attribute_p ("ms_struct", name)
18533 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
18534 || ((is_attribute_p ("gcc_struct", name)
18535 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
18536 {
18537 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
18538 IDENTIFIER_POINTER (name));
18539 *no_add_attrs = true;
18540 }
18541
18542 return NULL_TREE;
18543 }
18544
18545 static bool
18546 ix86_ms_bitfield_layout_p (tree record_type)
18547 {
18548 return (TARGET_MS_BITFIELD_LAYOUT &&
18549 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
18550 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
18551 }
18552
18553 /* Returns an expression indicating where the this parameter is
18554 located on entry to the FUNCTION. */
18555
18556 static rtx
18557 x86_this_parameter (tree function)
18558 {
18559 tree type = TREE_TYPE (function);
18560
18561 if (TARGET_64BIT)
18562 {
18563 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
18564 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
18565 }
18566
18567 if (ix86_function_regparm (type, function) > 0)
18568 {
18569 tree parm;
18570
18571 parm = TYPE_ARG_TYPES (type);
18572 /* Figure out whether or not the function has a variable number of
18573 arguments. */
18574 for (; parm; parm = TREE_CHAIN (parm))
18575 if (TREE_VALUE (parm) == void_type_node)
18576 break;
18577 /* If not, the this parameter is in the first argument. */
18578 if (parm)
18579 {
18580 int regno = 0;
18581 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
18582 regno = 2;
18583 return gen_rtx_REG (SImode, regno);
18584 }
18585 }
18586
18587 if (aggregate_value_p (TREE_TYPE (type), type))
18588 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
18589 else
18590 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
18591 }
18592
18593 /* Determine whether x86_output_mi_thunk can succeed. */
18594
18595 static bool
18596 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
18597 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
18598 HOST_WIDE_INT vcall_offset, tree function)
18599 {
18600 /* 64-bit can handle anything. */
18601 if (TARGET_64BIT)
18602 return true;
18603
18604 /* For 32-bit, everything's fine if we have one free register. */
18605 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
18606 return true;
18607
18608 /* Need a free register for vcall_offset. */
18609 if (vcall_offset)
18610 return false;
18611
18612 /* Need a free register for GOT references. */
18613 if (flag_pic && !(*targetm.binds_local_p) (function))
18614 return false;
18615
18616 /* Otherwise ok. */
18617 return true;
18618 }
18619
18620 /* Output the assembler code for a thunk function. THUNK_DECL is the
18621 declaration for the thunk function itself, FUNCTION is the decl for
18622 the target function. DELTA is an immediate constant offset to be
18623 added to THIS. If VCALL_OFFSET is nonzero, the word at
18624 *(*this + vcall_offset) should be added to THIS. */
18625
18626 static void
18627 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
18628 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
18629 HOST_WIDE_INT vcall_offset, tree function)
18630 {
18631 rtx xops[3];
18632 rtx this = x86_this_parameter (function);
18633 rtx this_reg, tmp;
18634
18635 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
18636 pull it in now and let DELTA benefit. */
18637 if (REG_P (this))
18638 this_reg = this;
18639 else if (vcall_offset)
18640 {
18641 /* Put the this parameter into %eax. */
18642 xops[0] = this;
18643 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
18644 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18645 }
18646 else
18647 this_reg = NULL_RTX;
18648
18649 /* Adjust the this parameter by a fixed constant. */
18650 if (delta)
18651 {
18652 xops[0] = GEN_INT (delta);
18653 xops[1] = this_reg ? this_reg : this;
18654 if (TARGET_64BIT)
18655 {
18656 if (!x86_64_general_operand (xops[0], DImode))
18657 {
18658 tmp = gen_rtx_REG (DImode, R10_REG);
18659 xops[1] = tmp;
18660 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
18661 xops[0] = tmp;
18662 xops[1] = this;
18663 }
18664 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
18665 }
18666 else
18667 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
18668 }
18669
18670 /* Adjust the this parameter by a value stored in the vtable. */
18671 if (vcall_offset)
18672 {
18673 if (TARGET_64BIT)
18674 tmp = gen_rtx_REG (DImode, R10_REG);
18675 else
18676 {
18677 int tmp_regno = 2 /* ECX */;
18678 if (lookup_attribute ("fastcall",
18679 TYPE_ATTRIBUTES (TREE_TYPE (function))))
18680 tmp_regno = 0 /* EAX */;
18681 tmp = gen_rtx_REG (SImode, tmp_regno);
18682 }
18683
18684 xops[0] = gen_rtx_MEM (Pmode, this_reg);
18685 xops[1] = tmp;
18686 if (TARGET_64BIT)
18687 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
18688 else
18689 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18690
18691 /* Adjust the this parameter. */
18692 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
18693 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
18694 {
18695 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
18696 xops[0] = GEN_INT (vcall_offset);
18697 xops[1] = tmp2;
18698 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
18699 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
18700 }
18701 xops[1] = this_reg;
18702 if (TARGET_64BIT)
18703 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
18704 else
18705 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
18706 }
18707
18708 /* If necessary, drop THIS back to its stack slot. */
18709 if (this_reg && this_reg != this)
18710 {
18711 xops[0] = this_reg;
18712 xops[1] = this;
18713 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18714 }
18715
18716 xops[0] = XEXP (DECL_RTL (function), 0);
18717 if (TARGET_64BIT)
18718 {
18719 if (!flag_pic || (*targetm.binds_local_p) (function))
18720 output_asm_insn ("jmp\t%P0", xops);
18721 else
18722 {
18723 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
18724 tmp = gen_rtx_CONST (Pmode, tmp);
18725 tmp = gen_rtx_MEM (QImode, tmp);
18726 xops[0] = tmp;
18727 output_asm_insn ("jmp\t%A0", xops);
18728 }
18729 }
18730 else
18731 {
18732 if (!flag_pic || (*targetm.binds_local_p) (function))
18733 output_asm_insn ("jmp\t%P0", xops);
18734 else
18735 #if TARGET_MACHO
18736 if (TARGET_MACHO)
18737 {
18738 rtx sym_ref = XEXP (DECL_RTL (function), 0);
18739 tmp = (gen_rtx_SYMBOL_REF
18740 (Pmode,
18741 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
18742 tmp = gen_rtx_MEM (QImode, tmp);
18743 xops[0] = tmp;
18744 output_asm_insn ("jmp\t%0", xops);
18745 }
18746 else
18747 #endif /* TARGET_MACHO */
18748 {
18749 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
18750 output_set_got (tmp, NULL_RTX);
18751
18752 xops[1] = tmp;
18753 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
18754 output_asm_insn ("jmp\t{*}%1", xops);
18755 }
18756 }
18757 }
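/* A sketch of the code the routine above emits in the simplest case,
   assuming plain ia32, no regparm, no PIC, DELTA == 4 and VCALL_OFFSET == 0,
   so that the this pointer sits at 4(%esp):

	addl	$4, 4(%esp)
	jmp	target_function

   With a nonzero VCALL_OFFSET the this pointer is first pulled into %eax so
   the vtable slot can be read and added before the jump.  */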
18758
18759 static void
18760 x86_file_start (void)
18761 {
18762 default_file_start ();
18763 #if TARGET_MACHO
18764 darwin_file_start ();
18765 #endif
18766 if (X86_FILE_START_VERSION_DIRECTIVE)
18767 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
18768 if (X86_FILE_START_FLTUSED)
18769 fputs ("\t.global\t__fltused\n", asm_out_file);
18770 if (ix86_asm_dialect == ASM_INTEL)
18771 fputs ("\t.intel_syntax\n", asm_out_file);
18772 }
18773
18774 int
18775 x86_field_alignment (tree field, int computed)
18776 {
18777 enum machine_mode mode;
18778 tree type = TREE_TYPE (field);
18779
18780 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
18781 return computed;
18782 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
18783 ? get_inner_array_type (type) : type);
18784 if (mode == DFmode || mode == DCmode
18785 || GET_MODE_CLASS (mode) == MODE_INT
18786 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
18787 return MIN (32, computed);
18788 return computed;
18789 }
18790
18791 /* Output assembler code to FILE to increment profiler label # LABELNO
18792 for profiling a function entry. */
18793 void
18794 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
18795 {
18796 if (TARGET_64BIT)
18797 if (flag_pic)
18798 {
18799 #ifndef NO_PROFILE_COUNTERS
18800 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
18801 #endif
18802 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
18803 }
18804 else
18805 {
18806 #ifndef NO_PROFILE_COUNTERS
18807 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
18808 #endif
18809 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
18810 }
18811 else if (flag_pic)
18812 {
18813 #ifndef NO_PROFILE_COUNTERS
18814 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
18815 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
18816 #endif
18817 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
18818 }
18819 else
18820 {
18821 #ifndef NO_PROFILE_COUNTERS
18822 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
18823 PROFILE_COUNT_REGISTER);
18824 #endif
18825 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
18826 }
18827 }
18828
18829 /* We don't have exact information about the insn sizes, but we may assume
18830 quite safely that we are informed about all 1-byte insns and memory
18831 address sizes. This is enough to eliminate unnecessary padding in
18832 99% of cases. */
18833
18834 static int
18835 min_insn_size (rtx insn)
18836 {
18837 int l = 0;
18838
18839 if (!INSN_P (insn) || !active_insn_p (insn))
18840 return 0;
18841
18842 /* Discard the alignments we've emitted, and jump table data. */
18843 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
18844 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
18845 return 0;
18846 if (GET_CODE (insn) == JUMP_INSN
18847 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
18848 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
18849 return 0;
18850
18851 /* Important case - calls are always 5 bytes.
18852 It is common to have many calls in a row. */
18853 if (GET_CODE (insn) == CALL_INSN
18854 && symbolic_reference_mentioned_p (PATTERN (insn))
18855 && !SIBLING_CALL_P (insn))
18856 return 5;
18857 if (get_attr_length (insn) <= 1)
18858 return 1;
18859
18860 /* For normal instructions we may rely on the sizes of addresses
18861 and the presence of a symbol to require 4 bytes of encoding. This is not
18862 the case for jumps, where references are PC relative. */
18863 if (GET_CODE (insn) != JUMP_INSN)
18864 {
18865 l = get_attr_length_address (insn);
18866 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
18867 l = 4;
18868 }
18869 if (l)
18870 return 1+l;
18871 else
18872 return 2;
18873 }
18874
18875 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in 16 byte
18876 window. */
18877
18878 static void
18879 ix86_avoid_jump_misspredicts (void)
18880 {
18881 rtx insn, start = get_insns ();
18882 int nbytes = 0, njumps = 0;
18883 int isjump = 0;
18884
18885 /* Look for all minimal intervals of instructions containing 4 jumps.
18886 The intervals are bounded by START and INSN. NBYTES is the total
18887 size of instructions in the interval including INSN and not including
18888 START.  When NBYTES is smaller than 16 bytes, it is possible
18889 that the ends of START and INSN land in the same 16-byte page.
18890
18891 The smallest offset in the page at which INSN can start is the case where
18892 START ends at offset 0.  The offset of INSN is then NBYTES - sizeof (INSN).
18893 We add a p2align to the 16-byte window with maxskip 17 - NBYTES + sizeof (INSN).
18894 */
18895 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
18896 {
18897
18898 nbytes += min_insn_size (insn);
18899 if (dump_file)
18900 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
18901 INSN_UID (insn), min_insn_size (insn));
18902 if ((GET_CODE (insn) == JUMP_INSN
18903 && GET_CODE (PATTERN (insn)) != ADDR_VEC
18904 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
18905 || GET_CODE (insn) == CALL_INSN)
18906 njumps++;
18907 else
18908 continue;
18909
18910 while (njumps > 3)
18911 {
18912 start = NEXT_INSN (start);
18913 if ((GET_CODE (start) == JUMP_INSN
18914 && GET_CODE (PATTERN (start)) != ADDR_VEC
18915 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
18916 || GET_CODE (start) == CALL_INSN)
18917 njumps--, isjump = 1;
18918 else
18919 isjump = 0;
18920 nbytes -= min_insn_size (start);
18921 }
18922 gcc_assert (njumps >= 0);
18923 if (dump_file)
18924 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
18925 INSN_UID (start), INSN_UID (insn), nbytes);
18926
18927 if (njumps == 3 && isjump && nbytes < 16)
18928 {
18929 int padsize = 15 - nbytes + min_insn_size (insn);
18930
18931 if (dump_file)
18932 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
18933 INSN_UID (insn), padsize);
18934 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
18935 }
18936 }
18937 }
18938
18939 /* AMD Athlon works faster
18940 when RET is not the destination of a conditional jump or directly preceded
18941 by another jump instruction.  We avoid the penalty by inserting a NOP just
18942 before the RET instruction in such cases. */
18943 static void
18944 ix86_pad_returns (void)
18945 {
18946 edge e;
18947 edge_iterator ei;
18948
18949 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
18950 {
18951 basic_block bb = e->src;
18952 rtx ret = BB_END (bb);
18953 rtx prev;
18954 bool replace = false;
18955
18956 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
18957 || !maybe_hot_bb_p (bb))
18958 continue;
18959 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
18960 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
18961 break;
18962 if (prev && GET_CODE (prev) == CODE_LABEL)
18963 {
18964 edge e;
18965 edge_iterator ei;
18966
18967 FOR_EACH_EDGE (e, ei, bb->preds)
18968 if (EDGE_FREQUENCY (e) && e->src->index >= 0
18969 && !(e->flags & EDGE_FALLTHRU))
18970 replace = true;
18971 }
18972 if (!replace)
18973 {
18974 prev = prev_active_insn (ret);
18975 if (prev
18976 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
18977 || GET_CODE (prev) == CALL_INSN))
18978 replace = true;
18979 /* Empty functions get a branch mispredict even when the jump destination
18980 is not visible to us. */
18981 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
18982 replace = true;
18983 }
18984 if (replace)
18985 {
18986 emit_insn_before (gen_return_internal_long (), ret);
18987 delete_insn (ret);
18988 }
18989 }
18990 }
18991
18992 /* Implement machine specific optimizations.  We implement padding of returns
18993 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
18994 static void
18995 ix86_reorg (void)
18996 {
18997 if (TARGET_PAD_RETURNS && optimize && !optimize_size)
18998 ix86_pad_returns ();
18999 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
19000 ix86_avoid_jump_misspredicts ();
19001 }
19002
19003 /* Return nonzero when a QImode register that must be represented via a REX
19004 prefix is used. */
19005 bool
19006 x86_extended_QIreg_mentioned_p (rtx insn)
19007 {
19008 int i;
19009 extract_insn_cached (insn);
19010 for (i = 0; i < recog_data.n_operands; i++)
19011 if (REG_P (recog_data.operand[i])
19012 && REGNO (recog_data.operand[i]) >= 4)
19013 return true;
19014 return false;
19015 }
19016
19017 /* Return nonzero when P points to a register encoded via a REX prefix.
19018 Called via for_each_rtx. */
19019 static int
19020 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
19021 {
19022 unsigned int regno;
19023 if (!REG_P (*p))
19024 return 0;
19025 regno = REGNO (*p);
19026 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
19027 }
19028
19029 /* Return true when INSN mentions a register that must be encoded using a REX
19030 prefix. */
19031 bool
19032 x86_extended_reg_mentioned_p (rtx insn)
19033 {
19034 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
19035 }
19036
19037 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
19038 optabs would emit if we didn't have TFmode patterns. */
19039
19040 void
19041 x86_emit_floatuns (rtx operands[2])
19042 {
19043 rtx neglab, donelab, i0, i1, f0, in, out;
19044 enum machine_mode mode, inmode;
19045
19046 inmode = GET_MODE (operands[1]);
19047 gcc_assert (inmode == SImode || inmode == DImode);
19048
19049 out = operands[0];
19050 in = force_reg (inmode, operands[1]);
19051 mode = GET_MODE (out);
19052 neglab = gen_label_rtx ();
19053 donelab = gen_label_rtx ();
19054 i1 = gen_reg_rtx (Pmode);
19055 f0 = gen_reg_rtx (mode);
19056
19057 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
19058
19059 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
19060 emit_jump_insn (gen_jump (donelab));
19061 emit_barrier ();
19062
19063 emit_label (neglab);
19064
19065 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
19066 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
19067 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
19068 expand_float (f0, i0, 0);
19069 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
19070
19071 emit_label (donelab);
19072 }
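
/* Illustrative sketch, kept out of the build: the scalar computation the
   expander above performs for a DImode input, using a hypothetical helper
   name.  When the sign bit is set, the value is halved with the low bit
   folded back in so a signed conversion cannot overflow, and the result
   is then doubled in the FP domain.  */
#if 0
static double
floatuns_sketch (unsigned long long in)
{
  unsigned long long i0;
  double f0;

  if ((long long) in >= 0)
    return (double) (long long) in;	/* value fits in the signed range */

  i0 = (in >> 1) | (in & 1);		/* halve, keeping a sticky low bit */
  f0 = (double) (long long) i0;		/* signed conversion is now safe */
  return f0 + f0;			/* scale back up */
}
#endif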
19073 \f
19074 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19075 with all elements equal to VAL.  Return true if successful. */
19076
19077 static bool
19078 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
19079 rtx target, rtx val)
19080 {
19081 enum machine_mode smode, wsmode, wvmode;
19082 rtx x;
19083
19084 switch (mode)
19085 {
19086 case V2SImode:
19087 case V2SFmode:
19088 if (!mmx_ok)
19089 return false;
19090 /* FALLTHRU */
19091
19092 case V2DFmode:
19093 case V2DImode:
19094 case V4SFmode:
19095 case V4SImode:
19096 val = force_reg (GET_MODE_INNER (mode), val);
19097 x = gen_rtx_VEC_DUPLICATE (mode, val);
19098 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19099 return true;
19100
19101 case V4HImode:
19102 if (!mmx_ok)
19103 return false;
19104 if (TARGET_SSE || TARGET_3DNOW_A)
19105 {
19106 val = gen_lowpart (SImode, val);
19107 x = gen_rtx_TRUNCATE (HImode, val);
19108 x = gen_rtx_VEC_DUPLICATE (mode, x);
19109 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19110 return true;
19111 }
19112 else
19113 {
19114 smode = HImode;
19115 wsmode = SImode;
19116 wvmode = V2SImode;
19117 goto widen;
19118 }
19119
19120 case V8QImode:
19121 if (!mmx_ok)
19122 return false;
19123 smode = QImode;
19124 wsmode = HImode;
19125 wvmode = V4HImode;
19126 goto widen;
19127 case V8HImode:
19128 if (TARGET_SSE2)
19129 {
19130 rtx tmp1, tmp2;
19131 /* Extend HImode to SImode using a paradoxical SUBREG. */
19132 tmp1 = gen_reg_rtx (SImode);
19133 emit_move_insn (tmp1, gen_lowpart (SImode, val));
19134 /* Insert the SImode value as low element of V4SImode vector. */
19135 tmp2 = gen_reg_rtx (V4SImode);
19136 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
19137 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
19138 CONST0_RTX (V4SImode),
19139 const1_rtx);
19140 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
19141 /* Cast the V4SImode vector back to a V8HImode vector. */
19142 tmp1 = gen_reg_rtx (V8HImode);
19143 emit_move_insn (tmp1, gen_lowpart (V8HImode, tmp2));
19144 /* Duplicate the low short through the whole low SImode word. */
19145 emit_insn (gen_sse2_punpcklwd (tmp1, tmp1, tmp1));
19146 /* Cast the V8HImode vector back to a V4SImode vector. */
19147 tmp2 = gen_reg_rtx (V4SImode);
19148 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
19149 /* Replicate the low element of the V4SImode vector. */
19150 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
19151 /* Cast the V4SImode vector back to V8HImode, and store in target. */
19152 emit_move_insn (target, gen_lowpart (V8HImode, tmp2));
19153 return true;
19154 }
19155 smode = HImode;
19156 wsmode = SImode;
19157 wvmode = V4SImode;
19158 goto widen;
19159 case V16QImode:
19160 if (TARGET_SSE2)
19161 {
19162 rtx tmp1, tmp2;
19163 /* Extend QImode to SImode using a paradoxical SUBREG. */
19164 tmp1 = gen_reg_rtx (SImode);
19165 emit_move_insn (tmp1, gen_lowpart (SImode, val));
19166 /* Insert the SImode value as low element of V4SImode vector. */
19167 tmp2 = gen_reg_rtx (V4SImode);
19168 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
19169 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
19170 CONST0_RTX (V4SImode),
19171 const1_rtx);
19172 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
19173 /* Cast the V4SImode vector back to a V16QImode vector. */
19174 tmp1 = gen_reg_rtx (V16QImode);
19175 emit_move_insn (tmp1, gen_lowpart (V16QImode, tmp2));
19176 /* Duplicate the low byte through the whole low SImode word. */
19177 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
19178 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
19179 /* Cast the V16QImode vector back to a V4SImode vector. */
19180 tmp2 = gen_reg_rtx (V4SImode);
19181 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
19182 /* Replicate the low element of the V4SImode vector. */
19183 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
19184 /* Cast the V4SImode vector back to V16QImode, and store in target. */
19185 emit_move_insn (target, gen_lowpart (V16QImode, tmp2));
19186 return true;
19187 }
19188 smode = QImode;
19189 wsmode = HImode;
19190 wvmode = V8HImode;
19191 goto widen;
19192 widen:
19193 /* Replicate the value once into the next wider mode and recurse. */
19194 val = convert_modes (wsmode, smode, val, true);
19195 x = expand_simple_binop (wsmode, ASHIFT, val,
19196 GEN_INT (GET_MODE_BITSIZE (smode)),
19197 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19198 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
19199
19200 x = gen_reg_rtx (wvmode);
19201 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
19202 gcc_unreachable ();
19203 emit_move_insn (target, gen_lowpart (mode, x));
19204 return true;
19205
19206 default:
19207 return false;
19208 }
19209 }
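
/* Illustrative sketch, kept out of the build: the scalar replication used
   by the "widen" path above.  The value is duplicated into ever wider
   integer modes; the widest scalar is then broadcast as a vector of the
   wider vector mode by the recursive call.  */
#if 0
static unsigned int
widen_replicate_sketch (unsigned char b)
{
  unsigned short h = (unsigned short) ((b << 8) | b);	/* QImode -> HImode */
  unsigned int s = ((unsigned int) h << 16) | h;	/* HImode -> SImode */
  return s;	/* broadcasting S as V2SI/V4SI yields 8/16 copies of B */
}
#endif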
19210
19211 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19212 whose ONE_VAR element is VAR, and other elements are zero. Return true
19213 if successful. */
19214
19215 static bool
19216 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
19217 rtx target, rtx var, int one_var)
19218 {
19219 enum machine_mode vsimode;
19220 rtx new_target;
19221 rtx x, tmp;
19222
19223 switch (mode)
19224 {
19225 case V2SFmode:
19226 case V2SImode:
19227 if (!mmx_ok)
19228 return false;
19229 /* FALLTHRU */
19230
19231 case V2DFmode:
19232 case V2DImode:
19233 if (one_var != 0)
19234 return false;
19235 var = force_reg (GET_MODE_INNER (mode), var);
19236 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
19237 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19238 return true;
19239
19240 case V4SFmode:
19241 case V4SImode:
19242 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
19243 new_target = gen_reg_rtx (mode);
19244 else
19245 new_target = target;
19246 var = force_reg (GET_MODE_INNER (mode), var);
19247 x = gen_rtx_VEC_DUPLICATE (mode, var);
19248 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
19249 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
19250 if (one_var != 0)
19251 {
19252 /* We need to shuffle the value to the correct position, so
19253 create a new pseudo to store the intermediate result. */
19254
19255 /* With SSE2, we can use the integer shuffle insns. */
19256 if (mode != V4SFmode && TARGET_SSE2)
19257 {
19258 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
19259 GEN_INT (1),
19260 GEN_INT (one_var == 1 ? 0 : 1),
19261 GEN_INT (one_var == 2 ? 0 : 1),
19262 GEN_INT (one_var == 3 ? 0 : 1)));
19263 if (target != new_target)
19264 emit_move_insn (target, new_target);
19265 return true;
19266 }
19267
19268 /* Otherwise convert the intermediate result to V4SFmode and
19269 use the SSE1 shuffle instructions. */
19270 if (mode != V4SFmode)
19271 {
19272 tmp = gen_reg_rtx (V4SFmode);
19273 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
19274 }
19275 else
19276 tmp = new_target;
19277
19278 emit_insn (gen_sse_shufps_1 (tmp, tmp, tmp,
19279 GEN_INT (1),
19280 GEN_INT (one_var == 1 ? 0 : 1),
19281 GEN_INT (one_var == 2 ? 0+4 : 1+4),
19282 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
19283
19284 if (mode != V4SFmode)
19285 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
19286 else if (tmp != target)
19287 emit_move_insn (target, tmp);
19288 }
19289 else if (target != new_target)
19290 emit_move_insn (target, new_target);
19291 return true;
19292
19293 case V8HImode:
19294 case V16QImode:
19295 vsimode = V4SImode;
19296 goto widen;
19297 case V4HImode:
19298 case V8QImode:
19299 if (!mmx_ok)
19300 return false;
19301 vsimode = V2SImode;
19302 goto widen;
19303 widen:
19304 if (one_var != 0)
19305 return false;
19306
19307 /* Zero extend the variable element to SImode and recurse. */
19308 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
19309
19310 x = gen_reg_rtx (vsimode);
19311 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
19312 var, one_var))
19313 gcc_unreachable ();
19314
19315 emit_move_insn (target, gen_lowpart (mode, x));
19316 return true;
19317
19318 default:
19319 return false;
19320 }
19321 }
19322
19323 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19324 consisting of the values in VALS. It is known that all elements
19325 except ONE_VAR are constants. Return true if successful. */
19326
19327 static bool
19328 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
19329 rtx target, rtx vals, int one_var)
19330 {
19331 rtx var = XVECEXP (vals, 0, one_var);
19332 enum machine_mode wmode;
19333 rtx const_vec, x;
19334
19335 const_vec = copy_rtx (vals);
19336 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
19337 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
19338
19339 switch (mode)
19340 {
19341 case V2DFmode:
19342 case V2DImode:
19343 case V2SFmode:
19344 case V2SImode:
19345 /* For the two element vectors, it's just as easy to use
19346 the general case. */
19347 return false;
19348
19349 case V4SFmode:
19350 case V4SImode:
19351 case V8HImode:
19352 case V4HImode:
19353 break;
19354
19355 case V16QImode:
19356 wmode = V8HImode;
19357 goto widen;
19358 case V8QImode:
19359 wmode = V4HImode;
19360 goto widen;
19361 widen:
19362 /* There's no way to set one QImode entry easily. Combine
19363 the variable value with its adjacent constant value, and
19364 promote to an HImode set. */
19365 x = XVECEXP (vals, 0, one_var ^ 1);
19366 if (one_var & 1)
19367 {
19368 var = convert_modes (HImode, QImode, var, true);
19369 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
19370 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19371 x = GEN_INT (INTVAL (x) & 0xff);
19372 }
19373 else
19374 {
19375 var = convert_modes (HImode, QImode, var, true);
19376 x = gen_int_mode (INTVAL (x) << 8, HImode);
19377 }
19378 if (x != const0_rtx)
19379 var = expand_simple_binop (HImode, IOR, var, x, var,
19380 1, OPTAB_LIB_WIDEN);
19381
19382 x = gen_reg_rtx (wmode);
19383 emit_move_insn (x, gen_lowpart (wmode, const_vec));
19384 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
19385
19386 emit_move_insn (target, gen_lowpart (mode, x));
19387 return true;
19388
19389 default:
19390 return false;
19391 }
19392
19393 emit_move_insn (target, const_vec);
19394 ix86_expand_vector_set (mmx_ok, target, var, one_var);
19395 return true;
19396 }
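
/* Illustrative sketch, kept out of the build: the scalar pairing used by
   the "widen" case above.  A single variable QImode element cannot be set
   directly, so it is combined with its constant neighbour into one HImode
   value, which is then inserted into the half-width vector.  */
#if 0
static unsigned short
pair_qimode_sketch (unsigned char var, unsigned char neighbour, int var_is_odd)
{
  /* Vector elements are laid out little-endian within the HImode pair,
     so an odd element occupies the high byte.  */
  if (var_is_odd)
    return (unsigned short) (((unsigned short) var << 8) | neighbour);
  return (unsigned short) (((unsigned short) neighbour << 8) | var);
}
#endif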
19397
19398 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
19399 all values variable, and none identical. */
19400
19401 static void
19402 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
19403 rtx target, rtx vals)
19404 {
19405 enum machine_mode half_mode = GET_MODE_INNER (mode);
19406 rtx op0 = NULL, op1 = NULL;
19407 bool use_vec_concat = false;
19408
19409 switch (mode)
19410 {
19411 case V2SFmode:
19412 case V2SImode:
19413 if (!mmx_ok && !TARGET_SSE)
19414 break;
19415 /* FALLTHRU */
19416
19417 case V2DFmode:
19418 case V2DImode:
19419 /* For the two element vectors, we always implement VEC_CONCAT. */
19420 op0 = XVECEXP (vals, 0, 0);
19421 op1 = XVECEXP (vals, 0, 1);
19422 use_vec_concat = true;
19423 break;
19424
19425 case V4SFmode:
19426 half_mode = V2SFmode;
19427 goto half;
19428 case V4SImode:
19429 half_mode = V2SImode;
19430 goto half;
19431 half:
19432 {
19433 rtvec v;
19434
19435 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
19436 Recurse to load the two halves. */
19437
19438 op0 = gen_reg_rtx (half_mode);
19439 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
19440 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
19441
19442 op1 = gen_reg_rtx (half_mode);
19443 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
19444 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
19445
19446 use_vec_concat = true;
19447 }
19448 break;
19449
19450 case V8HImode:
19451 case V16QImode:
19452 case V4HImode:
19453 case V8QImode:
19454 break;
19455
19456 default:
19457 gcc_unreachable ();
19458 }
19459
19460 if (use_vec_concat)
19461 {
19462 if (!register_operand (op0, half_mode))
19463 op0 = force_reg (half_mode, op0);
19464 if (!register_operand (op1, half_mode))
19465 op1 = force_reg (half_mode, op1);
19466
19467 emit_insn (gen_rtx_SET (VOIDmode, target,
19468 gen_rtx_VEC_CONCAT (mode, op0, op1)));
19469 }
19470 else
19471 {
19472 int i, j, n_elts, n_words, n_elt_per_word;
19473 enum machine_mode inner_mode;
19474 rtx words[4], shift;
19475
19476 inner_mode = GET_MODE_INNER (mode);
19477 n_elts = GET_MODE_NUNITS (mode);
19478 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
19479 n_elt_per_word = n_elts / n_words;
19480 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
19481
19482 for (i = 0; i < n_words; ++i)
19483 {
19484 rtx word = NULL_RTX;
19485
19486 for (j = 0; j < n_elt_per_word; ++j)
19487 {
19488 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
19489 elt = convert_modes (word_mode, inner_mode, elt, true);
19490
19491 if (j == 0)
19492 word = elt;
19493 else
19494 {
19495 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
19496 word, 1, OPTAB_LIB_WIDEN);
19497 word = expand_simple_binop (word_mode, IOR, word, elt,
19498 word, 1, OPTAB_LIB_WIDEN);
19499 }
19500 }
19501
19502 words[i] = word;
19503 }
19504
19505 if (n_words == 1)
19506 emit_move_insn (target, gen_lowpart (mode, words[0]));
19507 else if (n_words == 2)
19508 {
19509 rtx tmp = gen_reg_rtx (mode);
19510 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
19511 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
19512 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
19513 emit_move_insn (target, tmp);
19514 }
19515 else if (n_words == 4)
19516 {
19517 rtx tmp = gen_reg_rtx (V4SImode);
19518 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
19519 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
19520 emit_move_insn (target, gen_lowpart (mode, tmp));
19521 }
19522 else
19523 gcc_unreachable ();
19524 }
19525 }
19526
19527 /* Initialize vector TARGET via VALS. Suppress the use of MMX
19528 instructions unless MMX_OK is true. */
19529
19530 void
19531 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
19532 {
19533 enum machine_mode mode = GET_MODE (target);
19534 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19535 int n_elts = GET_MODE_NUNITS (mode);
19536 int n_var = 0, one_var = -1;
19537 bool all_same = true, all_const_zero = true;
19538 int i;
19539 rtx x;
19540
19541 for (i = 0; i < n_elts; ++i)
19542 {
19543 x = XVECEXP (vals, 0, i);
19544 if (!CONSTANT_P (x))
19545 n_var++, one_var = i;
19546 else if (x != CONST0_RTX (inner_mode))
19547 all_const_zero = false;
19548 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
19549 all_same = false;
19550 }
19551
19552 /* Constants are best loaded from the constant pool. */
19553 if (n_var == 0)
19554 {
19555 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
19556 return;
19557 }
19558
19559 /* If all values are identical, broadcast the value. */
19560 if (all_same
19561 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
19562 XVECEXP (vals, 0, 0)))
19563 return;
19564
19565 /* Values where only one field is non-constant are best loaded from
19566 the pool and overwritten via move later. */
19567 if (n_var == 1)
19568 {
19569 if (all_const_zero
19570 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
19571 XVECEXP (vals, 0, one_var),
19572 one_var))
19573 return;
19574
19575 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
19576 return;
19577 }
19578
19579 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
19580 }
19581
19582 void
19583 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
19584 {
19585 enum machine_mode mode = GET_MODE (target);
19586 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19587 bool use_vec_merge = false;
19588 rtx tmp;
19589
19590 switch (mode)
19591 {
19592 case V2SFmode:
19593 case V2SImode:
19594 if (mmx_ok)
19595 {
19596 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
19597 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
19598 if (elt == 0)
19599 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
19600 else
19601 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
19602 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19603 return;
19604 }
19605 break;
19606
19607 case V2DFmode:
19608 case V2DImode:
19609 {
19610 rtx op0, op1;
19611
19612 /* For the two element vectors, we implement a VEC_CONCAT with
19613 the extraction of the other element. */
19614
19615 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
19616 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
19617
19618 if (elt == 0)
19619 op0 = val, op1 = tmp;
19620 else
19621 op0 = tmp, op1 = val;
19622
19623 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
19624 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19625 }
19626 return;
19627
19628 case V4SFmode:
19629 switch (elt)
19630 {
19631 case 0:
19632 use_vec_merge = true;
19633 break;
19634
19635 case 1:
19636 /* tmp = target = A B C D */
19637 tmp = copy_to_reg (target);
19638 /* target = A A B B */
19639 emit_insn (gen_sse_unpcklps (target, target, target));
19640 /* target = X A B B */
19641 ix86_expand_vector_set (false, target, val, 0);
19642 /* target = A X C D */
19643 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19644 GEN_INT (1), GEN_INT (0),
19645 GEN_INT (2+4), GEN_INT (3+4)));
19646 return;
19647
19648 case 2:
19649 /* tmp = target = A B C D */
19650 tmp = copy_to_reg (target);
19651 /* tmp = X B C D */
19652 ix86_expand_vector_set (false, tmp, val, 0);
19653 /* target = A B X D */
19654 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19655 GEN_INT (0), GEN_INT (1),
19656 GEN_INT (0+4), GEN_INT (3+4)));
19657 return;
19658
19659 case 3:
19660 /* tmp = target = A B C D */
19661 tmp = copy_to_reg (target);
19662 /* tmp = X B C D */
19663 ix86_expand_vector_set (false, tmp, val, 0);
19664 /* target = A B C X */
19665 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19666 GEN_INT (0), GEN_INT (1),
19667 GEN_INT (2+4), GEN_INT (0+4)));
19668 return;
19669
19670 default:
19671 gcc_unreachable ();
19672 }
19673 break;
19674
19675 case V4SImode:
19676 /* Element 0 handled by vec_merge below. */
19677 if (elt == 0)
19678 {
19679 use_vec_merge = true;
19680 break;
19681 }
19682
19683 if (TARGET_SSE2)
19684 {
19685 /* With SSE2, use integer shuffles to swap element 0 and ELT,
19686 store into element 0, then shuffle them back. */
19687
19688 rtx order[4];
19689
19690 order[0] = GEN_INT (elt);
19691 order[1] = const1_rtx;
19692 order[2] = const2_rtx;
19693 order[3] = GEN_INT (3);
19694 order[elt] = const0_rtx;
19695
19696 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
19697 order[1], order[2], order[3]));
19698
19699 ix86_expand_vector_set (false, target, val, 0);
19700
19701 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
19702 order[1], order[2], order[3]));
19703 }
19704 else
19705 {
19706 /* For SSE1, we have to reuse the V4SF code. */
19707 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
19708 gen_lowpart (SFmode, val), elt);
19709 }
19710 return;
19711
19712 case V8HImode:
19713 use_vec_merge = TARGET_SSE2;
19714 break;
19715 case V4HImode:
19716 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
19717 break;
19718
19719 case V16QImode:
19720 case V8QImode:
19721 default:
19722 break;
19723 }
19724
19725 if (use_vec_merge)
19726 {
19727 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
19728 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
19729 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19730 }
19731 else
19732 {
19733 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
19734
19735 emit_move_insn (mem, target);
19736
19737 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
19738 emit_move_insn (tmp, val);
19739
19740 emit_move_insn (target, mem);
19741 }
19742 }
19743
19744 void
19745 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
19746 {
19747 enum machine_mode mode = GET_MODE (vec);
19748 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19749 bool use_vec_extr = false;
19750 rtx tmp;
19751
19752 switch (mode)
19753 {
19754 case V2SImode:
19755 case V2SFmode:
19756 if (!mmx_ok)
19757 break;
19758 /* FALLTHRU */
19759
19760 case V2DFmode:
19761 case V2DImode:
19762 use_vec_extr = true;
19763 break;
19764
19765 case V4SFmode:
19766 switch (elt)
19767 {
19768 case 0:
19769 tmp = vec;
19770 break;
19771
19772 case 1:
19773 case 3:
19774 tmp = gen_reg_rtx (mode);
19775 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
19776 GEN_INT (elt), GEN_INT (elt),
19777 GEN_INT (elt+4), GEN_INT (elt+4)));
19778 break;
19779
19780 case 2:
19781 tmp = gen_reg_rtx (mode);
19782 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
19783 break;
19784
19785 default:
19786 gcc_unreachable ();
19787 }
19788 vec = tmp;
19789 use_vec_extr = true;
19790 elt = 0;
19791 break;
19792
19793 case V4SImode:
19794 if (TARGET_SSE2)
19795 {
19796 switch (elt)
19797 {
19798 case 0:
19799 tmp = vec;
19800 break;
19801
19802 case 1:
19803 case 3:
19804 tmp = gen_reg_rtx (mode);
19805 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
19806 GEN_INT (elt), GEN_INT (elt),
19807 GEN_INT (elt), GEN_INT (elt)));
19808 break;
19809
19810 case 2:
19811 tmp = gen_reg_rtx (mode);
19812 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
19813 break;
19814
19815 default:
19816 gcc_unreachable ();
19817 }
19818 vec = tmp;
19819 use_vec_extr = true;
19820 elt = 0;
19821 }
19822 else
19823 {
19824 /* For SSE1, we have to reuse the V4SF code. */
19825 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
19826 gen_lowpart (V4SFmode, vec), elt);
19827 return;
19828 }
19829 break;
19830
19831 case V8HImode:
19832 use_vec_extr = TARGET_SSE2;
19833 break;
19834 case V4HImode:
19835 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
19836 break;
19837
19838 case V16QImode:
19839 case V8QImode:
19840 /* ??? Could extract the appropriate HImode element and shift. */
19841 default:
19842 break;
19843 }
19844
19845 if (use_vec_extr)
19846 {
19847 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
19848 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
19849
19850 /* Let the rtl optimizers know about the zero extension performed. */
19851 if (inner_mode == HImode)
19852 {
19853 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
19854 target = gen_lowpart (SImode, target);
19855 }
19856
19857 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19858 }
19859 else
19860 {
19861 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
19862
19863 emit_move_insn (mem, vec);
19864
19865 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
19866 emit_move_insn (target, tmp);
19867 }
19868 }
19869
19870 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
19871 pattern to reduce; DEST is the destination; IN is the input vector. */
19872
19873 void
19874 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
19875 {
19876 rtx tmp1, tmp2, tmp3;
19877
19878 tmp1 = gen_reg_rtx (V4SFmode);
19879 tmp2 = gen_reg_rtx (V4SFmode);
19880 tmp3 = gen_reg_rtx (V4SFmode);
19881
19882 emit_insn (gen_sse_movhlps (tmp1, in, in));
19883 emit_insn (fn (tmp2, tmp1, in));
19884
19885 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
19886 GEN_INT (1), GEN_INT (1),
19887 GEN_INT (1+4), GEN_INT (1+4)));
19888 emit_insn (fn (dest, tmp2, tmp3));
19889 }
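
/* Illustrative sketch, kept out of the build: the combining order of the
   reduction above on a plain float[4], assuming FN is a commutative
   operation such as min, max or plus.  The useful scalar result ends up
   in element 0 of DEST.  */
#if 0
static float
reduc_v4sf_sketch (float (*fn) (float, float), const float in[4])
{
  float a = fn (in[2], in[0]);	/* movhlps pairs elements 2/0 and 3/1 */
  float b = fn (in[3], in[1]);
  return fn (a, b);		/* shufps + FN combines the partial results */
}
#endif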
19890 \f
19891 /* Target hook for scalar_mode_supported_p. */
19892 static bool
19893 ix86_scalar_mode_supported_p (enum machine_mode mode)
19894 {
19895 if (DECIMAL_FLOAT_MODE_P (mode))
19896 return true;
19897 else
19898 return default_scalar_mode_supported_p (mode);
19899 }
19900
19901 /* Implements target hook vector_mode_supported_p. */
19902 static bool
19903 ix86_vector_mode_supported_p (enum machine_mode mode)
19904 {
19905 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
19906 return true;
19907 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
19908 return true;
19909 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
19910 return true;
19911 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
19912 return true;
19913 return false;
19914 }
19915
19916 /* Worker function for TARGET_MD_ASM_CLOBBERS.
19917
19918 We do this in the new i386 backend to maintain source compatibility
19919 with the old cc0-based compiler. */
19920
19921 static tree
19922 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
19923 tree inputs ATTRIBUTE_UNUSED,
19924 tree clobbers)
19925 {
19926 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
19927 clobbers);
19928 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
19929 clobbers);
19930 clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
19931 clobbers);
19932 return clobbers;
19933 }
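
/* Illustrative sketch, kept out of the build: the hook above makes every
   asm statement implicitly clobber the condition flags, the FP status
   word and the direction flag, roughly as if the user had written the
   clobber list explicitly.  The function name is hypothetical.  */
#if 0
void
md_asm_clobbers_example (void)
{
  __asm__ __volatile__ ("cld");	/* treated as if it also listed ... */
  __asm__ __volatile__ ("cld" : : : "flags", "fpsr", "dirflag");
}
#endif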
19934
19935 /* Return true if this goes in large data/bss. */
19936
19937 static bool
19938 ix86_in_large_data_p (tree exp)
19939 {
19940 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
19941 return false;
19942
19943 /* Functions are never large data. */
19944 if (TREE_CODE (exp) == FUNCTION_DECL)
19945 return false;
19946
19947 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
19948 {
19949 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
19950 if (strcmp (section, ".ldata") == 0
19951 || strcmp (section, ".lbss") == 0)
19952 return true;
19953 return false;
19954 }
19955 else
19956 {
19957 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
19958
19959 /* If this is an incomplete type with size 0, then we can't put it
19960 in data because it might be too big when completed. */
19961 if (!size || size > ix86_section_threshold)
19962 return true;
19963 }
19964
19965 return false;
19966 }
19967 static void
19968 ix86_encode_section_info (tree decl, rtx rtl, int first)
19969 {
19970 default_encode_section_info (decl, rtl, first);
19971
19972 if (TREE_CODE (decl) == VAR_DECL
19973 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
19974 && ix86_in_large_data_p (decl))
19975 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
19976 }
19977
19978 /* Worker function for REVERSE_CONDITION. */
19979
19980 enum rtx_code
19981 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
19982 {
19983 return (mode != CCFPmode && mode != CCFPUmode
19984 ? reverse_condition (code)
19985 : reverse_condition_maybe_unordered (code));
19986 }
19987
19988 /* Output code to perform an x87 FP register move, from OPERANDS[1]
19989 to OPERANDS[0]. */
19990
19991 const char *
19992 output_387_reg_move (rtx insn, rtx *operands)
19993 {
19994 if (REG_P (operands[1])
19995 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
19996 {
19997 if (REGNO (operands[0]) == FIRST_STACK_REG)
19998 return output_387_ffreep (operands, 0);
19999 return "fstp\t%y0";
20000 }
20001 if (STACK_TOP_P (operands[0]))
20002 return "fld%z1\t%y1";
20003 return "fst\t%y0";
20004 }
20005
20006 /* Output code to perform a conditional jump to LABEL, if the C2 flag in
20007 the FP status register is set. */
20008
20009 void
20010 ix86_emit_fp_unordered_jump (rtx label)
20011 {
20012 rtx reg = gen_reg_rtx (HImode);
20013 rtx temp;
20014
20015 emit_insn (gen_x86_fnstsw_1 (reg));
20016
20017 if (TARGET_USE_SAHF)
20018 {
20019 emit_insn (gen_x86_sahf_1 (reg));
20020
20021 temp = gen_rtx_REG (CCmode, FLAGS_REG);
20022 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
20023 }
20024 else
20025 {
20026 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
20027
20028 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
20029 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
20030 }
20031
20032 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
20033 gen_rtx_LABEL_REF (VOIDmode, label),
20034 pc_rtx);
20035 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
20036 emit_jump_insn (temp);
20037 }
20038
20039 /* Output code to perform a log1p XFmode calculation. */
20040
20041 void ix86_emit_i387_log1p (rtx op0, rtx op1)
20042 {
20043 rtx label1 = gen_label_rtx ();
20044 rtx label2 = gen_label_rtx ();
20045
20046 rtx tmp = gen_reg_rtx (XFmode);
20047 rtx tmp2 = gen_reg_rtx (XFmode);
20048
20049 emit_insn (gen_absxf2 (tmp, op1));
20050 emit_insn (gen_cmpxf (tmp,
20051 CONST_DOUBLE_FROM_REAL_VALUE (
20052 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
20053 XFmode)));
20054 emit_jump_insn (gen_bge (label1));
20055
20056 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
20057 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
20058 emit_jump (label2);
20059
20060 emit_label (label1);
20061 emit_move_insn (tmp, CONST1_RTX (XFmode));
20062 emit_insn (gen_addxf3 (tmp, op1, tmp));
20063 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
20064 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
20065
20066 emit_label (label2);
20067 }
20068
20069 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
20070
20071 static void
20072 i386_solaris_elf_named_section (const char *name, unsigned int flags,
20073 tree decl)
20074 {
20075 /* With Binutils 2.15, the "@unwind" marker must be specified on
20076 every occurrence of the ".eh_frame" section, not just the first
20077 one. */
20078 if (TARGET_64BIT
20079 && strcmp (name, ".eh_frame") == 0)
20080 {
20081 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
20082 flags & SECTION_WRITE ? "aw" : "a");
20083 return;
20084 }
20085 default_elf_asm_named_section (name, flags, decl);
20086 }
20087
20088 /* Return the mangling of TYPE if it is an extended fundamental type. */
20089
20090 static const char *
20091 ix86_mangle_fundamental_type (tree type)
20092 {
20093 switch (TYPE_MODE (type))
20094 {
20095 case TFmode:
20096 /* __float128 is "g". */
20097 return "g";
20098 case XFmode:
20099 /* "long double" or __float80 is "e". */
20100 return "e";
20101 default:
20102 return NULL;
20103 }
20104 }
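
/* Illustrative sketch, kept out of the build: the effect of the manglings
   returned above under the Itanium C++ ABI, shown on hypothetical C++
   declarations.  */
#if 0
void f (long double);	/* mangles as _Z1fe ("e") */
void f (__float128);	/* mangles as _Z1fg ("g") */
#endif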
20105
20106 /* For 32-bit code we can save PIC register setup by using
20107 the __stack_chk_fail_local hidden function instead of calling
20108 __stack_chk_fail directly.  64-bit code doesn't need to set up any PIC
20109 register, so it is better to call __stack_chk_fail directly. */
20110
20111 static tree
20112 ix86_stack_protect_fail (void)
20113 {
20114 return TARGET_64BIT
20115 ? default_external_stack_protect_fail ()
20116 : default_hidden_stack_protect_fail ();
20117 }
20118
20119 /* Select a format to encode pointers in exception handling data. CODE
20120 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
20121 true if the symbol may be affected by dynamic relocations.
20122
20123 ??? All x86 object file formats are capable of representing this.
20124 After all, the relocation needed is the same as for the call insn.
20125 Whether or not a particular assembler allows us to enter such, I
20126 guess we'll have to see. */
20127 int
20128 asm_preferred_eh_data_format (int code, int global)
20129 {
20130 if (flag_pic)
20131 {
20132 int type = DW_EH_PE_sdata8;
20133 if (!TARGET_64BIT
20134 || ix86_cmodel == CM_SMALL_PIC
20135 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
20136 type = DW_EH_PE_sdata4;
20137 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
20138 }
20139 if (ix86_cmodel == CM_SMALL
20140 || (ix86_cmodel == CM_MEDIUM && code))
20141 return DW_EH_PE_udata4;
20142 return DW_EH_PE_absptr;
20143 }
20144 \f
20145 /* Expand copysign from SIGN to the positive value ABS_VALUE,
20146 storing the result in RESULT.  If MASK is non-null, it shall be a mask
20147 masking out the sign bit. */
20148 static void
20149 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
20150 {
20151 enum machine_mode mode = GET_MODE (sign);
20152 rtx sgn = gen_reg_rtx (mode);
20153 if (mask == NULL_RTX)
20154 {
20155 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
20156 if (!VECTOR_MODE_P (mode))
20157 {
20158 /* We need to generate a scalar mode mask in this case. */
20159 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
20160 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
20161 mask = gen_reg_rtx (mode);
20162 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
20163 }
20164 }
20165 else
20166 mask = gen_rtx_NOT (mode, mask);
20167 emit_insn (gen_rtx_SET (VOIDmode, sgn,
20168 gen_rtx_AND (mode, mask, sign)));
20169 emit_insn (gen_rtx_SET (VOIDmode, result,
20170 gen_rtx_IOR (mode, abs_value, sgn)));
20171 }
20172
20173 /* Expand fabs (OP0) and return a new rtx that holds the result. The
20174 mask for masking out the sign-bit is stored in *SMASK, if that is
20175 non-null. */
20176 static rtx
20177 ix86_expand_sse_fabs (rtx op0, rtx *smask)
20178 {
20179 enum machine_mode mode = GET_MODE (op0);
20180 rtx xa, mask;
20181
20182 xa = gen_reg_rtx (mode);
20183 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
20184 if (!VECTOR_MODE_P (mode))
20185 {
20186 /* We need to generate a scalar mode mask in this case. */
20187 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
20188 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
20189 mask = gen_reg_rtx (mode);
20190 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
20191 }
20192 emit_insn (gen_rtx_SET (VOIDmode, xa,
20193 gen_rtx_AND (mode, op0, mask)));
20194
20195 if (smask)
20196 *smask = mask;
20197
20198 return xa;
20199 }
20200
20201 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
20202 swapping the operands if SWAP_OPERANDS is true. The expanded
20203 code is a forward jump to a newly created label in case the
20204 comparison is true. The generated label rtx is returned. */
20205 static rtx
20206 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
20207 bool swap_operands)
20208 {
20209 rtx label, tmp;
20210
20211 if (swap_operands)
20212 {
20213 tmp = op0;
20214 op0 = op1;
20215 op1 = tmp;
20216 }
20217
20218 label = gen_label_rtx ();
20219 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
20220 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20221 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
20222 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
20223 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
20224 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
20225 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
20226 JUMP_LABEL (tmp) = label;
20227
20228 return label;
20229 }
20230
20231 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
20232 using comparison code CODE. Operands are swapped for the comparison if
20233 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
20234 static rtx
20235 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
20236 bool swap_operands)
20237 {
20238 enum machine_mode mode = GET_MODE (op0);
20239 rtx mask = gen_reg_rtx (mode);
20240
20241 if (swap_operands)
20242 {
20243 rtx tmp = op0;
20244 op0 = op1;
20245 op1 = tmp;
20246 }
20247
20248 if (mode == DFmode)
20249 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
20250 gen_rtx_fmt_ee (code, mode, op0, op1)));
20251 else
20252 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
20253 gen_rtx_fmt_ee (code, mode, op0, op1)));
20254
20255 return mask;
20256 }
20257
20258 /* Generate and return a rtx of mode MODE for 2**n where n is the number
20259 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
20260 static rtx
20261 ix86_gen_TWO52 (enum machine_mode mode)
20262 {
20263 REAL_VALUE_TYPE TWO52r;
20264 rtx TWO52;
20265
20266 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
20267 TWO52 = const_double_from_real_value (TWO52r, mode);
20268 TWO52 = force_reg (mode, TWO52);
20269
20270 return TWO52;
20271 }
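
/* Illustrative sketch, kept out of the build: why adding and subtracting
   2**52 (2**23 for SFmode) rounds to an integer in the expanders below.
   Once the magnitude of the sum reaches 2**52, the mantissa has no bits
   left for a fractional part, so the addition itself rounds in the current
   rounding mode.  Assumes SSE arithmetic (no x87 excess precision) and the
   default round-to-nearest mode.  */
#if 0
static double
round_via_two52_sketch (double x)	/* requires 0.0 <= x < 2**52 */
{
  const double two52 = 4503599627370496.0;	/* 2**52 */
  return (x + two52) - two52;	/* x rounded to the nearest integer */
}
#endif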
20272
20273 /* Expand SSE sequence for computing lround from OP1 storing
20274 into OP0. */
20275 void
20276 ix86_expand_lround (rtx op0, rtx op1)
20277 {
20278 /* C code for the stuff we're doing below:
20279 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
20280 return (long)tmp;
20281 */
20282 enum machine_mode mode = GET_MODE (op1);
20283 const struct real_format *fmt;
20284 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
20285 rtx adj;
20286
20287 /* load nextafter (0.5, 0.0) */
20288 fmt = REAL_MODE_FORMAT (mode);
20289 real_2expN (&half_minus_pred_half, -(fmt->p) - 1);
20290 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
20291
20292 /* adj = copysign (0.5, op1) */
20293 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
20294 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
20295
20296 /* adj = op1 + adj */
20297 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
20298
20299 /* op0 = (imode)adj */
20300 expand_fix (op0, adj, 0);
20301 }
20302
20303 /* Expand SSE2 sequence for computing lfloor or lceil (depending on DO_FLOOR)
20304 from OP1, storing the result into OP0. */
20305 void
20306 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
20307 {
20308 /* C code for the stuff we're doing below (for do_floor):
20309 xi = (long)op1;
20310 xi -= (double)xi > op1 ? 1 : 0;
20311 return xi;
20312 */
20313 enum machine_mode fmode = GET_MODE (op1);
20314 enum machine_mode imode = GET_MODE (op0);
20315 rtx ireg, freg, label, tmp;
20316
20317 /* reg = (long)op1 */
20318 ireg = gen_reg_rtx (imode);
20319 expand_fix (ireg, op1, 0);
20320
20321 /* freg = (double)reg */
20322 freg = gen_reg_rtx (fmode);
20323 expand_float (freg, ireg, 0);
20324
20325 /* ireg = (freg > op1) ? ireg - 1 : ireg */
20326 label = ix86_expand_sse_compare_and_jump (UNLE,
20327 freg, op1, !do_floor);
20328 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
20329 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
20330 emit_move_insn (ireg, tmp);
20331
20332 emit_label (label);
20333 LABEL_NUSES (label) = 1;
20334
20335 emit_move_insn (op0, ireg);
20336 }
20337
20338 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
20339 result in OPERAND0. */
20340 void
20341 ix86_expand_rint (rtx operand0, rtx operand1)
20342 {
20343 /* C code for the stuff we're doing below:
20344 xa = fabs (operand1);
20345 if (!isless (xa, 2**52))
20346 return operand1;
20347 xa = xa + 2**52 - 2**52;
20348 return copysign (xa, operand1);
20349 */
20350 enum machine_mode mode = GET_MODE (operand0);
20351 rtx res, xa, label, TWO52, mask;
20352
20353 res = gen_reg_rtx (mode);
20354 emit_move_insn (res, operand1);
20355
20356 /* xa = abs (operand1) */
20357 xa = ix86_expand_sse_fabs (res, &mask);
20358
20359 /* if (!isless (xa, TWO52)) goto label; */
20360 TWO52 = ix86_gen_TWO52 (mode);
20361 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20362
20363 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20364 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
20365
20366 ix86_sse_copysign_to_positive (res, xa, res, mask);
20367
20368 emit_label (label);
20369 LABEL_NUSES (label) = 1;
20370
20371 emit_move_insn (operand0, res);
20372 }
20373
20374 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
20375 into OPERAND0. */
20376 void
20377 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
20378 {
20379 /* C code for the stuff we expand below.
20380 double xa = fabs (x), x2;
20381 if (!isless (xa, TWO52))
20382 return x;
20383 xa = xa + TWO52 - TWO52;
20384 x2 = copysign (xa, x);
20385 Compensate. Floor:
20386 if (x2 > x)
20387 x2 -= 1;
20388 Compensate. Ceil:
20389 if (x2 < x)
20390 x2 -= -1;
20391 return x2;
20392 */
20393 enum machine_mode mode = GET_MODE (operand0);
20394 rtx xa, TWO52, tmp, label, one, res, mask;
20395
20396 TWO52 = ix86_gen_TWO52 (mode);
20397
20398 /* Temporary for holding the result, initialized to the input
20399 operand to ease control flow. */
20400 res = gen_reg_rtx (mode);
20401 emit_move_insn (res, operand1);
20402
20403 /* xa = abs (operand1) */
20404 xa = ix86_expand_sse_fabs (res, &mask);
20405
20406 /* if (!isless (xa, TWO52)) goto label; */
20407 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20408
20409 /* xa = xa + TWO52 - TWO52; */
20410 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20411 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
20412
20413 /* xa = copysign (xa, operand1) */
20414 ix86_sse_copysign_to_positive (xa, xa, res, mask);
20415
20416 /* generate 1.0 or -1.0 */
20417 one = force_reg (mode,
20418 const_double_from_real_value (do_floor
20419 ? dconst1 : dconstm1, mode));
20420
20421 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
20422 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
20423 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20424 gen_rtx_AND (mode, one, tmp)));
20425 /* We always need to subtract here to preserve signed zero. */
20426 tmp = expand_simple_binop (mode, MINUS,
20427 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20428 emit_move_insn (res, tmp);
20429
20430 emit_label (label);
20431 LABEL_NUSES (label) = 1;
20432
20433 emit_move_insn (operand0, res);
20434 }
20435
20436 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
20437 into OPERAND0. */
20438 void
20439 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
20440 {
20441 /* C code for the stuff we expand below.
20442 double xa = fabs (x), x2;
20443 if (!isless (xa, TWO52))
20444 return x;
20445 x2 = (double)(long)x;
20446 Compensate. Floor:
20447 if (x2 > x)
20448 x2 -= 1;
20449 Compensate. Ceil:
20450 if (x2 < x)
20451 x2 += 1;
20452 if (HONOR_SIGNED_ZEROS (mode))
20453 return copysign (x2, x);
20454 return x2;
20455 */
20456 enum machine_mode mode = GET_MODE (operand0);
20457 rtx xa, xi, TWO52, tmp, label, one, res, mask;
20458
20459 TWO52 = ix86_gen_TWO52 (mode);
20460
20461 /* Temporary for holding the result, initialized to the input
20462 operand to ease control flow. */
20463 res = gen_reg_rtx (mode);
20464 emit_move_insn (res, operand1);
20465
20466 /* xa = abs (operand1) */
20467 xa = ix86_expand_sse_fabs (res, &mask);
20468
20469 /* if (!isless (xa, TWO52)) goto label; */
20470 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20471
20472 /* xa = (double)(long)x */
20473 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20474 expand_fix (xi, res, 0);
20475 expand_float (xa, xi, 0);
20476
20477 /* generate 1.0 */
20478 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
20479
20480 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
20481 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
20482 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20483 gen_rtx_AND (mode, one, tmp)));
20484 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
20485 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20486 emit_move_insn (res, tmp);
20487
20488 if (HONOR_SIGNED_ZEROS (mode))
20489 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
20490
20491 emit_label (label);
20492 LABEL_NUSES (label) = 1;
20493
20494 emit_move_insn (operand0, res);
20495 }
20496
20497 /* Expand SSE sequence for computing round from OPERAND1 storing
20498 into OPERAND0.  This sequence works without relying on DImode truncation
20499 via cvttsd2siq, which is only available on 64-bit targets. */
20500 void
20501 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
20502 {
20503 /* C code for the stuff we expand below.
20504 double xa = fabs (x), xa2, x2;
20505 if (!isless (xa, TWO52))
20506 return x;
20507 Using the absolute value and copying back sign makes
20508 -0.0 -> -0.0 correct.
20509 xa2 = xa + TWO52 - TWO52;
20510 Compensate.
20511 dxa = xa2 - xa;
20512 if (dxa <= -0.5)
20513 xa2 += 1;
20514 else if (dxa > 0.5)
20515 xa2 -= 1;
20516 x2 = copysign (xa2, x);
20517 return x2;
20518 */
20519 enum machine_mode mode = GET_MODE (operand0);
20520 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
20521
20522 TWO52 = ix86_gen_TWO52 (mode);
20523
20524 /* Temporary for holding the result, initialized to the input
20525 operand to ease control flow. */
20526 res = gen_reg_rtx (mode);
20527 emit_move_insn (res, operand1);
20528
20529 /* xa = abs (operand1) */
20530 xa = ix86_expand_sse_fabs (res, &mask);
20531
20532 /* if (!isless (xa, TWO52)) goto label; */
20533 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20534
20535 /* xa2 = xa + TWO52 - TWO52; */
20536 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20537 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
20538
20539 /* dxa = xa2 - xa; */
20540 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
20541
20542 /* generate 0.5, 1.0 and -0.5 */
20543 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
20544 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
20545 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
20546 0, OPTAB_DIRECT);
20547
20548 /* Compensate. */
20549 tmp = gen_reg_rtx (mode);
20550 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
20551 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
20552 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20553 gen_rtx_AND (mode, one, tmp)));
20554 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20555 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
20556 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
20557 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20558 gen_rtx_AND (mode, one, tmp)));
20559 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20560
20561 /* res = copysign (xa2, operand1) */
20562 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
20563
20564 emit_label (label);
20565 LABEL_NUSES (label) = 1;
20566
20567 emit_move_insn (operand0, res);
20568 }
20569
20570 /* Expand SSE sequence for computing trunc from OPERAND1 storing
20571 into OPERAND0. */
20572 void
20573 ix86_expand_trunc (rtx operand0, rtx operand1)
20574 {
20575 /* C code for SSE variant we expand below.
20576 double xa = fabs (x), x2;
20577 if (!isless (xa, TWO52))
20578 return x;
20579 x2 = (double)(long)x;
20580 if (HONOR_SIGNED_ZEROS (mode))
20581 return copysign (x2, x);
20582 return x2;
20583 */
20584 enum machine_mode mode = GET_MODE (operand0);
20585 rtx xa, xi, TWO52, label, res, mask;
20586
20587 TWO52 = ix86_gen_TWO52 (mode);
20588
20589 /* Temporary for holding the result, initialized to the input
20590 operand to ease control flow. */
20591 res = gen_reg_rtx (mode);
20592 emit_move_insn (res, operand1);
20593
20594 /* xa = abs (operand1) */
20595 xa = ix86_expand_sse_fabs (res, &mask);
20596
20597 /* if (!isless (xa, TWO52)) goto label; */
20598 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20599
20600 /* x = (double)(long)x */
20601 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20602 expand_fix (xi, res, 0);
20603 expand_float (res, xi, 0);
20604
20605 if (HONOR_SIGNED_ZEROS (mode))
20606 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
20607
20608 emit_label (label);
20609 LABEL_NUSES (label) = 1;
20610
20611 emit_move_insn (operand0, res);
20612 }
20613
20614 /* Expand SSE sequence for computing trunc from OPERAND1 storing
20615 into OPERAND0, without using DImode truncation (cf. ix86_expand_rounddf_32). */
20616 void
20617 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
20618 {
20619 enum machine_mode mode = GET_MODE (operand0);
20620 rtx xa, mask, TWO52, label, one, res, smask, tmp;
20621
20622 /* C code for SSE variant we expand below.
20623 double xa = fabs (x), x2;
20624 if (!isless (xa, TWO52))
20625 return x;
20626 xa2 = xa + TWO52 - TWO52;
20627 Compensate:
20628 if (xa2 > xa)
20629 xa2 -= 1.0;
20630 x2 = copysign (xa2, x);
20631 return x2;
20632 */
20633
20634 TWO52 = ix86_gen_TWO52 (mode);
20635
20636 /* Temporary for holding the result, initialized to the input
20637 operand to ease control flow. */
20638 res = gen_reg_rtx (mode);
20639 emit_move_insn (res, operand1);
20640
20641 /* xa = abs (operand1) */
20642 xa = ix86_expand_sse_fabs (res, &smask);
20643
20644 /* if (!isless (xa, TWO52)) goto label; */
20645 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20646
20647 /* res = xa + TWO52 - TWO52; */
20648 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20649 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
20650 emit_move_insn (res, tmp);
20651
20652 /* generate 1.0 */
20653 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
20654
20655 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
20656 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
20657 emit_insn (gen_rtx_SET (VOIDmode, mask,
20658 gen_rtx_AND (mode, mask, one)));
20659 tmp = expand_simple_binop (mode, MINUS,
20660 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
20661 emit_move_insn (res, tmp);
20662
20663 /* res = copysign (res, operand1) */
20664 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
20665
20666 emit_label (label);
20667 LABEL_NUSES (label) = 1;
20668
20669 emit_move_insn (operand0, res);
20670 }
20671
20672 /* Expand SSE sequence for computing round from OPERAND1 storing
20673 into OPERAND0. */
20674 void
20675 ix86_expand_round (rtx operand0, rtx operand1)
20676 {
20677 /* C code for the stuff we're doing below:
20678 double xa = fabs (x);
20679 if (!isless (xa, TWO52))
20680 return x;
20681 xa = (double)(long)(xa + nextafter (0.5, 0.0));
20682 return copysign (xa, x);
20683 */
20684 enum machine_mode mode = GET_MODE (operand0);
20685 rtx res, TWO52, xa, label, xi, half, mask;
20686 const struct real_format *fmt;
20687 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
20688
20689 /* Temporary for holding the result, initialized to the input
20690 operand to ease control flow. */
20691 res = gen_reg_rtx (mode);
20692 emit_move_insn (res, operand1);
20693
20694 TWO52 = ix86_gen_TWO52 (mode);
20695 xa = ix86_expand_sse_fabs (res, &mask);
20696 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20697
20698 /* load nextafter (0.5, 0.0) */
20699 fmt = REAL_MODE_FORMAT (mode);
20700 real_2expN (&half_minus_pred_half, -(fmt->p) - 1);
20701 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
20702
20703 /* xa = xa + 0.5 */
20704 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
20705 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
20706
20707 /* xa = (double)(int64_t)xa */
20708 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20709 expand_fix (xi, xa, 0);
20710 expand_float (xa, xi, 0);
20711
20712 /* res = copysign (xa, operand1) */
20713 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
20714
20715 emit_label (label);
20716 LABEL_NUSES (label) = 1;
20717
20718 emit_move_insn (operand0, res);
20719 }
20720
20721 #include "gt-i386.h"