re PR target/30413 (%z produces ICE for char operands)
1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "tree-gimple.h"
51 #include "dwarf2.h"
52 #include "tm-constrs.h"
53 #include "params.h"
54
55 #ifndef CHECK_STACK_LIMIT
56 #define CHECK_STACK_LIMIT (-1)
57 #endif
58
59 /* Return index of given mode in mult and division cost tables. */
60 #define MODE_INDEX(mode) \
61 ((mode) == QImode ? 0 \
62 : (mode) == HImode ? 1 \
63 : (mode) == SImode ? 2 \
64 : (mode) == DImode ? 3 \
65 : 4)
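/* For instance, a multiply-cost lookup can be written roughly as

     ix86_cost->mult_init[MODE_INDEX (mode)]

   (the field name is shown for illustration; see the processor_costs layout
   in i386.h).  Any mode other than QI/HI/SI/DImode falls through to the
   final "other" slot, index 4.  */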
66
67 /* Processor costs (relative to an add) */
68 /* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
69 #define COSTS_N_BYTES(N) ((N) * 2)
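/* Worked example of the scaling above: with COSTS_N_INSNS (N) == (N) * 4, a
   one-insn add costs 4 when optimizing for speed, and with an add being 2
   bytes, COSTS_N_BYTES (2) == 4 when optimizing for size - which is exactly
   the "cost of an add instruction" entry in size_cost below.  The two scales
   therefore measure in comparable units.  */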
70
71 #define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
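/* The memcpy/memset tables at the end of each cost structure pair an
   algorithm for unknown block sizes with an ascending list of
   {max_size, algorithm} steps terminated by a {-1, alg} catch-all.  A minimal
   sketch of how such a table can be consulted, assuming the field layout
   implied by the initializers (helper and field names are illustrative only):

     static enum stringop_alg
     choose_alg (const struct stringop_algs *algs, HOST_WIDE_INT size)
     {
       int i;
       if (size < 0)                      // size not known at compile time
         return algs->unknown_size;
       for (i = 0; algs->size[i].max != -1; i++)
         if (size <= algs->size[i].max)
           return algs->size[i].alg;
       return algs->size[i].alg;          // the {-1, alg} catch-all
     }

   DUMMY_STRINGOP_ALGS always falls back to a library call and fills the slot
   (32-bit or 64-bit) for which a CPU has no dedicated tuning.  */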
72
73 static const
74 struct processor_costs size_cost = { /* costs for tuning for size */
75 COSTS_N_BYTES (2), /* cost of an add instruction */
76 COSTS_N_BYTES (3), /* cost of a lea instruction */
77 COSTS_N_BYTES (2), /* variable shift costs */
78 COSTS_N_BYTES (3), /* constant shift costs */
79 {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
80 COSTS_N_BYTES (3), /* HI */
81 COSTS_N_BYTES (3), /* SI */
82 COSTS_N_BYTES (3), /* DI */
83 COSTS_N_BYTES (5)}, /* other */
84 0, /* cost of multiply per each bit set */
85 {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
86 COSTS_N_BYTES (3), /* HI */
87 COSTS_N_BYTES (3), /* SI */
88 COSTS_N_BYTES (3), /* DI */
89 COSTS_N_BYTES (5)}, /* other */
90 COSTS_N_BYTES (3), /* cost of movsx */
91 COSTS_N_BYTES (3), /* cost of movzx */
92 0, /* "large" insn */
93 2, /* MOVE_RATIO */
94 2, /* cost for loading QImode using movzbl */
95 {2, 2, 2}, /* cost of loading integer registers
96 in QImode, HImode and SImode.
97 Relative to reg-reg move (2). */
98 {2, 2, 2}, /* cost of storing integer registers */
99 2, /* cost of reg,reg fld/fst */
100 {2, 2, 2}, /* cost of loading fp registers
101 in SFmode, DFmode and XFmode */
102 {2, 2, 2}, /* cost of storing fp registers
103 in SFmode, DFmode and XFmode */
104 3, /* cost of moving MMX register */
105 {3, 3}, /* cost of loading MMX registers
106 in SImode and DImode */
107 {3, 3}, /* cost of storing MMX registers
108 in SImode and DImode */
109 3, /* cost of moving SSE register */
110 {3, 3, 3}, /* cost of loading SSE registers
111 in SImode, DImode and TImode */
112 {3, 3, 3}, /* cost of storing SSE registers
113 in SImode, DImode and TImode */
114 3, /* MMX or SSE register to integer */
115 0, /* size of prefetch block */
116 0, /* number of parallel prefetches */
117 2, /* Branch cost */
118 COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
119 COSTS_N_BYTES (2), /* cost of FMUL instruction. */
120 COSTS_N_BYTES (2), /* cost of FDIV instruction. */
121 COSTS_N_BYTES (2), /* cost of FABS instruction. */
122 COSTS_N_BYTES (2), /* cost of FCHS instruction. */
123 COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
124 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
125 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
126 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
127 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}}
128 };
129
130 /* Processor costs (relative to an add) */
131 static const
132 struct processor_costs i386_cost = { /* 386 specific costs */
133 COSTS_N_INSNS (1), /* cost of an add instruction */
134 COSTS_N_INSNS (1), /* cost of a lea instruction */
135 COSTS_N_INSNS (3), /* variable shift costs */
136 COSTS_N_INSNS (2), /* constant shift costs */
137 {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
138 COSTS_N_INSNS (6), /* HI */
139 COSTS_N_INSNS (6), /* SI */
140 COSTS_N_INSNS (6), /* DI */
141 COSTS_N_INSNS (6)}, /* other */
142 COSTS_N_INSNS (1), /* cost of multiply per each bit set */
143 {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
144 COSTS_N_INSNS (23), /* HI */
145 COSTS_N_INSNS (23), /* SI */
146 COSTS_N_INSNS (23), /* DI */
147 COSTS_N_INSNS (23)}, /* other */
148 COSTS_N_INSNS (3), /* cost of movsx */
149 COSTS_N_INSNS (2), /* cost of movzx */
150 15, /* "large" insn */
151 3, /* MOVE_RATIO */
152 4, /* cost for loading QImode using movzbl */
153 {2, 4, 2}, /* cost of loading integer registers
154 in QImode, HImode and SImode.
155 Relative to reg-reg move (2). */
156 {2, 4, 2}, /* cost of storing integer registers */
157 2, /* cost of reg,reg fld/fst */
158 {8, 8, 8}, /* cost of loading fp registers
159 in SFmode, DFmode and XFmode */
160 {8, 8, 8}, /* cost of storing fp registers
161 in SFmode, DFmode and XFmode */
162 2, /* cost of moving MMX register */
163 {4, 8}, /* cost of loading MMX registers
164 in SImode and DImode */
165 {4, 8}, /* cost of storing MMX registers
166 in SImode and DImode */
167 2, /* cost of moving SSE register */
168 {4, 8, 16}, /* cost of loading SSE registers
169 in SImode, DImode and TImode */
170 {4, 8, 16}, /* cost of storing SSE registers
171 in SImode, DImode and TImode */
172 3, /* MMX or SSE register to integer */
173 0, /* size of prefetch block */
174 0, /* number of parallel prefetches */
175 1, /* Branch cost */
176 COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
177 COSTS_N_INSNS (27), /* cost of FMUL instruction. */
178 COSTS_N_INSNS (88), /* cost of FDIV instruction. */
179 COSTS_N_INSNS (22), /* cost of FABS instruction. */
180 COSTS_N_INSNS (24), /* cost of FCHS instruction. */
181 COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
182 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
183 DUMMY_STRINGOP_ALGS},
184 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
185 DUMMY_STRINGOP_ALGS},
186 };
187
188 static const
189 struct processor_costs i486_cost = { /* 486 specific costs */
190 COSTS_N_INSNS (1), /* cost of an add instruction */
191 COSTS_N_INSNS (1), /* cost of a lea instruction */
192 COSTS_N_INSNS (3), /* variable shift costs */
193 COSTS_N_INSNS (2), /* constant shift costs */
194 {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
195 COSTS_N_INSNS (12), /* HI */
196 COSTS_N_INSNS (12), /* SI */
197 COSTS_N_INSNS (12), /* DI */
198 COSTS_N_INSNS (12)}, /* other */
199 1, /* cost of multiply per each bit set */
200 {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
201 COSTS_N_INSNS (40), /* HI */
202 COSTS_N_INSNS (40), /* SI */
203 COSTS_N_INSNS (40), /* DI */
204 COSTS_N_INSNS (40)}, /* other */
205 COSTS_N_INSNS (3), /* cost of movsx */
206 COSTS_N_INSNS (2), /* cost of movzx */
207 15, /* "large" insn */
208 3, /* MOVE_RATIO */
209 4, /* cost for loading QImode using movzbl */
210 {2, 4, 2}, /* cost of loading integer registers
211 in QImode, HImode and SImode.
212 Relative to reg-reg move (2). */
213 {2, 4, 2}, /* cost of storing integer registers */
214 2, /* cost of reg,reg fld/fst */
215 {8, 8, 8}, /* cost of loading fp registers
216 in SFmode, DFmode and XFmode */
217 {8, 8, 8}, /* cost of storing fp registers
218 in SFmode, DFmode and XFmode */
219 2, /* cost of moving MMX register */
220 {4, 8}, /* cost of loading MMX registers
221 in SImode and DImode */
222 {4, 8}, /* cost of storing MMX registers
223 in SImode and DImode */
224 2, /* cost of moving SSE register */
225 {4, 8, 16}, /* cost of loading SSE registers
226 in SImode, DImode and TImode */
227 {4, 8, 16}, /* cost of storing SSE registers
228 in SImode, DImode and TImode */
229 3, /* MMX or SSE register to integer */
230 0, /* size of prefetch block */
231 0, /* number of parallel prefetches */
232 1, /* Branch cost */
233 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
234 COSTS_N_INSNS (16), /* cost of FMUL instruction. */
235 COSTS_N_INSNS (73), /* cost of FDIV instruction. */
236 COSTS_N_INSNS (3), /* cost of FABS instruction. */
237 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
238 COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
239 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
240 DUMMY_STRINGOP_ALGS},
241 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
242 DUMMY_STRINGOP_ALGS}
243 };
244
245 static const
246 struct processor_costs pentium_cost = {
247 COSTS_N_INSNS (1), /* cost of an add instruction */
248 COSTS_N_INSNS (1), /* cost of a lea instruction */
249 COSTS_N_INSNS (4), /* variable shift costs */
250 COSTS_N_INSNS (1), /* constant shift costs */
251 {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
252 COSTS_N_INSNS (11), /* HI */
253 COSTS_N_INSNS (11), /* SI */
254 COSTS_N_INSNS (11), /* DI */
255 COSTS_N_INSNS (11)}, /* other */
256 0, /* cost of multiply per each bit set */
257 {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
258 COSTS_N_INSNS (25), /* HI */
259 COSTS_N_INSNS (25), /* SI */
260 COSTS_N_INSNS (25), /* DI */
261 COSTS_N_INSNS (25)}, /* other */
262 COSTS_N_INSNS (3), /* cost of movsx */
263 COSTS_N_INSNS (2), /* cost of movzx */
264 8, /* "large" insn */
265 6, /* MOVE_RATIO */
266 6, /* cost for loading QImode using movzbl */
267 {2, 4, 2}, /* cost of loading integer registers
268 in QImode, HImode and SImode.
269 Relative to reg-reg move (2). */
270 {2, 4, 2}, /* cost of storing integer registers */
271 2, /* cost of reg,reg fld/fst */
272 {2, 2, 6}, /* cost of loading fp registers
273 in SFmode, DFmode and XFmode */
274 {4, 4, 6}, /* cost of storing fp registers
275 in SFmode, DFmode and XFmode */
276 8, /* cost of moving MMX register */
277 {8, 8}, /* cost of loading MMX registers
278 in SImode and DImode */
279 {8, 8}, /* cost of storing MMX registers
280 in SImode and DImode */
281 2, /* cost of moving SSE register */
282 {4, 8, 16}, /* cost of loading SSE registers
283 in SImode, DImode and TImode */
284 {4, 8, 16}, /* cost of storing SSE registers
285 in SImode, DImode and TImode */
286 3, /* MMX or SSE register to integer */
287 0, /* size of prefetch block */
288 0, /* number of parallel prefetches */
289 2, /* Branch cost */
290 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
291 COSTS_N_INSNS (3), /* cost of FMUL instruction. */
292 COSTS_N_INSNS (39), /* cost of FDIV instruction. */
293 COSTS_N_INSNS (1), /* cost of FABS instruction. */
294 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
295 COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
296 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
297 DUMMY_STRINGOP_ALGS},
298 {{libcall, {{-1, rep_prefix_4_byte}}},
299 DUMMY_STRINGOP_ALGS}
300 };
301
302 static const
303 struct processor_costs pentiumpro_cost = {
304 COSTS_N_INSNS (1), /* cost of an add instruction */
305 COSTS_N_INSNS (1), /* cost of a lea instruction */
306 COSTS_N_INSNS (1), /* variable shift costs */
307 COSTS_N_INSNS (1), /* constant shift costs */
308 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
309 COSTS_N_INSNS (4), /* HI */
310 COSTS_N_INSNS (4), /* SI */
311 COSTS_N_INSNS (4), /* DI */
312 COSTS_N_INSNS (4)}, /* other */
313 0, /* cost of multiply per each bit set */
314 {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
315 COSTS_N_INSNS (17), /* HI */
316 COSTS_N_INSNS (17), /* SI */
317 COSTS_N_INSNS (17), /* DI */
318 COSTS_N_INSNS (17)}, /* other */
319 COSTS_N_INSNS (1), /* cost of movsx */
320 COSTS_N_INSNS (1), /* cost of movzx */
321 8, /* "large" insn */
322 6, /* MOVE_RATIO */
323 2, /* cost for loading QImode using movzbl */
324 {4, 4, 4}, /* cost of loading integer registers
325 in QImode, HImode and SImode.
326 Relative to reg-reg move (2). */
327 {2, 2, 2}, /* cost of storing integer registers */
328 2, /* cost of reg,reg fld/fst */
329 {2, 2, 6}, /* cost of loading fp registers
330 in SFmode, DFmode and XFmode */
331 {4, 4, 6}, /* cost of storing fp registers
332 in SFmode, DFmode and XFmode */
333 2, /* cost of moving MMX register */
334 {2, 2}, /* cost of loading MMX registers
335 in SImode and DImode */
336 {2, 2}, /* cost of storing MMX registers
337 in SImode and DImode */
338 2, /* cost of moving SSE register */
339 {2, 2, 8}, /* cost of loading SSE registers
340 in SImode, DImode and TImode */
341 {2, 2, 8}, /* cost of storing SSE registers
342 in SImode, DImode and TImode */
343 3, /* MMX or SSE register to integer */
344 32, /* size of prefetch block */
345 6, /* number of parallel prefetches */
346 2, /* Branch cost */
347 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
348 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
349 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
350 COSTS_N_INSNS (2), /* cost of FABS instruction. */
351 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
352 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
353 /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes (we ensure
354 the alignment). For small blocks an inline loop is still a noticeable win; for bigger
355 blocks either rep movsl or rep movsb is the way to go. Rep movsb apparently has a
356 more expensive startup time in the CPU, but after 4K the difference is down in the noise.
357 */
358 {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
359 {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
360 DUMMY_STRINGOP_ALGS},
361 {{rep_prefix_4_byte, {{1024, unrolled_loop},
362 {8192, rep_prefix_4_byte}, {-1, libcall}}},
363 DUMMY_STRINGOP_ALGS}
364 };
365
366 static const
367 struct processor_costs geode_cost = {
368 COSTS_N_INSNS (1), /* cost of an add instruction */
369 COSTS_N_INSNS (1), /* cost of a lea instruction */
370 COSTS_N_INSNS (2), /* variable shift costs */
371 COSTS_N_INSNS (1), /* constant shift costs */
372 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
373 COSTS_N_INSNS (4), /* HI */
374 COSTS_N_INSNS (7), /* SI */
375 COSTS_N_INSNS (7), /* DI */
376 COSTS_N_INSNS (7)}, /* other */
377 0, /* cost of multiply per each bit set */
378 {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
379 COSTS_N_INSNS (23), /* HI */
380 COSTS_N_INSNS (39), /* SI */
381 COSTS_N_INSNS (39), /* DI */
382 COSTS_N_INSNS (39)}, /* other */
383 COSTS_N_INSNS (1), /* cost of movsx */
384 COSTS_N_INSNS (1), /* cost of movzx */
385 8, /* "large" insn */
386 4, /* MOVE_RATIO */
387 1, /* cost for loading QImode using movzbl */
388 {1, 1, 1}, /* cost of loading integer registers
389 in QImode, HImode and SImode.
390 Relative to reg-reg move (2). */
391 {1, 1, 1}, /* cost of storing integer registers */
392 1, /* cost of reg,reg fld/fst */
393 {1, 1, 1}, /* cost of loading fp registers
394 in SFmode, DFmode and XFmode */
395 {4, 6, 6}, /* cost of storing fp registers
396 in SFmode, DFmode and XFmode */
397
398 1, /* cost of moving MMX register */
399 {1, 1}, /* cost of loading MMX registers
400 in SImode and DImode */
401 {1, 1}, /* cost of storing MMX registers
402 in SImode and DImode */
403 1, /* cost of moving SSE register */
404 {1, 1, 1}, /* cost of loading SSE registers
405 in SImode, DImode and TImode */
406 {1, 1, 1}, /* cost of storing SSE registers
407 in SImode, DImode and TImode */
408 1, /* MMX or SSE register to integer */
409 32, /* size of prefetch block */
410 1, /* number of parallel prefetches */
411 1, /* Branch cost */
412 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
413 COSTS_N_INSNS (11), /* cost of FMUL instruction. */
414 COSTS_N_INSNS (47), /* cost of FDIV instruction. */
415 COSTS_N_INSNS (1), /* cost of FABS instruction. */
416 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
417 COSTS_N_INSNS (54), /* cost of FSQRT instruction. */
418 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
419 DUMMY_STRINGOP_ALGS},
420 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
421 DUMMY_STRINGOP_ALGS}
422 };
423
424 static const
425 struct processor_costs k6_cost = {
426 COSTS_N_INSNS (1), /* cost of an add instruction */
427 COSTS_N_INSNS (2), /* cost of a lea instruction */
428 COSTS_N_INSNS (1), /* variable shift costs */
429 COSTS_N_INSNS (1), /* constant shift costs */
430 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
431 COSTS_N_INSNS (3), /* HI */
432 COSTS_N_INSNS (3), /* SI */
433 COSTS_N_INSNS (3), /* DI */
434 COSTS_N_INSNS (3)}, /* other */
435 0, /* cost of multiply per each bit set */
436 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
437 COSTS_N_INSNS (18), /* HI */
438 COSTS_N_INSNS (18), /* SI */
439 COSTS_N_INSNS (18), /* DI */
440 COSTS_N_INSNS (18)}, /* other */
441 COSTS_N_INSNS (2), /* cost of movsx */
442 COSTS_N_INSNS (2), /* cost of movzx */
443 8, /* "large" insn */
444 4, /* MOVE_RATIO */
445 3, /* cost for loading QImode using movzbl */
446 {4, 5, 4}, /* cost of loading integer registers
447 in QImode, HImode and SImode.
448 Relative to reg-reg move (2). */
449 {2, 3, 2}, /* cost of storing integer registers */
450 4, /* cost of reg,reg fld/fst */
451 {6, 6, 6}, /* cost of loading fp registers
452 in SFmode, DFmode and XFmode */
453 {4, 4, 4}, /* cost of storing fp registers
454 in SFmode, DFmode and XFmode */
455 2, /* cost of moving MMX register */
456 {2, 2}, /* cost of loading MMX registers
457 in SImode and DImode */
458 {2, 2}, /* cost of storing MMX registers
459 in SImode and DImode */
460 2, /* cost of moving SSE register */
461 {2, 2, 8}, /* cost of loading SSE registers
462 in SImode, DImode and TImode */
463 {2, 2, 8}, /* cost of storing SSE registers
464 in SImode, DImode and TImode */
465 6, /* MMX or SSE register to integer */
466 32, /* size of prefetch block */
467 1, /* number of parallel prefetches */
468 1, /* Branch cost */
469 COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
470 COSTS_N_INSNS (2), /* cost of FMUL instruction. */
471 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
472 COSTS_N_INSNS (2), /* cost of FABS instruction. */
473 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
474 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
475 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
476 DUMMY_STRINGOP_ALGS},
477 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
478 DUMMY_STRINGOP_ALGS}
479 };
480
481 static const
482 struct processor_costs athlon_cost = {
483 COSTS_N_INSNS (1), /* cost of an add instruction */
484 COSTS_N_INSNS (2), /* cost of a lea instruction */
485 COSTS_N_INSNS (1), /* variable shift costs */
486 COSTS_N_INSNS (1), /* constant shift costs */
487 {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
488 COSTS_N_INSNS (5), /* HI */
489 COSTS_N_INSNS (5), /* SI */
490 COSTS_N_INSNS (5), /* DI */
491 COSTS_N_INSNS (5)}, /* other */
492 0, /* cost of multiply per each bit set */
493 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
494 COSTS_N_INSNS (26), /* HI */
495 COSTS_N_INSNS (42), /* SI */
496 COSTS_N_INSNS (74), /* DI */
497 COSTS_N_INSNS (74)}, /* other */
498 COSTS_N_INSNS (1), /* cost of movsx */
499 COSTS_N_INSNS (1), /* cost of movzx */
500 8, /* "large" insn */
501 9, /* MOVE_RATIO */
502 4, /* cost for loading QImode using movzbl */
503 {3, 4, 3}, /* cost of loading integer registers
504 in QImode, HImode and SImode.
505 Relative to reg-reg move (2). */
506 {3, 4, 3}, /* cost of storing integer registers */
507 4, /* cost of reg,reg fld/fst */
508 {4, 4, 12}, /* cost of loading fp registers
509 in SFmode, DFmode and XFmode */
510 {6, 6, 8}, /* cost of storing fp registers
511 in SFmode, DFmode and XFmode */
512 2, /* cost of moving MMX register */
513 {4, 4}, /* cost of loading MMX registers
514 in SImode and DImode */
515 {4, 4}, /* cost of storing MMX registers
516 in SImode and DImode */
517 2, /* cost of moving SSE register */
518 {4, 4, 6}, /* cost of loading SSE registers
519 in SImode, DImode and TImode */
520 {4, 4, 5}, /* cost of storing SSE registers
521 in SImode, DImode and TImode */
522 5, /* MMX or SSE register to integer */
523 64, /* size of prefetch block */
524 6, /* number of parallel prefetches */
525 5, /* Branch cost */
526 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
527 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
528 COSTS_N_INSNS (24), /* cost of FDIV instruction. */
529 COSTS_N_INSNS (2), /* cost of FABS instruction. */
530 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
531 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
532 /* For some reason, Athlon deals better with the REP prefix (relative to loops)
533 than K8 does. Alignment becomes important after 8 bytes for memcpy and
534 128 bytes for memset. */
535 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
536 DUMMY_STRINGOP_ALGS},
537 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
538 DUMMY_STRINGOP_ALGS}
539 };
540
541 static const
542 struct processor_costs k8_cost = {
543 COSTS_N_INSNS (1), /* cost of an add instruction */
544 COSTS_N_INSNS (2), /* cost of a lea instruction */
545 COSTS_N_INSNS (1), /* variable shift costs */
546 COSTS_N_INSNS (1), /* constant shift costs */
547 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
548 COSTS_N_INSNS (4), /* HI */
549 COSTS_N_INSNS (3), /* SI */
550 COSTS_N_INSNS (4), /* DI */
551 COSTS_N_INSNS (5)}, /* other */
552 0, /* cost of multiply per each bit set */
553 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
554 COSTS_N_INSNS (26), /* HI */
555 COSTS_N_INSNS (42), /* SI */
556 COSTS_N_INSNS (74), /* DI */
557 COSTS_N_INSNS (74)}, /* other */
558 COSTS_N_INSNS (1), /* cost of movsx */
559 COSTS_N_INSNS (1), /* cost of movzx */
560 8, /* "large" insn */
561 9, /* MOVE_RATIO */
562 4, /* cost for loading QImode using movzbl */
563 {3, 4, 3}, /* cost of loading integer registers
564 in QImode, HImode and SImode.
565 Relative to reg-reg move (2). */
566 {3, 4, 3}, /* cost of storing integer registers */
567 4, /* cost of reg,reg fld/fst */
568 {4, 4, 12}, /* cost of loading fp registers
569 in SFmode, DFmode and XFmode */
570 {6, 6, 8}, /* cost of storing fp registers
571 in SFmode, DFmode and XFmode */
572 2, /* cost of moving MMX register */
573 {3, 3}, /* cost of loading MMX registers
574 in SImode and DImode */
575 {4, 4}, /* cost of storing MMX registers
576 in SImode and DImode */
577 2, /* cost of moving SSE register */
578 {4, 3, 6}, /* cost of loading SSE registers
579 in SImode, DImode and TImode */
580 {4, 4, 5}, /* cost of storing SSE registers
581 in SImode, DImode and TImode */
582 5, /* MMX or SSE register to integer */
583 64, /* size of prefetch block */
584 /* New AMD processors never drop prefetches; if they cannot be performed
585 immediately, they are queued. We set the number of simultaneous prefetches
586 to a large constant to reflect this (it is probably not a good idea to leave
587 the number of prefetches entirely unlimited, as their execution also takes
588 some time). */
589 100, /* number of parallel prefetches */
590 5, /* Branch cost */
591 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
592 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
593 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
594 COSTS_N_INSNS (2), /* cost of FABS instruction. */
595 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
596 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
597 /* K8 has an optimized REP instruction for medium-sized blocks, but for very small
598 blocks it is better to use a loop. For large blocks, a libcall can do
599 nontemporal accesses and beat inline code considerably. */
600 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
601 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
602 {{libcall, {{8, loop}, {24, unrolled_loop},
603 {2048, rep_prefix_4_byte}, {-1, libcall}}},
604 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}
605 };
606
607 static const
608 struct processor_costs pentium4_cost = {
609 COSTS_N_INSNS (1), /* cost of an add instruction */
610 COSTS_N_INSNS (3), /* cost of a lea instruction */
611 COSTS_N_INSNS (4), /* variable shift costs */
612 COSTS_N_INSNS (4), /* constant shift costs */
613 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
614 COSTS_N_INSNS (15), /* HI */
615 COSTS_N_INSNS (15), /* SI */
616 COSTS_N_INSNS (15), /* DI */
617 COSTS_N_INSNS (15)}, /* other */
618 0, /* cost of multiply per each bit set */
619 {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
620 COSTS_N_INSNS (56), /* HI */
621 COSTS_N_INSNS (56), /* SI */
622 COSTS_N_INSNS (56), /* DI */
623 COSTS_N_INSNS (56)}, /* other */
624 COSTS_N_INSNS (1), /* cost of movsx */
625 COSTS_N_INSNS (1), /* cost of movzx */
626 16, /* "large" insn */
627 6, /* MOVE_RATIO */
628 2, /* cost for loading QImode using movzbl */
629 {4, 5, 4}, /* cost of loading integer registers
630 in QImode, HImode and SImode.
631 Relative to reg-reg move (2). */
632 {2, 3, 2}, /* cost of storing integer registers */
633 2, /* cost of reg,reg fld/fst */
634 {2, 2, 6}, /* cost of loading fp registers
635 in SFmode, DFmode and XFmode */
636 {4, 4, 6}, /* cost of storing fp registers
637 in SFmode, DFmode and XFmode */
638 2, /* cost of moving MMX register */
639 {2, 2}, /* cost of loading MMX registers
640 in SImode and DImode */
641 {2, 2}, /* cost of storing MMX registers
642 in SImode and DImode */
643 12, /* cost of moving SSE register */
644 {12, 12, 12}, /* cost of loading SSE registers
645 in SImode, DImode and TImode */
646 {2, 2, 8}, /* cost of storing SSE registers
647 in SImode, DImode and TImode */
648 10, /* MMX or SSE register to integer */
649 64, /* size of prefetch block */
650 6, /* number of parallel prefetches */
651 2, /* Branch cost */
652 COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
653 COSTS_N_INSNS (7), /* cost of FMUL instruction. */
654 COSTS_N_INSNS (43), /* cost of FDIV instruction. */
655 COSTS_N_INSNS (2), /* cost of FABS instruction. */
656 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
657 COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
658 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
659 DUMMY_STRINGOP_ALGS},
660 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
661 {-1, libcall}}},
662 DUMMY_STRINGOP_ALGS},
663 };
664
665 static const
666 struct processor_costs nocona_cost = {
667 COSTS_N_INSNS (1), /* cost of an add instruction */
668 COSTS_N_INSNS (1), /* cost of a lea instruction */
669 COSTS_N_INSNS (1), /* variable shift costs */
670 COSTS_N_INSNS (1), /* constant shift costs */
671 {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
672 COSTS_N_INSNS (10), /* HI */
673 COSTS_N_INSNS (10), /* SI */
674 COSTS_N_INSNS (10), /* DI */
675 COSTS_N_INSNS (10)}, /* other */
676 0, /* cost of multiply per each bit set */
677 {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
678 COSTS_N_INSNS (66), /* HI */
679 COSTS_N_INSNS (66), /* SI */
680 COSTS_N_INSNS (66), /* DI */
681 COSTS_N_INSNS (66)}, /* other */
682 COSTS_N_INSNS (1), /* cost of movsx */
683 COSTS_N_INSNS (1), /* cost of movzx */
684 16, /* "large" insn */
685 17, /* MOVE_RATIO */
686 4, /* cost for loading QImode using movzbl */
687 {4, 4, 4}, /* cost of loading integer registers
688 in QImode, HImode and SImode.
689 Relative to reg-reg move (2). */
690 {4, 4, 4}, /* cost of storing integer registers */
691 3, /* cost of reg,reg fld/fst */
692 {12, 12, 12}, /* cost of loading fp registers
693 in SFmode, DFmode and XFmode */
694 {4, 4, 4}, /* cost of storing fp registers
695 in SFmode, DFmode and XFmode */
696 6, /* cost of moving MMX register */
697 {12, 12}, /* cost of loading MMX registers
698 in SImode and DImode */
699 {12, 12}, /* cost of storing MMX registers
700 in SImode and DImode */
701 6, /* cost of moving SSE register */
702 {12, 12, 12}, /* cost of loading SSE registers
703 in SImode, DImode and TImode */
704 {12, 12, 12}, /* cost of storing SSE registers
705 in SImode, DImode and TImode */
706 8, /* MMX or SSE register to integer */
707 128, /* size of prefetch block */
708 8, /* number of parallel prefetches */
709 1, /* Branch cost */
710 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
711 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
712 COSTS_N_INSNS (40), /* cost of FDIV instruction. */
713 COSTS_N_INSNS (3), /* cost of FABS instruction. */
714 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
715 COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
716 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
717 {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
718 {100000, unrolled_loop}, {-1, libcall}}}},
719 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
720 {-1, libcall}}},
721 {libcall, {{24, loop}, {64, unrolled_loop},
722 {8192, rep_prefix_8_byte}, {-1, libcall}}}}
723 };
724
725 static const
726 struct processor_costs core2_cost = {
727 COSTS_N_INSNS (1), /* cost of an add instruction */
728 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
729 COSTS_N_INSNS (1), /* variable shift costs */
730 COSTS_N_INSNS (1), /* constant shift costs */
731 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
732 COSTS_N_INSNS (3), /* HI */
733 COSTS_N_INSNS (3), /* SI */
734 COSTS_N_INSNS (3), /* DI */
735 COSTS_N_INSNS (3)}, /* other */
736 0, /* cost of multiply per each bit set */
737 {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
738 COSTS_N_INSNS (22), /* HI */
739 COSTS_N_INSNS (22), /* SI */
740 COSTS_N_INSNS (22), /* DI */
741 COSTS_N_INSNS (22)}, /* other */
742 COSTS_N_INSNS (1), /* cost of movsx */
743 COSTS_N_INSNS (1), /* cost of movzx */
744 8, /* "large" insn */
745 16, /* MOVE_RATIO */
746 2, /* cost for loading QImode using movzbl */
747 {6, 6, 6}, /* cost of loading integer registers
748 in QImode, HImode and SImode.
749 Relative to reg-reg move (2). */
750 {4, 4, 4}, /* cost of storing integer registers */
751 2, /* cost of reg,reg fld/fst */
752 {6, 6, 6}, /* cost of loading fp registers
753 in SFmode, DFmode and XFmode */
754 {4, 4, 4}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
755 2, /* cost of moving MMX register */
756 {6, 6}, /* cost of loading MMX registers
757 in SImode and DImode */
758 {4, 4}, /* cost of storing MMX registers
759 in SImode and DImode */
760 2, /* cost of moving SSE register */
761 {6, 6, 6}, /* cost of loading SSE registers
762 in SImode, DImode and TImode */
763 {4, 4, 4}, /* cost of storing SSE registers
764 in SImode, DImode and TImode */
765 2, /* MMX or SSE register to integer */
766 128, /* size of prefetch block */
767 8, /* number of parallel prefetches */
768 3, /* Branch cost */
769 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
770 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
771 COSTS_N_INSNS (32), /* cost of FDIV instruction. */
772 COSTS_N_INSNS (1), /* cost of FABS instruction. */
773 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
774 COSTS_N_INSNS (58), /* cost of FSQRT instruction. */
775 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
776 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
777 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
778 {{libcall, {{8, loop}, {15, unrolled_loop},
779 {2048, rep_prefix_4_byte}, {-1, libcall}}},
780 {libcall, {{24, loop}, {32, unrolled_loop},
781 {8192, rep_prefix_8_byte}, {-1, libcall}}}}
782 };
783
784 /* Generic64 should produce code tuned for Nocona and K8. */
785 static const
786 struct processor_costs generic64_cost = {
787 COSTS_N_INSNS (1), /* cost of an add instruction */
788 /* On all chips taken into consideration, lea takes 2 cycles or more. With
789 this cost, however, our current implementation of synth_mult results in the
790 use of unnecessary temporary registers, causing regressions on several
791 SPECfp benchmarks. */
792 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
793 COSTS_N_INSNS (1), /* variable shift costs */
794 COSTS_N_INSNS (1), /* constant shift costs */
795 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
796 COSTS_N_INSNS (4), /* HI */
797 COSTS_N_INSNS (3), /* SI */
798 COSTS_N_INSNS (4), /* DI */
799 COSTS_N_INSNS (2)}, /* other */
800 0, /* cost of multiply per each bit set */
801 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
802 COSTS_N_INSNS (26), /* HI */
803 COSTS_N_INSNS (42), /* SI */
804 COSTS_N_INSNS (74), /* DI */
805 COSTS_N_INSNS (74)}, /* other */
806 COSTS_N_INSNS (1), /* cost of movsx */
807 COSTS_N_INSNS (1), /* cost of movzx */
808 8, /* "large" insn */
809 17, /* MOVE_RATIO */
810 4, /* cost for loading QImode using movzbl */
811 {4, 4, 4}, /* cost of loading integer registers
812 in QImode, HImode and SImode.
813 Relative to reg-reg move (2). */
814 {4, 4, 4}, /* cost of storing integer registers */
815 4, /* cost of reg,reg fld/fst */
816 {12, 12, 12}, /* cost of loading fp registers
817 in SFmode, DFmode and XFmode */
818 {6, 6, 8}, /* cost of storing fp registers
819 in SFmode, DFmode and XFmode */
820 2, /* cost of moving MMX register */
821 {8, 8}, /* cost of loading MMX registers
822 in SImode and DImode */
823 {8, 8}, /* cost of storing MMX registers
824 in SImode and DImode */
825 2, /* cost of moving SSE register */
826 {8, 8, 8}, /* cost of loading SSE registers
827 in SImode, DImode and TImode */
828 {8, 8, 8}, /* cost of storing SSE registers
829 in SImode, DImode and TImode */
830 5, /* MMX or SSE register to integer */
831 64, /* size of prefetch block */
832 6, /* number of parallel prefetches */
833 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this value
834 is increased to the perhaps more appropriate value of 5. */
835 3, /* Branch cost */
836 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
837 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
838 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
839 COSTS_N_INSNS (8), /* cost of FABS instruction. */
840 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
841 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
842 {DUMMY_STRINGOP_ALGS,
843 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
844 {DUMMY_STRINGOP_ALGS,
845 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}
846 };
847
848 /* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona and K8. */
849 static const
850 struct processor_costs generic32_cost = {
851 COSTS_N_INSNS (1), /* cost of an add instruction */
852 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
853 COSTS_N_INSNS (1), /* variable shift costs */
854 COSTS_N_INSNS (1), /* constant shift costs */
855 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
856 COSTS_N_INSNS (4), /* HI */
857 COSTS_N_INSNS (3), /* SI */
858 COSTS_N_INSNS (4), /* DI */
859 COSTS_N_INSNS (2)}, /* other */
860 0, /* cost of multiply per each bit set */
861 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
862 COSTS_N_INSNS (26), /* HI */
863 COSTS_N_INSNS (42), /* SI */
864 COSTS_N_INSNS (74), /* DI */
865 COSTS_N_INSNS (74)}, /* other */
866 COSTS_N_INSNS (1), /* cost of movsx */
867 COSTS_N_INSNS (1), /* cost of movzx */
868 8, /* "large" insn */
869 17, /* MOVE_RATIO */
870 4, /* cost for loading QImode using movzbl */
871 {4, 4, 4}, /* cost of loading integer registers
872 in QImode, HImode and SImode.
873 Relative to reg-reg move (2). */
874 {4, 4, 4}, /* cost of storing integer registers */
875 4, /* cost of reg,reg fld/fst */
876 {12, 12, 12}, /* cost of loading fp registers
877 in SFmode, DFmode and XFmode */
878 {6, 6, 8}, /* cost of storing fp registers
879 in SFmode, DFmode and XFmode */
880 2, /* cost of moving MMX register */
881 {8, 8}, /* cost of loading MMX registers
882 in SImode and DImode */
883 {8, 8}, /* cost of storing MMX registers
884 in SImode and DImode */
885 2, /* cost of moving SSE register */
886 {8, 8, 8}, /* cost of loading SSE registers
887 in SImode, DImode and TImode */
888 {8, 8, 8}, /* cost of storing SSE registers
889 in SImode, DImode and TImode */
890 5, /* MMX or SSE register to integer */
891 64, /* size of prefetch block */
892 6, /* number of parallel prefetches */
893 3, /* Branch cost */
894 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
895 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
896 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
897 COSTS_N_INSNS (8), /* cost of FABS instruction. */
898 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
899 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
900 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
901 DUMMY_STRINGOP_ALGS},
902 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
903 DUMMY_STRINGOP_ALGS},
904 };
905
906 const struct processor_costs *ix86_cost = &pentium_cost;
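/* Note: this pointer is re-targeted during option processing to the cost
   table matching the CPU selected with -mtune; the initial &pentium_cost is
   only a default.  The rest of the backend reads tuning data through it, for
   example (illustrative only):

     if (ix86_cost->lea <= ix86_cost->add)
       ... prefer lea where it can replace a simple add ...
 */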
907
908 /* Processor feature/optimization bitmasks. */
909 #define m_386 (1<<PROCESSOR_I386)
910 #define m_486 (1<<PROCESSOR_I486)
911 #define m_PENT (1<<PROCESSOR_PENTIUM)
912 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
913 #define m_GEODE (1<<PROCESSOR_GEODE)
914 #define m_K6_GEODE (m_K6 | m_GEODE)
915 #define m_K6 (1<<PROCESSOR_K6)
916 #define m_ATHLON (1<<PROCESSOR_ATHLON)
917 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
918 #define m_K8 (1<<PROCESSOR_K8)
919 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
920 #define m_NOCONA (1<<PROCESSOR_NOCONA)
921 #define m_CORE2 (1<<PROCESSOR_CORE2)
922 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
923 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
924 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
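/* Each x86_* tuning variable below is a bitmask over the m_* CPU bits above;
   a feature is considered active for the current tuning target when its bit
   for ix86_tune is set.  Schematically (the real tests sit behind TARGET_*
   macros in i386.h):

     if (x86_use_leave & (1 << ix86_tune))
       ... emit "leave" in the epilogue ...
 */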
925
926 /* Generic instruction choice should be a common subset of the supported CPUs
927 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
928
929 /* Leave does not affect Nocona SPEC2000 results negatively, so enabling it for
930 Generic64 seems like a good code-size tradeoff. We can't enable it for 32-bit
931 generic because it does not work well with PPro-based chips. */
932 const int x86_use_leave = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_CORE2 | m_GENERIC64;
933 const int x86_push_memory = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
934 const int x86_zero_extend_with_and = m_486 | m_PENT;
935 const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */;
936 const int x86_double_with_add = ~m_386;
937 const int x86_use_bit_test = m_386;
938 const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6 | m_CORE2 | m_GENERIC;
939 const int x86_cmove = m_PPRO | m_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
940 const int x86_3dnow_a = m_ATHLON_K8;
941 const int x86_deep_branch = m_PPRO | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
942 /* Branch hints were put in P4 based on simulation results. But
943 after P4 was made, no performance benefit was observed with
944 branch hints. They also increase the code size. As a result,
945 icc never generates branch hints. */
946 const int x86_branch_hints = 0;
947 const int x86_use_sahf = m_PPRO | m_K6_GEODE | m_PENT4 | m_NOCONA | m_GENERIC32; /*m_GENERIC | m_ATHLON_K8 ? */
948 /* We probably ought to watch for partial register stalls on the Generic32
949 compilation setting as well. However, in the current implementation
950 partial register stalls are not eliminated very well - they can
951 be introduced via subregs synthesized by combine and can happen
952 in caller/callee saving sequences.
953 Because this option pays back little on PPro-based chips and conflicts
954 with the partial reg. dependencies used by Athlon/P4-based chips, it is better
955 to leave it off for generic32 for now. */
956 const int x86_partial_reg_stall = m_PPRO;
957 const int x86_partial_flag_reg_stall = m_CORE2 | m_GENERIC;
958 const int x86_use_himode_fiop = m_386 | m_486 | m_K6_GEODE;
959 const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT | m_CORE2 | m_GENERIC);
960 const int x86_use_mov0 = m_K6;
961 const int x86_use_cltd = ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC);
962 const int x86_read_modify_write = ~m_PENT;
963 const int x86_read_modify = ~(m_PENT | m_PPRO);
964 const int x86_split_long_moves = m_PPRO;
965 const int x86_promote_QImode = m_K6_GEODE | m_PENT | m_386 | m_486 | m_ATHLON_K8 | m_CORE2 | m_GENERIC; /* m_PENT4 ? */
966 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
967 const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
968 const int x86_qimode_math = ~(0);
969 const int x86_promote_qi_regs = 0;
970 /* On PPro this flag is meant to avoid partial register stalls. Just like
971 x86_partial_reg_stall, this option might be considered for Generic32
972 if our scheme for avoiding partial stalls were more effective. */
973 const int x86_himode_math = ~(m_PPRO);
974 const int x86_promote_hi_regs = m_PPRO;
975 const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
976 const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
977 const int x86_add_esp_4 = m_ATHLON_K8 | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
978 const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6_GEODE | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
979 const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_GEODE);
980 const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
981 const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
982 const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
983 const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
984 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
985 const int x86_shift1 = ~m_486;
986 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
987 /* In the Generic model we have a conflict here between PPro/Pentium4-based chips
988 that treat 128-bit SSE registers as single units and K8-based chips that
989 split SSE registers into two 64-bit halves.
990 x86_sse_partial_reg_dependency promotes all store destinations to 128 bits
991 to allow register renaming on 128-bit SSE units, but usually results in one
992 extra microop on 64-bit SSE units. Experimental results show that disabling
993 this option on P4 brings over a 20% SPECfp regression, while enabling it on
994 K8 brings roughly a 2.4% regression that can be partly masked by careful scheduling
995 of moves. */
996 const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
997 /* Set for machines where the type and dependencies are resolved on SSE
998 register parts instead of whole registers, so we may maintain just the
999 lower part of scalar values in the proper format, leaving the upper part
1000 undefined. */
1001 const int x86_sse_split_regs = m_ATHLON_K8;
1002 const int x86_sse_typeless_stores = m_ATHLON_K8;
1003 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
1004 const int x86_use_ffreep = m_ATHLON_K8;
1005 const int x86_use_incdec = ~(m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC);
1006
1007 /* ??? Allowing interunit moves makes it all too easy for the compiler to put
1008 integer data in xmm registers, which results in pretty abysmal code. */
1009 const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;
1010
1011 const int x86_ext_80387_constants = m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
1012 /* Some CPU cores are not able to predict more than 4 branch instructions in
1013 the 16 byte window. */
1014 const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
1015 const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC;
1016 const int x86_use_bt = m_ATHLON_K8;
1017 /* Compare and exchange was added for 80486. */
1018 const int x86_cmpxchg = ~m_386;
1019 /* Compare and exchange 8 bytes was added for pentium. */
1020 const int x86_cmpxchg8b = ~(m_386 | m_486);
1021 /* Compare and exchange 16 bytes was added for nocona. */
1022 const int x86_cmpxchg16b = m_NOCONA;
1023 /* Exchange and add was added for 80486. */
1024 const int x86_xadd = ~m_386;
1025 /* Byteswap was added for 80486. */
1026 const int x86_bswap = ~m_386;
1027 const int x86_pad_returns = m_ATHLON_K8 | m_CORE2 | m_GENERIC;
1028
1029 static enum stringop_alg stringop_alg = no_stringop;
1030
1031 /* In case the average insn count for a single function invocation is
1032 lower than this constant, emit fast (but longer) prologue and
1033 epilogue code. */
1034 #define FAST_PROLOGUE_INSN_COUNT 20
1035
1036 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
1037 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1038 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1039 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1040
1041 /* Array of the smallest class containing reg number REGNO, indexed by
1042 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1043
1044 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1045 {
1046 /* ax, dx, cx, bx */
1047 AREG, DREG, CREG, BREG,
1048 /* si, di, bp, sp */
1049 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1050 /* FP registers */
1051 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1052 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1053 /* arg pointer */
1054 NON_Q_REGS,
1055 /* flags, fpsr, fpcr, frame */
1056 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1057 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1058 SSE_REGS, SSE_REGS,
1059 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1060 MMX_REGS, MMX_REGS,
1061 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1062 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1063 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1064 SSE_REGS, SSE_REGS,
1065 };
1066
1067 /* The "default" register map used in 32bit mode. */
1068
1069 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1070 {
1071 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1072 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1073 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1074 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1075 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1076 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1077 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1078 };
1079
1080 static int const x86_64_int_parameter_registers[6] =
1081 {
1082 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
1083 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
1084 };
1085
1086 static int const x86_64_int_return_registers[4] =
1087 {
1088 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
1089 };
1090
1091 /* The "default" register map used in 64bit mode. */
1092 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1093 {
1094 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1095 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1096 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1097 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1098 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1099 8,9,10,11,12,13,14,15, /* extended integer registers */
1100 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1101 };
1102
1103 /* Define the register numbers to be used in Dwarf debugging information.
1104 The SVR4 reference port C compiler uses the following register numbers
1105 in its Dwarf output code:
1106 0 for %eax (gcc regno = 0)
1107 1 for %ecx (gcc regno = 2)
1108 2 for %edx (gcc regno = 1)
1109 3 for %ebx (gcc regno = 3)
1110 4 for %esp (gcc regno = 7)
1111 5 for %ebp (gcc regno = 6)
1112 6 for %esi (gcc regno = 4)
1113 7 for %edi (gcc regno = 5)
1114 The following three DWARF register numbers are never generated by
1115 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1116 believes these numbers have these meanings.
1117 8 for %eip (no gcc equivalent)
1118 9 for %eflags (gcc regno = 17)
1119 10 for %trapno (no gcc equivalent)
1120 It is not at all clear how we should number the FP stack registers
1121 for the x86 architecture. If the version of SDB on x86/svr4 were
1122 a bit less brain dead with respect to floating-point then we would
1123 have a precedent to follow with respect to DWARF register numbers
1124 for x86 FP registers, but the SDB on x86/svr4 is so completely
1125 broken with respect to FP registers that it is hardly worth thinking
1126 of it as something to strive for compatibility with.
1127 The version of x86/svr4 SDB I have at the moment does (partially)
1128 seem to believe that DWARF register number 11 is associated with
1129 the x86 register %st(0), but that's about all. Higher DWARF
1130 register numbers don't seem to be associated with anything in
1131 particular, and even for DWARF regno 11, SDB only seems to under-
1132 stand that it should say that a variable lives in %st(0) (when
1133 asked via an `=' command) if we said it was in DWARF regno 11,
1134 but SDB still prints garbage when asked for the value of the
1135 variable in question (via a `/' command).
1136 (Also note that the labels SDB prints for various FP stack regs
1137 when doing an `x' command are all wrong.)
1138 Note that these problems generally don't affect the native SVR4
1139 C compiler because it doesn't allow the use of -O with -g and
1140 because when it is *not* optimizing, it allocates a memory
1141 location for each floating-point variable, and the memory
1142 location is what gets described in the DWARF AT_location
1143 attribute for the variable in question.
1144 Regardless of the severe mental illness of the x86/svr4 SDB, we
1145 do something sensible here and we use the following DWARF
1146 register numbers. Note that these are all stack-top-relative
1147 numbers.
1148 11 for %st(0) (gcc regno = 8)
1149 12 for %st(1) (gcc regno = 9)
1150 13 for %st(2) (gcc regno = 10)
1151 14 for %st(3) (gcc regno = 11)
1152 15 for %st(4) (gcc regno = 12)
1153 16 for %st(5) (gcc regno = 13)
1154 17 for %st(6) (gcc regno = 14)
1155 18 for %st(7) (gcc regno = 15)
1156 */
1157 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1158 {
1159 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1160 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1161 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1162 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1163 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1164 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1165 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1166 };
1167
1168 /* Test and compare insns in i386.md store the information needed to
1169 generate branch and scc insns here. */
1170
1171 rtx ix86_compare_op0 = NULL_RTX;
1172 rtx ix86_compare_op1 = NULL_RTX;
1173 rtx ix86_compare_emitted = NULL_RTX;
1174
1175 /* Size of the register save area. */
1176 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
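/* For the usual x86-64 ABI parameters (REGPARM_MAX == 6 integer registers of
   UNITS_PER_WORD == 8 bytes and SSE_REGPARM_MAX == 8 vector registers of 16
   bytes) this works out to 6*8 + 8*16 = 176 bytes, the size of the register
   save area that the varargs prologue spills into.  The figures are quoted
   for illustration; the macros themselves are defined elsewhere in the port.  */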
1177
1178 /* Define the structure for the machine field in struct function. */
1179
1180 struct stack_local_entry GTY(())
1181 {
1182 unsigned short mode;
1183 unsigned short n;
1184 rtx rtl;
1185 struct stack_local_entry *next;
1186 };
1187
1188 /* Structure describing stack frame layout.
1189 Stack grows downward:
1190
1191 [arguments]
1192 <- ARG_POINTER
1193 saved pc
1194
1195 saved frame pointer if frame_pointer_needed
1196 <- HARD_FRAME_POINTER
1197 [saved regs]
1198
1199 [padding1] \
1200 )
1201 [va_arg registers] (
1202 > to_allocate <- FRAME_POINTER
1203 [frame] (
1204 )
1205 [padding2] /
1206 */
1207 struct ix86_frame
1208 {
1209 int nregs;
1210 int padding1;
1211 int va_arg_size;
1212 HOST_WIDE_INT frame;
1213 int padding2;
1214 int outgoing_arguments_size;
1215 int red_zone_size;
1216
1217 HOST_WIDE_INT to_allocate;
1218 /* The offsets relative to ARG_POINTER. */
1219 HOST_WIDE_INT frame_pointer_offset;
1220 HOST_WIDE_INT hard_frame_pointer_offset;
1221 HOST_WIDE_INT stack_pointer_offset;
1222
1223 /* When save_regs_using_mov is set, emit prologue using
1224 move instead of push instructions. */
1225 bool save_regs_using_mov;
1226 };
1227
1228 /* Code model option. */
1229 enum cmodel ix86_cmodel;
1230 /* Asm dialect. */
1231 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1232 /* TLS dialects. */
1233 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1234
1235 /* Which unit we are generating floating point math for. */
1236 enum fpmath_unit ix86_fpmath;
1237
1238 /* Which cpu are we scheduling for. */
1239 enum processor_type ix86_tune;
1240 /* Which instruction set architecture to use. */
1241 enum processor_type ix86_arch;
1242
1243 /* true if sse prefetch instruction is not NOOP. */
1244 int x86_prefetch_sse;
1245
1246 /* ix86_regparm_string as a number */
1247 static int ix86_regparm;
1248
1249 /* -mstackrealign option */
1250 extern int ix86_force_align_arg_pointer;
1251 static const char ix86_force_align_arg_pointer_string[] = "force_align_arg_pointer";
1252
1253 /* Preferred alignment for stack boundary in bits. */
1254 unsigned int ix86_preferred_stack_boundary;
1255
1256 /* Values 1-5: see jump.c */
1257 int ix86_branch_cost;
1258
1259 /* Variables which are this size or smaller are put in the data/bss
1260 or ldata/lbss sections. */
1261
1262 int ix86_section_threshold = 65536;
1263
1264 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1265 char internal_label_prefix[16];
1266 int internal_label_prefix_len;
1267 \f
1268 static bool ix86_handle_option (size_t, const char *, int);
1269 static void output_pic_addr_const (FILE *, rtx, int);
1270 static void put_condition_code (enum rtx_code, enum machine_mode,
1271 int, int, FILE *);
1272 static const char *get_some_local_dynamic_name (void);
1273 static int get_some_local_dynamic_name_1 (rtx *, void *);
1274 static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
1275 static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
1276 rtx *);
1277 static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
1278 static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
1279 enum machine_mode);
1280 static rtx get_thread_pointer (int);
1281 static rtx legitimize_tls_address (rtx, enum tls_model, int);
1282 static void get_pc_thunk_name (char [32], unsigned int);
1283 static rtx gen_push (rtx);
1284 static int ix86_flags_dependent (rtx, rtx, enum attr_type);
1285 static int ix86_agi_dependent (rtx, rtx, enum attr_type);
1286 static struct machine_function * ix86_init_machine_status (void);
1287 static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
1288 static int ix86_nsaved_regs (void);
1289 static void ix86_emit_save_regs (void);
1290 static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
1291 static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
1292 static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
1293 static HOST_WIDE_INT ix86_GOT_alias_set (void);
1294 static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
1295 static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
1296 static int ix86_issue_rate (void);
1297 static int ix86_adjust_cost (rtx, rtx, rtx, int);
1298 static int ia32_multipass_dfa_lookahead (void);
1299 static void ix86_init_mmx_sse_builtins (void);
1300 static rtx x86_this_parameter (tree);
1301 static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
1302 HOST_WIDE_INT, tree);
1303 static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
1304 static void x86_file_start (void);
1305 static void ix86_reorg (void);
1306 static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
1307 static tree ix86_build_builtin_va_list (void);
1308 static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
1309 tree, int *, int);
1310 static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
1311 static bool ix86_scalar_mode_supported_p (enum machine_mode);
1312 static bool ix86_vector_mode_supported_p (enum machine_mode);
1313
1314 static int ix86_address_cost (rtx);
1315 static bool ix86_cannot_force_const_mem (rtx);
1316 static rtx ix86_delegitimize_address (rtx);
1317
1318 static void i386_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
1319
1320 struct builtin_description;
1321 static rtx ix86_expand_sse_comi (const struct builtin_description *,
1322 tree, rtx);
1323 static rtx ix86_expand_sse_compare (const struct builtin_description *,
1324 tree, rtx);
1325 static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
1326 static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
1327 static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
1328 static rtx ix86_expand_store_builtin (enum insn_code, tree);
1329 static rtx safe_vector_operand (rtx, enum machine_mode);
1330 static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
1331 static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
1332 static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
1333 static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
1334 static int ix86_fp_comparison_cost (enum rtx_code code);
1335 static unsigned int ix86_select_alt_pic_regnum (void);
1336 static int ix86_save_reg (unsigned int, int);
1337 static void ix86_compute_frame_layout (struct ix86_frame *);
1338 static int ix86_comp_type_attributes (tree, tree);
1339 static int ix86_function_regparm (tree, tree);
1340 const struct attribute_spec ix86_attribute_table[];
1341 static bool ix86_function_ok_for_sibcall (tree, tree);
1342 static tree ix86_handle_cconv_attribute (tree *, tree, tree, int, bool *);
1343 static int ix86_value_regno (enum machine_mode, tree, tree);
1344 static bool contains_128bit_aligned_vector_p (tree);
1345 static rtx ix86_struct_value_rtx (tree, int);
1346 static bool ix86_ms_bitfield_layout_p (tree);
1347 static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
1348 static int extended_reg_mentioned_1 (rtx *, void *);
1349 static bool ix86_rtx_costs (rtx, int, int, int *);
1350 static int min_insn_size (rtx);
1351 static tree ix86_md_asm_clobbers (tree outputs, tree inputs, tree clobbers);
1352 static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
1353 static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
1354 tree, bool);
1355 static void ix86_init_builtins (void);
1356 static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
1357 static tree ix86_builtin_vectorized_function (enum built_in_function, tree);
1358 static const char *ix86_mangle_fundamental_type (tree);
1359 static tree ix86_stack_protect_fail (void);
1360 static rtx ix86_internal_arg_pointer (void);
1361 static void ix86_dwarf_handle_frame_unspec (const char *, rtx, int);
1362
1363 /* This function is only used on Solaris. */
1364 static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
1365 ATTRIBUTE_UNUSED;
1366
1367 /* Register class used for passing a given 64-bit part of the argument.
1368 These represent classes as documented by the psABI, with the exception
1369 of the SSESF and SSEDF classes, which are basically the SSE class; gcc just
1370 uses an SFmode or DFmode move instead of DImode to avoid reformatting penalties.
1371
1372 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
1373 whenever possible (i.e. when the upper half is merely padding).
1374 */
1375 enum x86_64_reg_class
1376 {
1377 X86_64_NO_CLASS,
1378 X86_64_INTEGER_CLASS,
1379 X86_64_INTEGERSI_CLASS,
1380 X86_64_SSE_CLASS,
1381 X86_64_SSESF_CLASS,
1382 X86_64_SSEDF_CLASS,
1383 X86_64_SSEUP_CLASS,
1384 X86_64_X87_CLASS,
1385 X86_64_X87UP_CLASS,
1386 X86_64_COMPLEX_X87_CLASS,
1387 X86_64_MEMORY_CLASS
1388 };
1389 static const char * const x86_64_reg_class_name[] = {
1390 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
1391 "sseup", "x87", "x87up", "cplx87", "no"
1392 };
1393
1394 #define MAX_CLASSES 4
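/* For illustration (an assumed example, not code exercised here): under this
   classification a 16-byte aggregate such as

       struct s { double d; int i; };

   is split into two eightbytes, the first classified as X86_64_SSEDF_CLASS
   (the double, passed in an SSE register with a DFmode move) and the second
   as X86_64_INTEGERSI_CLASS (the int plus four bytes of padding, passed in a
   general register with an SImode move).  */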
1395
1396 /* Table of constants used by fldpi, fldln2, etc. */
1397 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1398 static bool ext_80387_constants_init = 0;
1399 static void init_ext_80387_constants (void);
1400 static bool ix86_in_large_data_p (tree) ATTRIBUTE_UNUSED;
1401 static void ix86_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
1402 static void x86_64_elf_unique_section (tree decl, int reloc) ATTRIBUTE_UNUSED;
1403 static section *x86_64_elf_select_section (tree decl, int reloc,
1404 unsigned HOST_WIDE_INT align)
1405 ATTRIBUTE_UNUSED;
1406 \f
1407 /* Initialize the GCC target structure. */
1408 #undef TARGET_ATTRIBUTE_TABLE
1409 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
1410 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1411 # undef TARGET_MERGE_DECL_ATTRIBUTES
1412 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
1413 #endif
1414
1415 #undef TARGET_COMP_TYPE_ATTRIBUTES
1416 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
1417
1418 #undef TARGET_INIT_BUILTINS
1419 #define TARGET_INIT_BUILTINS ix86_init_builtins
1420 #undef TARGET_EXPAND_BUILTIN
1421 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
1422 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1423 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION ix86_builtin_vectorized_function
1424
1425 #undef TARGET_ASM_FUNCTION_EPILOGUE
1426 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
1427
1428 #undef TARGET_ENCODE_SECTION_INFO
1429 #ifndef SUBTARGET_ENCODE_SECTION_INFO
1430 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
1431 #else
1432 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
1433 #endif
1434
1435 #undef TARGET_ASM_OPEN_PAREN
1436 #define TARGET_ASM_OPEN_PAREN ""
1437 #undef TARGET_ASM_CLOSE_PAREN
1438 #define TARGET_ASM_CLOSE_PAREN ""
1439
1440 #undef TARGET_ASM_ALIGNED_HI_OP
1441 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
1442 #undef TARGET_ASM_ALIGNED_SI_OP
1443 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
1444 #ifdef ASM_QUAD
1445 #undef TARGET_ASM_ALIGNED_DI_OP
1446 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
1447 #endif
1448
1449 #undef TARGET_ASM_UNALIGNED_HI_OP
1450 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
1451 #undef TARGET_ASM_UNALIGNED_SI_OP
1452 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
1453 #undef TARGET_ASM_UNALIGNED_DI_OP
1454 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
1455
1456 #undef TARGET_SCHED_ADJUST_COST
1457 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
1458 #undef TARGET_SCHED_ISSUE_RATE
1459 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
1460 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1461 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1462 ia32_multipass_dfa_lookahead
1463
1464 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1465 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
1466
1467 #ifdef HAVE_AS_TLS
1468 #undef TARGET_HAVE_TLS
1469 #define TARGET_HAVE_TLS true
1470 #endif
1471 #undef TARGET_CANNOT_FORCE_CONST_MEM
1472 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1473 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1474 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_rtx_true
1475
1476 #undef TARGET_DELEGITIMIZE_ADDRESS
1477 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1478
1479 #undef TARGET_MS_BITFIELD_LAYOUT_P
1480 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1481
1482 #if TARGET_MACHO
1483 #undef TARGET_BINDS_LOCAL_P
1484 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1485 #endif
1486
1487 #undef TARGET_ASM_OUTPUT_MI_THUNK
1488 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1489 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1490 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1491
1492 #undef TARGET_ASM_FILE_START
1493 #define TARGET_ASM_FILE_START x86_file_start
1494
1495 #undef TARGET_DEFAULT_TARGET_FLAGS
1496 #define TARGET_DEFAULT_TARGET_FLAGS \
1497 (TARGET_DEFAULT \
1498 | TARGET_64BIT_DEFAULT \
1499 | TARGET_SUBTARGET_DEFAULT \
1500 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
1501
1502 #undef TARGET_HANDLE_OPTION
1503 #define TARGET_HANDLE_OPTION ix86_handle_option
1504
1505 #undef TARGET_RTX_COSTS
1506 #define TARGET_RTX_COSTS ix86_rtx_costs
1507 #undef TARGET_ADDRESS_COST
1508 #define TARGET_ADDRESS_COST ix86_address_cost
1509
1510 #undef TARGET_FIXED_CONDITION_CODE_REGS
1511 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1512 #undef TARGET_CC_MODES_COMPATIBLE
1513 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1514
1515 #undef TARGET_MACHINE_DEPENDENT_REORG
1516 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1517
1518 #undef TARGET_BUILD_BUILTIN_VA_LIST
1519 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1520
1521 #undef TARGET_MD_ASM_CLOBBERS
1522 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1523
1524 #undef TARGET_PROMOTE_PROTOTYPES
1525 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1526 #undef TARGET_STRUCT_VALUE_RTX
1527 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1528 #undef TARGET_SETUP_INCOMING_VARARGS
1529 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1530 #undef TARGET_MUST_PASS_IN_STACK
1531 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1532 #undef TARGET_PASS_BY_REFERENCE
1533 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1534 #undef TARGET_INTERNAL_ARG_POINTER
1535 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
1536 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
1537 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
1538
1539 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1540 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1541
1542 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1543 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
1544
1545 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1546 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1547
1548 #ifdef HAVE_AS_TLS
1549 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1550 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
1551 #endif
1552
1553 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1554 #undef TARGET_INSERT_ATTRIBUTES
1555 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1556 #endif
1557
1558 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
1559 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ix86_mangle_fundamental_type
1560
1561 #undef TARGET_STACK_PROTECT_FAIL
1562 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
1563
1564 #undef TARGET_FUNCTION_VALUE
1565 #define TARGET_FUNCTION_VALUE ix86_function_value
1566
1567 struct gcc_target targetm = TARGET_INITIALIZER;
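/* The hooks above all follow one pattern: each #undef/#define pair replaces a
   single field of TARGET_INITIALIZER before targetm is instantiated.  As a
   rough sketch only (TARGET_SOME_HOOK and ix86_new_hook are hypothetical
   names, not defined in this file), adding another hook would look like:

       #undef TARGET_SOME_HOOK
       #define TARGET_SOME_HOOK ix86_new_hook

   placed anywhere before the targetm definition above.  */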
1568
1569 \f
1570 /* The svr4 ABI for the i386 says that records and unions are returned
1571 in memory. */
1572 #ifndef DEFAULT_PCC_STRUCT_RETURN
1573 #define DEFAULT_PCC_STRUCT_RETURN 1
1574 #endif
1575
1576 /* Implement TARGET_HANDLE_OPTION. */
1577
1578 static bool
1579 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1580 {
1581 switch (code)
1582 {
1583 case OPT_m3dnow:
1584 if (!value)
1585 {
1586 target_flags &= ~MASK_3DNOW_A;
1587 target_flags_explicit |= MASK_3DNOW_A;
1588 }
1589 return true;
1590
1591 case OPT_mmmx:
1592 if (!value)
1593 {
1594 target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
1595 target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
1596 }
1597 return true;
1598
1599 case OPT_msse:
1600 if (!value)
1601 {
1602 target_flags &= ~(MASK_SSE2 | MASK_SSE3);
1603 target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
1604 }
1605 return true;
1606
1607 case OPT_msse2:
1608 if (!value)
1609 {
1610 target_flags &= ~MASK_SSE3;
1611 target_flags_explicit |= MASK_SSE3;
1612 }
1613 return true;
1614
1615 default:
1616 return true;
1617 }
1618 }
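/* For example (behaviour implied by the handler above, shown only as an
   illustration): "-mno-sse" not only clears MASK_SSE through the normal .opt
   machinery but also clears MASK_SSE2 and MASK_SSE3 and records them in
   target_flags_explicit, so the -march implications in override_options below
   will not silently turn them back on.  Likewise "-mno-mmx" also drops the
   3DNow! flags.  */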
1619
1620 /* Sometimes certain combinations of command options do not make
1621 sense on a particular target machine. You can define a macro
1622 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1623 defined, is executed once just after all the command options have
1624 been parsed.
1625
1626 Don't use this macro to turn on various extra optimizations for
1627 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1628
1629 void
1630 override_options (void)
1631 {
1632 int i;
1633 int ix86_tune_defaulted = 0;
1634
1635 /* Comes from final.c -- no real reason to change it. */
1636 #define MAX_CODE_ALIGN 16
1637
1638 static struct ptt
1639 {
1640 const struct processor_costs *cost; /* Processor costs */
1641 const int target_enable; /* Target flags to enable. */
1642 const int target_disable; /* Target flags to disable. */
1643 const int align_loop; /* Default alignments. */
1644 const int align_loop_max_skip;
1645 const int align_jump;
1646 const int align_jump_max_skip;
1647 const int align_func;
1648 }
1649 const processor_target_table[PROCESSOR_max] =
1650 {
1651 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1652 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1653 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1654 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1655 {&geode_cost, 0, 0, 0, 0, 0, 0, 0},
1656 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1657 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1658 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1659 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1660 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0},
1661 {&core2_cost, 0, 0, 16, 7, 16, 7, 16},
1662 {&generic32_cost, 0, 0, 16, 7, 16, 7, 16},
1663 {&generic64_cost, 0, 0, 16, 7, 16, 7, 16}
1664 };
1665
1666 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1667 static struct pta
1668 {
1669 const char *const name; /* processor name or nickname. */
1670 const enum processor_type processor;
1671 const enum pta_flags
1672 {
1673 PTA_SSE = 1,
1674 PTA_SSE2 = 2,
1675 PTA_SSE3 = 4,
1676 PTA_MMX = 8,
1677 PTA_PREFETCH_SSE = 16,
1678 PTA_3DNOW = 32,
1679 PTA_3DNOW_A = 64,
1680 PTA_64BIT = 128,
1681 PTA_SSSE3 = 256
1682 } flags;
1683 }
1684 const processor_alias_table[] =
1685 {
1686 {"i386", PROCESSOR_I386, 0},
1687 {"i486", PROCESSOR_I486, 0},
1688 {"i586", PROCESSOR_PENTIUM, 0},
1689 {"pentium", PROCESSOR_PENTIUM, 0},
1690 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1691 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1692 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1693 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1694 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1695 {"i686", PROCESSOR_PENTIUMPRO, 0},
1696 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1697 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1698 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1699 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1700 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1701 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1702 | PTA_MMX | PTA_PREFETCH_SSE},
1703 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1704 | PTA_MMX | PTA_PREFETCH_SSE},
1705 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1706 | PTA_MMX | PTA_PREFETCH_SSE},
1707 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1708 | PTA_MMX | PTA_PREFETCH_SSE},
1709 {"core2", PROCESSOR_CORE2, PTA_SSE | PTA_SSE2 | PTA_SSE3
1710 | PTA_64BIT | PTA_MMX
1711 | PTA_PREFETCH_SSE},
1712 {"geode", PROCESSOR_GEODE, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1713 | PTA_3DNOW_A},
1714 {"k6", PROCESSOR_K6, PTA_MMX},
1715 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1716 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1717 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1718 | PTA_3DNOW_A},
1719 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1720 | PTA_3DNOW | PTA_3DNOW_A},
1721 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1722 | PTA_3DNOW_A | PTA_SSE},
1723 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1724 | PTA_3DNOW_A | PTA_SSE},
1725 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1726 | PTA_3DNOW_A | PTA_SSE},
1727 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1728 | PTA_SSE | PTA_SSE2 },
1729 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1730 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1731 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1732 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1733 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1734 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1735 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1736 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1737 {"generic32", PROCESSOR_GENERIC32, 0 /* flags are only used for -march switch. */ },
1738 {"generic64", PROCESSOR_GENERIC64, PTA_64BIT /* flags are only used for -march switch. */ },
1739 };
1740
1741 int const pta_size = ARRAY_SIZE (processor_alias_table);
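/* To illustrate how the table is consumed below (example only): with
   "-m32 -march=pentium3" the matching entry carries PTA_MMX, PTA_SSE and
   PTA_PREFETCH_SSE, so MASK_MMX and MASK_SSE are enabled (unless the user set
   them explicitly) and x86_prefetch_sse becomes true; PTA_64BIT is absent, so
   combining that -march value with -m64 is rejected with an error.  */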
1742
1743 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1744 SUBTARGET_OVERRIDE_OPTIONS;
1745 #endif
1746
1747 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
1748 SUBSUBTARGET_OVERRIDE_OPTIONS;
1749 #endif
1750
1751 /* -fPIC is the default for 64-bit Mach-O (Darwin on x86_64). */
1752 if (TARGET_MACHO && TARGET_64BIT)
1753 flag_pic = 2;
1754
1755 /* Set the default values for switches whose default depends on TARGET_64BIT
1756 in case they weren't overridden by command line options. */
1757 if (TARGET_64BIT)
1758 {
1759 /* Mach-O doesn't support omitting the frame pointer for now. */
1760 if (flag_omit_frame_pointer == 2)
1761 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
1762 if (flag_asynchronous_unwind_tables == 2)
1763 flag_asynchronous_unwind_tables = 1;
1764 if (flag_pcc_struct_return == 2)
1765 flag_pcc_struct_return = 0;
1766 }
1767 else
1768 {
1769 if (flag_omit_frame_pointer == 2)
1770 flag_omit_frame_pointer = 0;
1771 if (flag_asynchronous_unwind_tables == 2)
1772 flag_asynchronous_unwind_tables = 0;
1773 if (flag_pcc_struct_return == 2)
1774 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1775 }
1776
1777 /* Need to check -mtune=generic first. */
1778 if (ix86_tune_string)
1779 {
1780 if (!strcmp (ix86_tune_string, "generic")
1781 || !strcmp (ix86_tune_string, "i686")
1782 /* As special support for cross compilers we read -mtune=native
1783 as -mtune=generic. With native compilers we won't see the
1784 -mtune=native, as it will already have been rewritten by the driver. */
1785 || !strcmp (ix86_tune_string, "native"))
1786 {
1787 if (TARGET_64BIT)
1788 ix86_tune_string = "generic64";
1789 else
1790 ix86_tune_string = "generic32";
1791 }
1792 else if (!strncmp (ix86_tune_string, "generic", 7))
1793 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1794 }
1795 else
1796 {
1797 if (ix86_arch_string)
1798 ix86_tune_string = ix86_arch_string;
1799 if (!ix86_tune_string)
1800 {
1801 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1802 ix86_tune_defaulted = 1;
1803 }
1804
1805 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
1806 need to use a sensible tune option. */
1807 if (!strcmp (ix86_tune_string, "generic")
1808 || !strcmp (ix86_tune_string, "x86-64")
1809 || !strcmp (ix86_tune_string, "i686"))
1810 {
1811 if (TARGET_64BIT)
1812 ix86_tune_string = "generic64";
1813 else
1814 ix86_tune_string = "generic32";
1815 }
1816 }
1817 if (ix86_stringop_string)
1818 {
1819 if (!strcmp (ix86_stringop_string, "rep_byte"))
1820 stringop_alg = rep_prefix_1_byte;
1821 else if (!strcmp (ix86_stringop_string, "libcall"))
1822 stringop_alg = libcall;
1823 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
1824 stringop_alg = rep_prefix_4_byte;
1825 else if (!strcmp (ix86_stringop_string, "rep_8byte"))
1826 stringop_alg = rep_prefix_8_byte;
1827 else if (!strcmp (ix86_stringop_string, "byte_loop"))
1828 stringop_alg = loop_1_byte;
1829 else if (!strcmp (ix86_stringop_string, "loop"))
1830 stringop_alg = loop;
1831 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
1832 stringop_alg = unrolled_loop;
1833 else
1834 error ("bad value (%s) for -mstringop-strategy= switch", ix86_stringop_string);
1835 }
1836 if (!strcmp (ix86_tune_string, "x86-64"))
1837 warning (OPT_Wdeprecated, "-mtune=x86-64 is deprecated. Use -mtune=k8 or "
1838 "-mtune=generic instead as appropriate.");
1839
1840 if (!ix86_arch_string)
1841 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1842 if (!strcmp (ix86_arch_string, "generic"))
1843 error ("generic CPU can be used only for -mtune= switch");
1844 if (!strncmp (ix86_arch_string, "generic", 7))
1845 error ("bad value (%s) for -march= switch", ix86_arch_string);
1846
1847 if (ix86_cmodel_string != 0)
1848 {
1849 if (!strcmp (ix86_cmodel_string, "small"))
1850 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1851 else if (!strcmp (ix86_cmodel_string, "medium"))
1852 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
1853 else if (flag_pic)
1854 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1855 else if (!strcmp (ix86_cmodel_string, "32"))
1856 ix86_cmodel = CM_32;
1857 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1858 ix86_cmodel = CM_KERNEL;
1859 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1860 ix86_cmodel = CM_LARGE;
1861 else
1862 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1863 }
1864 else
1865 {
1866 ix86_cmodel = CM_32;
1867 if (TARGET_64BIT)
1868 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1869 }
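/* A few concrete combinations of the cases above (informal summary): plain
   64-bit compilation defaults to CM_SMALL, or CM_SMALL_PIC under -fPIC;
   "-mcmodel=medium -fPIC" yields CM_MEDIUM_PIC; "-mcmodel=kernel" and
   "-mcmodel=large" are accepted only without -fPIC.  */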
1870 if (ix86_asm_string != 0)
1871 {
1872 if (! TARGET_MACHO
1873 && !strcmp (ix86_asm_string, "intel"))
1874 ix86_asm_dialect = ASM_INTEL;
1875 else if (!strcmp (ix86_asm_string, "att"))
1876 ix86_asm_dialect = ASM_ATT;
1877 else
1878 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1879 }
1880 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1881 error ("code model %qs not supported in the %s bit mode",
1882 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1883 if (ix86_cmodel == CM_LARGE)
1884 sorry ("code model %<large%> not supported yet");
1885 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1886 sorry ("%i-bit mode not compiled in",
1887 (target_flags & MASK_64BIT) ? 64 : 32);
1888
1889 for (i = 0; i < pta_size; i++)
1890 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1891 {
1892 ix86_arch = processor_alias_table[i].processor;
1893 /* Default cpu tuning to the architecture. */
1894 ix86_tune = ix86_arch;
1895 if (processor_alias_table[i].flags & PTA_MMX
1896 && !(target_flags_explicit & MASK_MMX))
1897 target_flags |= MASK_MMX;
1898 if (processor_alias_table[i].flags & PTA_3DNOW
1899 && !(target_flags_explicit & MASK_3DNOW))
1900 target_flags |= MASK_3DNOW;
1901 if (processor_alias_table[i].flags & PTA_3DNOW_A
1902 && !(target_flags_explicit & MASK_3DNOW_A))
1903 target_flags |= MASK_3DNOW_A;
1904 if (processor_alias_table[i].flags & PTA_SSE
1905 && !(target_flags_explicit & MASK_SSE))
1906 target_flags |= MASK_SSE;
1907 if (processor_alias_table[i].flags & PTA_SSE2
1908 && !(target_flags_explicit & MASK_SSE2))
1909 target_flags |= MASK_SSE2;
1910 if (processor_alias_table[i].flags & PTA_SSE3
1911 && !(target_flags_explicit & MASK_SSE3))
1912 target_flags |= MASK_SSE3;
1913 if (processor_alias_table[i].flags & PTA_SSSE3
1914 && !(target_flags_explicit & MASK_SSSE3))
1915 target_flags |= MASK_SSSE3;
1916 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1917 x86_prefetch_sse = true;
1918 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1919 error ("CPU you selected does not support x86-64 "
1920 "instruction set");
1921 break;
1922 }
1923
1924 if (i == pta_size)
1925 error ("bad value (%s) for -march= switch", ix86_arch_string);
1926
1927 for (i = 0; i < pta_size; i++)
1928 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1929 {
1930 ix86_tune = processor_alias_table[i].processor;
1931 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1932 {
1933 if (ix86_tune_defaulted)
1934 {
1935 ix86_tune_string = "x86-64";
1936 for (i = 0; i < pta_size; i++)
1937 if (! strcmp (ix86_tune_string,
1938 processor_alias_table[i].name))
1939 break;
1940 ix86_tune = processor_alias_table[i].processor;
1941 }
1942 else
1943 error ("CPU you selected does not support x86-64 "
1944 "instruction set");
1945 }
1946 /* Intel CPUs have always interpreted SSE prefetch instructions as
1947 NOPs; so, we can enable SSE prefetch instructions even when
1948 -mtune (rather than -march) points us to a processor that has them.
1949 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1950 higher processors. */
1951 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1952 x86_prefetch_sse = true;
1953 break;
1954 }
1955 if (i == pta_size)
1956 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1957
1958 if (optimize_size)
1959 ix86_cost = &size_cost;
1960 else
1961 ix86_cost = processor_target_table[ix86_tune].cost;
1962 target_flags |= processor_target_table[ix86_tune].target_enable;
1963 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1964
1965 /* Arrange to set up i386_stack_locals for all functions. */
1966 init_machine_status = ix86_init_machine_status;
1967
1968 /* Validate -mregparm= value. */
1969 if (ix86_regparm_string)
1970 {
1971 i = atoi (ix86_regparm_string);
1972 if (i < 0 || i > REGPARM_MAX)
1973 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1974 else
1975 ix86_regparm = i;
1976 }
1977 else
1978 if (TARGET_64BIT)
1979 ix86_regparm = REGPARM_MAX;
1980
1981 /* If the user has provided any of the -malign-* options,
1982 warn and use that value only if -falign-* is not set.
1983 Remove this code in GCC 3.2 or later. */
1984 if (ix86_align_loops_string)
1985 {
1986 warning (0, "-malign-loops is obsolete, use -falign-loops");
1987 if (align_loops == 0)
1988 {
1989 i = atoi (ix86_align_loops_string);
1990 if (i < 0 || i > MAX_CODE_ALIGN)
1991 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1992 else
1993 align_loops = 1 << i;
1994 }
1995 }
1996
1997 if (ix86_align_jumps_string)
1998 {
1999 warning (0, "-malign-jumps is obsolete, use -falign-jumps");
2000 if (align_jumps == 0)
2001 {
2002 i = atoi (ix86_align_jumps_string);
2003 if (i < 0 || i > MAX_CODE_ALIGN)
2004 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2005 else
2006 align_jumps = 1 << i;
2007 }
2008 }
2009
2010 if (ix86_align_funcs_string)
2011 {
2012 warning (0, "-malign-functions is obsolete, use -falign-functions");
2013 if (align_functions == 0)
2014 {
2015 i = atoi (ix86_align_funcs_string);
2016 if (i < 0 || i > MAX_CODE_ALIGN)
2017 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2018 else
2019 align_functions = 1 << i;
2020 }
2021 }
2022
2023 /* Default align_* from the processor table. */
2024 if (align_loops == 0)
2025 {
2026 align_loops = processor_target_table[ix86_tune].align_loop;
2027 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
2028 }
2029 if (align_jumps == 0)
2030 {
2031 align_jumps = processor_target_table[ix86_tune].align_jump;
2032 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
2033 }
2034 if (align_functions == 0)
2035 {
2036 align_functions = processor_target_table[ix86_tune].align_func;
2037 }
2038
2039 /* Validate -mbranch-cost= value, or provide default. */
2040 ix86_branch_cost = ix86_cost->branch_cost;
2041 if (ix86_branch_cost_string)
2042 {
2043 i = atoi (ix86_branch_cost_string);
2044 if (i < 0 || i > 5)
2045 error ("-mbranch-cost=%d is not between 0 and 5", i);
2046 else
2047 ix86_branch_cost = i;
2048 }
2049 if (ix86_section_threshold_string)
2050 {
2051 i = atoi (ix86_section_threshold_string);
2052 if (i < 0)
2053 error ("-mlarge-data-threshold=%d is negative", i);
2054 else
2055 ix86_section_threshold = i;
2056 }
2057
2058 if (ix86_tls_dialect_string)
2059 {
2060 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
2061 ix86_tls_dialect = TLS_DIALECT_GNU;
2062 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
2063 ix86_tls_dialect = TLS_DIALECT_GNU2;
2064 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
2065 ix86_tls_dialect = TLS_DIALECT_SUN;
2066 else
2067 error ("bad value (%s) for -mtls-dialect= switch",
2068 ix86_tls_dialect_string);
2069 }
2070
2071 /* Keep nonleaf frame pointers. */
2072 if (flag_omit_frame_pointer)
2073 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
2074 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
2075 flag_omit_frame_pointer = 1;
2076
2077 /* If we're doing fast math, we don't care about comparison order
2078 wrt NaNs. This lets us use a shorter comparison sequence. */
2079 if (flag_finite_math_only)
2080 target_flags &= ~MASK_IEEE_FP;
2081
2082 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
2083 since the insns won't need emulation. */
2084 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
2085 target_flags &= ~MASK_NO_FANCY_MATH_387;
2086
2087 /* Likewise, if the target doesn't have a 387, or we've specified
2088 software floating point, don't use 387 inline intrinsics. */
2089 if (!TARGET_80387)
2090 target_flags |= MASK_NO_FANCY_MATH_387;
2091
2092 /* Turn on SSE3 builtins for -mssse3. */
2093 if (TARGET_SSSE3)
2094 target_flags |= MASK_SSE3;
2095
2096 /* Turn on SSE2 builtins for -msse3. */
2097 if (TARGET_SSE3)
2098 target_flags |= MASK_SSE2;
2099
2100 /* Turn on SSE builtins for -msse2. */
2101 if (TARGET_SSE2)
2102 target_flags |= MASK_SSE;
2103
2104 /* Turn on MMX builtins for -msse. */
2105 if (TARGET_SSE)
2106 {
2107 target_flags |= MASK_MMX & ~target_flags_explicit;
2108 x86_prefetch_sse = true;
2109 }
2110
2111 /* Turn on MMX builtins for 3Dnow. */
2112 if (TARGET_3DNOW)
2113 target_flags |= MASK_MMX;
2114
2115 if (TARGET_64BIT)
2116 {
2117 if (TARGET_ALIGN_DOUBLE)
2118 error ("-malign-double makes no sense in the 64bit mode");
2119 if (TARGET_RTD)
2120 error ("-mrtd calling convention not supported in the 64bit mode");
2121
2122 /* Enable by default the SSE and MMX builtins. Do allow the user to
2123 explicitly disable any of these. In particular, disabling SSE and
2124 MMX for kernel code is extremely useful. */
2125 target_flags
2126 |= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
2127 & ~target_flags_explicit);
2128 }
2129 else
2130 {
2131 /* The i386 ABI does not specify a red zone. It still makes sense to use one
2132 when the programmer takes care to keep the area below the stack pointer from being clobbered. */
2133 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
2134 target_flags |= MASK_NO_RED_ZONE;
2135 }
2136
2137 /* Validate -mpreferred-stack-boundary= value, or provide default.
2138 The default of 128 bits is for Pentium III's SSE __m128. We can't
2139 change it based on optimize_size; otherwise, object files compiled
2140 with -Os and -On could not be mixed. */
2141 ix86_preferred_stack_boundary = 128;
2142 if (ix86_preferred_stack_boundary_string)
2143 {
2144 i = atoi (ix86_preferred_stack_boundary_string);
2145 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
2146 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
2147 TARGET_64BIT ? 4 : 2);
2148 else
2149 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
2150 }
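/* Worked example of the conversion above: "-mpreferred-stack-boundary=4"
   gives i == 4, so the preferred boundary becomes (1 << 4) * BITS_PER_UNIT
   = 16 * 8 = 128 bits, i.e. 16-byte stack alignment -- the same as the
   built-in default.  Values outside the accepted range (4..12 for 64-bit,
   2..12 for 32-bit) are rejected.  */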
2151
2152 /* Accept -msseregparm only if at least SSE support is enabled. */
2153 if (TARGET_SSEREGPARM
2154 && ! TARGET_SSE)
2155 error ("-msseregparm used without SSE enabled");
2156
2157 ix86_fpmath = TARGET_FPMATH_DEFAULT;
2158
2159 if (ix86_fpmath_string != 0)
2160 {
2161 if (! strcmp (ix86_fpmath_string, "387"))
2162 ix86_fpmath = FPMATH_387;
2163 else if (! strcmp (ix86_fpmath_string, "sse"))
2164 {
2165 if (!TARGET_SSE)
2166 {
2167 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2168 ix86_fpmath = FPMATH_387;
2169 }
2170 else
2171 ix86_fpmath = FPMATH_SSE;
2172 }
2173 else if (! strcmp (ix86_fpmath_string, "387,sse")
2174 || ! strcmp (ix86_fpmath_string, "sse,387"))
2175 {
2176 if (!TARGET_SSE)
2177 {
2178 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2179 ix86_fpmath = FPMATH_387;
2180 }
2181 else if (!TARGET_80387)
2182 {
2183 warning (0, "387 instruction set disabled, using SSE arithmetics");
2184 ix86_fpmath = FPMATH_SSE;
2185 }
2186 else
2187 ix86_fpmath = FPMATH_SSE | FPMATH_387;
2188 }
2189 else
2190 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
2191 }
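/* Typical invocations handled above (illustrative only): "-mfpmath=387"
   selects FPMATH_387 unconditionally; "-msse2 -mfpmath=sse" selects
   FPMATH_SSE; "-mfpmath=sse,387" selects both only when the 387 and SSE are
   both enabled, and otherwise falls back to whichever unit is available,
   with a warning.  */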
2192
2193 /* If the i387 is disabled, then do not return values in it. */
2194 if (!TARGET_80387)
2195 target_flags &= ~MASK_FLOAT_RETURNS;
2196
2197 if ((x86_accumulate_outgoing_args & TUNEMASK)
2198 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2199 && !optimize_size)
2200 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2201
2202 /* ??? Unwind info is not correct around the CFG unless either a frame
2203 pointer is present or M_A_O_A is set. Fixing this requires rewriting
2204 unwind info generation to be aware of the CFG and propagating states
2205 around edges. */
2206 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
2207 || flag_exceptions || flag_non_call_exceptions)
2208 && flag_omit_frame_pointer
2209 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
2210 {
2211 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2212 warning (0, "unwind tables currently require either a frame pointer "
2213 "or -maccumulate-outgoing-args for correctness");
2214 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2215 }
2216
2217 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
2218 {
2219 char *p;
2220 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
2221 p = strchr (internal_label_prefix, 'X');
2222 internal_label_prefix_len = p - internal_label_prefix;
2223 *p = '\0';
2224 }
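/* For instance, on a target where ASM_GENERATE_INTERNAL_LABEL produces a
   string of the form "*.LX0" (a hypothetical but typical ELF-style result),
   the code above finds the 'X', so internal_label_prefix becomes "*.L" and
   internal_label_prefix_len is 3.  */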
2225
2226 /* When no scheduling description is available, disable the scheduler pass
2227 so it doesn't slow down compilation or make x87 code slower. */
2228 if (!TARGET_SCHEDULE)
2229 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
2230
2231 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
2232 set_param_value ("simultaneous-prefetches",
2233 ix86_cost->simultaneous_prefetches);
2234 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
2235 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
2236 }
2237 \f
2238 /* Switch to the appropriate section for output of DECL.
2239 DECL is either a `VAR_DECL' node or a constant of some sort.
2240 RELOC indicates whether forming the initial value of DECL requires
2241 link-time relocations. */
2242
2243 static section *
2244 x86_64_elf_select_section (tree decl, int reloc,
2245 unsigned HOST_WIDE_INT align)
2246 {
2247 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2248 && ix86_in_large_data_p (decl))
2249 {
2250 const char *sname = NULL;
2251 unsigned int flags = SECTION_WRITE;
2252 switch (categorize_decl_for_section (decl, reloc, flag_pic))
2253 {
2254 case SECCAT_DATA:
2255 sname = ".ldata";
2256 break;
2257 case SECCAT_DATA_REL:
2258 sname = ".ldata.rel";
2259 break;
2260 case SECCAT_DATA_REL_LOCAL:
2261 sname = ".ldata.rel.local";
2262 break;
2263 case SECCAT_DATA_REL_RO:
2264 sname = ".ldata.rel.ro";
2265 break;
2266 case SECCAT_DATA_REL_RO_LOCAL:
2267 sname = ".ldata.rel.ro.local";
2268 break;
2269 case SECCAT_BSS:
2270 sname = ".lbss";
2271 flags |= SECTION_BSS;
2272 break;
2273 case SECCAT_RODATA:
2274 case SECCAT_RODATA_MERGE_STR:
2275 case SECCAT_RODATA_MERGE_STR_INIT:
2276 case SECCAT_RODATA_MERGE_CONST:
2277 sname = ".lrodata";
2278 flags = 0;
2279 break;
2280 case SECCAT_SRODATA:
2281 case SECCAT_SDATA:
2282 case SECCAT_SBSS:
2283 gcc_unreachable ();
2284 case SECCAT_TEXT:
2285 case SECCAT_TDATA:
2286 case SECCAT_TBSS:
2287 /* We don't split these for the medium model. Place them into
2288 default sections and hope for the best. */
2289 break;
2290 }
2291 if (sname)
2292 {
2293 /* We might get called with string constants, but get_named_section
2294 doesn't like them as they are not DECLs. Also, we need to set
2295 flags in that case. */
2296 if (!DECL_P (decl))
2297 return get_section (sname, flags, NULL);
2298 return get_named_section (decl, sname, reloc);
2299 }
2300 }
2301 return default_elf_select_section (decl, reloc, align);
2302 }
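/* As an example of the effect (assuming -mcmodel=medium and a
   -mlarge-data-threshold small enough for the object to qualify): a large
   zero-initialized writable array such as

       static char big_table[1 << 20];

   is categorized as SECCAT_BSS and therefore placed in ".lbss" rather than
   ".bss"; a comparable read-only table would land in ".lrodata".  */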
2303
2304 /* Build up a unique section name, expressed as a
2305 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
2306 RELOC indicates whether the initial value of EXP requires
2307 link-time relocations. */
2308
2309 static void
2310 x86_64_elf_unique_section (tree decl, int reloc)
2311 {
2312 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2313 && ix86_in_large_data_p (decl))
2314 {
2315 const char *prefix = NULL;
2316 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
2317 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
2318
2319 switch (categorize_decl_for_section (decl, reloc, flag_pic))
2320 {
2321 case SECCAT_DATA:
2322 case SECCAT_DATA_REL:
2323 case SECCAT_DATA_REL_LOCAL:
2324 case SECCAT_DATA_REL_RO:
2325 case SECCAT_DATA_REL_RO_LOCAL:
2326 prefix = one_only ? ".gnu.linkonce.ld." : ".ldata.";
2327 break;
2328 case SECCAT_BSS:
2329 prefix = one_only ? ".gnu.linkonce.lb." : ".lbss.";
2330 break;
2331 case SECCAT_RODATA:
2332 case SECCAT_RODATA_MERGE_STR:
2333 case SECCAT_RODATA_MERGE_STR_INIT:
2334 case SECCAT_RODATA_MERGE_CONST:
2335 prefix = one_only ? ".gnu.linkonce.lr." : ".lrodata.";
2336 break;
2337 case SECCAT_SRODATA:
2338 case SECCAT_SDATA:
2339 case SECCAT_SBSS:
2340 gcc_unreachable ();
2341 case SECCAT_TEXT:
2342 case SECCAT_TDATA:
2343 case SECCAT_TBSS:
2344 /* We don't split these for the medium model. Place them into
2345 default sections and hope for the best. */
2346 break;
2347 }
2348 if (prefix)
2349 {
2350 const char *name;
2351 size_t nlen, plen;
2352 char *string;
2353 plen = strlen (prefix);
2354
2355 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
2356 name = targetm.strip_name_encoding (name);
2357 nlen = strlen (name);
2358
2359 string = alloca (nlen + plen + 1);
2360 memcpy (string, prefix, plen);
2361 memcpy (string + plen, name, nlen + 1);
2362
2363 DECL_SECTION_NAME (decl) = build_string (nlen + plen, string);
2364 return;
2365 }
2366 }
2367 default_unique_section (decl, reloc);
2368 }
2369
2370 #ifdef COMMON_ASM_OP
2371 /* This says how to output assembler code to declare an
2372 uninitialized external linkage data object.
2373
2374 For medium model x86-64 we need to use the .largecomm directive for
2375 large objects. */
2376 void
2377 x86_elf_aligned_common (FILE *file,
2378 const char *name, unsigned HOST_WIDE_INT size,
2379 int align)
2380 {
2381 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2382 && size > (unsigned int)ix86_section_threshold)
2383 fprintf (file, ".largecomm\t");
2384 else
2385 fprintf (file, "%s", COMMON_ASM_OP);
2386 assemble_name (file, name);
2387 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
2388 size, align / BITS_PER_UNIT);
2389 }
2390 #endif
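/* With the medium code model and an object larger than the threshold, the
   routine above emits a line of the shape (illustrative name and values):

       .largecomm  big_buf,1048576,32

   whereas smaller objects keep the ordinary COMMON_ASM_OP form, e.g.
   ".comm  buf,256,4" on a typical ELF target.  */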
2391 /* Utility function for targets to use in implementing
2392 ASM_OUTPUT_ALIGNED_BSS. */
2393
2394 void
2395 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
2396 const char *name, unsigned HOST_WIDE_INT size,
2397 int align)
2398 {
2399 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2400 && size > (unsigned int)ix86_section_threshold)
2401 switch_to_section (get_named_section (decl, ".lbss", 0));
2402 else
2403 switch_to_section (bss_section);
2404 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
2405 #ifdef ASM_DECLARE_OBJECT_NAME
2406 last_assemble_variable_decl = decl;
2407 ASM_DECLARE_OBJECT_NAME (file, name, decl);
2408 #else
2409 /* The standard thing is to just output a label for the object. */
2410 ASM_OUTPUT_LABEL (file, name);
2411 #endif /* ASM_DECLARE_OBJECT_NAME */
2412 ASM_OUTPUT_SKIP (file, size ? size : 1);
2413 }
2414 \f
2415 void
2416 optimization_options (int level, int size ATTRIBUTE_UNUSED)
2417 {
2418 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
2419 make the problem with not enough registers even worse. */
2420 #ifdef INSN_SCHEDULING
2421 if (level > 1)
2422 flag_schedule_insns = 0;
2423 #endif
2424
2425 if (TARGET_MACHO)
2426 /* The Darwin libraries never set errno, so we might as well
2427 avoid calling them when that's the only reason we would. */
2428 flag_errno_math = 0;
2429
2430 /* The default values of these switches depend on TARGET_64BIT,
2431 which is not known at this moment. Mark these values with 2 and
2432 let the user override them. If no command line option
2433 specifies them, we will set the defaults in override_options. */
2434 if (optimize >= 1)
2435 flag_omit_frame_pointer = 2;
2436 flag_pcc_struct_return = 2;
2437 flag_asynchronous_unwind_tables = 2;
2438 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
2439 SUBTARGET_OPTIMIZATION_OPTIONS;
2440 #endif
2441 }
2442 \f
2443 /* Table of valid machine attributes. */
2444 const struct attribute_spec ix86_attribute_table[] =
2445 {
2446 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2447 /* Stdcall attribute says callee is responsible for popping arguments
2448 if they are not variable. */
2449 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2450 /* Fastcall attribute says callee is responsible for popping arguments
2451 if they are not variable. */
2452 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2453 /* Cdecl attribute says the callee is a normal C declaration */
2454 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2455 /* Regparm attribute specifies how many integer arguments are to be
2456 passed in registers. */
2457 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
2458 /* Sseregparm attribute says we are using x86_64 calling conventions
2459 for FP arguments. */
2460 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2461 /* force_align_arg_pointer says this function realigns the stack at entry. */
2462 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
2463 false, true, true, ix86_handle_cconv_attribute },
2464 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2465 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2466 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2467 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
2468 #endif
2469 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
2470 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
2471 #ifdef SUBTARGET_ATTRIBUTE_TABLE
2472 SUBTARGET_ATTRIBUTE_TABLE,
2473 #endif
2474 { NULL, 0, 0, false, false, false, NULL }
2475 };
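/* User-level spellings of the attributes registered above, shown purely as
   illustration (the function names are made up):

       int __attribute__ ((regparm (3))) add3 (int a, int b, int c);
       int __attribute__ ((fastcall)) fsum (int a, int b);
       void __attribute__ ((stdcall)) wincb (int code);

   Each is validated by ix86_handle_cconv_attribute below, which rejects
   incompatible combinations such as fastcall together with regparm.  */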
2476
2477 /* Decide whether we can make a sibling call to a function. DECL is the
2478 declaration of the function being targeted by the call and EXP is the
2479 CALL_EXPR representing the call. */
2480
2481 static bool
2482 ix86_function_ok_for_sibcall (tree decl, tree exp)
2483 {
2484 tree func;
2485 rtx a, b;
2486
2487 /* If we are generating position-independent code, we cannot sibcall
2488 optimize any indirect call, or a direct call to a global function,
2489 as the PLT requires %ebx be live. */
2490 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
2491 return false;
2492
2493 if (decl)
2494 func = decl;
2495 else
2496 {
2497 func = TREE_TYPE (TREE_OPERAND (exp, 0));
2498 if (POINTER_TYPE_P (func))
2499 func = TREE_TYPE (func);
2500 }
2501
2502 /* Check that the return value locations are the same. For example,
2503 if we are returning floats on the 80387 register stack, we cannot
2504 make a sibcall from a function that doesn't return a float to a
2505 function that does or, conversely, from a function that does return
2506 a float to a function that doesn't; the necessary stack adjustment
2507 would not be executed. This is also the place we notice
2508 differences in the return value ABI. Note that it is ok for one
2509 of the functions to have void return type as long as the return
2510 value of the other is passed in a register. */
2511 a = ix86_function_value (TREE_TYPE (exp), func, false);
2512 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
2513 cfun->decl, false);
2514 if (STACK_REG_P (a) || STACK_REG_P (b))
2515 {
2516 if (!rtx_equal_p (a, b))
2517 return false;
2518 }
2519 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
2520 ;
2521 else if (!rtx_equal_p (a, b))
2522 return false;
2523
2524 /* If this call is indirect, we'll need to be able to use a call-clobbered
2525 register for the address of the target function. Make sure that all
2526 such registers are not used for passing parameters. */
2527 if (!decl && !TARGET_64BIT)
2528 {
2529 tree type;
2530
2531 /* We're looking at the CALL_EXPR, we need the type of the function. */
2532 type = TREE_OPERAND (exp, 0); /* pointer expression */
2533 type = TREE_TYPE (type); /* pointer type */
2534 type = TREE_TYPE (type); /* function type */
2535
2536 if (ix86_function_regparm (type, NULL) >= 3)
2537 {
2538 /* ??? Need to count the actual number of registers to be used,
2539 not the possible number of registers. Fix later. */
2540 return false;
2541 }
2542 }
2543
2544 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2545 /* Dllimport'd functions are also called indirectly. */
2546 if (decl && DECL_DLLIMPORT_P (decl)
2547 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
2548 return false;
2549 #endif
2550
2551 /* If we force-aligned the stack, then sibcalling would unalign the
2552 stack, which may break the called function. */
2553 if (cfun->machine->force_align_arg_pointer)
2554 return false;
2555
2556 /* Otherwise okay. That also includes certain types of indirect calls. */
2557 return true;
2558 }
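/* Example of the PIC restriction above (illustrative source, not part of the
   compiler):

       extern int bar (int);
       int foo (int x) { return bar (x); }

   Compiled with -m32 -fPIC, the call to the global function bar must go
   through the PLT, which needs %ebx live, so the sibcall optimization is
   refused even though the call is in tail position.  */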
2559
2560 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
2561 calling convention attributes;
2562 arguments as in struct attribute_spec.handler. */
2563
2564 static tree
2565 ix86_handle_cconv_attribute (tree *node, tree name,
2566 tree args,
2567 int flags ATTRIBUTE_UNUSED,
2568 bool *no_add_attrs)
2569 {
2570 if (TREE_CODE (*node) != FUNCTION_TYPE
2571 && TREE_CODE (*node) != METHOD_TYPE
2572 && TREE_CODE (*node) != FIELD_DECL
2573 && TREE_CODE (*node) != TYPE_DECL)
2574 {
2575 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2576 IDENTIFIER_POINTER (name));
2577 *no_add_attrs = true;
2578 return NULL_TREE;
2579 }
2580
2581 /* Can combine regparm with all attributes but fastcall. */
2582 if (is_attribute_p ("regparm", name))
2583 {
2584 tree cst;
2585
2586 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2587 {
2588 error ("fastcall and regparm attributes are not compatible");
2589 }
2590
2591 cst = TREE_VALUE (args);
2592 if (TREE_CODE (cst) != INTEGER_CST)
2593 {
2594 warning (OPT_Wattributes,
2595 "%qs attribute requires an integer constant argument",
2596 IDENTIFIER_POINTER (name));
2597 *no_add_attrs = true;
2598 }
2599 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
2600 {
2601 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
2602 IDENTIFIER_POINTER (name), REGPARM_MAX);
2603 *no_add_attrs = true;
2604 }
2605
2606 if (!TARGET_64BIT
2607 && lookup_attribute (ix86_force_align_arg_pointer_string,
2608 TYPE_ATTRIBUTES (*node))
2609 && compare_tree_int (cst, REGPARM_MAX-1))
2610 {
2611 error ("%s functions limited to %d register parameters",
2612 ix86_force_align_arg_pointer_string, REGPARM_MAX-1);
2613 }
2614
2615 return NULL_TREE;
2616 }
2617
2618 if (TARGET_64BIT)
2619 {
2620 warning (OPT_Wattributes, "%qs attribute ignored",
2621 IDENTIFIER_POINTER (name));
2622 *no_add_attrs = true;
2623 return NULL_TREE;
2624 }
2625
2626 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
2627 if (is_attribute_p ("fastcall", name))
2628 {
2629 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2630 {
2631 error ("fastcall and cdecl attributes are not compatible");
2632 }
2633 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2634 {
2635 error ("fastcall and stdcall attributes are not compatible");
2636 }
2637 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
2638 {
2639 error ("fastcall and regparm attributes are not compatible");
2640 }
2641 }
2642
2643 /* Can combine stdcall with fastcall (redundant), regparm and
2644 sseregparm. */
2645 else if (is_attribute_p ("stdcall", name))
2646 {
2647 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2648 {
2649 error ("stdcall and cdecl attributes are not compatible");
2650 }
2651 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2652 {
2653 error ("stdcall and fastcall attributes are not compatible");
2654 }
2655 }
2656
2657 /* Can combine cdecl with regparm and sseregparm. */
2658 else if (is_attribute_p ("cdecl", name))
2659 {
2660 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2661 {
2662 error ("stdcall and cdecl attributes are not compatible");
2663 }
2664 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2665 {
2666 error ("fastcall and cdecl attributes are not compatible");
2667 }
2668 }
2669
2670 /* Can combine sseregparm with all attributes. */
2671
2672 return NULL_TREE;
2673 }
2674
2675 /* Return 0 if the attributes for two types are incompatible, 1 if they
2676 are compatible, and 2 if they are nearly compatible (which causes a
2677 warning to be generated). */
2678
2679 static int
2680 ix86_comp_type_attributes (tree type1, tree type2)
2681 {
2682 /* Check for mismatch of non-default calling convention. */
2683 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
2684
2685 if (TREE_CODE (type1) != FUNCTION_TYPE)
2686 return 1;
2687
2688 /* Check for mismatched fastcall/regparm types. */
2689 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
2690 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
2691 || (ix86_function_regparm (type1, NULL)
2692 != ix86_function_regparm (type2, NULL)))
2693 return 0;
2694
2695 /* Check for mismatched sseregparm types. */
2696 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
2697 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
2698 return 0;
2699
2700 /* Check for mismatched return types (cdecl vs stdcall). */
2701 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
2702 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
2703 return 0;
2704
2705 return 1;
2706 }
2707 \f
2708 /* Return the regparm value for a function with the indicated TYPE and DECL.
2709 DECL may be NULL when calling function indirectly
2710 or considering a libcall. */
2711
2712 static int
2713 ix86_function_regparm (tree type, tree decl)
2714 {
2715 tree attr;
2716 int regparm = ix86_regparm;
2717 bool user_convention = false;
2718
2719 if (!TARGET_64BIT)
2720 {
2721 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
2722 if (attr)
2723 {
2724 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
2725 user_convention = true;
2726 }
2727
2728 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
2729 {
2730 regparm = 2;
2731 user_convention = true;
2732 }
2733
2734 /* Use register calling convention for local functions when possible. */
2735 if (!TARGET_64BIT && !user_convention && decl
2736 && flag_unit_at_a_time && !profile_flag)
2737 {
2738 struct cgraph_local_info *i = cgraph_local_info (decl);
2739 if (i && i->local)
2740 {
2741 int local_regparm, globals = 0, regno;
2742
2743 /* Make sure no regparm register is taken by a global register
2744 variable. */
2745 for (local_regparm = 0; local_regparm < 3; local_regparm++)
2746 if (global_regs[local_regparm])
2747 break;
2748 /* We can't use regparm(3) for nested functions, as these use the
2749 static chain pointer in the third argument register. */
2750 if (local_regparm == 3
2751 && decl_function_context (decl)
2752 && !DECL_NO_STATIC_CHAIN (decl))
2753 local_regparm = 2;
2754 /* If the function realigns its stack pointer, the
2755 prologue will clobber %ecx. If we've already
2756 generated code for the callee, the callee
2757 DECL_STRUCT_FUNCTION is gone, so we fall back to
2758 scanning the attributes for the self-realigning
2759 property. */
2760 if ((DECL_STRUCT_FUNCTION (decl)
2761 && DECL_STRUCT_FUNCTION (decl)->machine->force_align_arg_pointer)
2762 || (!DECL_STRUCT_FUNCTION (decl)
2763 && lookup_attribute (ix86_force_align_arg_pointer_string,
2764 TYPE_ATTRIBUTES (TREE_TYPE (decl)))))
2765 local_regparm = 2;
2766 /* Each global register variable increases register pressure,
2767 so the more global reg vars there are, the less the regparm
2768 optimization helps, unless requested by the user explicitly. */
2769 for (regno = 0; regno < 6; regno++)
2770 if (global_regs[regno])
2771 globals++;
2772 local_regparm
2773 = globals < local_regparm ? local_regparm - globals : 0;
2774
2775 if (local_regparm > regparm)
2776 regparm = local_regparm;
2777 }
2778 }
2779 }
2780 return regparm;
2781 }
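/* Rough illustration of the local-function case handled above: with
   -O2 -funit-at-a-time and no profiling, a file-local function such as

       static int sum (int a, int b, int c) { return a + b + c; }

   that cgraph considers local can be promoted to an implicit regparm(3),
   passing a, b and c in %eax, %edx and %ecx.  A global register variable
   pinned to one of those registers, or a nested function (which needs %ecx
   for the static chain), reduces the count accordingly.  */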
2782
2783 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
2784 DFmode (2) arguments in SSE registers for a function with the
2785 indicated TYPE and DECL. DECL may be NULL when calling function
2786 indirectly or considering a libcall. Otherwise return 0. */
2787
2788 static int
2789 ix86_function_sseregparm (tree type, tree decl)
2790 {
2791 /* Use SSE registers to pass SFmode and DFmode arguments if requested
2792 by the sseregparm attribute. */
2793 if (TARGET_SSEREGPARM
2794 || (type
2795 && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
2796 {
2797 if (!TARGET_SSE)
2798 {
2799 if (decl)
2800 error ("Calling %qD with attribute sseregparm without "
2801 "SSE/SSE2 enabled", decl);
2802 else
2803 error ("Calling %qT with attribute sseregparm without "
2804 "SSE/SSE2 enabled", type);
2805 return 0;
2806 }
2807
2808 return 2;
2809 }
2810
2811 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
2812 (and DFmode for SSE2) arguments in SSE registers,
2813 even for 32-bit targets. */
2814 if (!TARGET_64BIT && decl
2815 && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
2816 {
2817 struct cgraph_local_info *i = cgraph_local_info (decl);
2818 if (i && i->local)
2819 return TARGET_SSE2 ? 2 : 1;
2820 }
2821
2822 return 0;
2823 }
2824
2825 /* Return true if EAX is live at the start of the function. Used by
2826 ix86_expand_prologue to determine if we need special help before
2827 calling allocate_stack_worker. */
2828
2829 static bool
2830 ix86_eax_live_at_start_p (void)
2831 {
2832 /* Cheat. Don't bother working forward from ix86_function_regparm
2833 to the function type to determine whether an actual argument is located in
2834 eax. Instead just look at cfg info, which is still close enough
2835 to correct at this point. This gives false positives for broken
2836 functions that might use uninitialized data that happens to be
2837 allocated in eax, but who cares? */
2838 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->il.rtl->global_live_at_end, 0);
2839 }
2840
2841 /* Value is the number of bytes of arguments automatically
2842 popped when returning from a subroutine call.
2843 FUNDECL is the declaration node of the function (as a tree),
2844 FUNTYPE is the data type of the function (as a tree),
2845 or for a library call it is an identifier node for the subroutine name.
2846 SIZE is the number of bytes of arguments passed on the stack.
2847
2848 On the 80386, the RTD insn may be used to pop them if the number
2849 of args is fixed, but if the number is variable then the caller
2850 must pop them all. RTD can't be used for library calls now
2851 because the library is compiled with the Unix compiler.
2852 Use of RTD is a selectable option, since it is incompatible with
2853 standard Unix calling sequences. If the option is not selected,
2854 the caller must always pop the args.
2855
2856 The attribute stdcall is equivalent to RTD on a per module basis. */
2857
2858 int
2859 ix86_return_pops_args (tree fundecl, tree funtype, int size)
2860 {
2861 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
2862
2863 /* Cdecl functions override -mrtd, and never pop the stack. */
2864 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
2865
2866 /* Stdcall and fastcall functions will pop the stack if not
2867 variable args. */
2868 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
2869 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
2870 rtd = 1;
2871
2872 if (rtd
2873 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
2874 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
2875 == void_type_node)))
2876 return size;
2877 }
2878
2879 /* Lose any fake structure return argument if it is passed on the stack. */
2880 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
2881 && !TARGET_64BIT
2882 && !KEEP_AGGREGATE_RETURN_POINTER)
2883 {
2884 int nregs = ix86_function_regparm (funtype, fundecl);
2885
2886 if (!nregs)
2887 return GET_MODE_SIZE (Pmode);
2888 }
2889
2890 return 0;
2891 }
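/* Concrete cases of the rules above (for illustration, the declaration is
   made up): a fixed-argument stdcall function

       void __attribute__ ((stdcall)) f (int a, int b);

   makes the callee pop the 8 bytes of arguments (the caller's SIZE), while a
   variadic stdcall or any cdecl function pops nothing.  With -m32 and
   KEEP_AGGREGATE_RETURN_POINTER unset, a function returning an aggregate in
   memory additionally pops the hidden return-pointer word when its regparm
   count is zero.  */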
2892 \f
2893 /* Argument support functions. */
2894
2895 /* Return true when register may be used to pass function parameters. */
2896 bool
2897 ix86_function_arg_regno_p (int regno)
2898 {
2899 int i;
2900 if (!TARGET_64BIT)
2901 return (regno < REGPARM_MAX
2902 || (TARGET_MMX && MMX_REGNO_P (regno)
2903 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
2904 || (TARGET_SSE && SSE_REGNO_P (regno)
2905 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
2906
2907 if (TARGET_SSE && SSE_REGNO_P (regno)
2908 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
2909 return true;
2910 /* RAX is used as a hidden argument to varargs functions. */
2911 if (!regno)
2912 return true;
2913 for (i = 0; i < REGPARM_MAX; i++)
2914 if (regno == x86_64_int_parameter_registers[i])
2915 return true;
2916 return false;
2917 }
2918
2919 /* Return true if we do not know how to pass TYPE solely in registers. */
2920
2921 static bool
2922 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
2923 {
2924 if (must_pass_in_stack_var_size_or_pad (mode, type))
2925 return true;
2926
2927 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
2928 The layout_type routine is crafty and tries to trick us into passing
2929 currently unsupported vector types on the stack by using TImode. */
2930 return (!TARGET_64BIT && mode == TImode
2931 && type && TREE_CODE (type) != VECTOR_TYPE);
2932 }
2933
2934 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2935 for a call to a function whose data type is FNTYPE.
2936 For a library call, FNTYPE is 0. */
2937
2938 void
2939 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
2940 tree fntype, /* tree ptr for function decl */
2941 rtx libname, /* SYMBOL_REF of library name or 0 */
2942 tree fndecl)
2943 {
2944 static CUMULATIVE_ARGS zero_cum;
2945 tree param, next_param;
2946
2947 if (TARGET_DEBUG_ARG)
2948 {
2949 fprintf (stderr, "\ninit_cumulative_args (");
2950 if (fntype)
2951 fprintf (stderr, "fntype code = %s, ret code = %s",
2952 tree_code_name[(int) TREE_CODE (fntype)],
2953 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
2954 else
2955 fprintf (stderr, "no fntype");
2956
2957 if (libname)
2958 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
2959 }
2960
2961 *cum = zero_cum;
2962
2963 /* Set up the number of registers to use for passing arguments. */
2964 cum->nregs = ix86_regparm;
2965 if (TARGET_SSE)
2966 cum->sse_nregs = SSE_REGPARM_MAX;
2967 if (TARGET_MMX)
2968 cum->mmx_nregs = MMX_REGPARM_MAX;
2969 cum->warn_sse = true;
2970 cum->warn_mmx = true;
2971 cum->maybe_vaarg = false;
2972
2973 /* Use ecx and edx registers if function has fastcall attribute,
2974 else look for regparm information. */
2975 if (fntype && !TARGET_64BIT)
2976 {
2977 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
2978 {
2979 cum->nregs = 2;
2980 cum->fastcall = 1;
2981 }
2982 else
2983 cum->nregs = ix86_function_regparm (fntype, fndecl);
2984 }
2985
2986 /* Set up the number of SSE registers used for passing SFmode
2987 and DFmode arguments. Warn for mismatching ABI. */
2988 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl);
2989
2990 /* Determine if this function has variable arguments. This is
2991 indicated by the last argument being 'void_type_node' if there
2992 are no variable arguments. If there are variable arguments, then
2993 we won't pass anything in registers in 32-bit mode. */
2994
2995 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
2996 {
2997 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
2998 param != 0; param = next_param)
2999 {
3000 next_param = TREE_CHAIN (param);
3001 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3002 {
3003 if (!TARGET_64BIT)
3004 {
3005 cum->nregs = 0;
3006 cum->sse_nregs = 0;
3007 cum->mmx_nregs = 0;
3008 cum->warn_sse = 0;
3009 cum->warn_mmx = 0;
3010 cum->fastcall = 0;
3011 cum->float_in_sse = 0;
3012 }
3013 cum->maybe_vaarg = true;
3014 }
3015 }
3016 }
3017 if ((!fntype && !libname)
3018 || (fntype && !TYPE_ARG_TYPES (fntype)))
3019 cum->maybe_vaarg = true;
3020
3021 if (TARGET_DEBUG_ARG)
3022 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
3023
3024 return;
3025 }
3026
3027 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
3028 But in the case of vector types, it is some vector mode.
3029
3030 When we have only some of our vector isa extensions enabled, then there
3031 are some modes for which vector_mode_supported_p is false. For these
3032 modes, the generic vector support in gcc will choose some non-vector mode
3033 in order to implement the type. By computing the natural mode, we'll
3034 select the proper ABI location for the operand and not depend on whatever
3035 the middle-end decides to do with these vector types. */
3036
3037 static enum machine_mode
3038 type_natural_mode (tree type)
3039 {
3040 enum machine_mode mode = TYPE_MODE (type);
3041
3042 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
3043 {
3044 HOST_WIDE_INT size = int_size_in_bytes (type);
3045 if ((size == 8 || size == 16)
3046 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
3047 && TYPE_VECTOR_SUBPARTS (type) > 1)
3048 {
3049 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
3050
3051 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
3052 mode = MIN_MODE_VECTOR_FLOAT;
3053 else
3054 mode = MIN_MODE_VECTOR_INT;
3055
3056 /* Get the mode which has this inner mode and number of units. */
3057 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3058 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
3059 && GET_MODE_INNER (mode) == innermode)
3060 return mode;
3061
3062 gcc_unreachable ();
3063 }
3064 }
3065
3066 return mode;
3067 }
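/* Example (illustrative): for a generic vector type such as

     typedef int v4si __attribute__ ((vector_size (16)));

   type_natural_mode returns V4SImode even when SSE is disabled and the
   middle end has given the type some non-vector TYPE_MODE, so the ABI
   location is chosen consistently regardless of the enabled ISA.  */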
3068
3069 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
3070 this may not agree with the mode that the type system has chosen for the
3071 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
3072 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
3073
3074 static rtx
3075 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
3076 unsigned int regno)
3077 {
3078 rtx tmp;
3079
3080 if (orig_mode != BLKmode)
3081 tmp = gen_rtx_REG (orig_mode, regno);
3082 else
3083 {
3084 tmp = gen_rtx_REG (mode, regno);
3085 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
3086 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
3087 }
3088
3089 return tmp;
3090 }
3091
3092 /* x86-64 register passing implementation. See the x86-64 ABI for details.
3093 The goal of this code is to classify each 8-byte chunk of an incoming
3094 argument by register class and assign registers accordingly. */
3095
3096 /* Return the union class of CLASS1 and CLASS2.
3097 See the x86-64 PS ABI for details. */
3098
3099 static enum x86_64_reg_class
3100 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
3101 {
3102 /* Rule #1: If both classes are equal, this is the resulting class. */
3103 if (class1 == class2)
3104 return class1;
3105
3106 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
3107 the other class. */
3108 if (class1 == X86_64_NO_CLASS)
3109 return class2;
3110 if (class2 == X86_64_NO_CLASS)
3111 return class1;
3112
3113 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
3114 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
3115 return X86_64_MEMORY_CLASS;
3116
3117 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
3118 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
3119 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
3120 return X86_64_INTEGERSI_CLASS;
3121 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
3122 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
3123 return X86_64_INTEGER_CLASS;
3124
3125 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
3126 MEMORY is used. */
3127 if (class1 == X86_64_X87_CLASS
3128 || class1 == X86_64_X87UP_CLASS
3129 || class1 == X86_64_COMPLEX_X87_CLASS
3130 || class2 == X86_64_X87_CLASS
3131 || class2 == X86_64_X87UP_CLASS
3132 || class2 == X86_64_COMPLEX_X87_CLASS)
3133 return X86_64_MEMORY_CLASS;
3134
3135 /* Rule #6: Otherwise class SSE is used. */
3136 return X86_64_SSE_CLASS;
3137 }
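/* Example (illustrative): merging X86_64_INTEGERSI_CLASS with
   X86_64_SSESF_CLASS yields X86_64_INTEGERSI_CLASS (rule #4), while
   merging X86_64_SSESF_CLASS with X86_64_SSEDF_CLASS matches none of
   rules #1-#5 and so yields X86_64_SSE_CLASS by rule #6.  */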
3138
3139 /* Classify the argument of type TYPE and mode MODE.
3140 CLASSES will be filled by the register class used to pass each word
3141 of the operand. The number of words is returned. In case the parameter
3142 should be passed in memory, 0 is returned. As a special case for zero
3143 sized containers, classes[0] will be NO_CLASS and 1 is returned.
3144
3145 BIT_OFFSET is used internally for handling records and specifies the
3146 offset in bits modulo 256 to avoid overflow cases.
3147
3148 See the x86-64 PS ABI for details.
3149 */
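/* Worked example (illustrative, following the rules above): for

     struct s { double d; int i; };

   the double occupies the first eightbyte and classifies as
   X86_64_SSEDF_CLASS, the int falls into the second eightbyte and
   classifies as X86_64_INTEGER_CLASS, so classify_argument fills
   classes[] = { X86_64_SSEDF_CLASS, X86_64_INTEGER_CLASS } and
   returns 2.  */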
3150
3151 static int
3152 classify_argument (enum machine_mode mode, tree type,
3153 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
3154 {
3155 HOST_WIDE_INT bytes =
3156 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3157 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3158
3159 /* Variable sized entities are always passed/returned in memory. */
3160 if (bytes < 0)
3161 return 0;
3162
3163 if (mode != VOIDmode
3164 && targetm.calls.must_pass_in_stack (mode, type))
3165 return 0;
3166
3167 if (type && AGGREGATE_TYPE_P (type))
3168 {
3169 int i;
3170 tree field;
3171 enum x86_64_reg_class subclasses[MAX_CLASSES];
3172
3173 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
3174 if (bytes > 16)
3175 return 0;
3176
3177 for (i = 0; i < words; i++)
3178 classes[i] = X86_64_NO_CLASS;
3179
3180 /* Zero-sized arrays or structures are NO_CLASS. We return 0 to
3181 signal the memory class, so handle this as a special case. */
3182 if (!words)
3183 {
3184 classes[0] = X86_64_NO_CLASS;
3185 return 1;
3186 }
3187
3188 /* Classify each field of record and merge classes. */
3189 switch (TREE_CODE (type))
3190 {
3191 case RECORD_TYPE:
3192 /* And now merge the fields of structure. */
3193 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3194 {
3195 if (TREE_CODE (field) == FIELD_DECL)
3196 {
3197 int num;
3198
3199 if (TREE_TYPE (field) == error_mark_node)
3200 continue;
3201
3202 /* Bitfields are always classified as integer. Handle them
3203 early, since later code would consider them to be
3204 misaligned integers. */
3205 if (DECL_BIT_FIELD (field))
3206 {
3207 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3208 i < ((int_bit_position (field) + (bit_offset % 64))
3209 + tree_low_cst (DECL_SIZE (field), 0)
3210 + 63) / 8 / 8; i++)
3211 classes[i] =
3212 merge_classes (X86_64_INTEGER_CLASS,
3213 classes[i]);
3214 }
3215 else
3216 {
3217 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3218 TREE_TYPE (field), subclasses,
3219 (int_bit_position (field)
3220 + bit_offset) % 256);
3221 if (!num)
3222 return 0;
3223 for (i = 0; i < num; i++)
3224 {
3225 int pos =
3226 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3227 classes[i + pos] =
3228 merge_classes (subclasses[i], classes[i + pos]);
3229 }
3230 }
3231 }
3232 }
3233 break;
3234
3235 case ARRAY_TYPE:
3236 /* Arrays are handled as small records. */
3237 {
3238 int num;
3239 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
3240 TREE_TYPE (type), subclasses, bit_offset);
3241 if (!num)
3242 return 0;
3243
3244 /* The partial classes are now full classes. */
3245 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
3246 subclasses[0] = X86_64_SSE_CLASS;
3247 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
3248 subclasses[0] = X86_64_INTEGER_CLASS;
3249
3250 for (i = 0; i < words; i++)
3251 classes[i] = subclasses[i % num];
3252
3253 break;
3254 }
3255 case UNION_TYPE:
3256 case QUAL_UNION_TYPE:
3257 /* Unions are similar to RECORD_TYPE but the offset is always 0. */
3259 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3260 {
3261 if (TREE_CODE (field) == FIELD_DECL)
3262 {
3263 int num;
3264
3265 if (TREE_TYPE (field) == error_mark_node)
3266 continue;
3267
3268 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3269 TREE_TYPE (field), subclasses,
3270 bit_offset);
3271 if (!num)
3272 return 0;
3273 for (i = 0; i < num; i++)
3274 classes[i] = merge_classes (subclasses[i], classes[i]);
3275 }
3276 }
3277 break;
3278
3279 default:
3280 gcc_unreachable ();
3281 }
3282
3283 /* Final merger cleanup. */
3284 for (i = 0; i < words; i++)
3285 {
3286 /* If one class is MEMORY, everything should be passed in
3287 memory. */
3288 if (classes[i] == X86_64_MEMORY_CLASS)
3289 return 0;
3290
3291 /* The X86_64_SSEUP_CLASS should always be preceded by
3292 X86_64_SSE_CLASS. */
3293 if (classes[i] == X86_64_SSEUP_CLASS
3294 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
3295 classes[i] = X86_64_SSE_CLASS;
3296
3297 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
3298 if (classes[i] == X86_64_X87UP_CLASS
3299 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
3300 classes[i] = X86_64_SSE_CLASS;
3301 }
3302 return words;
3303 }
3304
3305 /* Compute alignment needed. We align all types to natural boundaries with
3306 the exception of XFmode, which is aligned to 128 bits. */
3307 if (mode != VOIDmode && mode != BLKmode)
3308 {
3309 int mode_alignment = GET_MODE_BITSIZE (mode);
3310
3311 if (mode == XFmode)
3312 mode_alignment = 128;
3313 else if (mode == XCmode)
3314 mode_alignment = 256;
3315 if (COMPLEX_MODE_P (mode))
3316 mode_alignment /= 2;
3317 /* Misaligned fields are always returned in memory. */
3318 if (bit_offset % mode_alignment)
3319 return 0;
3320 }
3321
3322 /* for V1xx modes, just use the base mode */
3323 if (VECTOR_MODE_P (mode)
3324 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
3325 mode = GET_MODE_INNER (mode);
3326
3327 /* Classification of atomic types. */
3328 switch (mode)
3329 {
3330 case SDmode:
3331 case DDmode:
3332 classes[0] = X86_64_SSE_CLASS;
3333 return 1;
3334 case TDmode:
3335 classes[0] = X86_64_SSE_CLASS;
3336 classes[1] = X86_64_SSEUP_CLASS;
3337 return 2;
3338 case DImode:
3339 case SImode:
3340 case HImode:
3341 case QImode:
3342 case CSImode:
3343 case CHImode:
3344 case CQImode:
3345 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3346 classes[0] = X86_64_INTEGERSI_CLASS;
3347 else
3348 classes[0] = X86_64_INTEGER_CLASS;
3349 return 1;
3350 case CDImode:
3351 case TImode:
3352 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
3353 return 2;
3354 case CTImode:
3355 return 0;
3356 case SFmode:
3357 if (!(bit_offset % 64))
3358 classes[0] = X86_64_SSESF_CLASS;
3359 else
3360 classes[0] = X86_64_SSE_CLASS;
3361 return 1;
3362 case DFmode:
3363 classes[0] = X86_64_SSEDF_CLASS;
3364 return 1;
3365 case XFmode:
3366 classes[0] = X86_64_X87_CLASS;
3367 classes[1] = X86_64_X87UP_CLASS;
3368 return 2;
3369 case TFmode:
3370 classes[0] = X86_64_SSE_CLASS;
3371 classes[1] = X86_64_SSEUP_CLASS;
3372 return 2;
3373 case SCmode:
3374 classes[0] = X86_64_SSE_CLASS;
3375 return 1;
3376 case DCmode:
3377 classes[0] = X86_64_SSEDF_CLASS;
3378 classes[1] = X86_64_SSEDF_CLASS;
3379 return 2;
3380 case XCmode:
3381 classes[0] = X86_64_COMPLEX_X87_CLASS;
3382 return 1;
3383 case TCmode:
3384 /* This mode is larger than 16 bytes. */
3385 return 0;
3386 case V4SFmode:
3387 case V4SImode:
3388 case V16QImode:
3389 case V8HImode:
3390 case V2DFmode:
3391 case V2DImode:
3392 classes[0] = X86_64_SSE_CLASS;
3393 classes[1] = X86_64_SSEUP_CLASS;
3394 return 2;
3395 case V2SFmode:
3396 case V2SImode:
3397 case V4HImode:
3398 case V8QImode:
3399 classes[0] = X86_64_SSE_CLASS;
3400 return 1;
3401 case BLKmode:
3402 case VOIDmode:
3403 return 0;
3404 default:
3405 gcc_assert (VECTOR_MODE_P (mode));
3406
3407 if (bytes > 16)
3408 return 0;
3409
3410 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
3411
3412 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3413 classes[0] = X86_64_INTEGERSI_CLASS;
3414 else
3415 classes[0] = X86_64_INTEGER_CLASS;
3416 classes[1] = X86_64_INTEGER_CLASS;
3417 return 1 + (bytes > 8);
3418 }
3419 }
3420
3421 /* Examine the argument and set the number of registers required in each
3422 class. Return 0 iff the parameter should be passed in memory. */
3423 static int
3424 examine_argument (enum machine_mode mode, tree type, int in_return,
3425 int *int_nregs, int *sse_nregs)
3426 {
3427 enum x86_64_reg_class class[MAX_CLASSES];
3428 int n = classify_argument (mode, type, class, 0);
3429
3430 *int_nregs = 0;
3431 *sse_nregs = 0;
3432 if (!n)
3433 return 0;
3434 for (n--; n >= 0; n--)
3435 switch (class[n])
3436 {
3437 case X86_64_INTEGER_CLASS:
3438 case X86_64_INTEGERSI_CLASS:
3439 (*int_nregs)++;
3440 break;
3441 case X86_64_SSE_CLASS:
3442 case X86_64_SSESF_CLASS:
3443 case X86_64_SSEDF_CLASS:
3444 (*sse_nregs)++;
3445 break;
3446 case X86_64_NO_CLASS:
3447 case X86_64_SSEUP_CLASS:
3448 break;
3449 case X86_64_X87_CLASS:
3450 case X86_64_X87UP_CLASS:
3451 if (!in_return)
3452 return 0;
3453 break;
3454 case X86_64_COMPLEX_X87_CLASS:
3455 return in_return ? 2 : 0;
3456 case X86_64_MEMORY_CLASS:
3457 gcc_unreachable ();
3458 }
3459 return 1;
3460 }
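/* Example (illustrative): for the struct { double d; int i; } case
   classified above, examine_argument sets *sse_nregs = 1 (for the SSEDF
   eightbyte) and *int_nregs = 1 (for the INTEGER eightbyte) and returns
   1, i.e. the argument can be passed entirely in registers.  */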
3461
3462 /* Construct container for the argument used by GCC interface. See
3463 FUNCTION_ARG for the detailed description. */
3464
3465 static rtx
3466 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
3467 tree type, int in_return, int nintregs, int nsseregs,
3468 const int *intreg, int sse_regno)
3469 {
3470 /* The following variables hold the static issued_error state. */
3471 static bool issued_sse_arg_error;
3472 static bool issued_sse_ret_error;
3473 static bool issued_x87_ret_error;
3474
3475 enum machine_mode tmpmode;
3476 int bytes =
3477 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3478 enum x86_64_reg_class class[MAX_CLASSES];
3479 int n;
3480 int i;
3481 int nexps = 0;
3482 int needed_sseregs, needed_intregs;
3483 rtx exp[MAX_CLASSES];
3484 rtx ret;
3485
3486 n = classify_argument (mode, type, class, 0);
3487 if (TARGET_DEBUG_ARG)
3488 {
3489 if (!n)
3490 fprintf (stderr, "Memory class\n");
3491 else
3492 {
3493 fprintf (stderr, "Classes:");
3494 for (i = 0; i < n; i++)
3495 {
3496 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
3497 }
3498 fprintf (stderr, "\n");
3499 }
3500 }
3501 if (!n)
3502 return NULL;
3503 if (!examine_argument (mode, type, in_return, &needed_intregs,
3504 &needed_sseregs))
3505 return NULL;
3506 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
3507 return NULL;
3508
3509 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
3510 some less clueful developer tries to use floating-point anyway. */
3511 if (needed_sseregs && !TARGET_SSE)
3512 {
3513 if (in_return)
3514 {
3515 if (!issued_sse_ret_error)
3516 {
3517 error ("SSE register return with SSE disabled");
3518 issued_sse_ret_error = true;
3519 }
3520 }
3521 else if (!issued_sse_arg_error)
3522 {
3523 error ("SSE register argument with SSE disabled");
3524 issued_sse_arg_error = true;
3525 }
3526 return NULL;
3527 }
3528
3529 /* Likewise, error if the ABI requires us to return values in the
3530 x87 registers and the user specified -mno-80387. */
3531 if (!TARGET_80387 && in_return)
3532 for (i = 0; i < n; i++)
3533 if (class[i] == X86_64_X87_CLASS
3534 || class[i] == X86_64_X87UP_CLASS
3535 || class[i] == X86_64_COMPLEX_X87_CLASS)
3536 {
3537 if (!issued_x87_ret_error)
3538 {
3539 error ("x87 register return with x87 disabled");
3540 issued_x87_ret_error = true;
3541 }
3542 return NULL;
3543 }
3544
3545 /* First construct the simple cases. Avoid SCmode, since we want to use
3546 a single register to pass this type. */
3547 if (n == 1 && mode != SCmode)
3548 switch (class[0])
3549 {
3550 case X86_64_INTEGER_CLASS:
3551 case X86_64_INTEGERSI_CLASS:
3552 return gen_rtx_REG (mode, intreg[0]);
3553 case X86_64_SSE_CLASS:
3554 case X86_64_SSESF_CLASS:
3555 case X86_64_SSEDF_CLASS:
3556 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
3557 case X86_64_X87_CLASS:
3558 case X86_64_COMPLEX_X87_CLASS:
3559 return gen_rtx_REG (mode, FIRST_STACK_REG);
3560 case X86_64_NO_CLASS:
3561 /* Zero sized array, struct or class. */
3562 return NULL;
3563 default:
3564 gcc_unreachable ();
3565 }
3566 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
3567 && mode != BLKmode)
3568 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
3569 if (n == 2
3570 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
3571 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
3572 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
3573 && class[1] == X86_64_INTEGER_CLASS
3574 && (mode == CDImode || mode == TImode || mode == TFmode)
3575 && intreg[0] + 1 == intreg[1])
3576 return gen_rtx_REG (mode, intreg[0]);
3577
3578 /* Otherwise figure out the entries of the PARALLEL. */
3579 for (i = 0; i < n; i++)
3580 {
3581 switch (class[i])
3582 {
3583 case X86_64_NO_CLASS:
3584 break;
3585 case X86_64_INTEGER_CLASS:
3586 case X86_64_INTEGERSI_CLASS:
3587 /* Merge TImodes on aligned occasions here too. */
3588 if (i * 8 + 8 > bytes)
3589 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
3590 else if (class[i] == X86_64_INTEGERSI_CLASS)
3591 tmpmode = SImode;
3592 else
3593 tmpmode = DImode;
3594 /* We've requested 24 bytes for which we don't have a mode. Use DImode. */
3595 if (tmpmode == BLKmode)
3596 tmpmode = DImode;
3597 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3598 gen_rtx_REG (tmpmode, *intreg),
3599 GEN_INT (i*8));
3600 intreg++;
3601 break;
3602 case X86_64_SSESF_CLASS:
3603 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3604 gen_rtx_REG (SFmode,
3605 SSE_REGNO (sse_regno)),
3606 GEN_INT (i*8));
3607 sse_regno++;
3608 break;
3609 case X86_64_SSEDF_CLASS:
3610 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3611 gen_rtx_REG (DFmode,
3612 SSE_REGNO (sse_regno)),
3613 GEN_INT (i*8));
3614 sse_regno++;
3615 break;
3616 case X86_64_SSE_CLASS:
3617 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
3618 tmpmode = TImode;
3619 else
3620 tmpmode = DImode;
3621 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3622 gen_rtx_REG (tmpmode,
3623 SSE_REGNO (sse_regno)),
3624 GEN_INT (i*8));
3625 if (tmpmode == TImode)
3626 i++;
3627 sse_regno++;
3628 break;
3629 default:
3630 gcc_unreachable ();
3631 }
3632 }
3633
3634 /* Empty aligned struct, union or class. */
3635 if (nexps == 0)
3636 return NULL;
3637
3638 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
3639 for (i = 0; i < nexps; i++)
3640 XVECEXP (ret, 0, i) = exp [i];
3641 return ret;
3642 }
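/* Example (illustrative): for the same struct { double d; int i; }
   argument, with the first integer and SSE registers still free,
   construct_container builds a PARALLEL roughly of the form

     (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                (expr_list (reg:DI di) (const_int 8))])

   i.e. the first eightbyte goes in an SSE register and the second in a
   general-purpose register.  */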
3643
3644 /* Update the data in CUM to advance over an argument
3645 of mode MODE and data type TYPE.
3646 (TYPE is null for libcalls where that information may not be available.) */
3647
3648 void
3649 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3650 tree type, int named)
3651 {
3652 int bytes =
3653 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3654 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3655
3656 if (type)
3657 mode = type_natural_mode (type);
3658
3659 if (TARGET_DEBUG_ARG)
3660 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
3661 "mode=%s, named=%d)\n\n",
3662 words, cum->words, cum->nregs, cum->sse_nregs,
3663 GET_MODE_NAME (mode), named);
3664
3665 if (TARGET_64BIT)
3666 {
3667 int int_nregs, sse_nregs;
3668 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
3669 cum->words += words;
3670 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
3671 {
3672 cum->nregs -= int_nregs;
3673 cum->sse_nregs -= sse_nregs;
3674 cum->regno += int_nregs;
3675 cum->sse_regno += sse_nregs;
3676 }
3677 else
3678 cum->words += words;
3679 }
3680 else
3681 {
3682 switch (mode)
3683 {
3684 default:
3685 break;
3686
3687 case BLKmode:
3688 if (bytes < 0)
3689 break;
3690 /* FALLTHRU */
3691
3692 case DImode:
3693 case SImode:
3694 case HImode:
3695 case QImode:
3696 cum->words += words;
3697 cum->nregs -= words;
3698 cum->regno += words;
3699
3700 if (cum->nregs <= 0)
3701 {
3702 cum->nregs = 0;
3703 cum->regno = 0;
3704 }
3705 break;
3706
3707 case DFmode:
3708 if (cum->float_in_sse < 2)
3709 break;
3710 case SFmode:
3711 if (cum->float_in_sse < 1)
3712 break;
3713 /* FALLTHRU */
3714
3715 case TImode:
3716 case V16QImode:
3717 case V8HImode:
3718 case V4SImode:
3719 case V2DImode:
3720 case V4SFmode:
3721 case V2DFmode:
3722 if (!type || !AGGREGATE_TYPE_P (type))
3723 {
3724 cum->sse_words += words;
3725 cum->sse_nregs -= 1;
3726 cum->sse_regno += 1;
3727 if (cum->sse_nregs <= 0)
3728 {
3729 cum->sse_nregs = 0;
3730 cum->sse_regno = 0;
3731 }
3732 }
3733 break;
3734
3735 case V8QImode:
3736 case V4HImode:
3737 case V2SImode:
3738 case V2SFmode:
3739 if (!type || !AGGREGATE_TYPE_P (type))
3740 {
3741 cum->mmx_words += words;
3742 cum->mmx_nregs -= 1;
3743 cum->mmx_regno += 1;
3744 if (cum->mmx_nregs <= 0)
3745 {
3746 cum->mmx_nregs = 0;
3747 cum->mmx_regno = 0;
3748 }
3749 }
3750 break;
3751 }
3752 }
3753 }
3754
3755 /* Define where to put the arguments to a function.
3756 Value is zero to push the argument on the stack,
3757 or a hard register in which to store the argument.
3758
3759 MODE is the argument's machine mode.
3760 TYPE is the data type of the argument (as a tree).
3761 This is null for libcalls where that information may
3762 not be available.
3763 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3764 the preceding args and about the function being called.
3765 NAMED is nonzero if this argument is a named parameter
3766 (otherwise it is an extra parameter matching an ellipsis). */
3767
3768 rtx
3769 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
3770 tree type, int named)
3771 {
3772 enum machine_mode mode = orig_mode;
3773 rtx ret = NULL_RTX;
3774 int bytes =
3775 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3776 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3777 static bool warnedsse, warnedmmx;
3778
3779 /* To simplify the code below, represent vector types with a vector mode
3780 even if MMX/SSE are not active. */
3781 if (type && TREE_CODE (type) == VECTOR_TYPE)
3782 mode = type_natural_mode (type);
3783
3784 /* Handle a hidden AL argument containing the number of SSE registers used
3785 by varargs x86-64 functions. For the i386 ABI just return constm1_rtx
3786 to avoid any AL settings. */
3787 if (mode == VOIDmode)
3788 {
3789 if (TARGET_64BIT)
3790 return GEN_INT (cum->maybe_vaarg
3791 ? (cum->sse_nregs < 0
3792 ? SSE_REGPARM_MAX
3793 : cum->sse_regno)
3794 : -1);
3795 else
3796 return constm1_rtx;
3797 }
3798 if (TARGET_64BIT)
3799 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
3800 cum->sse_nregs,
3801 &x86_64_int_parameter_registers [cum->regno],
3802 cum->sse_regno);
3803 else
3804 switch (mode)
3805 {
3806 /* For now, pass fp/complex values on the stack. */
3807 default:
3808 break;
3809
3810 case BLKmode:
3811 if (bytes < 0)
3812 break;
3813 /* FALLTHRU */
3814 case DImode:
3815 case SImode:
3816 case HImode:
3817 case QImode:
3818 if (words <= cum->nregs)
3819 {
3820 int regno = cum->regno;
3821
3822 /* Fastcall allocates the first two DWORD (SImode) or
3823 smaller arguments to ECX and EDX. */
3824 if (cum->fastcall)
3825 {
3826 if (mode == BLKmode || mode == DImode)
3827 break;
3828
3829 /* ECX not EAX is the first allocated register. */
3830 if (regno == 0)
3831 regno = 2;
3832 }
3833 ret = gen_rtx_REG (mode, regno);
3834 }
3835 break;
3836 case DFmode:
3837 if (cum->float_in_sse < 2)
3838 break;
3839 case SFmode:
3840 if (cum->float_in_sse < 1)
3841 break;
3842 /* FALLTHRU */
3843 case TImode:
3844 case V16QImode:
3845 case V8HImode:
3846 case V4SImode:
3847 case V2DImode:
3848 case V4SFmode:
3849 case V2DFmode:
3850 if (!type || !AGGREGATE_TYPE_P (type))
3851 {
3852 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
3853 {
3854 warnedsse = true;
3855 warning (0, "SSE vector argument without SSE enabled "
3856 "changes the ABI");
3857 }
3858 if (cum->sse_nregs)
3859 ret = gen_reg_or_parallel (mode, orig_mode,
3860 cum->sse_regno + FIRST_SSE_REG);
3861 }
3862 break;
3863 case V8QImode:
3864 case V4HImode:
3865 case V2SImode:
3866 case V2SFmode:
3867 if (!type || !AGGREGATE_TYPE_P (type))
3868 {
3869 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
3870 {
3871 warnedmmx = true;
3872 warning (0, "MMX vector argument without MMX enabled "
3873 "changes the ABI");
3874 }
3875 if (cum->mmx_nregs)
3876 ret = gen_reg_or_parallel (mode, orig_mode,
3877 cum->mmx_regno + FIRST_MMX_REG);
3878 }
3879 break;
3880 }
3881
3882 if (TARGET_DEBUG_ARG)
3883 {
3884 fprintf (stderr,
3885 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
3886 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
3887
3888 if (ret)
3889 print_simple_rtl (stderr, ret);
3890 else
3891 fprintf (stderr, ", stack");
3892
3893 fprintf (stderr, " )\n");
3894 }
3895
3896 return ret;
3897 }
3898
3899 /* A C expression that indicates when an argument must be passed by
3900 reference. If nonzero for an argument, a copy of that argument is
3901 made in memory and a pointer to the argument is passed instead of
3902 the argument itself. The pointer is passed in whatever way is
3903 appropriate for passing a pointer to that type. */
3904
3905 static bool
3906 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3907 enum machine_mode mode ATTRIBUTE_UNUSED,
3908 tree type, bool named ATTRIBUTE_UNUSED)
3909 {
3910 if (!TARGET_64BIT)
3911 return 0;
3912
3913 if (type && int_size_in_bytes (type) == -1)
3914 {
3915 if (TARGET_DEBUG_ARG)
3916 fprintf (stderr, "function_arg_pass_by_reference\n");
3917 return 1;
3918 }
3919
3920 return 0;
3921 }
3922
3923 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
3924 ABI. Only called if TARGET_SSE. */
3925 static bool
3926 contains_128bit_aligned_vector_p (tree type)
3927 {
3928 enum machine_mode mode = TYPE_MODE (type);
3929 if (SSE_REG_MODE_P (mode)
3930 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
3931 return true;
3932 if (TYPE_ALIGN (type) < 128)
3933 return false;
3934
3935 if (AGGREGATE_TYPE_P (type))
3936 {
3937 /* Walk the aggregates recursively. */
3938 switch (TREE_CODE (type))
3939 {
3940 case RECORD_TYPE:
3941 case UNION_TYPE:
3942 case QUAL_UNION_TYPE:
3943 {
3944 tree field;
3945
3946 /* Walk all the structure fields. */
3947 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3948 {
3949 if (TREE_CODE (field) == FIELD_DECL
3950 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
3951 return true;
3952 }
3953 break;
3954 }
3955
3956 case ARRAY_TYPE:
3957 /* Just for use if some languages pass arrays by value. */
3958 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
3959 return true;
3960 break;
3961
3962 default:
3963 gcc_unreachable ();
3964 }
3965 }
3966 return false;
3967 }
3968
3969 /* Gives the alignment boundary, in bits, of an argument with the
3970 specified mode and type. */
3971
3972 int
3973 ix86_function_arg_boundary (enum machine_mode mode, tree type)
3974 {
3975 int align;
3976 if (type)
3977 align = TYPE_ALIGN (type);
3978 else
3979 align = GET_MODE_ALIGNMENT (mode);
3980 if (align < PARM_BOUNDARY)
3981 align = PARM_BOUNDARY;
3982 if (!TARGET_64BIT)
3983 {
3984 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
3985 make an exception for SSE modes since these require 128bit
3986 alignment.
3987
3988 The handling here differs from field_alignment. ICC aligns MMX
3989 arguments to 4 byte boundaries, while structure fields are aligned
3990 to 8 byte boundaries. */
3991 if (!TARGET_SSE)
3992 align = PARM_BOUNDARY;
3993 else if (!type)
3994 {
3995 if (!SSE_REG_MODE_P (mode))
3996 align = PARM_BOUNDARY;
3997 }
3998 else
3999 {
4000 if (!contains_128bit_aligned_vector_p (type))
4001 align = PARM_BOUNDARY;
4002 }
4003 }
4004 if (align > 128)
4005 align = 128;
4006 return align;
4007 }
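/* Example (illustrative): on a 32-bit target with SSE enabled, an __m128
   argument gets a 128-bit boundary, while a plain double (and any
   aggregate without a 128-bit aligned vector member) is dropped back to
   PARM_BOUNDARY, i.e. 32 bits on i386.  */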
4008
4009 /* Return true if N is a possible register number of function value. */
4010 bool
4011 ix86_function_value_regno_p (int regno)
4012 {
4013 if (regno == 0
4014 || (regno == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
4015 || (regno == FIRST_SSE_REG && TARGET_SSE))
4016 return true;
4017
4018 if (!TARGET_64BIT
4019 && (regno == FIRST_MMX_REG && TARGET_MMX))
4020 return true;
4021
4022 return false;
4023 }
4024
4025 /* Define how to find the value returned by a function.
4026 VALTYPE is the data type of the value (as a tree).
4027 If the precise function being called is known, FUNC is its FUNCTION_DECL;
4028 otherwise, FUNC is 0. */
4029 rtx
4030 ix86_function_value (tree valtype, tree fntype_or_decl,
4031 bool outgoing ATTRIBUTE_UNUSED)
4032 {
4033 enum machine_mode natmode = type_natural_mode (valtype);
4034
4035 if (TARGET_64BIT)
4036 {
4037 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
4038 1, REGPARM_MAX, SSE_REGPARM_MAX,
4039 x86_64_int_return_registers, 0);
4040 /* For zero-sized structures, construct_container returns NULL, but we
4041 need to keep the rest of the compiler happy by returning a meaningful value. */
4042 if (!ret)
4043 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
4044 return ret;
4045 }
4046 else
4047 {
4048 tree fn = NULL_TREE, fntype;
4049 if (fntype_or_decl
4050 && DECL_P (fntype_or_decl))
4051 fn = fntype_or_decl;
4052 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
4053 return gen_rtx_REG (TYPE_MODE (valtype),
4054 ix86_value_regno (natmode, fn, fntype));
4055 }
4056 }
4057
4058 /* Return true iff type is returned in memory. */
4059 int
4060 ix86_return_in_memory (tree type)
4061 {
4062 int needed_intregs, needed_sseregs, size;
4063 enum machine_mode mode = type_natural_mode (type);
4064
4065 if (TARGET_64BIT)
4066 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
4067
4068 if (mode == BLKmode)
4069 return 1;
4070
4071 size = int_size_in_bytes (type);
4072
4073 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
4074 return 0;
4075
4076 if (VECTOR_MODE_P (mode) || mode == TImode)
4077 {
4078 /* User-created vectors small enough to fit in EAX. */
4079 if (size < 8)
4080 return 0;
4081
4082 /* MMX/3dNow values are returned in MM0,
4083 except when MMX doesn't exist. */
4084 if (size == 8)
4085 return (TARGET_MMX ? 0 : 1);
4086
4087 /* SSE values are returned in XMM0, except when it doesn't exist. */
4088 if (size == 16)
4089 return (TARGET_SSE ? 0 : 1);
4090 }
4091
4092 if (mode == XFmode)
4093 return 0;
4094
4095 if (mode == TDmode)
4096 return 1;
4097
4098 if (size > 12)
4099 return 1;
4100 return 0;
4101 }
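/* Example (illustrative): on a 32-bit target a BLKmode aggregate such as
   struct { int a, b, c, d; } is returned in memory, a long double
   (XFmode) is returned in a register, and a complex double (DCmode,
   16 bytes) goes to memory because its size exceeds 12 bytes.  */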
4102
4103 /* When returning SSE vector types, we have a choice of either
4104 (1) being abi incompatible with a -march switch, or
4105 (2) generating an error.
4106 Given no good solution, I think the safest thing is one warning.
4107 The user won't be able to use -Werror, but....
4108
4109 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
4110 called in response to actually generating a caller or callee that
4111 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
4112 via aggregate_value_p for general type probing from tree-ssa. */
4113
4114 static rtx
4115 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
4116 {
4117 static bool warnedsse, warnedmmx;
4118
4119 if (type)
4120 {
4121 /* Look at the return type of the function, not the function type. */
4122 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
4123
4124 if (!TARGET_SSE && !warnedsse)
4125 {
4126 if (mode == TImode
4127 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4128 {
4129 warnedsse = true;
4130 warning (0, "SSE vector return without SSE enabled "
4131 "changes the ABI");
4132 }
4133 }
4134
4135 if (!TARGET_MMX && !warnedmmx)
4136 {
4137 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4138 {
4139 warnedmmx = true;
4140 warning (0, "MMX vector return without MMX enabled "
4141 "changes the ABI");
4142 }
4143 }
4144 }
4145
4146 return NULL;
4147 }
4148
4149 /* Define how to find the value returned by a library function
4150 assuming the value has mode MODE. */
4151 rtx
4152 ix86_libcall_value (enum machine_mode mode)
4153 {
4154 if (TARGET_64BIT)
4155 {
4156 switch (mode)
4157 {
4158 case SFmode:
4159 case SCmode:
4160 case DFmode:
4161 case DCmode:
4162 case TFmode:
4163 case SDmode:
4164 case DDmode:
4165 case TDmode:
4166 return gen_rtx_REG (mode, FIRST_SSE_REG);
4167 case XFmode:
4168 case XCmode:
4169 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
4170 case TCmode:
4171 return NULL;
4172 default:
4173 return gen_rtx_REG (mode, 0);
4174 }
4175 }
4176 else
4177 return gen_rtx_REG (mode, ix86_value_regno (mode, NULL, NULL));
4178 }
4179
4180 /* Given a mode, return the register to use for a return value. */
4181
4182 static int
4183 ix86_value_regno (enum machine_mode mode, tree func, tree fntype)
4184 {
4185 gcc_assert (!TARGET_64BIT);
4186
4187 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
4188 we normally prevent this case when mmx is not available. However
4189 some ABIs may require the result to be returned like DImode. */
4190 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4191 return TARGET_MMX ? FIRST_MMX_REG : 0;
4192
4193 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
4194 we prevent this case when sse is not available. However some ABIs
4195 may require the result to be returned like integer TImode. */
4196 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4197 return TARGET_SSE ? FIRST_SSE_REG : 0;
4198
4199 /* Decimal floating point values can go in %eax, unlike other float modes. */
4200 if (DECIMAL_FLOAT_MODE_P (mode))
4201 return 0;
4202
4203 /* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
4204 if (!SCALAR_FLOAT_MODE_P (mode) || !TARGET_FLOAT_RETURNS_IN_80387)
4205 return 0;
4206
4207 /* Floating point return values in %st(0), except for local functions when
4208 SSE math is enabled or for functions with sseregparm attribute. */
4209 if ((func || fntype)
4210 && (mode == SFmode || mode == DFmode))
4211 {
4212 int sse_level = ix86_function_sseregparm (fntype, func);
4213 if ((sse_level >= 1 && mode == SFmode)
4214 || (sse_level == 2 && mode == DFmode))
4215 return FIRST_SSE_REG;
4216 }
4217
4218 return FIRST_FLOAT_REG;
4219 }
4220 \f
4221 /* Create the va_list data type. */
4222
4223 static tree
4224 ix86_build_builtin_va_list (void)
4225 {
4226 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
4227
4228 /* For i386 we use plain pointer to argument area. */
4229 if (!TARGET_64BIT)
4230 return build_pointer_type (char_type_node);
4231
4232 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4233 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
4234
4235 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
4236 unsigned_type_node);
4237 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
4238 unsigned_type_node);
4239 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
4240 ptr_type_node);
4241 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
4242 ptr_type_node);
4243
4244 va_list_gpr_counter_field = f_gpr;
4245 va_list_fpr_counter_field = f_fpr;
4246
4247 DECL_FIELD_CONTEXT (f_gpr) = record;
4248 DECL_FIELD_CONTEXT (f_fpr) = record;
4249 DECL_FIELD_CONTEXT (f_ovf) = record;
4250 DECL_FIELD_CONTEXT (f_sav) = record;
4251
4252 TREE_CHAIN (record) = type_decl;
4253 TYPE_NAME (record) = type_decl;
4254 TYPE_FIELDS (record) = f_gpr;
4255 TREE_CHAIN (f_gpr) = f_fpr;
4256 TREE_CHAIN (f_fpr) = f_ovf;
4257 TREE_CHAIN (f_ovf) = f_sav;
4258
4259 layout_type (record);
4260
4261 /* The correct type is an array type of one element. */
4262 return build_array_type (record, build_index_type (size_zero_node));
4263 }
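/* For reference, the 64-bit record built above corresponds to this C
   declaration (a sketch; field names and types as used above):

     typedef struct __va_list_tag {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __va_list_tag;
     typedef __va_list_tag va_list[1];
*/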
4264
4265 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
4266
4267 static void
4268 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4269 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4270 int no_rtl)
4271 {
4272 CUMULATIVE_ARGS next_cum;
4273 rtx save_area = NULL_RTX, mem;
4274 rtx label;
4275 rtx label_ref;
4276 rtx tmp_reg;
4277 rtx nsse_reg;
4278 int set;
4279 tree fntype;
4280 int stdarg_p;
4281 int i;
4282
4283 if (!TARGET_64BIT)
4284 return;
4285
4286 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
4287 return;
4288
4289 /* Indicate to allocate space on the stack for varargs save area. */
4290 ix86_save_varrargs_registers = 1;
4291
4292 cfun->stack_alignment_needed = 128;
4293
4294 fntype = TREE_TYPE (current_function_decl);
4295 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
4296 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
4297 != void_type_node));
4298
4299 /* For varargs, we do not want to skip the dummy va_dcl argument.
4300 For stdargs, we do want to skip the last named argument. */
4301 next_cum = *cum;
4302 if (stdarg_p)
4303 function_arg_advance (&next_cum, mode, type, 1);
4304
4305 if (!no_rtl)
4306 save_area = frame_pointer_rtx;
4307
4308 set = get_varargs_alias_set ();
4309
4310 for (i = next_cum.regno;
4311 i < ix86_regparm
4312 && i < next_cum.regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
4313 i++)
4314 {
4315 mem = gen_rtx_MEM (Pmode,
4316 plus_constant (save_area, i * UNITS_PER_WORD));
4317 MEM_NOTRAP_P (mem) = 1;
4318 set_mem_alias_set (mem, set);
4319 emit_move_insn (mem, gen_rtx_REG (Pmode,
4320 x86_64_int_parameter_registers[i]));
4321 }
4322
4323 if (next_cum.sse_nregs && cfun->va_list_fpr_size)
4324 {
4325 /* Now emit code to save SSE registers. The AX parameter contains the
4326 number of SSE parameter registers used to call this function. We use
4327 the sse_prologue_save insn template that produces a computed jump across
4328 the SSE saves. We need some preparation work to get this working. */
4329
4330 label = gen_label_rtx ();
4331 label_ref = gen_rtx_LABEL_REF (Pmode, label);
4332
4333 /* Compute the address to jump to:
4334 label - 4*eax + nnamed_sse_arguments*4 */
4335 tmp_reg = gen_reg_rtx (Pmode);
4336 nsse_reg = gen_reg_rtx (Pmode);
4337 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
4338 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
4339 gen_rtx_MULT (Pmode, nsse_reg,
4340 GEN_INT (4))));
4341 if (next_cum.sse_regno)
4342 emit_move_insn
4343 (nsse_reg,
4344 gen_rtx_CONST (DImode,
4345 gen_rtx_PLUS (DImode,
4346 label_ref,
4347 GEN_INT (next_cum.sse_regno * 4))));
4348 else
4349 emit_move_insn (nsse_reg, label_ref);
4350 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
4351
4352 /* Compute the address of the memory block we save into. We always use a
4353 pointer pointing 127 bytes after the first byte to store - this keeps
4354 the instruction size limited to 4 bytes. */
4355 tmp_reg = gen_reg_rtx (Pmode);
4356 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
4357 plus_constant (save_area,
4358 8 * REGPARM_MAX + 127)));
4359 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
4360 MEM_NOTRAP_P (mem) = 1;
4361 set_mem_alias_set (mem, set);
4362 set_mem_align (mem, BITS_PER_WORD);
4363
4364 /* And finally do the dirty job! */
4365 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
4366 GEN_INT (next_cum.sse_regno), label));
4367 }
4368
4369 }
4370
4371 /* Implement va_start. */
4372
4373 void
4374 ix86_va_start (tree valist, rtx nextarg)
4375 {
4376 HOST_WIDE_INT words, n_gpr, n_fpr;
4377 tree f_gpr, f_fpr, f_ovf, f_sav;
4378 tree gpr, fpr, ovf, sav, t;
4379 tree type;
4380
4381 /* Only 64bit target needs something special. */
4382 if (!TARGET_64BIT)
4383 {
4384 std_expand_builtin_va_start (valist, nextarg);
4385 return;
4386 }
4387
4388 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
4389 f_fpr = TREE_CHAIN (f_gpr);
4390 f_ovf = TREE_CHAIN (f_fpr);
4391 f_sav = TREE_CHAIN (f_ovf);
4392
4393 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
4394 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
4395 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
4396 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
4397 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4398
4399 /* Count number of gp and fp argument registers used. */
4400 words = current_function_args_info.words;
4401 n_gpr = current_function_args_info.regno;
4402 n_fpr = current_function_args_info.sse_regno;
4403
4404 if (TARGET_DEBUG_ARG)
4405 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
4406 (int) words, (int) n_gpr, (int) n_fpr);
4407
4408 if (cfun->va_list_gpr_size)
4409 {
4410 type = TREE_TYPE (gpr);
4411 t = build2 (GIMPLE_MODIFY_STMT, type, gpr,
4412 build_int_cst (type, n_gpr * 8));
4413 TREE_SIDE_EFFECTS (t) = 1;
4414 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4415 }
4416
4417 if (cfun->va_list_fpr_size)
4418 {
4419 type = TREE_TYPE (fpr);
4420 t = build2 (GIMPLE_MODIFY_STMT, type, fpr,
4421 build_int_cst (type, n_fpr * 16 + 8*REGPARM_MAX));
4422 TREE_SIDE_EFFECTS (t) = 1;
4423 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4424 }
4425
4426 /* Find the overflow area. */
4427 type = TREE_TYPE (ovf);
4428 t = make_tree (type, virtual_incoming_args_rtx);
4429 if (words != 0)
4430 t = build2 (PLUS_EXPR, type, t,
4431 build_int_cst (type, words * UNITS_PER_WORD));
4432 t = build2 (GIMPLE_MODIFY_STMT, type, ovf, t);
4433 TREE_SIDE_EFFECTS (t) = 1;
4434 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4435
4436 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
4437 {
4438 /* Find the register save area.
4439 The function prologue saves it right above the stack frame. */
4440 type = TREE_TYPE (sav);
4441 t = make_tree (type, frame_pointer_rtx);
4442 t = build2 (GIMPLE_MODIFY_STMT, type, sav, t);
4443 TREE_SIDE_EFFECTS (t) = 1;
4444 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4445 }
4446 }
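/* Example (illustrative, assuming REGPARM_MAX == 6 as on x86-64): for a
   printf-like function f (const char *fmt, ...), one GP register is
   consumed by FMT, so va_start initializes gp_offset = 8,
   fp_offset = 48 (= 8 * REGPARM_MAX), overflow_arg_area to the incoming
   stack argument area past any named stack arguments, and reg_save_area
   to the register save block set up by the prologue.  */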
4447
4448 /* Implement va_arg. */
4449
4450 tree
4451 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
4452 {
4453 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
4454 tree f_gpr, f_fpr, f_ovf, f_sav;
4455 tree gpr, fpr, ovf, sav, t;
4456 int size, rsize;
4457 tree lab_false, lab_over = NULL_TREE;
4458 tree addr, t2;
4459 rtx container;
4460 int indirect_p = 0;
4461 tree ptrtype;
4462 enum machine_mode nat_mode;
4463
4464 /* Only 64bit target needs something special. */
4465 if (!TARGET_64BIT)
4466 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4467
4468 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
4469 f_fpr = TREE_CHAIN (f_gpr);
4470 f_ovf = TREE_CHAIN (f_fpr);
4471 f_sav = TREE_CHAIN (f_ovf);
4472
4473 valist = build_va_arg_indirect_ref (valist);
4474 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
4475 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
4476 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
4477 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4478
4479 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
4480 if (indirect_p)
4481 type = build_pointer_type (type);
4482 size = int_size_in_bytes (type);
4483 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4484
4485 nat_mode = type_natural_mode (type);
4486 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
4487 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
4488
4489 /* Pull the value out of the saved registers. */
4490
4491 addr = create_tmp_var (ptr_type_node, "addr");
4492 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
4493
4494 if (container)
4495 {
4496 int needed_intregs, needed_sseregs;
4497 bool need_temp;
4498 tree int_addr, sse_addr;
4499
4500 lab_false = create_artificial_label ();
4501 lab_over = create_artificial_label ();
4502
4503 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
4504
4505 need_temp = (!REG_P (container)
4506 && ((needed_intregs && TYPE_ALIGN (type) > 64)
4507 || TYPE_ALIGN (type) > 128));
4508
4509 /* When we are passing a structure, verify that it is a consecutive block
4510 in the register save area. If not, we need to do moves. */
4511 if (!need_temp && !REG_P (container))
4512 {
4513 /* Verify that all registers are strictly consecutive */
4514 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
4515 {
4516 int i;
4517
4518 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4519 {
4520 rtx slot = XVECEXP (container, 0, i);
4521 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
4522 || INTVAL (XEXP (slot, 1)) != i * 16)
4523 need_temp = 1;
4524 }
4525 }
4526 else
4527 {
4528 int i;
4529
4530 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4531 {
4532 rtx slot = XVECEXP (container, 0, i);
4533 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
4534 || INTVAL (XEXP (slot, 1)) != i * 8)
4535 need_temp = 1;
4536 }
4537 }
4538 }
4539 if (!need_temp)
4540 {
4541 int_addr = addr;
4542 sse_addr = addr;
4543 }
4544 else
4545 {
4546 int_addr = create_tmp_var (ptr_type_node, "int_addr");
4547 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
4548 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
4549 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
4550 }
4551
4552 /* First ensure that we fit completely in registers. */
4553 if (needed_intregs)
4554 {
4555 t = build_int_cst (TREE_TYPE (gpr),
4556 (REGPARM_MAX - needed_intregs + 1) * 8);
4557 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
4558 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4559 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4560 gimplify_and_add (t, pre_p);
4561 }
4562 if (needed_sseregs)
4563 {
4564 t = build_int_cst (TREE_TYPE (fpr),
4565 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
4566 + REGPARM_MAX * 8);
4567 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
4568 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4569 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4570 gimplify_and_add (t, pre_p);
4571 }
4572
4573 /* Compute index to start of area used for integer regs. */
4574 if (needed_intregs)
4575 {
4576 /* int_addr = gpr + sav; */
4577 t = fold_convert (ptr_type_node, gpr);
4578 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4579 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, int_addr, t);
4580 gimplify_and_add (t, pre_p);
4581 }
4582 if (needed_sseregs)
4583 {
4584 /* sse_addr = fpr + sav; */
4585 t = fold_convert (ptr_type_node, fpr);
4586 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4587 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, sse_addr, t);
4588 gimplify_and_add (t, pre_p);
4589 }
4590 if (need_temp)
4591 {
4592 int i;
4593 tree temp = create_tmp_var (type, "va_arg_tmp");
4594
4595 /* addr = &temp; */
4596 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
4597 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
4598 gimplify_and_add (t, pre_p);
4599
4600 for (i = 0; i < XVECLEN (container, 0); i++)
4601 {
4602 rtx slot = XVECEXP (container, 0, i);
4603 rtx reg = XEXP (slot, 0);
4604 enum machine_mode mode = GET_MODE (reg);
4605 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
4606 tree addr_type = build_pointer_type (piece_type);
4607 tree src_addr, src;
4608 int src_offset;
4609 tree dest_addr, dest;
4610
4611 if (SSE_REGNO_P (REGNO (reg)))
4612 {
4613 src_addr = sse_addr;
4614 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
4615 }
4616 else
4617 {
4618 src_addr = int_addr;
4619 src_offset = REGNO (reg) * 8;
4620 }
4621 src_addr = fold_convert (addr_type, src_addr);
4622 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
4623 size_int (src_offset)));
4624 src = build_va_arg_indirect_ref (src_addr);
4625
4626 dest_addr = fold_convert (addr_type, addr);
4627 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
4628 size_int (INTVAL (XEXP (slot, 1)))));
4629 dest = build_va_arg_indirect_ref (dest_addr);
4630
4631 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, dest, src);
4632 gimplify_and_add (t, pre_p);
4633 }
4634 }
4635
4636 if (needed_intregs)
4637 {
4638 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
4639 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
4640 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gpr), gpr, t);
4641 gimplify_and_add (t, pre_p);
4642 }
4643 if (needed_sseregs)
4644 {
4645 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
4646 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
4647 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (fpr), fpr, t);
4648 gimplify_and_add (t, pre_p);
4649 }
4650
4651 t = build1 (GOTO_EXPR, void_type_node, lab_over);
4652 gimplify_and_add (t, pre_p);
4653
4654 t = build1 (LABEL_EXPR, void_type_node, lab_false);
4655 append_to_statement_list (t, pre_p);
4656 }
4657
4658 /* ... otherwise out of the overflow area. */
4659
4660 /* Care for on-stack alignment if needed. */
4661 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64
4662 || integer_zerop (TYPE_SIZE (type)))
4663 t = ovf;
4664 else
4665 {
4666 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
4667 t = build2 (PLUS_EXPR, TREE_TYPE (ovf), ovf,
4668 build_int_cst (TREE_TYPE (ovf), align - 1));
4669 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4670 build_int_cst (TREE_TYPE (t), -align));
4671 }
4672 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
4673
4674 t2 = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
4675 gimplify_and_add (t2, pre_p);
4676
4677 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
4678 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
4679 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
4680 gimplify_and_add (t, pre_p);
4681
4682 if (container)
4683 {
4684 t = build1 (LABEL_EXPR, void_type_node, lab_over);
4685 append_to_statement_list (t, pre_p);
4686 }
4687
4688 ptrtype = build_pointer_type (type);
4689 addr = fold_convert (ptrtype, addr);
4690
4691 if (indirect_p)
4692 addr = build_va_arg_indirect_ref (addr);
4693 return build_va_arg_indirect_ref (addr);
4694 }
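/* Sketch (illustrative) of what the code gimplified above amounts to for
   a simple "va_arg (ap, int)": the register path is guarded by a range
   check on gp_offset and falls back to the overflow area otherwise,
   roughly

     if (ap->gp_offset >= 48) goto lab_false;
     addr = ap->reg_save_area + ap->gp_offset;
     ap->gp_offset += 8;
     goto lab_over;
   lab_false:
     addr = ap->overflow_arg_area;
     ap->overflow_arg_area += 8;
   lab_over:
     result = *(int *) addr;
*/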
4695 \f
4696 /* Return nonzero if OPNUM's MEM should be matched
4697 in movabs* patterns. */
4698
4699 int
4700 ix86_check_movabs (rtx insn, int opnum)
4701 {
4702 rtx set, mem;
4703
4704 set = PATTERN (insn);
4705 if (GET_CODE (set) == PARALLEL)
4706 set = XVECEXP (set, 0, 0);
4707 gcc_assert (GET_CODE (set) == SET);
4708 mem = XEXP (set, opnum);
4709 while (GET_CODE (mem) == SUBREG)
4710 mem = SUBREG_REG (mem);
4711 gcc_assert (GET_CODE (mem) == MEM);
4712 return (volatile_ok || !MEM_VOLATILE_P (mem));
4713 }
4714 \f
4715 /* Initialize the table of extra 80387 mathematical constants. */
4716
4717 static void
4718 init_ext_80387_constants (void)
4719 {
4720 static const char * cst[5] =
4721 {
4722 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
4723 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
4724 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
4725 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
4726 "3.1415926535897932385128089594061862044", /* 4: fldpi */
4727 };
4728 int i;
4729
4730 for (i = 0; i < 5; i++)
4731 {
4732 real_from_string (&ext_80387_constants_table[i], cst[i]);
4733 /* Ensure each constant is rounded to XFmode precision. */
4734 real_convert (&ext_80387_constants_table[i],
4735 XFmode, &ext_80387_constants_table[i]);
4736 }
4737
4738 ext_80387_constants_init = 1;
4739 }
4740
4741 /* Return true if the constant is something that can be loaded with
4742 a special instruction. */
4743
4744 int
4745 standard_80387_constant_p (rtx x)
4746 {
4747 REAL_VALUE_TYPE r;
4748
4749 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
4750 return -1;
4751
4752 if (x == CONST0_RTX (GET_MODE (x)))
4753 return 1;
4754 if (x == CONST1_RTX (GET_MODE (x)))
4755 return 2;
4756
4757 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4758
4759 /* For XFmode constants, try to find a special 80387 instruction when
4760 optimizing for size or on those CPUs that benefit from them. */
4761 if (GET_MODE (x) == XFmode
4762 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
4763 {
4764 int i;
4765
4766 if (! ext_80387_constants_init)
4767 init_ext_80387_constants ();
4768
4769 for (i = 0; i < 5; i++)
4770 if (real_identical (&r, &ext_80387_constants_table[i]))
4771 return i + 3;
4772 }
4773
4774 /* Load of the constant -0.0 or -1.0 will be split as
4775 fldz;fchs or fld1;fchs sequence. */
4776 if (real_isnegzero (&r))
4777 return 8;
4778 if (real_identical (&r, &dconstm1))
4779 return 9;
4780
4781 return 0;
4782 }
4783
4784 /* Return the opcode of the special instruction to be used to load
4785 the constant X. */
4786
4787 const char *
4788 standard_80387_constant_opcode (rtx x)
4789 {
4790 switch (standard_80387_constant_p (x))
4791 {
4792 case 1:
4793 return "fldz";
4794 case 2:
4795 return "fld1";
4796 case 3:
4797 return "fldlg2";
4798 case 4:
4799 return "fldln2";
4800 case 5:
4801 return "fldl2e";
4802 case 6:
4803 return "fldl2t";
4804 case 7:
4805 return "fldpi";
4806 case 8:
4807 case 9:
4808 return "#";
4809 default:
4810 gcc_unreachable ();
4811 }
4812 }
4813
4814 /* Return the CONST_DOUBLE representing the 80387 constant that is
4815 loaded by the specified special instruction. The argument IDX
4816 matches the return value from standard_80387_constant_p. */
4817
4818 rtx
4819 standard_80387_constant_rtx (int idx)
4820 {
4821 int i;
4822
4823 if (! ext_80387_constants_init)
4824 init_ext_80387_constants ();
4825
4826 switch (idx)
4827 {
4828 case 3:
4829 case 4:
4830 case 5:
4831 case 6:
4832 case 7:
4833 i = idx - 3;
4834 break;
4835
4836 default:
4837 gcc_unreachable ();
4838 }
4839
4840 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
4841 XFmode);
4842 }
4843
4844 /* Return 1 if MODE is a valid mode for SSE. */
4845 static int
4846 standard_sse_mode_p (enum machine_mode mode)
4847 {
4848 switch (mode)
4849 {
4850 case V16QImode:
4851 case V8HImode:
4852 case V4SImode:
4853 case V2DImode:
4854 case V4SFmode:
4855 case V2DFmode:
4856 return 1;
4857
4858 default:
4859 return 0;
4860 }
4861 }
4862
4863 /* Return 1 if X is an FP constant that we can load into an SSE register
4864 without using memory.  */
4865 int
4866 standard_sse_constant_p (rtx x)
4867 {
4868 enum machine_mode mode = GET_MODE (x);
4869
4870 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
4871 return 1;
4872 if (vector_all_ones_operand (x, mode)
4873 && standard_sse_mode_p (mode))
4874 return TARGET_SSE2 ? 2 : -1;
4875
4876 return 0;
4877 }
4878
4879 /* Return the opcode of the special instruction to be used to load
4880 the constant X. */
4881
4882 const char *
4883 standard_sse_constant_opcode (rtx insn, rtx x)
4884 {
4885 switch (standard_sse_constant_p (x))
4886 {
4887 case 1:
4888 if (get_attr_mode (insn) == MODE_V4SF)
4889 return "xorps\t%0, %0";
4890 else if (get_attr_mode (insn) == MODE_V2DF)
4891 return "xorpd\t%0, %0";
4892 else
4893 return "pxor\t%0, %0";
4894 case 2:
4895 return "pcmpeqd\t%0, %0";
4896 }
4897 gcc_unreachable ();
4898 }
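
/* For illustration (register names and modes are only examples): an all-zero
   vector such as (const_vector:V4SI [0 0 0 0]) is classified as 1 above and
   loaded with "xorps %xmm0, %xmm0" (or xorpd/pxor, depending on the insn
   mode), while an all-ones vector is classified as 2 on SSE2 targets and
   loaded with "pcmpeqd %xmm0, %xmm0".  Neither form touches memory.  */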
4899
4900 /* Return 1 if OP contains a symbol reference.  */
4901
4902 int
4903 symbolic_reference_mentioned_p (rtx op)
4904 {
4905 const char *fmt;
4906 int i;
4907
4908 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
4909 return 1;
4910
4911 fmt = GET_RTX_FORMAT (GET_CODE (op));
4912 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
4913 {
4914 if (fmt[i] == 'E')
4915 {
4916 int j;
4917
4918 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
4919 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
4920 return 1;
4921 }
4922
4923 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
4924 return 1;
4925 }
4926
4927 return 0;
4928 }
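
/* For example, (plus:SI (reg:SI 0) (symbol_ref:SI ("foo"))) mentions a
   symbol and yields 1, while (plus:SI (reg:SI 0) (const_int 4)) yields 0;
   the walk above recurses through both vector ('E') and expression ('e')
   operands.  */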
4929
4930 /* Return 1 if it is appropriate to emit `ret' instructions in the
4931 body of a function. Do this only if the epilogue is simple, needing a
4932 couple of insns. Prior to reloading, we can't tell how many registers
4933 must be saved, so return 0 then. Return 0 if there is no frame
4934 marker to de-allocate. */
4935
4936 int
4937 ix86_can_use_return_insn_p (void)
4938 {
4939 struct ix86_frame frame;
4940
4941 if (! reload_completed || frame_pointer_needed)
4942 return 0;
4943
4944 /* Don't allow more than 32K bytes of pop, since that's all we can do
4945 with one instruction. */
4946 if (current_function_pops_args
4947 && current_function_args_size >= 32768)
4948 return 0;
4949
4950 ix86_compute_frame_layout (&frame);
4951 return frame.to_allocate == 0 && frame.nregs == 0;
4952 }
4953 \f
4954 /* Value should be nonzero if functions must have frame pointers.
4955 Zero means the frame pointer need not be set up (and parms may
4956 be accessed via the stack pointer) in functions that seem suitable. */
4957
4958 int
4959 ix86_frame_pointer_required (void)
4960 {
4961 /* If we accessed previous frames, then the generated code expects
4962 to be able to access the saved ebp value in our frame. */
4963 if (cfun->machine->accesses_prev_frame)
4964 return 1;
4965
4966 /* Several x86 OSes need a frame pointer for other reasons,
4967 usually pertaining to setjmp. */
4968 if (SUBTARGET_FRAME_POINTER_REQUIRED)
4969 return 1;
4970
4971 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
4972 the frame pointer by default. Turn it back on now if we've not
4973 got a leaf function. */
4974 if (TARGET_OMIT_LEAF_FRAME_POINTER
4975 && (!current_function_is_leaf
4976 || ix86_current_function_calls_tls_descriptor))
4977 return 1;
4978
4979 if (current_function_profile)
4980 return 1;
4981
4982 return 0;
4983 }
4984
4985 /* Record that the current function accesses previous call frames. */
4986
4987 void
4988 ix86_setup_frame_addresses (void)
4989 {
4990 cfun->machine->accesses_prev_frame = 1;
4991 }
4992 \f
4993 #if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
4994 # define USE_HIDDEN_LINKONCE 1
4995 #else
4996 # define USE_HIDDEN_LINKONCE 0
4997 #endif
4998
4999 static int pic_labels_used;
5000
5001 /* Fills in the label name that should be used for a pc thunk for
5002 the given register. */
5003
5004 static void
5005 get_pc_thunk_name (char name[32], unsigned int regno)
5006 {
5007 gcc_assert (!TARGET_64BIT);
5008
5009 if (USE_HIDDEN_LINKONCE)
5010 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
5011 else
5012 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
5013 }
5014
5015
5016 /* Emit, at the end of the file, the -fpic pc thunks: each one loads its
5017 register with the return address of the caller and then returns. */
5018
5019 void
5020 ix86_file_end (void)
5021 {
5022 rtx xops[2];
5023 int regno;
5024
5025 for (regno = 0; regno < 8; ++regno)
5026 {
5027 char name[32];
5028
5029 if (! ((pic_labels_used >> regno) & 1))
5030 continue;
5031
5032 get_pc_thunk_name (name, regno);
5033
5034 #if TARGET_MACHO
5035 if (TARGET_MACHO)
5036 {
5037 switch_to_section (darwin_sections[text_coal_section]);
5038 fputs ("\t.weak_definition\t", asm_out_file);
5039 assemble_name (asm_out_file, name);
5040 fputs ("\n\t.private_extern\t", asm_out_file);
5041 assemble_name (asm_out_file, name);
5042 fputs ("\n", asm_out_file);
5043 ASM_OUTPUT_LABEL (asm_out_file, name);
5044 }
5045 else
5046 #endif
5047 if (USE_HIDDEN_LINKONCE)
5048 {
5049 tree decl;
5050
5051 decl = build_decl (FUNCTION_DECL, get_identifier (name),
5052 error_mark_node);
5053 TREE_PUBLIC (decl) = 1;
5054 TREE_STATIC (decl) = 1;
5055 DECL_ONE_ONLY (decl) = 1;
5056
5057 (*targetm.asm_out.unique_section) (decl, 0);
5058 switch_to_section (get_named_section (decl, NULL, 0));
5059
5060 (*targetm.asm_out.globalize_label) (asm_out_file, name);
5061 fputs ("\t.hidden\t", asm_out_file);
5062 assemble_name (asm_out_file, name);
5063 fputc ('\n', asm_out_file);
5064 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
5065 }
5066 else
5067 {
5068 switch_to_section (text_section);
5069 ASM_OUTPUT_LABEL (asm_out_file, name);
5070 }
5071
5072 xops[0] = gen_rtx_REG (SImode, regno);
5073 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
5074 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
5075 output_asm_insn ("ret", xops);
5076 }
5077
5078 if (NEED_INDICATE_EXEC_STACK)
5079 file_end_indicate_exec_stack ();
5080 }
5081
5082 /* Emit code for the SET_GOT patterns. */
5083
5084 const char *
5085 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
5086 {
5087 rtx xops[3];
5088
5089 xops[0] = dest;
5090 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
5091
5092 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
5093 {
5094 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
5095
5096 if (!flag_pic)
5097 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
5098 else
5099 output_asm_insn ("call\t%a2", xops);
5100
5101 #if TARGET_MACHO
5102 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5103 is what will be referenced by the Mach-O PIC subsystem. */
5104 if (!label)
5105 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5106 #endif
5107
5108 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5109 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
5110
5111 if (flag_pic)
5112 output_asm_insn ("pop{l}\t%0", xops);
5113 }
5114 else
5115 {
5116 char name[32];
5117 get_pc_thunk_name (name, REGNO (dest));
5118 pic_labels_used |= 1 << REGNO (dest);
5119
5120 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
5121 xops[2] = gen_rtx_MEM (QImode, xops[2]);
5122 output_asm_insn ("call\t%X2", xops);
5123 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5124 is what will be referenced by the Mach-O PIC subsystem. */
5125 #if TARGET_MACHO
5126 if (!label)
5127 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5128 else
5129 targetm.asm_out.internal_label (asm_out_file, "L",
5130 CODE_LABEL_NUMBER (label));
5131 #endif
5132 }
5133
5134 if (TARGET_MACHO)
5135 return "";
5136
5137 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
5138 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
5139 else
5140 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
5141
5142 return "";
5143 }
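
/* As an informal sketch (AT&T syntax, %ebx as the PIC register), the
   non-thunk -fpic variant above emits roughly

       call    1f
   1:  popl    %ebx
       addl    $_GLOBAL_OFFSET_TABLE_+[.-1b], %ebx

   while with TARGET_DEEP_BRANCH_PREDICTION it instead calls the pc thunk
   emitted by ix86_file_end and then adds the GOT symbol:

       call    __i686.get_pc_thunk.bx
       addl    $_GLOBAL_OFFSET_TABLE_, %ebx  */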
5144
5145 /* Generate a "push" pattern for input ARG. */
5146
5147 static rtx
5148 gen_push (rtx arg)
5149 {
5150 return gen_rtx_SET (VOIDmode,
5151 gen_rtx_MEM (Pmode,
5152 gen_rtx_PRE_DEC (Pmode,
5153 stack_pointer_rtx)),
5154 arg);
5155 }
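
/* The RTL built above is simply a pre-decrement store through the stack
   pointer, e.g. on a 32-bit target

     (set (mem:SI (pre_dec:SI (reg:SI sp)))
          (reg:SI arg))

   which the move/push patterns emit as a single "push" instruction.  */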
5156
5157 /* Return the number of an unused call-clobbered register available
5158 for the entire function, or INVALID_REGNUM if there is none. */
5159
5160 static unsigned int
5161 ix86_select_alt_pic_regnum (void)
5162 {
5163 if (current_function_is_leaf && !current_function_profile
5164 && !ix86_current_function_calls_tls_descriptor)
5165 {
5166 int i;
5167 for (i = 2; i >= 0; --i)
5168 if (!regs_ever_live[i])
5169 return i;
5170 }
5171
5172 return INVALID_REGNUM;
5173 }
5174
5175 /* Return 1 if we need to save REGNO. */
5176 static int
5177 ix86_save_reg (unsigned int regno, int maybe_eh_return)
5178 {
5179 if (pic_offset_table_rtx
5180 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
5181 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5182 || current_function_profile
5183 || current_function_calls_eh_return
5184 || current_function_uses_const_pool))
5185 {
5186 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
5187 return 0;
5188 return 1;
5189 }
5190
5191 if (current_function_calls_eh_return && maybe_eh_return)
5192 {
5193 unsigned i;
5194 for (i = 0; ; i++)
5195 {
5196 unsigned test = EH_RETURN_DATA_REGNO (i);
5197 if (test == INVALID_REGNUM)
5198 break;
5199 if (test == regno)
5200 return 1;
5201 }
5202 }
5203
5204 if (cfun->machine->force_align_arg_pointer
5205 && regno == REGNO (cfun->machine->force_align_arg_pointer))
5206 return 1;
5207
5208 return (regs_ever_live[regno]
5209 && !call_used_regs[regno]
5210 && !fixed_regs[regno]
5211 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
5212 }
5213
5214 /* Return number of registers to be saved on the stack. */
5215
5216 static int
5217 ix86_nsaved_regs (void)
5218 {
5219 int nregs = 0;
5220 int regno;
5221
5222 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
5223 if (ix86_save_reg (regno, true))
5224 nregs++;
5225 return nregs;
5226 }
5227
5228 /* Return the offset between two registers, one to be eliminated, and the other
5229 its replacement, at the start of a routine. */
5230
5231 HOST_WIDE_INT
5232 ix86_initial_elimination_offset (int from, int to)
5233 {
5234 struct ix86_frame frame;
5235 ix86_compute_frame_layout (&frame);
5236
5237 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
5238 return frame.hard_frame_pointer_offset;
5239 else if (from == FRAME_POINTER_REGNUM
5240 && to == HARD_FRAME_POINTER_REGNUM)
5241 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
5242 else
5243 {
5244 gcc_assert (to == STACK_POINTER_REGNUM);
5245
5246 if (from == ARG_POINTER_REGNUM)
5247 return frame.stack_pointer_offset;
5248
5249 gcc_assert (from == FRAME_POINTER_REGNUM);
5250 return frame.stack_pointer_offset - frame.frame_pointer_offset;
5251 }
5252 }
5253
5254 /* Fill the structure ix86_frame describing the frame of the currently compiled function. */
5255
5256 static void
5257 ix86_compute_frame_layout (struct ix86_frame *frame)
5258 {
5259 HOST_WIDE_INT total_size;
5260 unsigned int stack_alignment_needed;
5261 HOST_WIDE_INT offset;
5262 unsigned int preferred_alignment;
5263 HOST_WIDE_INT size = get_frame_size ();
5264
5265 frame->nregs = ix86_nsaved_regs ();
5266 total_size = size;
5267
5268 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
5269 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
5270
5271 /* During reload iterations the number of registers saved can change.
5272 Recompute the value as needed.  Do not recompute when the number of registers
5273 did not change, as reload makes multiple calls to this function and does not
5274 expect the decision to change within a single iteration. */
5275 if (!optimize_size
5276 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
5277 {
5278 int count = frame->nregs;
5279
5280 cfun->machine->use_fast_prologue_epilogue_nregs = count;
5281 /* The fast prologue uses moves instead of pushes to save registers.  This
5282 is significantly longer, but also executes faster, as modern hardware
5283 can execute the moves in parallel but cannot do so for push/pop.
5284
5285 Be careful about choosing which prologue to emit: when the function takes
5286 many instructions to execute we may use the slow version, as well as when
5287 the function is known to be outside a hot spot (known only with profile
5288 feedback).  Weight the size of the function by the number of registers
5289 to save, as it is cheap to use one or two push instructions but very
5290 slow to use many of them. */
5291 if (count)
5292 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
5293 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
5294 || (flag_branch_probabilities
5295 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
5296 cfun->machine->use_fast_prologue_epilogue = false;
5297 else
5298 cfun->machine->use_fast_prologue_epilogue
5299 = !expensive_function_p (count);
5300 }
5301 if (TARGET_PROLOGUE_USING_MOVE
5302 && cfun->machine->use_fast_prologue_epilogue)
5303 frame->save_regs_using_mov = true;
5304 else
5305 frame->save_regs_using_mov = false;
5306
5307
5308 /* Skip return address and saved base pointer. */
5309 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
5310
5311 frame->hard_frame_pointer_offset = offset;
5312
5313 /* Do some sanity checking of stack_alignment_needed and
5314 preferred_alignment, since the i386 port is the only one using these
5315 features, and they may break easily. */
5316
5317 gcc_assert (!size || stack_alignment_needed);
5318 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
5319 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
5320 gcc_assert (stack_alignment_needed
5321 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
5322
5323 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
5324 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
5325
5326 /* Register save area */
5327 offset += frame->nregs * UNITS_PER_WORD;
5328
5329 /* Va-arg area */
5330 if (ix86_save_varrargs_registers)
5331 {
5332 offset += X86_64_VARARGS_SIZE;
5333 frame->va_arg_size = X86_64_VARARGS_SIZE;
5334 }
5335 else
5336 frame->va_arg_size = 0;
5337
5338 /* Align start of frame for local function. */
5339 frame->padding1 = ((offset + stack_alignment_needed - 1)
5340 & -stack_alignment_needed) - offset;
5341
5342 offset += frame->padding1;
5343
5344 /* Frame pointer points here. */
5345 frame->frame_pointer_offset = offset;
5346
5347 offset += size;
5348
5349 /* Add the outgoing arguments area.  It can be skipped if we eliminated
5350 all the function calls as dead code.
5351 Skipping is, however, impossible when the function calls alloca: the
5352 alloca expander assumes that the last current_function_outgoing_args_size
5353 bytes of the stack frame are unused. */
5354 if (ACCUMULATE_OUTGOING_ARGS
5355 && (!current_function_is_leaf || current_function_calls_alloca
5356 || ix86_current_function_calls_tls_descriptor))
5357 {
5358 offset += current_function_outgoing_args_size;
5359 frame->outgoing_arguments_size = current_function_outgoing_args_size;
5360 }
5361 else
5362 frame->outgoing_arguments_size = 0;
5363
5364 /* Align stack boundary. Only needed if we're calling another function
5365 or using alloca. */
5366 if (!current_function_is_leaf || current_function_calls_alloca
5367 || ix86_current_function_calls_tls_descriptor)
5368 frame->padding2 = ((offset + preferred_alignment - 1)
5369 & -preferred_alignment) - offset;
5370 else
5371 frame->padding2 = 0;
5372
5373 offset += frame->padding2;
5374
5375 /* We've reached end of stack frame. */
5376 frame->stack_pointer_offset = offset;
5377
5378 /* Size prologue needs to allocate. */
5379 frame->to_allocate =
5380 (size + frame->padding1 + frame->padding2
5381 + frame->outgoing_arguments_size + frame->va_arg_size);
5382
5383 if ((!frame->to_allocate && frame->nregs <= 1)
5384 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
5385 frame->save_regs_using_mov = false;
5386
5387 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
5388 && current_function_is_leaf
5389 && !ix86_current_function_calls_tls_descriptor)
5390 {
5391 frame->red_zone_size = frame->to_allocate;
5392 if (frame->save_regs_using_mov)
5393 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
5394 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
5395 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
5396 }
5397 else
5398 frame->red_zone_size = 0;
5399 frame->to_allocate -= frame->red_zone_size;
5400 frame->stack_pointer_offset -= frame->red_zone_size;
5401 #if 0
5402 fprintf (stderr, "nregs: %i\n", frame->nregs);
5403 fprintf (stderr, "size: %i\n", size);
5404 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
5405 fprintf (stderr, "padding1: %i\n", frame->padding1);
5406 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
5407 fprintf (stderr, "padding2: %i\n", frame->padding2);
5408 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
5409 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
5410 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
5411 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
5412 frame->hard_frame_pointer_offset);
5413 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
5414 #endif
5415 }
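
/* An informal sketch of the frame laid out above, from higher to lower
   addresses: return address, saved frame pointer (if needed), the register
   save area (frame->nregs words), the va-arg save area, padding1, the local
   variables (get_frame_size ()), the outgoing argument area, and padding2.
   hard_frame_pointer_offset, frame_pointer_offset and stack_pointer_offset
   record how far OFFSET has advanced at the respective boundaries.  Each
   padding term uses the round-up idiom (offset + align - 1) & -align; for
   example, offset 20 with 16-byte alignment gives ((20 + 15) & -16) - 20
   = 12 bytes of padding.  */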
5416
5417 /* Emit code to save registers in the prologue. */
5418
5419 static void
5420 ix86_emit_save_regs (void)
5421 {
5422 unsigned int regno;
5423 rtx insn;
5424
5425 for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
5426 if (ix86_save_reg (regno, true))
5427 {
5428 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
5429 RTX_FRAME_RELATED_P (insn) = 1;
5430 }
5431 }
5432
5433 /* Emit code to save registers using MOV insns.  The first register
5434 is saved at POINTER + OFFSET. */
5435 static void
5436 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
5437 {
5438 unsigned int regno;
5439 rtx insn;
5440
5441 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5442 if (ix86_save_reg (regno, true))
5443 {
5444 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
5445 Pmode, offset),
5446 gen_rtx_REG (Pmode, regno));
5447 RTX_FRAME_RELATED_P (insn) = 1;
5448 offset += UNITS_PER_WORD;
5449 }
5450 }
5451
5452 /* Expand a prologue or epilogue stack adjustment.
5453 The pattern exists to put a dependency on all ebp-based memory accesses.
5454 STYLE should be negative if instructions should be marked as frame related,
5455 zero if the %r11 register is live and cannot be freely used, and positive
5456 otherwise. */
5457
5458 static void
5459 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
5460 {
5461 rtx insn;
5462
5463 if (! TARGET_64BIT)
5464 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
5465 else if (x86_64_immediate_operand (offset, DImode))
5466 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
5467 else
5468 {
5469 rtx r11;
5470 /* r11 is used by indirect sibcall return as well, set before the
5471 epilogue and used after the epilogue.  At the moment an indirect sibcall
5472 shouldn't be used together with huge frame sizes in one
5473 function because of the frame_size check in sibcall.c. */
5474 gcc_assert (style);
5475 r11 = gen_rtx_REG (DImode, R11_REG);
5476 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
5477 if (style < 0)
5478 RTX_FRAME_RELATED_P (insn) = 1;
5479 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
5480 offset));
5481 }
5482 if (style < 0)
5483 RTX_FRAME_RELATED_P (insn) = 1;
5484 }
5485
5486 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
5487
5488 static rtx
5489 ix86_internal_arg_pointer (void)
5490 {
5491 bool has_force_align_arg_pointer =
5492 (0 != lookup_attribute (ix86_force_align_arg_pointer_string,
5493 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))));
5494 if ((FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
5495 && DECL_NAME (current_function_decl)
5496 && MAIN_NAME_P (DECL_NAME (current_function_decl))
5497 && DECL_FILE_SCOPE_P (current_function_decl))
5498 || ix86_force_align_arg_pointer
5499 || has_force_align_arg_pointer)
5500 {
5501 /* Nested functions can't realign the stack due to a register
5502 conflict. */
5503 if (DECL_CONTEXT (current_function_decl)
5504 && TREE_CODE (DECL_CONTEXT (current_function_decl)) == FUNCTION_DECL)
5505 {
5506 if (ix86_force_align_arg_pointer)
5507 warning (0, "-mstackrealign ignored for nested functions");
5508 if (has_force_align_arg_pointer)
5509 error ("%s not supported for nested functions",
5510 ix86_force_align_arg_pointer_string);
5511 return virtual_incoming_args_rtx;
5512 }
5513 cfun->machine->force_align_arg_pointer = gen_rtx_REG (Pmode, 2);
5514 return copy_to_reg (cfun->machine->force_align_arg_pointer);
5515 }
5516 else
5517 return virtual_incoming_args_rtx;
5518 }
5519
5520 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
5521 This is called from dwarf2out.c to emit call frame instructions
5522 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
5523 static void
5524 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
5525 {
5526 rtx unspec = SET_SRC (pattern);
5527 gcc_assert (GET_CODE (unspec) == UNSPEC);
5528
5529 switch (index)
5530 {
5531 case UNSPEC_REG_SAVE:
5532 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
5533 SET_DEST (pattern));
5534 break;
5535 case UNSPEC_DEF_CFA:
5536 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
5537 INTVAL (XVECEXP (unspec, 0, 0)));
5538 break;
5539 default:
5540 gcc_unreachable ();
5541 }
5542 }
5543
5544 /* Expand the prologue into a bunch of separate insns. */
5545
5546 void
5547 ix86_expand_prologue (void)
5548 {
5549 rtx insn;
5550 bool pic_reg_used;
5551 struct ix86_frame frame;
5552 HOST_WIDE_INT allocate;
5553
5554 ix86_compute_frame_layout (&frame);
5555
5556 if (cfun->machine->force_align_arg_pointer)
5557 {
5558 rtx x, y;
5559
5560 /* Grab the argument pointer. */
5561 x = plus_constant (stack_pointer_rtx, 4);
5562 y = cfun->machine->force_align_arg_pointer;
5563 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
5564 RTX_FRAME_RELATED_P (insn) = 1;
5565
5566 /* The unwind info consists of two parts: install the fafp as the cfa,
5567 and record the fafp as the "save register" of the stack pointer.
5568 The latter is there so that the unwinder can see where it
5569 should restore the stack pointer across the stack-aligning AND insn. */
5570 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_DEF_CFA);
5571 x = gen_rtx_SET (VOIDmode, y, x);
5572 RTX_FRAME_RELATED_P (x) = 1;
5573 y = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, stack_pointer_rtx),
5574 UNSPEC_REG_SAVE);
5575 y = gen_rtx_SET (VOIDmode, cfun->machine->force_align_arg_pointer, y);
5576 RTX_FRAME_RELATED_P (y) = 1;
5577 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y));
5578 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5579 REG_NOTES (insn) = x;
5580
5581 /* Align the stack. */
5582 emit_insn (gen_andsi3 (stack_pointer_rtx, stack_pointer_rtx,
5583 GEN_INT (-16)));
5584
5585 /* And here we cheat like madmen with the unwind info. We force the
5586 cfa register back to sp+4, which is exactly what it was at the
5587 start of the function. Re-pushing the return address results in
5588 the return at the same spot relative to the cfa, and thus is
5589 correct wrt the unwind info. */
5590 x = cfun->machine->force_align_arg_pointer;
5591 x = gen_frame_mem (Pmode, plus_constant (x, -4));
5592 insn = emit_insn (gen_push (x));
5593 RTX_FRAME_RELATED_P (insn) = 1;
5594
5595 x = GEN_INT (4);
5596 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, x), UNSPEC_DEF_CFA);
5597 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
5598 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5599 REG_NOTES (insn) = x;
5600 }
5601
5602 /* Note: AT&T enter does NOT have reversed args. Enter is probably
5603 slower on all targets. Also sdb doesn't like it. */
5604
5605 if (frame_pointer_needed)
5606 {
5607 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
5608 RTX_FRAME_RELATED_P (insn) = 1;
5609
5610 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
5611 RTX_FRAME_RELATED_P (insn) = 1;
5612 }
5613
5614 allocate = frame.to_allocate;
5615
5616 if (!frame.save_regs_using_mov)
5617 ix86_emit_save_regs ();
5618 else
5619 allocate += frame.nregs * UNITS_PER_WORD;
5620
5621 /* When using the red zone we may start saving registers before allocating
5622 the stack frame, saving one cycle of the prologue. */
5623 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
5624 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
5625 : stack_pointer_rtx,
5626 -frame.nregs * UNITS_PER_WORD);
5627
5628 if (allocate == 0)
5629 ;
5630 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
5631 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5632 GEN_INT (-allocate), -1);
5633 else
5634 {
5635 /* Only valid for Win32. */
5636 rtx eax = gen_rtx_REG (SImode, 0);
5637 bool eax_live = ix86_eax_live_at_start_p ();
5638 rtx t;
5639
5640 gcc_assert (!TARGET_64BIT);
5641
5642 if (eax_live)
5643 {
5644 emit_insn (gen_push (eax));
5645 allocate -= 4;
5646 }
5647
5648 emit_move_insn (eax, GEN_INT (allocate));
5649
5650 insn = emit_insn (gen_allocate_stack_worker (eax));
5651 RTX_FRAME_RELATED_P (insn) = 1;
5652 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
5653 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
5654 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
5655 t, REG_NOTES (insn));
5656
5657 if (eax_live)
5658 {
5659 if (frame_pointer_needed)
5660 t = plus_constant (hard_frame_pointer_rtx,
5661 allocate
5662 - frame.to_allocate
5663 - frame.nregs * UNITS_PER_WORD);
5664 else
5665 t = plus_constant (stack_pointer_rtx, allocate);
5666 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
5667 }
5668 }
5669
5670 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
5671 {
5672 if (!frame_pointer_needed || !frame.to_allocate)
5673 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
5674 else
5675 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
5676 -frame.nregs * UNITS_PER_WORD);
5677 }
5678
5679 pic_reg_used = false;
5680 if (pic_offset_table_rtx
5681 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5682 || current_function_profile))
5683 {
5684 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
5685
5686 if (alt_pic_reg_used != INVALID_REGNUM)
5687 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
5688
5689 pic_reg_used = true;
5690 }
5691
5692 if (pic_reg_used)
5693 {
5694 if (TARGET_64BIT)
5695 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
5696 else
5697 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
5698
5699 /* Even with accurate pre-reload life analysis, we can wind up
5700 deleting all references to the pic register after reload.
5701 Consider if cross-jumping unifies two sides of a branch
5702 controlled by a comparison vs the only read from a global.
5703 In which case, allow the set_got to be deleted, though we're
5704 too late to do anything about the ebx save in the prologue. */
5705 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
5706 }
5707
5708 /* Prevent function calls from being scheduled before the call to mcount.
5709 In the pic_reg_used case, make sure that the got load isn't deleted. */
5710 if (current_function_profile)
5711 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
5712 }
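
/* As an informal illustration, for a 32-bit function that needs a frame
   pointer, saves %ebx with pushes and allocates N bytes of locals, the
   insns emitted above correspond to roughly

       pushl   %ebp
       movl    %esp, %ebp
       pushl   %ebx
       subl    $N, %esp

   When save_regs_using_mov is set, the sub covers the register save area
   as well and the registers are then stored with plain moves.  */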
5713
5714 /* Emit code to restore saved registers using MOV insns. First register
5715 is restored from POINTER + OFFSET. */
5716 static void
5717 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
5718 int maybe_eh_return)
5719 {
5720 int regno;
5721 rtx base_address = gen_rtx_MEM (Pmode, pointer);
5722
5723 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5724 if (ix86_save_reg (regno, maybe_eh_return))
5725 {
5726 /* Ensure that adjust_address won't be forced to produce a pointer
5727 out of the range allowed by the x86-64 instruction set. */
5728 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
5729 {
5730 rtx r11;
5731
5732 r11 = gen_rtx_REG (DImode, R11_REG);
5733 emit_move_insn (r11, GEN_INT (offset));
5734 emit_insn (gen_adddi3 (r11, r11, pointer));
5735 base_address = gen_rtx_MEM (Pmode, r11);
5736 offset = 0;
5737 }
5738 emit_move_insn (gen_rtx_REG (Pmode, regno),
5739 adjust_address (base_address, Pmode, offset));
5740 offset += UNITS_PER_WORD;
5741 }
5742 }
5743
5744 /* Restore function stack, frame, and registers. */
5745
5746 void
5747 ix86_expand_epilogue (int style)
5748 {
5749 int regno;
5750 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
5751 struct ix86_frame frame;
5752 HOST_WIDE_INT offset;
5753
5754 ix86_compute_frame_layout (&frame);
5755
5756 /* Calculate start of saved registers relative to ebp. Special care
5757 must be taken for the normal return case of a function using
5758 eh_return: the eax and edx registers are marked as saved, but not
5759 restored along this path. */
5760 offset = frame.nregs;
5761 if (current_function_calls_eh_return && style != 2)
5762 offset -= 2;
5763 offset *= -UNITS_PER_WORD;
5764
5765 /* If we're only restoring one register and sp is not valid then
5766 use a move instruction to restore the register, since it's
5767 less work than reloading sp and popping the register.
5768
5769 The default code results in a stack adjustment using an add/lea instruction,
5770 while this code results in a LEAVE instruction (or discrete equivalent),
5771 so it is profitable in some other cases as well, especially when there
5772 are no registers to restore.  We also use this code when TARGET_USE_LEAVE
5773 and there is exactly one register to pop.  This heuristic may need some
5774 tuning in the future. */
5775 if ((!sp_valid && frame.nregs <= 1)
5776 || (TARGET_EPILOGUE_USING_MOVE
5777 && cfun->machine->use_fast_prologue_epilogue
5778 && (frame.nregs > 1 || frame.to_allocate))
5779 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
5780 || (frame_pointer_needed && TARGET_USE_LEAVE
5781 && cfun->machine->use_fast_prologue_epilogue
5782 && frame.nregs == 1)
5783 || current_function_calls_eh_return)
5784 {
5785 /* Restore registers. We can use ebp or esp to address the memory
5786 locations. If both are available, default to ebp, since offsets
5787 are known to be small.  The only exception is when esp points directly to
5788 the end of the block of saved registers, where we may simplify the
5789 addressing mode. */
5790
5791 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
5792 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
5793 frame.to_allocate, style == 2);
5794 else
5795 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
5796 offset, style == 2);
5797
5798 /* eh_return epilogues need %ecx added to the stack pointer. */
5799 if (style == 2)
5800 {
5801 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
5802
5803 if (frame_pointer_needed)
5804 {
5805 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
5806 tmp = plus_constant (tmp, UNITS_PER_WORD);
5807 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
5808
5809 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
5810 emit_move_insn (hard_frame_pointer_rtx, tmp);
5811
5812 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
5813 const0_rtx, style);
5814 }
5815 else
5816 {
5817 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
5818 tmp = plus_constant (tmp, (frame.to_allocate
5819 + frame.nregs * UNITS_PER_WORD));
5820 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
5821 }
5822 }
5823 else if (!frame_pointer_needed)
5824 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5825 GEN_INT (frame.to_allocate
5826 + frame.nregs * UNITS_PER_WORD),
5827 style);
5828 /* If not an i386, mov & pop is faster than "leave". */
5829 else if (TARGET_USE_LEAVE || optimize_size
5830 || !cfun->machine->use_fast_prologue_epilogue)
5831 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5832 else
5833 {
5834 pro_epilogue_adjust_stack (stack_pointer_rtx,
5835 hard_frame_pointer_rtx,
5836 const0_rtx, style);
5837 if (TARGET_64BIT)
5838 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5839 else
5840 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5841 }
5842 }
5843 else
5844 {
5845 /* First step is to deallocate the stack frame so that we can
5846 pop the registers. */
5847 if (!sp_valid)
5848 {
5849 gcc_assert (frame_pointer_needed);
5850 pro_epilogue_adjust_stack (stack_pointer_rtx,
5851 hard_frame_pointer_rtx,
5852 GEN_INT (offset), style);
5853 }
5854 else if (frame.to_allocate)
5855 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5856 GEN_INT (frame.to_allocate), style);
5857
5858 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5859 if (ix86_save_reg (regno, false))
5860 {
5861 if (TARGET_64BIT)
5862 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
5863 else
5864 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
5865 }
5866 if (frame_pointer_needed)
5867 {
5868 /* Leave results in shorter dependency chains on CPUs that are
5869 able to grok it fast. */
5870 if (TARGET_USE_LEAVE)
5871 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5872 else if (TARGET_64BIT)
5873 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5874 else
5875 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5876 }
5877 }
5878
5879 if (cfun->machine->force_align_arg_pointer)
5880 {
5881 emit_insn (gen_addsi3 (stack_pointer_rtx,
5882 cfun->machine->force_align_arg_pointer,
5883 GEN_INT (-4)));
5884 }
5885
5886 /* Sibcall epilogues don't want a return instruction. */
5887 if (style == 0)
5888 return;
5889
5890 if (current_function_pops_args && current_function_args_size)
5891 {
5892 rtx popc = GEN_INT (current_function_pops_args);
5893
5894 /* i386 can only pop 64K bytes. If asked to pop more, pop
5895 return address, do explicit add, and jump indirectly to the
5896 caller. */
5897
5898 if (current_function_pops_args >= 65536)
5899 {
5900 rtx ecx = gen_rtx_REG (SImode, 2);
5901
5902 /* There is no "pascal" calling convention in 64bit ABI. */
5903 gcc_assert (!TARGET_64BIT);
5904
5905 emit_insn (gen_popsi1 (ecx));
5906 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
5907 emit_jump_insn (gen_return_indirect_internal (ecx));
5908 }
5909 else
5910 emit_jump_insn (gen_return_pop_internal (popc));
5911 }
5912 else
5913 emit_jump_insn (gen_return_internal ());
5914 }
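
/* Informally, the two shapes produced above for the frame-pointer case are
   either the short form

       leave
       ret

   (after restoring registers with moves), or the discrete equivalent that
   pops the saved registers, e.g.

       addl    $N, %esp
       popl    %ebx
       popl    %ebp
       ret

   with "ret $n" substituted when the function pops its own arguments.  */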
5915
5916 /* Undo changes made while compiling the function; in particular, restore the PIC register's hard register number. */
5917
5918 static void
5919 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
5920 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5921 {
5922 if (pic_offset_table_rtx)
5923 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
5924 #if TARGET_MACHO
5925 /* Mach-O doesn't support labels at the end of objects, so if
5926 it looks like we might want one, insert a NOP. */
5927 {
5928 rtx insn = get_last_insn ();
5929 while (insn
5930 && NOTE_P (insn)
5931 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED_LABEL)
5932 insn = PREV_INSN (insn);
5933 if (insn
5934 && (LABEL_P (insn)
5935 || (NOTE_P (insn)
5936 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL)))
5937 fputs ("\tnop\n", file);
5938 }
5939 #endif
5940
5941 }
5942 \f
5943 /* Extract the parts of an RTL expression that is a valid memory address
5944 for an instruction. Return 0 if the structure of the address is
5945 grossly off. Return -1 if the address contains ASHIFT, so it is not
5946 strictly valid, but still useful for computing the length of a lea instruction. */
5947
5948 int
5949 ix86_decompose_address (rtx addr, struct ix86_address *out)
5950 {
5951 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
5952 rtx base_reg, index_reg;
5953 HOST_WIDE_INT scale = 1;
5954 rtx scale_rtx = NULL_RTX;
5955 int retval = 1;
5956 enum ix86_address_seg seg = SEG_DEFAULT;
5957
5958 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
5959 base = addr;
5960 else if (GET_CODE (addr) == PLUS)
5961 {
5962 rtx addends[4], op;
5963 int n = 0, i;
5964
5965 op = addr;
5966 do
5967 {
5968 if (n >= 4)
5969 return 0;
5970 addends[n++] = XEXP (op, 1);
5971 op = XEXP (op, 0);
5972 }
5973 while (GET_CODE (op) == PLUS);
5974 if (n >= 4)
5975 return 0;
5976 addends[n] = op;
5977
5978 for (i = n; i >= 0; --i)
5979 {
5980 op = addends[i];
5981 switch (GET_CODE (op))
5982 {
5983 case MULT:
5984 if (index)
5985 return 0;
5986 index = XEXP (op, 0);
5987 scale_rtx = XEXP (op, 1);
5988 break;
5989
5990 case UNSPEC:
5991 if (XINT (op, 1) == UNSPEC_TP
5992 && TARGET_TLS_DIRECT_SEG_REFS
5993 && seg == SEG_DEFAULT)
5994 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
5995 else
5996 return 0;
5997 break;
5998
5999 case REG:
6000 case SUBREG:
6001 if (!base)
6002 base = op;
6003 else if (!index)
6004 index = op;
6005 else
6006 return 0;
6007 break;
6008
6009 case CONST:
6010 case CONST_INT:
6011 case SYMBOL_REF:
6012 case LABEL_REF:
6013 if (disp)
6014 return 0;
6015 disp = op;
6016 break;
6017
6018 default:
6019 return 0;
6020 }
6021 }
6022 }
6023 else if (GET_CODE (addr) == MULT)
6024 {
6025 index = XEXP (addr, 0); /* index*scale */
6026 scale_rtx = XEXP (addr, 1);
6027 }
6028 else if (GET_CODE (addr) == ASHIFT)
6029 {
6030 rtx tmp;
6031
6032 /* We're called for lea too, which implements ashift on occasion. */
6033 index = XEXP (addr, 0);
6034 tmp = XEXP (addr, 1);
6035 if (GET_CODE (tmp) != CONST_INT)
6036 return 0;
6037 scale = INTVAL (tmp);
6038 if ((unsigned HOST_WIDE_INT) scale > 3)
6039 return 0;
6040 scale = 1 << scale;
6041 retval = -1;
6042 }
6043 else
6044 disp = addr; /* displacement */
6045
6046 /* Extract the integral value of scale. */
6047 if (scale_rtx)
6048 {
6049 if (GET_CODE (scale_rtx) != CONST_INT)
6050 return 0;
6051 scale = INTVAL (scale_rtx);
6052 }
6053
6054 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
6055 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
6056
6057 /* Allow the arg pointer and stack pointer as an index if there is no scaling. */
6058 if (base_reg && index_reg && scale == 1
6059 && (index_reg == arg_pointer_rtx
6060 || index_reg == frame_pointer_rtx
6061 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
6062 {
6063 rtx tmp;
6064 tmp = base, base = index, index = tmp;
6065 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
6066 }
6067
6068 /* Special case: %ebp cannot be encoded as a base without a displacement. */
6069 if ((base_reg == hard_frame_pointer_rtx
6070 || base_reg == frame_pointer_rtx
6071 || base_reg == arg_pointer_rtx) && !disp)
6072 disp = const0_rtx;
6073
6074 /* Special case: on K6, [%esi] causes the instruction to be vector decoded.
6075 Avoid this by transforming to [%esi+0]. */
6076 if (ix86_tune == PROCESSOR_K6 && !optimize_size
6077 && base_reg && !index_reg && !disp
6078 && REG_P (base_reg)
6079 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
6080 disp = const0_rtx;
6081
6082 /* Special case: encode reg+reg instead of reg*2. */
6083 if (!base && index && scale && scale == 2)
6084 base = index, base_reg = index_reg, scale = 1;
6085
6086 /* Special case: scaling cannot be encoded without base or displacement. */
6087 if (!base && !disp && index && scale != 1)
6088 disp = const0_rtx;
6089
6090 out->base = base;
6091 out->index = index;
6092 out->disp = disp;
6093 out->scale = scale;
6094 out->seg = seg;
6095
6096 return retval;
6097 }
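
/* As a worked example, the canonical address

     (plus:SI (plus:SI (mult:SI (reg:SI B) (const_int 4))
                       (reg:SI A))
              (const_int 12))

   decomposes into base = A, index = B, scale = 4, disp = 12, i.e. the
   operand that assembles as 12(%eax,%ebx,4) if A and B happen to land in
   %eax and %ebx.  The register names are purely illustrative.  */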
6098 \f
6099 /* Return the cost of the memory address X.
6100 For i386, it is better to use a complex address than let gcc copy
6101 the address into a reg and make a new pseudo. But not if the address
6102 requires two regs - that would mean more pseudos with longer
6103 lifetimes. */
6104 static int
6105 ix86_address_cost (rtx x)
6106 {
6107 struct ix86_address parts;
6108 int cost = 1;
6109 int ok = ix86_decompose_address (x, &parts);
6110
6111 gcc_assert (ok);
6112
6113 if (parts.base && GET_CODE (parts.base) == SUBREG)
6114 parts.base = SUBREG_REG (parts.base);
6115 if (parts.index && GET_CODE (parts.index) == SUBREG)
6116 parts.index = SUBREG_REG (parts.index);
6117
6118 /* More complex memory references are better. */
6119 if (parts.disp && parts.disp != const0_rtx)
6120 cost--;
6121 if (parts.seg != SEG_DEFAULT)
6122 cost--;
6123
6124 /* Attempt to minimize number of registers in the address. */
6125 if ((parts.base
6126 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
6127 || (parts.index
6128 && (!REG_P (parts.index)
6129 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
6130 cost++;
6131
6132 if (parts.base
6133 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
6134 && parts.index
6135 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
6136 && parts.base != parts.index)
6137 cost++;
6138
6139 /* The AMD-K6 doesn't like addresses with the ModR/M byte set to 00_xxx_100b,
6140 since its predecode logic can't detect the length of such instructions
6141 and decoding degenerates to vector decoding.  Increase the cost of such
6142 addresses here.  The penalty is at least 2 cycles.  It may be worthwhile
6143 to split such addresses or even refuse such addresses at all.
6144
6145 The following addressing modes are affected:
6146 [base+scale*index]
6147 [scale*index+disp]
6148 [base+index]
6149
6150 The first and last case may be avoidable by explicitly coding the zero in
6151 the memory address, but I don't have an AMD-K6 machine handy to check this
6152 theory. */
6153
6154 if (TARGET_K6
6155 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
6156 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
6157 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
6158 cost += 10;
6159
6160 return cost;
6161 }
6162 \f
6163 /* If X is a machine specific address (i.e. a symbol or label being
6164 referenced as a displacement from the GOT implemented using an
6165 UNSPEC), then return the base term. Otherwise return X. */
6166
6167 rtx
6168 ix86_find_base_term (rtx x)
6169 {
6170 rtx term;
6171
6172 if (TARGET_64BIT)
6173 {
6174 if (GET_CODE (x) != CONST)
6175 return x;
6176 term = XEXP (x, 0);
6177 if (GET_CODE (term) == PLUS
6178 && (GET_CODE (XEXP (term, 1)) == CONST_INT
6179 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
6180 term = XEXP (term, 0);
6181 if (GET_CODE (term) != UNSPEC
6182 || XINT (term, 1) != UNSPEC_GOTPCREL)
6183 return x;
6184
6185 term = XVECEXP (term, 0, 0);
6186
6187 if (GET_CODE (term) != SYMBOL_REF
6188 && GET_CODE (term) != LABEL_REF)
6189 return x;
6190
6191 return term;
6192 }
6193
6194 term = ix86_delegitimize_address (x);
6195
6196 if (GET_CODE (term) != SYMBOL_REF
6197 && GET_CODE (term) != LABEL_REF)
6198 return x;
6199
6200 return term;
6201 }
6202
6203 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
6204 this is used to form addresses to local data when -fPIC is in
6205 use. */
6206
6207 static bool
6208 darwin_local_data_pic (rtx disp)
6209 {
6210 if (GET_CODE (disp) == MINUS)
6211 {
6212 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
6213 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
6214 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
6215 {
6216 const char *sym_name = XSTR (XEXP (disp, 1), 0);
6217 if (! strcmp (sym_name, "<pic base>"))
6218 return true;
6219 }
6220 }
6221
6222 return false;
6223 }
6224 \f
6225 /* Determine if a given RTX is a valid constant. We already know this
6226 satisfies CONSTANT_P. */
6227
6228 bool
6229 legitimate_constant_p (rtx x)
6230 {
6231 switch (GET_CODE (x))
6232 {
6233 case CONST:
6234 x = XEXP (x, 0);
6235
6236 if (GET_CODE (x) == PLUS)
6237 {
6238 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6239 return false;
6240 x = XEXP (x, 0);
6241 }
6242
6243 if (TARGET_MACHO && darwin_local_data_pic (x))
6244 return true;
6245
6246 /* Only some unspecs are valid as "constants". */
6247 if (GET_CODE (x) == UNSPEC)
6248 switch (XINT (x, 1))
6249 {
6250 case UNSPEC_GOTOFF:
6251 return TARGET_64BIT;
6252 case UNSPEC_TPOFF:
6253 case UNSPEC_NTPOFF:
6254 x = XVECEXP (x, 0, 0);
6255 return (GET_CODE (x) == SYMBOL_REF
6256 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
6257 case UNSPEC_DTPOFF:
6258 x = XVECEXP (x, 0, 0);
6259 return (GET_CODE (x) == SYMBOL_REF
6260 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
6261 default:
6262 return false;
6263 }
6264
6265 /* We must have drilled down to a symbol. */
6266 if (GET_CODE (x) == LABEL_REF)
6267 return true;
6268 if (GET_CODE (x) != SYMBOL_REF)
6269 return false;
6270 /* FALLTHRU */
6271
6272 case SYMBOL_REF:
6273 /* TLS symbols are never valid. */
6274 if (SYMBOL_REF_TLS_MODEL (x))
6275 return false;
6276 break;
6277
6278 case CONST_DOUBLE:
6279 if (GET_MODE (x) == TImode
6280 && x != CONST0_RTX (TImode)
6281 && !TARGET_64BIT)
6282 return false;
6283 break;
6284
6285 case CONST_VECTOR:
6286 if (x == CONST0_RTX (GET_MODE (x)))
6287 return true;
6288 return false;
6289
6290 default:
6291 break;
6292 }
6293
6294 /* Otherwise we handle everything else in the move patterns. */
6295 return true;
6296 }
6297
6298 /* Determine if it's legal to put X into the constant pool. This
6299 is not possible for the address of thread-local symbols, which
6300 is checked above. */
6301
6302 static bool
6303 ix86_cannot_force_const_mem (rtx x)
6304 {
6305 /* We can always put integral constants and vectors in memory. */
6306 switch (GET_CODE (x))
6307 {
6308 case CONST_INT:
6309 case CONST_DOUBLE:
6310 case CONST_VECTOR:
6311 return false;
6312
6313 default:
6314 break;
6315 }
6316 return !legitimate_constant_p (x);
6317 }
6318
6319 /* Determine if a given RTX is a valid constant address. */
6320
6321 bool
6322 constant_address_p (rtx x)
6323 {
6324 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
6325 }
6326
6327 /* Nonzero if the constant value X is a legitimate general operand
6328 when generating PIC code. It is given that flag_pic is on and
6329 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
6330
6331 bool
6332 legitimate_pic_operand_p (rtx x)
6333 {
6334 rtx inner;
6335
6336 switch (GET_CODE (x))
6337 {
6338 case CONST:
6339 inner = XEXP (x, 0);
6340 if (GET_CODE (inner) == PLUS
6341 && GET_CODE (XEXP (inner, 1)) == CONST_INT)
6342 inner = XEXP (inner, 0);
6343
6344 /* Only some unspecs are valid as "constants". */
6345 if (GET_CODE (inner) == UNSPEC)
6346 switch (XINT (inner, 1))
6347 {
6348 case UNSPEC_GOTOFF:
6349 return TARGET_64BIT;
6350 case UNSPEC_TPOFF:
6351 x = XVECEXP (inner, 0, 0);
6352 return (GET_CODE (x) == SYMBOL_REF
6353 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
6354 default:
6355 return false;
6356 }
6357 /* FALLTHRU */
6358
6359 case SYMBOL_REF:
6360 case LABEL_REF:
6361 return legitimate_pic_address_disp_p (x);
6362
6363 default:
6364 return true;
6365 }
6366 }
6367
6368 /* Determine if a given CONST RTX is a valid memory displacement
6369 in PIC mode. */
6370
6371 int
6372 legitimate_pic_address_disp_p (rtx disp)
6373 {
6374 bool saw_plus;
6375
6376 /* In 64bit mode we can allow direct addresses of symbols and labels
6377 when they are not dynamic symbols. */
6378 if (TARGET_64BIT)
6379 {
6380 rtx op0 = disp, op1;
6381
6382 switch (GET_CODE (disp))
6383 {
6384 case LABEL_REF:
6385 return true;
6386
6387 case CONST:
6388 if (GET_CODE (XEXP (disp, 0)) != PLUS)
6389 break;
6390 op0 = XEXP (XEXP (disp, 0), 0);
6391 op1 = XEXP (XEXP (disp, 0), 1);
6392 if (GET_CODE (op1) != CONST_INT
6393 || INTVAL (op1) >= 16*1024*1024
6394 || INTVAL (op1) < -16*1024*1024)
6395 break;
6396 if (GET_CODE (op0) == LABEL_REF)
6397 return true;
6398 if (GET_CODE (op0) != SYMBOL_REF)
6399 break;
6400 /* FALLTHRU */
6401
6402 case SYMBOL_REF:
6403 /* TLS references should always be enclosed in UNSPEC. */
6404 if (SYMBOL_REF_TLS_MODEL (op0))
6405 return false;
6406 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0))
6407 return true;
6408 break;
6409
6410 default:
6411 break;
6412 }
6413 }
6414 if (GET_CODE (disp) != CONST)
6415 return 0;
6416 disp = XEXP (disp, 0);
6417
6418 if (TARGET_64BIT)
6419 {
6420 /* It is unsafe to allow PLUS expressions; this would limit the allowed
6421 distance of GOT tables.  We should not need these anyway. */
6422 if (GET_CODE (disp) != UNSPEC
6423 || (XINT (disp, 1) != UNSPEC_GOTPCREL
6424 && XINT (disp, 1) != UNSPEC_GOTOFF))
6425 return 0;
6426
6427 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
6428 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
6429 return 0;
6430 return 1;
6431 }
6432
6433 saw_plus = false;
6434 if (GET_CODE (disp) == PLUS)
6435 {
6436 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
6437 return 0;
6438 disp = XEXP (disp, 0);
6439 saw_plus = true;
6440 }
6441
6442 if (TARGET_MACHO && darwin_local_data_pic (disp))
6443 return 1;
6444
6445 if (GET_CODE (disp) != UNSPEC)
6446 return 0;
6447
6448 switch (XINT (disp, 1))
6449 {
6450 case UNSPEC_GOT:
6451 if (saw_plus)
6452 return false;
6453 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
6454 case UNSPEC_GOTOFF:
6455 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
6456 While the ABI also specifies a 32bit relocation, we don't produce it in
6457 the small PIC model at all. */
6458 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
6459 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
6460 && !TARGET_64BIT)
6461 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
6462 return false;
6463 case UNSPEC_GOTTPOFF:
6464 case UNSPEC_GOTNTPOFF:
6465 case UNSPEC_INDNTPOFF:
6466 if (saw_plus)
6467 return false;
6468 disp = XVECEXP (disp, 0, 0);
6469 return (GET_CODE (disp) == SYMBOL_REF
6470 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
6471 case UNSPEC_NTPOFF:
6472 disp = XVECEXP (disp, 0, 0);
6473 return (GET_CODE (disp) == SYMBOL_REF
6474 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
6475 case UNSPEC_DTPOFF:
6476 disp = XVECEXP (disp, 0, 0);
6477 return (GET_CODE (disp) == SYMBOL_REF
6478 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
6479 }
6480
6481 return 0;
6482 }
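
/* For instance, in 32-bit PIC code the displacement for a local symbol "x"
   is wrapped as

     (const:SI (unspec:SI [(symbol_ref:SI ("x"))] UNSPEC_GOTOFF))

   (the x@GOTOFF form), which the UNSPEC_GOTOFF case above accepts, while a
   bare (symbol_ref "x") is rejected because it lacks the CONST/UNSPEC
   wrapper.  */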
6483
6484 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
6485 memory address for an instruction. The MODE argument is the machine mode
6486 for the MEM expression that wants to use this address.
6487
6488 It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS should
6489 convert common non-canonical forms to canonical form so that they will
6490 be recognized. */
6491
6492 int
6493 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
6494 {
6495 struct ix86_address parts;
6496 rtx base, index, disp;
6497 HOST_WIDE_INT scale;
6498 const char *reason = NULL;
6499 rtx reason_rtx = NULL_RTX;
6500
6501 if (TARGET_DEBUG_ADDR)
6502 {
6503 fprintf (stderr,
6504 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
6505 GET_MODE_NAME (mode), strict);
6506 debug_rtx (addr);
6507 }
6508
6509 if (ix86_decompose_address (addr, &parts) <= 0)
6510 {
6511 reason = "decomposition failed";
6512 goto report_error;
6513 }
6514
6515 base = parts.base;
6516 index = parts.index;
6517 disp = parts.disp;
6518 scale = parts.scale;
6519
6520 /* Validate base register.
6521
6522 Don't allow SUBREGs that span more than a word here.  They can lead to spill
6523 failures when the base is one word out of a two word structure, which is
6524 represented internally as a DImode int. */
6525
6526 if (base)
6527 {
6528 rtx reg;
6529 reason_rtx = base;
6530
6531 if (REG_P (base))
6532 reg = base;
6533 else if (GET_CODE (base) == SUBREG
6534 && REG_P (SUBREG_REG (base))
6535 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
6536 <= UNITS_PER_WORD)
6537 reg = SUBREG_REG (base);
6538 else
6539 {
6540 reason = "base is not a register";
6541 goto report_error;
6542 }
6543
6544 if (GET_MODE (base) != Pmode)
6545 {
6546 reason = "base is not in Pmode";
6547 goto report_error;
6548 }
6549
6550 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
6551 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
6552 {
6553 reason = "base is not valid";
6554 goto report_error;
6555 }
6556 }
6557
6558 /* Validate index register.
6559
6560 Don't allow SUBREGs that span more than a word here -- same as above. */
6561
6562 if (index)
6563 {
6564 rtx reg;
6565 reason_rtx = index;
6566
6567 if (REG_P (index))
6568 reg = index;
6569 else if (GET_CODE (index) == SUBREG
6570 && REG_P (SUBREG_REG (index))
6571 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
6572 <= UNITS_PER_WORD)
6573 reg = SUBREG_REG (index);
6574 else
6575 {
6576 reason = "index is not a register";
6577 goto report_error;
6578 }
6579
6580 if (GET_MODE (index) != Pmode)
6581 {
6582 reason = "index is not in Pmode";
6583 goto report_error;
6584 }
6585
6586 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
6587 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
6588 {
6589 reason = "index is not valid";
6590 goto report_error;
6591 }
6592 }
6593
6594 /* Validate scale factor. */
6595 if (scale != 1)
6596 {
6597 reason_rtx = GEN_INT (scale);
6598 if (!index)
6599 {
6600 reason = "scale without index";
6601 goto report_error;
6602 }
6603
6604 if (scale != 2 && scale != 4 && scale != 8)
6605 {
6606 reason = "scale is not a valid multiplier";
6607 goto report_error;
6608 }
6609 }
6610
6611 /* Validate displacement. */
6612 if (disp)
6613 {
6614 reason_rtx = disp;
6615
6616 if (GET_CODE (disp) == CONST
6617 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
6618 switch (XINT (XEXP (disp, 0), 1))
6619 {
6620 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
6621 used.  While the ABI also specifies 32bit relocations, we don't produce
6622 them at all and use IP relative addressing instead. */
6623 case UNSPEC_GOT:
6624 case UNSPEC_GOTOFF:
6625 gcc_assert (flag_pic);
6626 if (!TARGET_64BIT)
6627 goto is_legitimate_pic;
6628 reason = "64bit address unspec";
6629 goto report_error;
6630
6631 case UNSPEC_GOTPCREL:
6632 gcc_assert (flag_pic);
6633 goto is_legitimate_pic;
6634
6635 case UNSPEC_GOTTPOFF:
6636 case UNSPEC_GOTNTPOFF:
6637 case UNSPEC_INDNTPOFF:
6638 case UNSPEC_NTPOFF:
6639 case UNSPEC_DTPOFF:
6640 break;
6641
6642 default:
6643 reason = "invalid address unspec";
6644 goto report_error;
6645 }
6646
6647 else if (SYMBOLIC_CONST (disp)
6648 && (flag_pic
6649 || (TARGET_MACHO
6650 #if TARGET_MACHO
6651 && MACHOPIC_INDIRECT
6652 && !machopic_operand_p (disp)
6653 #endif
6654 )))
6655 {
6656
6657 is_legitimate_pic:
6658 if (TARGET_64BIT && (index || base))
6659 {
6660 /* foo@dtpoff(%rX) is ok. */
6661 if (GET_CODE (disp) != CONST
6662 || GET_CODE (XEXP (disp, 0)) != PLUS
6663 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
6664 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
6665 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
6666 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
6667 {
6668 reason = "non-constant pic memory reference";
6669 goto report_error;
6670 }
6671 }
6672 else if (! legitimate_pic_address_disp_p (disp))
6673 {
6674 reason = "displacement is an invalid pic construct";
6675 goto report_error;
6676 }
6677
6678 /* This code used to verify that a symbolic pic displacement
6679 includes the pic_offset_table_rtx register.
6680
6681 While this is a good idea, unfortunately these constructs may
6682 be created by the "adds using lea" optimization for incorrect
6683 code like:
6684
6685 int a;
6686 int foo(int i)
6687 {
6688 return *(&a+i);
6689 }
6690
6691 This code is nonsensical, but results in addressing the
6692 GOT table with a pic_offset_table_rtx base.  We can't
6693 just refuse it easily, since it gets matched by the
6694 "addsi3" pattern, which later gets split to lea when the
6695 output register differs from the input.  While this
6696 could be handled by a separate addsi pattern for this case
6697 that never results in lea, disabling this test seems to be
6698 the easier and correct fix for the crash. */
6699 }
6700 else if (GET_CODE (disp) != LABEL_REF
6701 && GET_CODE (disp) != CONST_INT
6702 && (GET_CODE (disp) != CONST
6703 || !legitimate_constant_p (disp))
6704 && (GET_CODE (disp) != SYMBOL_REF
6705 || !legitimate_constant_p (disp)))
6706 {
6707 reason = "displacement is not constant";
6708 goto report_error;
6709 }
6710 else if (TARGET_64BIT
6711 && !x86_64_immediate_operand (disp, VOIDmode))
6712 {
6713 reason = "displacement is out of range";
6714 goto report_error;
6715 }
6716 }
6717
6718 /* Everything looks valid. */
6719 if (TARGET_DEBUG_ADDR)
6720 fprintf (stderr, "Success.\n");
6721 return TRUE;
6722
6723 report_error:
6724 if (TARGET_DEBUG_ADDR)
6725 {
6726 fprintf (stderr, "Error: %s\n", reason);
6727 debug_rtx (reason_rtx);
6728 }
6729 return FALSE;
6730 }
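
/* Illustration only: a few concrete address forms and how the checks above
   classify them (AT&T syntax; ix86_decompose_address supplies the
   base/index/scale/disp decomposition used here):

     4(%ebp)            base + disp              valid
     (%ebx,%ecx,4)      base + index*scale       valid
     (%ebx,%ecx,3)      "scale is not a valid multiplier"
     foo@GOTOFF(%ebx)   PIC displacement, valid when flag_pic

   In 64-bit mode a constant displacement must additionally satisfy
   x86_64_immediate_operand, i.e. fit in a sign-extended 32-bit field.  */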
6731 \f
6732 /* Return a unique alias set for the GOT. */
6733
6734 static HOST_WIDE_INT
6735 ix86_GOT_alias_set (void)
6736 {
6737 static HOST_WIDE_INT set = -1;
6738 if (set == -1)
6739 set = new_alias_set ();
6740 return set;
6741 }
6742
6743 /* Return a legitimate reference for ORIG (an address) using the
6744 register REG. If REG is 0, a new pseudo is generated.
6745
6746 There are two types of references that must be handled:
6747
6748 1. Global data references must load the address from the GOT, via
6749 the PIC reg. An insn is emitted to do this load, and the reg is
6750 returned.
6751
6752 2. Static data references, constant pool addresses, and code labels
6753 compute the address as an offset from the GOT, whose base is in
6754 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
6755 differentiate them from global data objects. The returned
6756 address is the PIC reg + an unspec constant.
6757
6758 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
6759 reg also appears in the address. */
6760
6761 static rtx
6762 legitimize_pic_address (rtx orig, rtx reg)
6763 {
6764 rtx addr = orig;
6765 rtx new = orig;
6766 rtx base;
6767
6768 #if TARGET_MACHO
6769 if (TARGET_MACHO && !TARGET_64BIT)
6770 {
6771 if (reg == 0)
6772 reg = gen_reg_rtx (Pmode);
6773 /* Use the generic Mach-O PIC machinery. */
6774 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
6775 }
6776 #endif
6777
6778 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
6779 new = addr;
6780 else if (TARGET_64BIT
6781 && ix86_cmodel != CM_SMALL_PIC
6782 && local_symbolic_operand (addr, Pmode))
6783 {
6784 rtx tmpreg;
6785 /* This symbol may be referenced via a displacement from the PIC
6786 base address (@GOTOFF). */
6787
6788 if (reload_in_progress)
6789 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6790 if (GET_CODE (addr) == CONST)
6791 addr = XEXP (addr, 0);
6792 if (GET_CODE (addr) == PLUS)
6793 {
6794 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6795 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6796 }
6797 else
6798 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6799 new = gen_rtx_CONST (Pmode, new);
6800 if (!reg)
6801 tmpreg = gen_reg_rtx (Pmode);
6802 else
6803 tmpreg = reg;
6804 emit_move_insn (tmpreg, new);
6805
6806 if (reg != 0)
6807 {
6808 new = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
6809 tmpreg, 1, OPTAB_DIRECT);
6810 new = reg;
6811 }
6812 else new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
6813 }
6814 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
6815 {
6816 /* This symbol may be referenced via a displacement from the PIC
6817 base address (@GOTOFF). */
6818
6819 if (reload_in_progress)
6820 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6821 if (GET_CODE (addr) == CONST)
6822 addr = XEXP (addr, 0);
6823 if (GET_CODE (addr) == PLUS)
6824 {
6825 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6826 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6827 }
6828 else
6829 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6830 new = gen_rtx_CONST (Pmode, new);
6831 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6832
6833 if (reg != 0)
6834 {
6835 emit_move_insn (reg, new);
6836 new = reg;
6837 }
6838 }
6839 else if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
6840 {
6841 if (TARGET_64BIT)
6842 {
6843 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
6844 new = gen_rtx_CONST (Pmode, new);
6845 new = gen_const_mem (Pmode, new);
6846 set_mem_alias_set (new, ix86_GOT_alias_set ());
6847
6848 if (reg == 0)
6849 reg = gen_reg_rtx (Pmode);
6850 /* Use gen_movsi directly, otherwise the address is loaded
6851 into a register for CSE. We don't want to CSE these addresses;
6852 instead we CSE addresses from the GOT table, so skip this. */
6853 emit_insn (gen_movsi (reg, new));
6854 new = reg;
6855 }
6856 else
6857 {
6858 /* This symbol must be referenced via a load from the
6859 Global Offset Table (@GOT). */
6860
6861 if (reload_in_progress)
6862 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6863 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
6864 new = gen_rtx_CONST (Pmode, new);
6865 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6866 new = gen_const_mem (Pmode, new);
6867 set_mem_alias_set (new, ix86_GOT_alias_set ());
6868
6869 if (reg == 0)
6870 reg = gen_reg_rtx (Pmode);
6871 emit_move_insn (reg, new);
6872 new = reg;
6873 }
6874 }
6875 else
6876 {
6877 if (GET_CODE (addr) == CONST_INT
6878 && !x86_64_immediate_operand (addr, VOIDmode))
6879 {
6880 if (reg)
6881 {
6882 emit_move_insn (reg, addr);
6883 new = reg;
6884 }
6885 else
6886 new = force_reg (Pmode, addr);
6887 }
6888 else if (GET_CODE (addr) == CONST)
6889 {
6890 addr = XEXP (addr, 0);
6891
6892 /* We must match constructs we generated earlier. Assume the only
6893 unspecs that can get here are ours. Not that we could do
6894 anything with them anyway.... */
6895 if (GET_CODE (addr) == UNSPEC
6896 || (GET_CODE (addr) == PLUS
6897 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
6898 return orig;
6899 gcc_assert (GET_CODE (addr) == PLUS);
6900 }
6901 if (GET_CODE (addr) == PLUS)
6902 {
6903 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
6904
6905 /* Check first to see if this is a constant offset from a @GOTOFF
6906 symbol reference. */
6907 if (local_symbolic_operand (op0, Pmode)
6908 && GET_CODE (op1) == CONST_INT)
6909 {
6910 if (!TARGET_64BIT)
6911 {
6912 if (reload_in_progress)
6913 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6914 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
6915 UNSPEC_GOTOFF);
6916 new = gen_rtx_PLUS (Pmode, new, op1);
6917 new = gen_rtx_CONST (Pmode, new);
6918 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6919
6920 if (reg != 0)
6921 {
6922 emit_move_insn (reg, new);
6923 new = reg;
6924 }
6925 }
6926 else
6927 {
6928 if (INTVAL (op1) < -16*1024*1024
6929 || INTVAL (op1) >= 16*1024*1024)
6930 {
6931 if (!x86_64_immediate_operand (op1, Pmode))
6932 op1 = force_reg (Pmode, op1);
6933 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
6934 }
6935 }
6936 }
6937 else
6938 {
6939 base = legitimize_pic_address (XEXP (addr, 0), reg);
6940 new = legitimize_pic_address (XEXP (addr, 1),
6941 base == reg ? NULL_RTX : reg);
6942
6943 if (GET_CODE (new) == CONST_INT)
6944 new = plus_constant (base, INTVAL (new));
6945 else
6946 {
6947 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
6948 {
6949 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
6950 new = XEXP (new, 1);
6951 }
6952 new = gen_rtx_PLUS (Pmode, base, new);
6953 }
6954 }
6955 }
6956 }
6957 return new;
6958 }
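
/* Illustration only: the RTL shapes legitimize_pic_address produces, as a
   quick reference (not compiled):

     32-bit, local symbol:   (plus pic_reg (const (unspec [sym] UNSPEC_GOTOFF)))
       i.e. the address is sym@GOTOFF(%ebx).
     32-bit, global symbol:  (mem (plus pic_reg (const (unspec [sym] UNSPEC_GOT))))
       i.e. the address is loaded from the GOT slot sym@GOT(%ebx).
     64-bit, global symbol:  (mem (const (unspec [sym] UNSPEC_GOTPCREL)))
       i.e. the address is loaded from sym@GOTPCREL(%rip).  */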
6959 \f
6960 /* Load the thread pointer. If TO_REG is true, force it into a register. */
6961
6962 static rtx
6963 get_thread_pointer (int to_reg)
6964 {
6965 rtx tp, reg, insn;
6966
6967 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
6968 if (!to_reg)
6969 return tp;
6970
6971 reg = gen_reg_rtx (Pmode);
6972 insn = gen_rtx_SET (VOIDmode, reg, tp);
6973 insn = emit_insn (insn);
6974
6975 return reg;
6976 }
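
/* Illustration only: UNSPEC_TP is matched by the thread-pointer load
   patterns in i386.md; on GNU/Linux it ends up as a %gs-relative (32-bit)
   or %fs-relative (64-bit) access, e.g. a typical expansion is
   "movl %gs:0, %eax" on 32-bit GNU/Linux.  */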
6977
6978 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
6979 false if we expect this to be used for a memory address and true if
6980 we expect to load the address into a register. */
6981
6982 static rtx
6983 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
6984 {
6985 rtx dest, base, off, pic, tp;
6986 int type;
6987
6988 switch (model)
6989 {
6990 case TLS_MODEL_GLOBAL_DYNAMIC:
6991 dest = gen_reg_rtx (Pmode);
6992 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
6993
6994 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
6995 {
6996 rtx rax = gen_rtx_REG (Pmode, 0), insns;
6997
6998 start_sequence ();
6999 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
7000 insns = get_insns ();
7001 end_sequence ();
7002
7003 emit_libcall_block (insns, dest, rax, x);
7004 }
7005 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7006 emit_insn (gen_tls_global_dynamic_64 (dest, x));
7007 else
7008 emit_insn (gen_tls_global_dynamic_32 (dest, x));
7009
7010 if (TARGET_GNU2_TLS)
7011 {
7012 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
7013
7014 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7015 }
7016 break;
7017
7018 case TLS_MODEL_LOCAL_DYNAMIC:
7019 base = gen_reg_rtx (Pmode);
7020 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
7021
7022 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
7023 {
7024 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
7025
7026 start_sequence ();
7027 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
7028 insns = get_insns ();
7029 end_sequence ();
7030
7031 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
7032 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
7033 emit_libcall_block (insns, base, rax, note);
7034 }
7035 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7036 emit_insn (gen_tls_local_dynamic_base_64 (base));
7037 else
7038 emit_insn (gen_tls_local_dynamic_base_32 (base));
7039
7040 if (TARGET_GNU2_TLS)
7041 {
7042 rtx x = ix86_tls_module_base ();
7043
7044 set_unique_reg_note (get_last_insn (), REG_EQUIV,
7045 gen_rtx_MINUS (Pmode, x, tp));
7046 }
7047
7048 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
7049 off = gen_rtx_CONST (Pmode, off);
7050
7051 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
7052
7053 if (TARGET_GNU2_TLS)
7054 {
7055 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
7056
7057 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7058 }
7059
7060 break;
7061
7062 case TLS_MODEL_INITIAL_EXEC:
7063 if (TARGET_64BIT)
7064 {
7065 pic = NULL;
7066 type = UNSPEC_GOTNTPOFF;
7067 }
7068 else if (flag_pic)
7069 {
7070 if (reload_in_progress)
7071 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
7072 pic = pic_offset_table_rtx;
7073 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
7074 }
7075 else if (!TARGET_ANY_GNU_TLS)
7076 {
7077 pic = gen_reg_rtx (Pmode);
7078 emit_insn (gen_set_got (pic));
7079 type = UNSPEC_GOTTPOFF;
7080 }
7081 else
7082 {
7083 pic = NULL;
7084 type = UNSPEC_INDNTPOFF;
7085 }
7086
7087 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
7088 off = gen_rtx_CONST (Pmode, off);
7089 if (pic)
7090 off = gen_rtx_PLUS (Pmode, pic, off);
7091 off = gen_const_mem (Pmode, off);
7092 set_mem_alias_set (off, ix86_GOT_alias_set ());
7093
7094 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7095 {
7096 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7097 off = force_reg (Pmode, off);
7098 return gen_rtx_PLUS (Pmode, base, off);
7099 }
7100 else
7101 {
7102 base = get_thread_pointer (true);
7103 dest = gen_reg_rtx (Pmode);
7104 emit_insn (gen_subsi3 (dest, base, off));
7105 }
7106 break;
7107
7108 case TLS_MODEL_LOCAL_EXEC:
7109 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
7110 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7111 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
7112 off = gen_rtx_CONST (Pmode, off);
7113
7114 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7115 {
7116 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7117 return gen_rtx_PLUS (Pmode, base, off);
7118 }
7119 else
7120 {
7121 base = get_thread_pointer (true);
7122 dest = gen_reg_rtx (Pmode);
7123 emit_insn (gen_subsi3 (dest, base, off));
7124 }
7125 break;
7126
7127 default:
7128 gcc_unreachable ();
7129 }
7130
7131 return dest;
7132 }
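
/* Illustration only, a rough summary of the sequences built above:

     global-dynamic:  DEST = result of the tls_global_dynamic_{32,64}
                      pattern (a __tls_get_addr-style call); with GNU2
                      TLS the thread pointer is added afterwards.
     local-dynamic:   DEST = tls_local_dynamic_base result + SYM@DTPOFF.
     initial-exec:    DEST = thread pointer combined with a GOT slot
                      referenced via @GOTTPOFF, @GOTNTPOFF or @INDNTPOFF.
     local-exec:      DEST = thread pointer combined with SYM@NTPOFF or
                      SYM@TPOFF.

   "Combined with" is a plus for TARGET_64BIT or TARGET_ANY_GNU_TLS and a
   subsi3 subtraction otherwise.  */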
7133
7134 /* Try machine-dependent ways of modifying an illegitimate address
7135 to be legitimate. If we find one, return the new, valid address.
7136 This macro is used in only one place: `memory_address' in explow.c.
7137
7138 OLDX is the address as it was before break_out_memory_refs was called.
7139 In some cases it is useful to look at this to decide what needs to be done.
7140
7141 MODE and WIN are passed so that this macro can use
7142 GO_IF_LEGITIMATE_ADDRESS.
7143
7144 It is always safe for this macro to do nothing. It exists to recognize
7145 opportunities to optimize the output.
7146
7147 For the 80386, we handle X+REG by loading X into a register R and
7148 using R+REG. R will go in a general reg and indexing will be used.
7149 However, if REG is a broken-out memory address or multiplication,
7150 nothing needs to be done because REG can certainly go in a general reg.
7151
7152 When -fpic is used, special handling is needed for symbolic references.
7153 See comments by legitimize_pic_address in i386.c for details. */
7154
7155 rtx
7156 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
7157 {
7158 int changed = 0;
7159 unsigned log;
7160
7161 if (TARGET_DEBUG_ADDR)
7162 {
7163 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
7164 GET_MODE_NAME (mode));
7165 debug_rtx (x);
7166 }
7167
7168 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
7169 if (log)
7170 return legitimize_tls_address (x, log, false);
7171 if (GET_CODE (x) == CONST
7172 && GET_CODE (XEXP (x, 0)) == PLUS
7173 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
7174 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
7175 {
7176 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
7177 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
7178 }
7179
7180 if (flag_pic && SYMBOLIC_CONST (x))
7181 return legitimize_pic_address (x, 0);
7182
7183 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
7184 if (GET_CODE (x) == ASHIFT
7185 && GET_CODE (XEXP (x, 1)) == CONST_INT
7186 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
7187 {
7188 changed = 1;
7189 log = INTVAL (XEXP (x, 1));
7190 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
7191 GEN_INT (1 << log));
7192 }
7193
7194 if (GET_CODE (x) == PLUS)
7195 {
7196 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
7197
7198 if (GET_CODE (XEXP (x, 0)) == ASHIFT
7199 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7200 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
7201 {
7202 changed = 1;
7203 log = INTVAL (XEXP (XEXP (x, 0), 1));
7204 XEXP (x, 0) = gen_rtx_MULT (Pmode,
7205 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
7206 GEN_INT (1 << log));
7207 }
7208
7209 if (GET_CODE (XEXP (x, 1)) == ASHIFT
7210 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
7211 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
7212 {
7213 changed = 1;
7214 log = INTVAL (XEXP (XEXP (x, 1), 1));
7215 XEXP (x, 1) = gen_rtx_MULT (Pmode,
7216 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
7217 GEN_INT (1 << log));
7218 }
7219
7220 /* Put multiply first if it isn't already. */
7221 if (GET_CODE (XEXP (x, 1)) == MULT)
7222 {
7223 rtx tmp = XEXP (x, 0);
7224 XEXP (x, 0) = XEXP (x, 1);
7225 XEXP (x, 1) = tmp;
7226 changed = 1;
7227 }
7228
7229 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
7230 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
7231 created by virtual register instantiation, register elimination, and
7232 similar optimizations. */
7233 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
7234 {
7235 changed = 1;
7236 x = gen_rtx_PLUS (Pmode,
7237 gen_rtx_PLUS (Pmode, XEXP (x, 0),
7238 XEXP (XEXP (x, 1), 0)),
7239 XEXP (XEXP (x, 1), 1));
7240 }
7241
7242 /* Canonicalize
7243 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
7244 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
7245 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
7246 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
7247 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
7248 && CONSTANT_P (XEXP (x, 1)))
7249 {
7250 rtx constant;
7251 rtx other = NULL_RTX;
7252
7253 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
7254 {
7255 constant = XEXP (x, 1);
7256 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
7257 }
7258 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
7259 {
7260 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
7261 other = XEXP (x, 1);
7262 }
7263 else
7264 constant = 0;
7265
7266 if (constant)
7267 {
7268 changed = 1;
7269 x = gen_rtx_PLUS (Pmode,
7270 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
7271 XEXP (XEXP (XEXP (x, 0), 1), 0)),
7272 plus_constant (other, INTVAL (constant)));
7273 }
7274 }
7275
7276 if (changed && legitimate_address_p (mode, x, FALSE))
7277 return x;
7278
7279 if (GET_CODE (XEXP (x, 0)) == MULT)
7280 {
7281 changed = 1;
7282 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
7283 }
7284
7285 if (GET_CODE (XEXP (x, 1)) == MULT)
7286 {
7287 changed = 1;
7288 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
7289 }
7290
7291 if (changed
7292 && GET_CODE (XEXP (x, 1)) == REG
7293 && GET_CODE (XEXP (x, 0)) == REG)
7294 return x;
7295
7296 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
7297 {
7298 changed = 1;
7299 x = legitimize_pic_address (x, 0);
7300 }
7301
7302 if (changed && legitimate_address_p (mode, x, FALSE))
7303 return x;
7304
7305 if (GET_CODE (XEXP (x, 0)) == REG)
7306 {
7307 rtx temp = gen_reg_rtx (Pmode);
7308 rtx val = force_operand (XEXP (x, 1), temp);
7309 if (val != temp)
7310 emit_move_insn (temp, val);
7311
7312 XEXP (x, 1) = temp;
7313 return x;
7314 }
7315
7316 else if (GET_CODE (XEXP (x, 1)) == REG)
7317 {
7318 rtx temp = gen_reg_rtx (Pmode);
7319 rtx val = force_operand (XEXP (x, 0), temp);
7320 if (val != temp)
7321 emit_move_insn (temp, val);
7322
7323 XEXP (x, 0) = temp;
7324 return x;
7325 }
7326 }
7327
7328 return x;
7329 }
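
/* Illustration only (not compiled): the canonicalization above turns, e.g.,

     (plus:SI (ashift:SI (reg:SI 60) (const_int 2)) (reg:SI 61))

   into

     (plus:SI (mult:SI (reg:SI 60) (const_int 4)) (reg:SI 61))

   which ix86_decompose_address recognizes as index*scale + base, so the
   whole expression fits a single SIB-encoded memory operand.  */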
7330 \f
7331 /* Print an integer constant expression in assembler syntax. Addition
7332 and subtraction are the only arithmetic that may appear in these
7333 expressions. FILE is the stdio stream to write to, X is the rtx, and
7334 CODE is the operand print code from the output string. */
7335
7336 static void
7337 output_pic_addr_const (FILE *file, rtx x, int code)
7338 {
7339 char buf[256];
7340
7341 switch (GET_CODE (x))
7342 {
7343 case PC:
7344 gcc_assert (flag_pic);
7345 putc ('.', file);
7346 break;
7347
7348 case SYMBOL_REF:
7349 output_addr_const (file, x);
7350 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
7351 fputs ("@PLT", file);
7352 break;
7353
7354 case LABEL_REF:
7355 x = XEXP (x, 0);
7356 /* FALLTHRU */
7357 case CODE_LABEL:
7358 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
7359 assemble_name (asm_out_file, buf);
7360 break;
7361
7362 case CONST_INT:
7363 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
7364 break;
7365
7366 case CONST:
7367 /* This used to output parentheses around the expression,
7368 but that does not work on the 386 (either ATT or BSD assembler). */
7369 output_pic_addr_const (file, XEXP (x, 0), code);
7370 break;
7371
7372 case CONST_DOUBLE:
7373 if (GET_MODE (x) == VOIDmode)
7374 {
7375 /* We can use %d if the number is <32 bits and positive. */
7376 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
7377 fprintf (file, "0x%lx%08lx",
7378 (unsigned long) CONST_DOUBLE_HIGH (x),
7379 (unsigned long) CONST_DOUBLE_LOW (x));
7380 else
7381 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
7382 }
7383 else
7384 /* We can't handle floating point constants;
7385 PRINT_OPERAND must handle them. */
7386 output_operand_lossage ("floating constant misused");
7387 break;
7388
7389 case PLUS:
7390 /* Some assemblers need integer constants to appear first. */
7391 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
7392 {
7393 output_pic_addr_const (file, XEXP (x, 0), code);
7394 putc ('+', file);
7395 output_pic_addr_const (file, XEXP (x, 1), code);
7396 }
7397 else
7398 {
7399 gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
7400 output_pic_addr_const (file, XEXP (x, 1), code);
7401 putc ('+', file);
7402 output_pic_addr_const (file, XEXP (x, 0), code);
7403 }
7404 break;
7405
7406 case MINUS:
7407 if (!TARGET_MACHO)
7408 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
7409 output_pic_addr_const (file, XEXP (x, 0), code);
7410 putc ('-', file);
7411 output_pic_addr_const (file, XEXP (x, 1), code);
7412 if (!TARGET_MACHO)
7413 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
7414 break;
7415
7416 case UNSPEC:
7417 gcc_assert (XVECLEN (x, 0) == 1);
7418 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
7419 switch (XINT (x, 1))
7420 {
7421 case UNSPEC_GOT:
7422 fputs ("@GOT", file);
7423 break;
7424 case UNSPEC_GOTOFF:
7425 fputs ("@GOTOFF", file);
7426 break;
7427 case UNSPEC_GOTPCREL:
7428 fputs ("@GOTPCREL(%rip)", file);
7429 break;
7430 case UNSPEC_GOTTPOFF:
7431 /* FIXME: This might be @TPOFF in Sun ld too. */
7432 fputs ("@GOTTPOFF", file);
7433 break;
7434 case UNSPEC_TPOFF:
7435 fputs ("@TPOFF", file);
7436 break;
7437 case UNSPEC_NTPOFF:
7438 if (TARGET_64BIT)
7439 fputs ("@TPOFF", file);
7440 else
7441 fputs ("@NTPOFF", file);
7442 break;
7443 case UNSPEC_DTPOFF:
7444 fputs ("@DTPOFF", file);
7445 break;
7446 case UNSPEC_GOTNTPOFF:
7447 if (TARGET_64BIT)
7448 fputs ("@GOTTPOFF(%rip)", file);
7449 else
7450 fputs ("@GOTNTPOFF", file);
7451 break;
7452 case UNSPEC_INDNTPOFF:
7453 fputs ("@INDNTPOFF", file);
7454 break;
7455 default:
7456 output_operand_lossage ("invalid UNSPEC as operand");
7457 break;
7458 }
7459 break;
7460
7461 default:
7462 output_operand_lossage ("invalid expression as operand");
7463 }
7464 }
7465
7466 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
7467 We need to emit DTP-relative relocations. */
7468
7469 static void
7470 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
7471 {
7472 fputs (ASM_LONG, file);
7473 output_addr_const (file, x);
7474 fputs ("@DTPOFF", file);
7475 switch (size)
7476 {
7477 case 4:
7478 break;
7479 case 8:
7480 fputs (", 0", file);
7481 break;
7482 default:
7483 gcc_unreachable ();
7484 }
7485 }
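
/* Illustration only: with the usual ".long" definition of ASM_LONG this
   emits e.g. ".long foo@DTPOFF" for SIZE == 4 and ".long foo@DTPOFF, 0"
   for SIZE == 8 (the upper half of the 8-byte case is always zero here).  */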
7486
7487 /* In the name of slightly smaller debug output, and to cater to
7488 general assembler lossage, recognize PIC+GOTOFF and turn it back
7489 into a direct symbol reference.
7490
7491 On Darwin, this is necessary to avoid a crash, because Darwin
7492 has a different PIC label for each routine but the DWARF debugging
7493 information is not associated with any particular routine, so it's
7494 necessary to remove references to the PIC label from RTL stored by
7495 the DWARF output code. */
7496
7497 static rtx
7498 ix86_delegitimize_address (rtx orig_x)
7499 {
7500 rtx x = orig_x;
7501 /* reg_addend is NULL or a multiple of some register. */
7502 rtx reg_addend = NULL_RTX;
7503 /* const_addend is NULL or a const_int. */
7504 rtx const_addend = NULL_RTX;
7505 /* This is the result, or NULL. */
7506 rtx result = NULL_RTX;
7507
7508 if (GET_CODE (x) == MEM)
7509 x = XEXP (x, 0);
7510
7511 if (TARGET_64BIT)
7512 {
7513 if (GET_CODE (x) != CONST
7514 || GET_CODE (XEXP (x, 0)) != UNSPEC
7515 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
7516 || GET_CODE (orig_x) != MEM)
7517 return orig_x;
7518 return XVECEXP (XEXP (x, 0), 0, 0);
7519 }
7520
7521 if (GET_CODE (x) != PLUS
7522 || GET_CODE (XEXP (x, 1)) != CONST)
7523 return orig_x;
7524
7525 if (GET_CODE (XEXP (x, 0)) == REG
7526 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
7527 /* %ebx + GOT/GOTOFF */
7528 ;
7529 else if (GET_CODE (XEXP (x, 0)) == PLUS)
7530 {
7531 /* %ebx + %reg * scale + GOT/GOTOFF */
7532 reg_addend = XEXP (x, 0);
7533 if (GET_CODE (XEXP (reg_addend, 0)) == REG
7534 && REGNO (XEXP (reg_addend, 0)) == PIC_OFFSET_TABLE_REGNUM)
7535 reg_addend = XEXP (reg_addend, 1);
7536 else if (GET_CODE (XEXP (reg_addend, 1)) == REG
7537 && REGNO (XEXP (reg_addend, 1)) == PIC_OFFSET_TABLE_REGNUM)
7538 reg_addend = XEXP (reg_addend, 0);
7539 else
7540 return orig_x;
7541 if (GET_CODE (reg_addend) != REG
7542 && GET_CODE (reg_addend) != MULT
7543 && GET_CODE (reg_addend) != ASHIFT)
7544 return orig_x;
7545 }
7546 else
7547 return orig_x;
7548
7549 x = XEXP (XEXP (x, 1), 0);
7550 if (GET_CODE (x) == PLUS
7551 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7552 {
7553 const_addend = XEXP (x, 1);
7554 x = XEXP (x, 0);
7555 }
7556
7557 if (GET_CODE (x) == UNSPEC
7558 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
7559 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
7560 result = XVECEXP (x, 0, 0);
7561
7562 if (TARGET_MACHO && darwin_local_data_pic (x)
7563 && GET_CODE (orig_x) != MEM)
7564 result = XEXP (x, 0);
7565
7566 if (! result)
7567 return orig_x;
7568
7569 if (const_addend)
7570 result = gen_rtx_PLUS (Pmode, result, const_addend);
7571 if (reg_addend)
7572 result = gen_rtx_PLUS (Pmode, reg_addend, result);
7573 return result;
7574 }
7575 \f
7576 static void
7577 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
7578 int fp, FILE *file)
7579 {
7580 const char *suffix;
7581
7582 if (mode == CCFPmode || mode == CCFPUmode)
7583 {
7584 enum rtx_code second_code, bypass_code;
7585 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
7586 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
7587 code = ix86_fp_compare_code_to_integer (code);
7588 mode = CCmode;
7589 }
7590 if (reverse)
7591 code = reverse_condition (code);
7592
7593 switch (code)
7594 {
7595 case EQ:
7596 suffix = "e";
7597 break;
7598 case NE:
7599 suffix = "ne";
7600 break;
7601 case GT:
7602 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
7603 suffix = "g";
7604 break;
7605 case GTU:
7606 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
7607 Those same assemblers have the same but opposite lossage on cmov. */
7608 gcc_assert (mode == CCmode);
7609 suffix = fp ? "nbe" : "a";
7610 break;
7611 case LT:
7612 switch (mode)
7613 {
7614 case CCNOmode:
7615 case CCGOCmode:
7616 suffix = "s";
7617 break;
7618
7619 case CCmode:
7620 case CCGCmode:
7621 suffix = "l";
7622 break;
7623
7624 default:
7625 gcc_unreachable ();
7626 }
7627 break;
7628 case LTU:
7629 gcc_assert (mode == CCmode);
7630 suffix = "b";
7631 break;
7632 case GE:
7633 switch (mode)
7634 {
7635 case CCNOmode:
7636 case CCGOCmode:
7637 suffix = "ns";
7638 break;
7639
7640 case CCmode:
7641 case CCGCmode:
7642 suffix = "ge";
7643 break;
7644
7645 default:
7646 gcc_unreachable ();
7647 }
7648 break;
7649 case GEU:
7650 /* ??? As above. */
7651 gcc_assert (mode == CCmode);
7652 suffix = fp ? "nb" : "ae";
7653 break;
7654 case LE:
7655 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
7656 suffix = "le";
7657 break;
7658 case LEU:
7659 gcc_assert (mode == CCmode);
7660 suffix = "be";
7661 break;
7662 case UNORDERED:
7663 suffix = fp ? "u" : "p";
7664 break;
7665 case ORDERED:
7666 suffix = fp ? "nu" : "np";
7667 break;
7668 default:
7669 gcc_unreachable ();
7670 }
7671 fputs (suffix, file);
7672 }
7673
7674 /* Print the name of register X to FILE based on its machine mode and number.
7675 If CODE is 'w', pretend the mode is HImode.
7676 If CODE is 'b', pretend the mode is QImode.
7677 If CODE is 'k', pretend the mode is SImode.
7678 If CODE is 'q', pretend the mode is DImode.
7679 If CODE is 'h', pretend the reg is the 'high' byte register.
7680 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
7681
7682 void
7683 print_reg (rtx x, int code, FILE *file)
7684 {
7685 gcc_assert (REGNO (x) != ARG_POINTER_REGNUM
7686 && REGNO (x) != FRAME_POINTER_REGNUM
7687 && REGNO (x) != FLAGS_REG
7688 && REGNO (x) != FPSR_REG
7689 && REGNO (x) != FPCR_REG);
7690
7691 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
7692 putc ('%', file);
7693
7694 if (code == 'w' || MMX_REG_P (x))
7695 code = 2;
7696 else if (code == 'b')
7697 code = 1;
7698 else if (code == 'k')
7699 code = 4;
7700 else if (code == 'q')
7701 code = 8;
7702 else if (code == 'y')
7703 code = 3;
7704 else if (code == 'h')
7705 code = 0;
7706 else
7707 code = GET_MODE_SIZE (GET_MODE (x));
7708
7709 /* Irritatingly, AMD extended registers use a different naming convention
7710 from the normal registers. */
7711 if (REX_INT_REG_P (x))
7712 {
7713 gcc_assert (TARGET_64BIT);
7714 switch (code)
7715 {
7716 case 0:
7717 error ("extended registers have no high halves");
7718 break;
7719 case 1:
7720 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
7721 break;
7722 case 2:
7723 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
7724 break;
7725 case 4:
7726 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
7727 break;
7728 case 8:
7729 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
7730 break;
7731 default:
7732 error ("unsupported operand size for extended register");
7733 break;
7734 }
7735 return;
7736 }
7737 switch (code)
7738 {
7739 case 3:
7740 if (STACK_TOP_P (x))
7741 {
7742 fputs ("st(0)", file);
7743 break;
7744 }
7745 /* FALLTHRU */
7746 case 8:
7747 case 4:
7748 case 12:
7749 if (! ANY_FP_REG_P (x))
7750 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
7751 /* FALLTHRU */
7752 case 16:
7753 case 2:
7754 normal:
7755 fputs (hi_reg_name[REGNO (x)], file);
7756 break;
7757 case 1:
7758 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
7759 goto normal;
7760 fputs (qi_reg_name[REGNO (x)], file);
7761 break;
7762 case 0:
7763 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
7764 goto normal;
7765 fputs (qi_high_reg_name[REGNO (x)], file);
7766 break;
7767 default:
7768 gcc_unreachable ();
7769 }
7770 }
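
/* Illustration only: for hard register 0 the codes above select
   "%al" ('b'), "%ax" ('w'), "%eax" ('k'), "%rax" ('q') and "%ah" ('h'),
   while an extended register such as r8 prints as r8b/r8w/r8d/r8
   depending on the same size codes.  */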
7771
7772 /* Locate some local-dynamic symbol still in use by this function
7773 so that we can print its name in some tls_local_dynamic_base
7774 pattern. */
7775
7776 static const char *
7777 get_some_local_dynamic_name (void)
7778 {
7779 rtx insn;
7780
7781 if (cfun->machine->some_ld_name)
7782 return cfun->machine->some_ld_name;
7783
7784 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
7785 if (INSN_P (insn)
7786 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
7787 return cfun->machine->some_ld_name;
7788
7789 gcc_unreachable ();
7790 }
7791
7792 static int
7793 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
7794 {
7795 rtx x = *px;
7796
7797 if (GET_CODE (x) == SYMBOL_REF
7798 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
7799 {
7800 cfun->machine->some_ld_name = XSTR (x, 0);
7801 return 1;
7802 }
7803
7804 return 0;
7805 }
7806
7807 /* Meaning of CODE:
7808 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
7809 C -- print opcode suffix for set/cmov insn.
7810 c -- like C, but print reversed condition
7811 F,f -- likewise, but for floating-point.
7812 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
7813 otherwise nothing
7814 R -- print the prefix for register names.
7815 z -- print the opcode suffix for the size of the current operand.
7816 * -- print a star (in certain assembler syntax)
7817 A -- print an absolute memory reference.
7818 w -- print the operand as if it's a "word" (HImode) even if it isn't.
7819 s -- print a shift double count, followed by the assembler's argument
7820 delimiter.
7821 b -- print the QImode name of the register for the indicated operand.
7822 %b0 would print %al if operands[0] is reg 0.
7823 w -- likewise, print the HImode name of the register.
7824 k -- likewise, print the SImode name of the register.
7825 q -- likewise, print the DImode name of the register.
7826 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
7827 y -- print "st(0)" instead of "st" as a register.
7828 D -- print condition for SSE cmp instruction.
7829 P -- if PIC, print an @PLT suffix.
7830 X -- don't print any sort of PIC '@' suffix for a symbol.
7831 & -- print some in-use local-dynamic symbol name.
7832 H -- print a memory address offset by 8; used for sse high-parts
7833 */
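
/* Illustration only: in an output template along the lines of
   "cmov%O2%C1\t{%2, %0|%0, %2}" the 'O' and 'C' codes above supply the
   Sun-syntax size suffix and the condition suffix, while a plain "%b0"
   would print the QImode name of operand 0, e.g. "%al".  */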
7834
7835 void
7836 print_operand (FILE *file, rtx x, int code)
7837 {
7838 if (code)
7839 {
7840 switch (code)
7841 {
7842 case '*':
7843 if (ASSEMBLER_DIALECT == ASM_ATT)
7844 putc ('*', file);
7845 return;
7846
7847 case '&':
7848 assemble_name (file, get_some_local_dynamic_name ());
7849 return;
7850
7851 case 'A':
7852 switch (ASSEMBLER_DIALECT)
7853 {
7854 case ASM_ATT:
7855 putc ('*', file);
7856 break;
7857
7858 case ASM_INTEL:
7859 /* Intel syntax. For absolute addresses, registers should not
7860 be surrounded by brackets. */
7861 if (GET_CODE (x) != REG)
7862 {
7863 putc ('[', file);
7864 PRINT_OPERAND (file, x, 0);
7865 putc (']', file);
7866 return;
7867 }
7868 break;
7869
7870 default:
7871 gcc_unreachable ();
7872 }
7873
7874 PRINT_OPERAND (file, x, 0);
7875 return;
7876
7877
7878 case 'L':
7879 if (ASSEMBLER_DIALECT == ASM_ATT)
7880 putc ('l', file);
7881 return;
7882
7883 case 'W':
7884 if (ASSEMBLER_DIALECT == ASM_ATT)
7885 putc ('w', file);
7886 return;
7887
7888 case 'B':
7889 if (ASSEMBLER_DIALECT == ASM_ATT)
7890 putc ('b', file);
7891 return;
7892
7893 case 'Q':
7894 if (ASSEMBLER_DIALECT == ASM_ATT)
7895 putc ('l', file);
7896 return;
7897
7898 case 'S':
7899 if (ASSEMBLER_DIALECT == ASM_ATT)
7900 putc ('s', file);
7901 return;
7902
7903 case 'T':
7904 if (ASSEMBLER_DIALECT == ASM_ATT)
7905 putc ('t', file);
7906 return;
7907
7908 case 'z':
7909 /* 387 opcodes don't get size suffixes if the operands are
7910 registers. */
7911 if (STACK_REG_P (x))
7912 return;
7913
7914 /* Likewise if using Intel opcodes. */
7915 if (ASSEMBLER_DIALECT == ASM_INTEL)
7916 return;
7917
7918 /* Derive the opcode suffix from the size of the operand. */
7919 switch (GET_MODE_SIZE (GET_MODE (x)))
7920 {
7921 case 1:
7922 putc ('b', file);
7923 return;
7924
7925 case 2:
7926 #ifdef HAVE_GAS_FILDS_FISTS
7927 putc ('s', file);
7928 #endif
7929 return;
7930
7931 case 4:
7932 if (GET_MODE (x) == SFmode)
7933 {
7934 putc ('s', file);
7935 return;
7936 }
7937 else
7938 putc ('l', file);
7939 return;
7940
7941 case 12:
7942 case 16:
7943 putc ('t', file);
7944 return;
7945
7946 case 8:
7947 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
7948 {
7949 #ifdef GAS_MNEMONICS
7950 putc ('q', file);
7951 #else
7952 putc ('l', file);
7953 putc ('l', file);
7954 #endif
7955 }
7956 else
7957 putc ('l', file);
7958 return;
7959
7960 default:
7961 gcc_unreachable ();
7962 }
7963
7964 case 'b':
7965 case 'w':
7966 case 'k':
7967 case 'q':
7968 case 'h':
7969 case 'y':
7970 case 'X':
7971 case 'P':
7972 break;
7973
7974 case 's':
7975 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
7976 {
7977 PRINT_OPERAND (file, x, 0);
7978 putc (',', file);
7979 }
7980 return;
7981
7982 case 'D':
7983 /* A little bit of brain damage here. The SSE compare instructions
7984 use completely different names for the comparisons than the
7985 fp conditional moves do. */
7986 switch (GET_CODE (x))
7987 {
7988 case EQ:
7989 case UNEQ:
7990 fputs ("eq", file);
7991 break;
7992 case LT:
7993 case UNLT:
7994 fputs ("lt", file);
7995 break;
7996 case LE:
7997 case UNLE:
7998 fputs ("le", file);
7999 break;
8000 case UNORDERED:
8001 fputs ("unord", file);
8002 break;
8003 case NE:
8004 case LTGT:
8005 fputs ("neq", file);
8006 break;
8007 case UNGE:
8008 case GE:
8009 fputs ("nlt", file);
8010 break;
8011 case UNGT:
8012 case GT:
8013 fputs ("nle", file);
8014 break;
8015 case ORDERED:
8016 fputs ("ord", file);
8017 break;
8018 default:
8019 gcc_unreachable ();
8020 }
8021 return;
8022 case 'O':
8023 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8024 if (ASSEMBLER_DIALECT == ASM_ATT)
8025 {
8026 switch (GET_MODE (x))
8027 {
8028 case HImode: putc ('w', file); break;
8029 case SImode:
8030 case SFmode: putc ('l', file); break;
8031 case DImode:
8032 case DFmode: putc ('q', file); break;
8033 default: gcc_unreachable ();
8034 }
8035 putc ('.', file);
8036 }
8037 #endif
8038 return;
8039 case 'C':
8040 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
8041 return;
8042 case 'F':
8043 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8044 if (ASSEMBLER_DIALECT == ASM_ATT)
8045 putc ('.', file);
8046 #endif
8047 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
8048 return;
8049
8050 /* Like above, but reverse condition */
8051 case 'c':
8052 /* Check to see if argument to %c is really a constant
8053 and not a condition code which needs to be reversed. */
8054 if (!COMPARISON_P (x))
8055 {
8056 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
8057 return;
8058 }
8059 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
8060 return;
8061 case 'f':
8062 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8063 if (ASSEMBLER_DIALECT == ASM_ATT)
8064 putc ('.', file);
8065 #endif
8066 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
8067 return;
8068
8069 case 'H':
8070 /* It doesn't actually matter what mode we use here, as we're
8071 only going to use this for printing. */
8072 x = adjust_address_nv (x, DImode, 8);
8073 break;
8074
8075 case '+':
8076 {
8077 rtx x;
8078
8079 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
8080 return;
8081
8082 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
8083 if (x)
8084 {
8085 int pred_val = INTVAL (XEXP (x, 0));
8086
8087 if (pred_val < REG_BR_PROB_BASE * 45 / 100
8088 || pred_val > REG_BR_PROB_BASE * 55 / 100)
8089 {
8090 int taken = pred_val > REG_BR_PROB_BASE / 2;
8091 int cputaken = final_forward_branch_p (current_output_insn) == 0;
8092
8093 /* Emit hints only in the case where the default branch prediction
8094 heuristics would fail. */
8095 if (taken != cputaken)
8096 {
8097 /* We use 3e (DS) prefix for taken branches and
8098 2e (CS) prefix for not taken branches. */
8099 if (taken)
8100 fputs ("ds ; ", file);
8101 else
8102 fputs ("cs ; ", file);
8103 }
8104 }
8105 }
8106 return;
8107 }
8108 default:
8109 output_operand_lossage ("invalid operand code '%c'", code);
8110 }
8111 }
8112
8113 if (GET_CODE (x) == REG)
8114 print_reg (x, code, file);
8115
8116 else if (GET_CODE (x) == MEM)
8117 {
8118 /* No `byte ptr' prefix for call instructions. */
8119 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
8120 {
8121 const char * size;
8122 switch (GET_MODE_SIZE (GET_MODE (x)))
8123 {
8124 case 1: size = "BYTE"; break;
8125 case 2: size = "WORD"; break;
8126 case 4: size = "DWORD"; break;
8127 case 8: size = "QWORD"; break;
8128 case 12: size = "XWORD"; break;
8129 case 16: size = "XMMWORD"; break;
8130 default:
8131 gcc_unreachable ();
8132 }
8133
8134 /* Check for explicit size override (codes 'b', 'w' and 'k') */
8135 if (code == 'b')
8136 size = "BYTE";
8137 else if (code == 'w')
8138 size = "WORD";
8139 else if (code == 'k')
8140 size = "DWORD";
8141
8142 fputs (size, file);
8143 fputs (" PTR ", file);
8144 }
8145
8146 x = XEXP (x, 0);
8147 /* Avoid (%rip) for call operands. */
8148 if (CONSTANT_ADDRESS_P (x) && code == 'P'
8149 && GET_CODE (x) != CONST_INT)
8150 output_addr_const (file, x);
8151 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
8152 output_operand_lossage ("invalid constraints for operand");
8153 else
8154 output_address (x);
8155 }
8156
8157 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
8158 {
8159 REAL_VALUE_TYPE r;
8160 long l;
8161
8162 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8163 REAL_VALUE_TO_TARGET_SINGLE (r, l);
8164
8165 if (ASSEMBLER_DIALECT == ASM_ATT)
8166 putc ('$', file);
8167 fprintf (file, "0x%08lx", l);
8168 }
8169
8170 /* These float cases don't actually occur as immediate operands. */
8171 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
8172 {
8173 char dstr[30];
8174
8175 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
8176 fprintf (file, "%s", dstr);
8177 }
8178
8179 else if (GET_CODE (x) == CONST_DOUBLE
8180 && GET_MODE (x) == XFmode)
8181 {
8182 char dstr[30];
8183
8184 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
8185 fprintf (file, "%s", dstr);
8186 }
8187
8188 else
8189 {
8190 /* We have patterns that allow zero sets of memory, for instance.
8191 In 64-bit mode, we should probably support all 8-byte vectors,
8192 since we can in fact encode that into an immediate. */
8193 if (GET_CODE (x) == CONST_VECTOR)
8194 {
8195 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
8196 x = const0_rtx;
8197 }
8198
8199 if (code != 'P')
8200 {
8201 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
8202 {
8203 if (ASSEMBLER_DIALECT == ASM_ATT)
8204 putc ('$', file);
8205 }
8206 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
8207 || GET_CODE (x) == LABEL_REF)
8208 {
8209 if (ASSEMBLER_DIALECT == ASM_ATT)
8210 putc ('$', file);
8211 else
8212 fputs ("OFFSET FLAT:", file);
8213 }
8214 }
8215 if (GET_CODE (x) == CONST_INT)
8216 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
8217 else if (flag_pic)
8218 output_pic_addr_const (file, x, code);
8219 else
8220 output_addr_const (file, x);
8221 }
8222 }
8223 \f
8224 /* Print a memory operand whose address is ADDR. */
8225
8226 void
8227 print_operand_address (FILE *file, rtx addr)
8228 {
8229 struct ix86_address parts;
8230 rtx base, index, disp;
8231 int scale;
8232 int ok = ix86_decompose_address (addr, &parts);
8233
8234 gcc_assert (ok);
8235
8236 base = parts.base;
8237 index = parts.index;
8238 disp = parts.disp;
8239 scale = parts.scale;
8240
8241 switch (parts.seg)
8242 {
8243 case SEG_DEFAULT:
8244 break;
8245 case SEG_FS:
8246 case SEG_GS:
8247 if (USER_LABEL_PREFIX[0] == 0)
8248 putc ('%', file);
8249 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
8250 break;
8251 default:
8252 gcc_unreachable ();
8253 }
8254
8255 if (!base && !index)
8256 {
8257 /* A displacement-only address requires special attention. */
8258
8259 if (GET_CODE (disp) == CONST_INT)
8260 {
8261 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
8262 {
8263 if (USER_LABEL_PREFIX[0] == 0)
8264 putc ('%', file);
8265 fputs ("ds:", file);
8266 }
8267 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
8268 }
8269 else if (flag_pic)
8270 output_pic_addr_const (file, disp, 0);
8271 else
8272 output_addr_const (file, disp);
8273
8274 /* Use the one byte shorter RIP-relative addressing for 64bit mode. */
8275 if (TARGET_64BIT)
8276 {
8277 if (GET_CODE (disp) == CONST
8278 && GET_CODE (XEXP (disp, 0)) == PLUS
8279 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
8280 disp = XEXP (XEXP (disp, 0), 0);
8281 if (GET_CODE (disp) == LABEL_REF
8282 || (GET_CODE (disp) == SYMBOL_REF
8283 && SYMBOL_REF_TLS_MODEL (disp) == 0))
8284 fputs ("(%rip)", file);
8285 }
8286 }
8287 else
8288 {
8289 if (ASSEMBLER_DIALECT == ASM_ATT)
8290 {
8291 if (disp)
8292 {
8293 if (flag_pic)
8294 output_pic_addr_const (file, disp, 0);
8295 else if (GET_CODE (disp) == LABEL_REF)
8296 output_asm_label (disp);
8297 else
8298 output_addr_const (file, disp);
8299 }
8300
8301 putc ('(', file);
8302 if (base)
8303 print_reg (base, 0, file);
8304 if (index)
8305 {
8306 putc (',', file);
8307 print_reg (index, 0, file);
8308 if (scale != 1)
8309 fprintf (file, ",%d", scale);
8310 }
8311 putc (')', file);
8312 }
8313 else
8314 {
8315 rtx offset = NULL_RTX;
8316
8317 if (disp)
8318 {
8319 /* Pull out the offset of a symbol; print any symbol itself. */
8320 if (GET_CODE (disp) == CONST
8321 && GET_CODE (XEXP (disp, 0)) == PLUS
8322 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
8323 {
8324 offset = XEXP (XEXP (disp, 0), 1);
8325 disp = gen_rtx_CONST (VOIDmode,
8326 XEXP (XEXP (disp, 0), 0));
8327 }
8328
8329 if (flag_pic)
8330 output_pic_addr_const (file, disp, 0);
8331 else if (GET_CODE (disp) == LABEL_REF)
8332 output_asm_label (disp);
8333 else if (GET_CODE (disp) == CONST_INT)
8334 offset = disp;
8335 else
8336 output_addr_const (file, disp);
8337 }
8338
8339 putc ('[', file);
8340 if (base)
8341 {
8342 print_reg (base, 0, file);
8343 if (offset)
8344 {
8345 if (INTVAL (offset) >= 0)
8346 putc ('+', file);
8347 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
8348 }
8349 }
8350 else if (offset)
8351 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
8352 else
8353 putc ('0', file);
8354
8355 if (index)
8356 {
8357 putc ('+', file);
8358 print_reg (index, 0, file);
8359 if (scale != 1)
8360 fprintf (file, "*%d", scale);
8361 }
8362 putc (']', file);
8363 }
8364 }
8365 }
8366
8367 bool
8368 output_addr_const_extra (FILE *file, rtx x)
8369 {
8370 rtx op;
8371
8372 if (GET_CODE (x) != UNSPEC)
8373 return false;
8374
8375 op = XVECEXP (x, 0, 0);
8376 switch (XINT (x, 1))
8377 {
8378 case UNSPEC_GOTTPOFF:
8379 output_addr_const (file, op);
8380 /* FIXME: This might be @TPOFF in Sun ld. */
8381 fputs ("@GOTTPOFF", file);
8382 break;
8383 case UNSPEC_TPOFF:
8384 output_addr_const (file, op);
8385 fputs ("@TPOFF", file);
8386 break;
8387 case UNSPEC_NTPOFF:
8388 output_addr_const (file, op);
8389 if (TARGET_64BIT)
8390 fputs ("@TPOFF", file);
8391 else
8392 fputs ("@NTPOFF", file);
8393 break;
8394 case UNSPEC_DTPOFF:
8395 output_addr_const (file, op);
8396 fputs ("@DTPOFF", file);
8397 break;
8398 case UNSPEC_GOTNTPOFF:
8399 output_addr_const (file, op);
8400 if (TARGET_64BIT)
8401 fputs ("@GOTTPOFF(%rip)", file);
8402 else
8403 fputs ("@GOTNTPOFF", file);
8404 break;
8405 case UNSPEC_INDNTPOFF:
8406 output_addr_const (file, op);
8407 fputs ("@INDNTPOFF", file);
8408 break;
8409
8410 default:
8411 return false;
8412 }
8413
8414 return true;
8415 }
8416 \f
8417 /* Split one or more DImode RTL references into pairs of SImode
8418 references. The RTL can be REG, offsettable MEM, integer constant, or
8419 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
8420 split and "num" is its length. lo_half and hi_half are output arrays
8421 that parallel "operands". */
8422
8423 void
8424 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
8425 {
8426 while (num--)
8427 {
8428 rtx op = operands[num];
8429
8430 /* simplify_subreg refuses to split volatile memory addresses,
8431 but we still have to handle them. */
8432 if (GET_CODE (op) == MEM)
8433 {
8434 lo_half[num] = adjust_address (op, SImode, 0);
8435 hi_half[num] = adjust_address (op, SImode, 4);
8436 }
8437 else
8438 {
8439 lo_half[num] = simplify_gen_subreg (SImode, op,
8440 GET_MODE (op) == VOIDmode
8441 ? DImode : GET_MODE (op), 0);
8442 hi_half[num] = simplify_gen_subreg (SImode, op,
8443 GET_MODE (op) == VOIDmode
8444 ? DImode : GET_MODE (op), 4);
8445 }
8446 }
8447 }
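
/* Illustration only (not built): a minimal sketch of how a post-reload
   splitter might use split_di for a DImode register-to-register move,
   assuming `operands' holds the DImode destination and source.  */
#if 0
{
  rtx lo[2], hi[2];

  /* operands[0] and operands[1] are DImode; after the call lo[] and
     hi[] hold the corresponding SImode halves.  */
  split_di (operands, 2, lo, hi);
  emit_move_insn (lo[0], lo[1]);
  emit_move_insn (hi[0], hi[1]);
}
#endif
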
8448 /* Split one or more TImode RTL references into pairs of DImode
8449 references. The RTL can be REG, offsettable MEM, integer constant, or
8450 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
8451 split and "num" is its length. lo_half and hi_half are output arrays
8452 that parallel "operands". */
8453
8454 void
8455 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
8456 {
8457 while (num--)
8458 {
8459 rtx op = operands[num];
8460
8461 /* simplify_subreg refuses to split volatile memory addresses, but we
8462 still have to handle them. */
8463 if (GET_CODE (op) == MEM)
8464 {
8465 lo_half[num] = adjust_address (op, DImode, 0);
8466 hi_half[num] = adjust_address (op, DImode, 8);
8467 }
8468 else
8469 {
8470 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
8471 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
8472 }
8473 }
8474 }
8475 \f
8476 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
8477 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
8478 is the expression of the binary operation. The output may either be
8479 emitted here, or returned to the caller, like all output_* functions.
8480
8481 There is no guarantee that the operands are the same mode, as they
8482 might be within FLOAT or FLOAT_EXTEND expressions. */
8483
8484 #ifndef SYSV386_COMPAT
8485 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
8486 wants to fix the assemblers because that causes incompatibility
8487 with gcc. No-one wants to fix gcc because that causes
8488 incompatibility with assemblers... You can use the option of
8489 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
8490 #define SYSV386_COMPAT 1
8491 #endif
8492
8493 const char *
8494 output_387_binary_op (rtx insn, rtx *operands)
8495 {
8496 static char buf[30];
8497 const char *p;
8498 const char *ssep;
8499 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
8500
8501 #ifdef ENABLE_CHECKING
8502 /* Even if we do not want to check the inputs, this documents the input
8503 constraints, which helps in understanding the following code. */
8504 if (STACK_REG_P (operands[0])
8505 && ((REG_P (operands[1])
8506 && REGNO (operands[0]) == REGNO (operands[1])
8507 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
8508 || (REG_P (operands[2])
8509 && REGNO (operands[0]) == REGNO (operands[2])
8510 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
8511 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
8512 ; /* ok */
8513 else
8514 gcc_assert (is_sse);
8515 #endif
8516
8517 switch (GET_CODE (operands[3]))
8518 {
8519 case PLUS:
8520 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8521 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8522 p = "fiadd";
8523 else
8524 p = "fadd";
8525 ssep = "add";
8526 break;
8527
8528 case MINUS:
8529 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8530 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8531 p = "fisub";
8532 else
8533 p = "fsub";
8534 ssep = "sub";
8535 break;
8536
8537 case MULT:
8538 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8539 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8540 p = "fimul";
8541 else
8542 p = "fmul";
8543 ssep = "mul";
8544 break;
8545
8546 case DIV:
8547 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8548 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8549 p = "fidiv";
8550 else
8551 p = "fdiv";
8552 ssep = "div";
8553 break;
8554
8555 default:
8556 gcc_unreachable ();
8557 }
8558
8559 if (is_sse)
8560 {
8561 strcpy (buf, ssep);
8562 if (GET_MODE (operands[0]) == SFmode)
8563 strcat (buf, "ss\t{%2, %0|%0, %2}");
8564 else
8565 strcat (buf, "sd\t{%2, %0|%0, %2}");
8566 return buf;
8567 }
8568 strcpy (buf, p);
8569
8570 switch (GET_CODE (operands[3]))
8571 {
8572 case MULT:
8573 case PLUS:
8574 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
8575 {
8576 rtx temp = operands[2];
8577 operands[2] = operands[1];
8578 operands[1] = temp;
8579 }
8580
8581 /* We know operands[0] == operands[1]. */
8582
8583 if (GET_CODE (operands[2]) == MEM)
8584 {
8585 p = "%z2\t%2";
8586 break;
8587 }
8588
8589 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
8590 {
8591 if (STACK_TOP_P (operands[0]))
8592 /* How is it that we are storing to a dead operand[2]?
8593 Well, presumably operands[1] is dead too. We can't
8594 store the result to st(0) as st(0) gets popped on this
8595 instruction. Instead store to operands[2] (which I
8596 think has to be st(1)). st(1) will be popped later.
8597 gcc <= 2.8.1 didn't have this check and generated
8598 assembly code that the Unixware assembler rejected. */
8599 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
8600 else
8601 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
8602 break;
8603 }
8604
8605 if (STACK_TOP_P (operands[0]))
8606 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
8607 else
8608 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
8609 break;
8610
8611 case MINUS:
8612 case DIV:
8613 if (GET_CODE (operands[1]) == MEM)
8614 {
8615 p = "r%z1\t%1";
8616 break;
8617 }
8618
8619 if (GET_CODE (operands[2]) == MEM)
8620 {
8621 p = "%z2\t%2";
8622 break;
8623 }
8624
8625 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
8626 {
8627 #if SYSV386_COMPAT
8628 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
8629 derived assemblers, confusingly reverse the direction of
8630 the operation for fsub{r} and fdiv{r} when the
8631 destination register is not st(0). The Intel assembler
8632 doesn't have this brain damage. Read !SYSV386_COMPAT to
8633 figure out what the hardware really does. */
8634 if (STACK_TOP_P (operands[0]))
8635 p = "{p\t%0, %2|rp\t%2, %0}";
8636 else
8637 p = "{rp\t%2, %0|p\t%0, %2}";
8638 #else
8639 if (STACK_TOP_P (operands[0]))
8640 /* As above for fmul/fadd, we can't store to st(0). */
8641 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
8642 else
8643 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
8644 #endif
8645 break;
8646 }
8647
8648 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
8649 {
8650 #if SYSV386_COMPAT
8651 if (STACK_TOP_P (operands[0]))
8652 p = "{rp\t%0, %1|p\t%1, %0}";
8653 else
8654 p = "{p\t%1, %0|rp\t%0, %1}";
8655 #else
8656 if (STACK_TOP_P (operands[0]))
8657 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
8658 else
8659 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
8660 #endif
8661 break;
8662 }
8663
8664 if (STACK_TOP_P (operands[0]))
8665 {
8666 if (STACK_TOP_P (operands[1]))
8667 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
8668 else
8669 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
8670 break;
8671 }
8672 else if (STACK_TOP_P (operands[1]))
8673 {
8674 #if SYSV386_COMPAT
8675 p = "{\t%1, %0|r\t%0, %1}";
8676 #else
8677 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
8678 #endif
8679 }
8680 else
8681 {
8682 #if SYSV386_COMPAT
8683 p = "{r\t%2, %0|\t%0, %2}";
8684 #else
8685 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
8686 #endif
8687 }
8688 break;
8689
8690 default:
8691 gcc_unreachable ();
8692 }
8693
8694 strcat (buf, p);
8695 return buf;
8696 }
8697
8698 /* Return needed mode for entity in optimize_mode_switching pass. */
8699
8700 int
8701 ix86_mode_needed (int entity, rtx insn)
8702 {
8703 enum attr_i387_cw mode;
8704
8705 /* The mode UNINITIALIZED is used to store the control word after a
8706 function call or ASM pattern. The mode ANY specifies that the insn
8707 has no requirements on the control word and makes no changes to the
8708 bits we are interested in. */
8709
8710 if (CALL_P (insn)
8711 || (NONJUMP_INSN_P (insn)
8712 && (asm_noperands (PATTERN (insn)) >= 0
8713 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
8714 return I387_CW_UNINITIALIZED;
8715
8716 if (recog_memoized (insn) < 0)
8717 return I387_CW_ANY;
8718
8719 mode = get_attr_i387_cw (insn);
8720
8721 switch (entity)
8722 {
8723 case I387_TRUNC:
8724 if (mode == I387_CW_TRUNC)
8725 return mode;
8726 break;
8727
8728 case I387_FLOOR:
8729 if (mode == I387_CW_FLOOR)
8730 return mode;
8731 break;
8732
8733 case I387_CEIL:
8734 if (mode == I387_CW_CEIL)
8735 return mode;
8736 break;
8737
8738 case I387_MASK_PM:
8739 if (mode == I387_CW_MASK_PM)
8740 return mode;
8741 break;
8742
8743 default:
8744 gcc_unreachable ();
8745 }
8746
8747 return I387_CW_ANY;
8748 }
8749
8750 /* Output code to initialize control word copies used by trunc?f?i and
8751 rounding patterns. MODE selects which control word variant (truncation,
8752 floor, ceil or precision mask) is set up in its stack slot. */
8753
8754 void
8755 emit_i387_cw_initialization (int mode)
8756 {
8757 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
8758 rtx new_mode;
8759
8760 int slot;
8761
8762 rtx reg = gen_reg_rtx (HImode);
8763
8764 emit_insn (gen_x86_fnstcw_1 (stored_mode));
8765 emit_move_insn (reg, copy_rtx (stored_mode));
8766
8767 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
8768 {
8769 switch (mode)
8770 {
8771 case I387_CW_TRUNC:
8772 /* round toward zero (truncate) */
8773 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
8774 slot = SLOT_CW_TRUNC;
8775 break;
8776
8777 case I387_CW_FLOOR:
8778 /* round down toward -oo */
8779 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
8780 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
8781 slot = SLOT_CW_FLOOR;
8782 break;
8783
8784 case I387_CW_CEIL:
8785 /* round up toward +oo */
8786 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
8787 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
8788 slot = SLOT_CW_CEIL;
8789 break;
8790
8791 case I387_CW_MASK_PM:
8792 /* mask precision exception for nearbyint() */
8793 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8794 slot = SLOT_CW_MASK_PM;
8795 break;
8796
8797 default:
8798 gcc_unreachable ();
8799 }
8800 }
8801 else
8802 {
8803 switch (mode)
8804 {
8805 case I387_CW_TRUNC:
8806 /* round toward zero (truncate) */
8807 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
8808 slot = SLOT_CW_TRUNC;
8809 break;
8810
8811 case I387_CW_FLOOR:
8812 /* round down toward -oo */
8813 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
8814 slot = SLOT_CW_FLOOR;
8815 break;
8816
8817 case I387_CW_CEIL:
8818 /* round up toward +oo */
8819 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
8820 slot = SLOT_CW_CEIL;
8821 break;
8822
8823 case I387_CW_MASK_PM:
8824 /* mask precision exception for nearbyint() */
8825 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8826 slot = SLOT_CW_MASK_PM;
8827 break;
8828
8829 default:
8830 gcc_unreachable ();
8831 }
8832 }
8833
8834 gcc_assert (slot < MAX_386_STACK_LOCALS);
8835
8836 new_mode = assign_386_stack_local (HImode, slot);
8837 emit_move_insn (new_mode, reg);
8838 }
8839
8840 /* Output code for INSN to convert a float to a signed int. OPERANDS
8841 are the insn operands. The output may be [HSD]Imode and the input
8842 operand may be [SDX]Fmode. */
8843
8844 const char *
8845 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
8846 {
8847 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8848 int dimode_p = GET_MODE (operands[0]) == DImode;
8849 int round_mode = get_attr_i387_cw (insn);
8850
8851 /* Jump through a hoop or two for DImode, since the hardware has no
8852 non-popping instruction. We used to do this a different way, but
8853 that was somewhat fragile and broke with post-reload splitters. */
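/* Note that the non-popping fist accepts only HImode and SImode
   destinations, while fistp and fisttp also accept DImode and fisttp
   always pops.  So for DImode (or fisttp) we duplicate a still-live
   st(0) with fld first, so the popping store below does not lose the
   value.  */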
8854 if ((dimode_p || fisttp) && !stack_top_dies)
8855 output_asm_insn ("fld\t%y1", operands);
8856
8857 gcc_assert (STACK_TOP_P (operands[1]));
8858 gcc_assert (GET_CODE (operands[0]) == MEM);
8859
8860 if (fisttp)
8861 output_asm_insn ("fisttp%z0\t%0", operands);
8862 else
8863 {
8864 if (round_mode != I387_CW_ANY)
8865 output_asm_insn ("fldcw\t%3", operands);
8866 if (stack_top_dies || dimode_p)
8867 output_asm_insn ("fistp%z0\t%0", operands);
8868 else
8869 output_asm_insn ("fist%z0\t%0", operands);
8870 if (round_mode != I387_CW_ANY)
8871 output_asm_insn ("fldcw\t%2", operands);
8872 }
8873
8874 return "";
8875 }
8876
8877 /* Output code for x87 ffreep insn. The OPNO argument, which may only
8878 have the values zero or one, indicates the ffreep insn's operand
8879 from the OPERANDS array. */
8880
8881 static const char *
8882 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
8883 {
8884 if (TARGET_USE_FFREEP)
8885 #if HAVE_AS_IX86_FFREEP
8886 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
8887 #else
8888 {
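/* The assembler does not support the ffreep mnemonic here, so emit the
   raw encoding: ffreep %st(i) is the two bytes 0xdf 0xc0+i, i.e. the
   little-endian word 0x(c0+i)df.  The '_' placeholder below is patched
   with the stack register number.  */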
8889 static char retval[] = ".word\t0xc_df";
8890 int regno = REGNO (operands[opno]);
8891
8892 gcc_assert (FP_REGNO_P (regno));
8893
8894 retval[9] = '0' + (regno - FIRST_STACK_REG);
8895 return retval;
8896 }
8897 #endif
8898
8899 return opno ? "fstp\t%y1" : "fstp\t%y0";
8900 }
8901
8902
8903 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
8904 should be used. UNORDERED_P is true when fucom should be used. */
8905
8906 const char *
8907 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
8908 {
8909 int stack_top_dies;
8910 rtx cmp_op0, cmp_op1;
8911 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
8912
8913 if (eflags_p)
8914 {
8915 cmp_op0 = operands[0];
8916 cmp_op1 = operands[1];
8917 }
8918 else
8919 {
8920 cmp_op0 = operands[1];
8921 cmp_op1 = operands[2];
8922 }
8923
8924 if (is_sse)
8925 {
8926 if (GET_MODE (operands[0]) == SFmode)
8927 if (unordered_p)
8928 return "ucomiss\t{%1, %0|%0, %1}";
8929 else
8930 return "comiss\t{%1, %0|%0, %1}";
8931 else
8932 if (unordered_p)
8933 return "ucomisd\t{%1, %0|%0, %1}";
8934 else
8935 return "comisd\t{%1, %0|%0, %1}";
8936 }
8937
8938 gcc_assert (STACK_TOP_P (cmp_op0));
8939
8940 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8941
8942 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
8943 {
8944 if (stack_top_dies)
8945 {
8946 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
8947 return output_387_ffreep (operands, 1);
8948 }
8949 else
8950 return "ftst\n\tfnstsw\t%0";
8951 }
8952
8953 if (STACK_REG_P (cmp_op1)
8954 && stack_top_dies
8955 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
8956 && REGNO (cmp_op1) != FIRST_STACK_REG)
8957 {
8958 /* If the top of the 387 stack dies, and the other operand
8959 is also a stack register that dies, then this must be a
8960 `fcompp' float compare. */
8961
8962 if (eflags_p)
8963 {
8964 /* There is no double popping fcomi variant. Fortunately,
8965 eflags is immune from the fstp's cc clobbering. */
8966 if (unordered_p)
8967 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
8968 else
8969 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
8970 return output_387_ffreep (operands, 0);
8971 }
8972 else
8973 {
8974 if (unordered_p)
8975 return "fucompp\n\tfnstsw\t%0";
8976 else
8977 return "fcompp\n\tfnstsw\t%0";
8978 }
8979 }
8980 else
8981 {
8982 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
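/* For example, eflags_p = 1 and stack_top_dies = 1 with the other
   bits zero gives mask 9, selecting "fcomip".  */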
8983
8984 static const char * const alt[16] =
8985 {
8986 "fcom%z2\t%y2\n\tfnstsw\t%0",
8987 "fcomp%z2\t%y2\n\tfnstsw\t%0",
8988 "fucom%z2\t%y2\n\tfnstsw\t%0",
8989 "fucomp%z2\t%y2\n\tfnstsw\t%0",
8990
8991 "ficom%z2\t%y2\n\tfnstsw\t%0",
8992 "ficomp%z2\t%y2\n\tfnstsw\t%0",
8993 NULL,
8994 NULL,
8995
8996 "fcomi\t{%y1, %0|%0, %y1}",
8997 "fcomip\t{%y1, %0|%0, %y1}",
8998 "fucomi\t{%y1, %0|%0, %y1}",
8999 "fucomip\t{%y1, %0|%0, %y1}",
9000
9001 NULL,
9002 NULL,
9003 NULL,
9004 NULL
9005 };
9006
9007 int mask;
9008 const char *ret;
9009
9010 mask = eflags_p << 3;
9011 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
9012 mask |= unordered_p << 1;
9013 mask |= stack_top_dies;
9014
9015 gcc_assert (mask < 16);
9016 ret = alt[mask];
9017 gcc_assert (ret);
9018
9019 return ret;
9020 }
9021 }
9022
9023 void
9024 ix86_output_addr_vec_elt (FILE *file, int value)
9025 {
9026 const char *directive = ASM_LONG;
9027
9028 #ifdef ASM_QUAD
9029 if (TARGET_64BIT)
9030 directive = ASM_QUAD;
9031 #else
9032 gcc_assert (!TARGET_64BIT);
9033 #endif
9034
9035 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
9036 }
9037
9038 void
9039 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
9040 {
9041 if (TARGET_64BIT)
9042 fprintf (file, "%s%s%d-%s%d\n",
9043 ASM_LONG, LPREFIX, value, LPREFIX, rel);
9044 else if (HAVE_AS_GOTOFF_IN_DATA)
9045 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
9046 #if TARGET_MACHO
9047 else if (TARGET_MACHO)
9048 {
9049 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
9050 machopic_output_function_base_name (file);
9051 fprintf(file, "\n");
9052 }
9053 #endif
9054 else
9055 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
9056 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
9057 }
9058 \f
9059 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
9060 for the target. */
9061
9062 void
9063 ix86_expand_clear (rtx dest)
9064 {
9065 rtx tmp;
9066
9067 /* We play register width games, which are only valid after reload. */
9068 gcc_assert (reload_completed);
9069
9070 /* Avoid HImode and its attendant prefix byte. */
9071 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
9072 dest = gen_rtx_REG (SImode, REGNO (dest));
9073
9074 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
9075
9076 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
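/* Unlike mov $0, xor sets the flags, so the xor form of the pattern
   must also clobber the flags register.  */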
9077 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
9078 {
9079 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
9080 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
9081 }
9082
9083 emit_insn (tmp);
9084 }
9085
9086 /* X is an unchanging MEM. If it is a constant pool reference, return
9087 the constant pool rtx, else NULL. */
9088
9089 rtx
9090 maybe_get_pool_constant (rtx x)
9091 {
9092 x = ix86_delegitimize_address (XEXP (x, 0));
9093
9094 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
9095 return get_pool_constant (x);
9096
9097 return NULL_RTX;
9098 }
9099
9100 void
9101 ix86_expand_move (enum machine_mode mode, rtx operands[])
9102 {
9103 int strict = (reload_in_progress || reload_completed);
9104 rtx op0, op1;
9105 enum tls_model model;
9106
9107 op0 = operands[0];
9108 op1 = operands[1];
9109
9110 if (GET_CODE (op1) == SYMBOL_REF)
9111 {
9112 model = SYMBOL_REF_TLS_MODEL (op1);
9113 if (model)
9114 {
9115 op1 = legitimize_tls_address (op1, model, true);
9116 op1 = force_operand (op1, op0);
9117 if (op1 == op0)
9118 return;
9119 }
9120 }
9121 else if (GET_CODE (op1) == CONST
9122 && GET_CODE (XEXP (op1, 0)) == PLUS
9123 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
9124 {
9125 model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
9126 if (model)
9127 {
9128 rtx addend = XEXP (XEXP (op1, 0), 1);
9129 op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
9130 op1 = force_operand (op1, NULL);
9131 op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
9132 op0, 1, OPTAB_DIRECT);
9133 if (op1 == op0)
9134 return;
9135 }
9136 }
9137
9138 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
9139 {
9140 if (TARGET_MACHO && !TARGET_64BIT)
9141 {
9142 #if TARGET_MACHO
9143 if (MACHOPIC_PURE)
9144 {
9145 rtx temp = ((reload_in_progress
9146 || ((op0 && GET_CODE (op0) == REG)
9147 && mode == Pmode))
9148 ? op0 : gen_reg_rtx (Pmode));
9149 op1 = machopic_indirect_data_reference (op1, temp);
9150 op1 = machopic_legitimize_pic_address (op1, mode,
9151 temp == op1 ? 0 : temp);
9152 }
9153 else if (MACHOPIC_INDIRECT)
9154 op1 = machopic_indirect_data_reference (op1, 0);
9155 if (op0 == op1)
9156 return;
9157 #endif
9158 }
9159 else
9160 {
9161 if (GET_CODE (op0) == MEM)
9162 op1 = force_reg (Pmode, op1);
9163 else
9164 op1 = legitimize_address (op1, op1, Pmode);
9165 }
9166 }
9167 else
9168 {
9169 if (GET_CODE (op0) == MEM
9170 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
9171 || !push_operand (op0, mode))
9172 && GET_CODE (op1) == MEM)
9173 op1 = force_reg (mode, op1);
9174
9175 if (push_operand (op0, mode)
9176 && ! general_no_elim_operand (op1, mode))
9177 op1 = copy_to_mode_reg (mode, op1);
9178
9179 /* Force large constants in 64-bit compilation into a register
9180 to get them CSEed. */
9181 if (TARGET_64BIT && mode == DImode
9182 && immediate_operand (op1, mode)
9183 && !x86_64_zext_immediate_operand (op1, VOIDmode)
9184 && !register_operand (op0, mode)
9185 && optimize && !reload_completed && !reload_in_progress)
9186 op1 = copy_to_mode_reg (mode, op1);
9187
9188 if (FLOAT_MODE_P (mode))
9189 {
9190 /* If we are loading a floating point constant to a register,
9191 force the value to memory now, since we'll get better code
9192 out of the back end. */
9193
9194 if (strict)
9195 ;
9196 else if (GET_CODE (op1) == CONST_DOUBLE)
9197 {
9198 op1 = validize_mem (force_const_mem (mode, op1));
9199 if (!register_operand (op0, mode))
9200 {
9201 rtx temp = gen_reg_rtx (mode);
9202 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
9203 emit_move_insn (op0, temp);
9204 return;
9205 }
9206 }
9207 }
9208 }
9209
9210 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
9211 }
9212
9213 void
9214 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
9215 {
9216 rtx op0 = operands[0], op1 = operands[1];
9217
9218 /* Force constants other than zero into memory. We do not know how
9219 the instructions used to build constants modify the upper 64 bits
9220 of the register; once we have that information we may be able
9221 to handle some of them more efficiently. */
9222 if ((reload_in_progress | reload_completed) == 0
9223 && register_operand (op0, mode)
9224 && CONSTANT_P (op1)
9225 && standard_sse_constant_p (op1) <= 0)
9226 op1 = validize_mem (force_const_mem (mode, op1));
9227
9228 /* Make operand1 a register if it isn't already. */
9229 if (!no_new_pseudos
9230 && !register_operand (op0, mode)
9231 && !register_operand (op1, mode))
9232 {
9233 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
9234 return;
9235 }
9236
9237 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
9238 }
9239
9240 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
9241 straight to ix86_expand_vector_move. */
9242
9243 void
9244 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
9245 {
9246 rtx op0, op1, m;
9247
9248 op0 = operands[0];
9249 op1 = operands[1];
9250
9251 if (MEM_P (op1))
9252 {
9253 /* If we're optimizing for size, movups is the smallest. */
9254 if (optimize_size)
9255 {
9256 op0 = gen_lowpart (V4SFmode, op0);
9257 op1 = gen_lowpart (V4SFmode, op1);
9258 emit_insn (gen_sse_movups (op0, op1));
9259 return;
9260 }
9261
9262 /* ??? If we have typed data, then it would appear that using
9263 movdqu is the only way to get unaligned data loaded with
9264 integer type. */
9265 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
9266 {
9267 op0 = gen_lowpart (V16QImode, op0);
9268 op1 = gen_lowpart (V16QImode, op1);
9269 emit_insn (gen_sse2_movdqu (op0, op1));
9270 return;
9271 }
9272
9273 if (TARGET_SSE2 && mode == V2DFmode)
9274 {
9275 rtx zero;
9276
9277 /* When SSE registers are split into halves, we can avoid
9278 writing to the top half twice. */
9279 if (TARGET_SSE_SPLIT_REGS)
9280 {
9281 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9282 zero = op0;
9283 }
9284 else
9285 {
9286 /* ??? Not sure about the best option for the Intel chips.
9287 The following would seem to satisfy; the register is
9288 entirely cleared, breaking the dependency chain. We
9289 then store to the upper half, with a dependency depth
9290 of one. A rumor has it that Intel recommends two movsd
9291 followed by an unpacklpd, but this is unconfirmed. And
9292 given that the dependency depth of the unpacklpd would
9293 still be one, I'm not sure why this would be better. */
9294 zero = CONST0_RTX (V2DFmode);
9295 }
9296
9297 m = adjust_address (op1, DFmode, 0);
9298 emit_insn (gen_sse2_loadlpd (op0, zero, m));
9299 m = adjust_address (op1, DFmode, 8);
9300 emit_insn (gen_sse2_loadhpd (op0, op0, m));
9301 }
9302 else
9303 {
9304 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
9305 emit_move_insn (op0, CONST0_RTX (mode));
9306 else
9307 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9308
9309 if (mode != V4SFmode)
9310 op0 = gen_lowpart (V4SFmode, op0);
9311 m = adjust_address (op1, V2SFmode, 0);
9312 emit_insn (gen_sse_loadlps (op0, op0, m));
9313 m = adjust_address (op1, V2SFmode, 8);
9314 emit_insn (gen_sse_loadhps (op0, op0, m));
9315 }
9316 }
9317 else if (MEM_P (op0))
9318 {
9319 /* If we're optimizing for size, movups is the smallest. */
9320 if (optimize_size)
9321 {
9322 op0 = gen_lowpart (V4SFmode, op0);
9323 op1 = gen_lowpart (V4SFmode, op1);
9324 emit_insn (gen_sse_movups (op0, op1));
9325 return;
9326 }
9327
9328 /* ??? Similar to above, only less clear because of quote
9329 typeless stores unquote. */
9330 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
9331 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
9332 {
9333 op0 = gen_lowpart (V16QImode, op0);
9334 op1 = gen_lowpart (V16QImode, op1);
9335 emit_insn (gen_sse2_movdqu (op0, op1));
9336 return;
9337 }
9338
9339 if (TARGET_SSE2 && mode == V2DFmode)
9340 {
9341 m = adjust_address (op0, DFmode, 0);
9342 emit_insn (gen_sse2_storelpd (m, op1));
9343 m = adjust_address (op0, DFmode, 8);
9344 emit_insn (gen_sse2_storehpd (m, op1));
9345 }
9346 else
9347 {
9348 if (mode != V4SFmode)
9349 op1 = gen_lowpart (V4SFmode, op1);
9350 m = adjust_address (op0, V2SFmode, 0);
9351 emit_insn (gen_sse_storelps (m, op1));
9352 m = adjust_address (op0, V2SFmode, 8);
9353 emit_insn (gen_sse_storehps (m, op1));
9354 }
9355 }
9356 else
9357 gcc_unreachable ();
9358 }
9359
9360 /* Expand a push in MODE. This is some mode for which we do not support
9361 proper push instructions, at least from the registers that we expect
9362 the value to live in. */
9363
9364 void
9365 ix86_expand_push (enum machine_mode mode, rtx x)
9366 {
9367 rtx tmp;
9368
9369 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
9370 GEN_INT (-GET_MODE_SIZE (mode)),
9371 stack_pointer_rtx, 1, OPTAB_DIRECT);
9372 if (tmp != stack_pointer_rtx)
9373 emit_move_insn (stack_pointer_rtx, tmp);
9374
9375 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
9376 emit_move_insn (tmp, x);
9377 }
9378
9379 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
9380 destination to use for the operation. If different from the true
9381 destination in operands[0], a copy operation will be required. */
9382
9383 rtx
9384 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
9385 rtx operands[])
9386 {
9387 int matching_memory;
9388 rtx src1, src2, dst;
9389
9390 dst = operands[0];
9391 src1 = operands[1];
9392 src2 = operands[2];
9393
9394 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
9395 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9396 && (rtx_equal_p (dst, src2)
9397 || immediate_operand (src1, mode)))
9398 {
9399 rtx temp = src1;
9400 src1 = src2;
9401 src2 = temp;
9402 }
9403
9404 /* If the destination is memory, and we do not have matching source
9405 operands, do things in registers. */
9406 matching_memory = 0;
9407 if (GET_CODE (dst) == MEM)
9408 {
9409 if (rtx_equal_p (dst, src1))
9410 matching_memory = 1;
9411 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9412 && rtx_equal_p (dst, src2))
9413 matching_memory = 2;
9414 else
9415 dst = gen_reg_rtx (mode);
9416 }
9417
9418 /* Both source operands cannot be in memory. */
9419 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
9420 {
9421 if (matching_memory != 2)
9422 src2 = force_reg (mode, src2);
9423 else
9424 src1 = force_reg (mode, src1);
9425 }
9426
9427 /* If the operation is not commutative, source 1 cannot be a constant
9428 or non-matching memory. */
9429 if ((CONSTANT_P (src1)
9430 || (!matching_memory && GET_CODE (src1) == MEM))
9431 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
9432 src1 = force_reg (mode, src1);
9433
9434 src1 = operands[1] = src1;
9435 src2 = operands[2] = src2;
9436 return dst;
9437 }
9438
9439 /* Similarly, but assume that the destination has already been
9440 set up properly. */
9441
9442 void
9443 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
9444 enum machine_mode mode, rtx operands[])
9445 {
9446 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
9447 gcc_assert (dst == operands[0]);
9448 }
9449
9450 /* Attempt to expand a binary operator. Make the expansion closer to the
9451 actual machine than just general_operand, which would allow 3 separate
9452 memory references (one output, two input) in a single insn. */
9453
9454 void
9455 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
9456 rtx operands[])
9457 {
9458 rtx src1, src2, dst, op, clob;
9459
9460 dst = ix86_fixup_binary_operands (code, mode, operands);
9461 src1 = operands[1];
9462 src2 = operands[2];
9463
9464 /* Emit the instruction. */
9465
9466 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
9467 if (reload_in_progress)
9468 {
9469 /* Reload doesn't know about the flags register, and doesn't know that
9470 it doesn't want to clobber it. We can only do this with PLUS. */
9471 gcc_assert (code == PLUS);
9472 emit_insn (op);
9473 }
9474 else
9475 {
9476 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9477 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
9478 }
9479
9480 /* Fix up the destination if needed. */
9481 if (dst != operands[0])
9482 emit_move_insn (operands[0], dst);
9483 }
9484
9485 /* Return TRUE or FALSE depending on whether the binary operator meets the
9486 appropriate constraints. */
9487
9488 int
9489 ix86_binary_operator_ok (enum rtx_code code,
9490 enum machine_mode mode ATTRIBUTE_UNUSED,
9491 rtx operands[3])
9492 {
9493 /* Both source operands cannot be in memory. */
9494 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
9495 return 0;
9496 /* If the operation is not commutative, source 1 cannot be a constant. */
9497 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
9498 return 0;
9499 /* If the destination is memory, we must have a matching source operand. */
9500 if (GET_CODE (operands[0]) == MEM
9501 && ! (rtx_equal_p (operands[0], operands[1])
9502 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9503 && rtx_equal_p (operands[0], operands[2]))))
9504 return 0;
9505 /* If the operation is not commutative and source 1 is memory, we must
9506 have a matching destination. */
9507 if (GET_CODE (operands[1]) == MEM
9508 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
9509 && ! rtx_equal_p (operands[0], operands[1]))
9510 return 0;
9511 return 1;
9512 }
9513
9514 /* Attempt to expand a unary operator. Make the expansion closer to the
9515 actual machine than just general_operand, which would allow 2 separate
9516 memory references (one output, one input) in a single insn. */
9517
9518 void
9519 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
9520 rtx operands[])
9521 {
9522 int matching_memory;
9523 rtx src, dst, op, clob;
9524
9525 dst = operands[0];
9526 src = operands[1];
9527
9528 /* If the destination is memory, and we do not have matching source
9529 operands, do things in registers. */
9530 matching_memory = 0;
9531 if (MEM_P (dst))
9532 {
9533 if (rtx_equal_p (dst, src))
9534 matching_memory = 1;
9535 else
9536 dst = gen_reg_rtx (mode);
9537 }
9538
9539 /* When source operand is memory, destination must match. */
9540 if (MEM_P (src) && !matching_memory)
9541 src = force_reg (mode, src);
9542
9543 /* Emit the instruction. */
9544
9545 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
9546 if (reload_in_progress || code == NOT)
9547 {
9548 /* Reload doesn't know about the flags register, and doesn't know that
9549 it doesn't want to clobber it. */
9550 gcc_assert (code == NOT);
9551 emit_insn (op);
9552 }
9553 else
9554 {
9555 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9556 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
9557 }
9558
9559 /* Fix up the destination if needed. */
9560 if (dst != operands[0])
9561 emit_move_insn (operands[0], dst);
9562 }
9563
9564 /* Return TRUE or FALSE depending on whether the unary operator meets the
9565 appropriate constraints. */
9566
9567 int
9568 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
9569 enum machine_mode mode ATTRIBUTE_UNUSED,
9570 rtx operands[2] ATTRIBUTE_UNUSED)
9571 {
9572 /* If one of operands is memory, source and destination must match. */
9573 if ((GET_CODE (operands[0]) == MEM
9574 || GET_CODE (operands[1]) == MEM)
9575 && ! rtx_equal_p (operands[0], operands[1]))
9576 return FALSE;
9577 return TRUE;
9578 }
9579
9580 /* A subroutine of ix86_expand_fp_absneg_operator and copysign expanders.
9581 Create a mask for the sign bit in MODE for an SSE register. If VECT is
9582 true, then replicate the mask for all elements of the vector register.
9583 If INVERT is true, then create a mask excluding the sign bit. */
9584
9585 rtx
9586 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
9587 {
9588 enum machine_mode vec_mode;
9589 HOST_WIDE_INT hi, lo;
9590 int shift = 63;
9591 rtvec v;
9592 rtx mask;
9593
9594 /* Find the sign bit, sign extended to 2*HWI. */
9595 if (mode == SFmode)
9596 lo = 0x80000000, hi = lo < 0;
9597 else if (HOST_BITS_PER_WIDE_INT >= 64)
9598 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
9599 else
9600 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
9601
9602 if (invert)
9603 lo = ~lo, hi = ~hi;
9604
9605 /* Force this value into the low part of a fp vector constant. */
9606 mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
9607 mask = gen_lowpart (mode, mask);
9608
9609 if (mode == SFmode)
9610 {
9611 if (vect)
9612 v = gen_rtvec (4, mask, mask, mask, mask);
9613 else
9614 v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
9615 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
9616 vec_mode = V4SFmode;
9617 }
9618 else
9619 {
9620 if (vect)
9621 v = gen_rtvec (2, mask, mask);
9622 else
9623 v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
9624 vec_mode = V2DFmode;
9625 }
9626
9627 return force_reg (vec_mode, gen_rtx_CONST_VECTOR (vec_mode, v));
9628 }
9629
9630 /* Generate code for floating point ABS or NEG. */
9631
9632 void
9633 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
9634 rtx operands[])
9635 {
9636 rtx mask, set, use, clob, dst, src;
9637 bool matching_memory;
9638 bool use_sse = false;
9639 bool vector_mode = VECTOR_MODE_P (mode);
9640 enum machine_mode elt_mode = mode;
9641
9642 if (vector_mode)
9643 {
9644 elt_mode = GET_MODE_INNER (mode);
9645 use_sse = true;
9646 }
9647 else if (TARGET_SSE_MATH)
9648 use_sse = SSE_FLOAT_MODE_P (mode);
9649
9650 /* NEG and ABS performed with SSE use bitwise mask operations.
9651 Create the appropriate mask now. */
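/* With SSE, NEG is implemented as an XOR with a mask that has only the
   sign bit set, and ABS as an AND with the complemented mask, which
   clears the sign bit.  */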
9652 if (use_sse)
9653 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
9654 else
9655 mask = NULL_RTX;
9656
9657 dst = operands[0];
9658 src = operands[1];
9659
9660 /* If the destination is memory, and we don't have matching source
9661 operands or we're using the x87, do things in registers. */
9662 matching_memory = false;
9663 if (MEM_P (dst))
9664 {
9665 if (use_sse && rtx_equal_p (dst, src))
9666 matching_memory = true;
9667 else
9668 dst = gen_reg_rtx (mode);
9669 }
9670 if (MEM_P (src) && !matching_memory)
9671 src = force_reg (mode, src);
9672
9673 if (vector_mode)
9674 {
9675 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
9676 set = gen_rtx_SET (VOIDmode, dst, set);
9677 emit_insn (set);
9678 }
9679 else
9680 {
9681 set = gen_rtx_fmt_e (code, mode, src);
9682 set = gen_rtx_SET (VOIDmode, dst, set);
9683 if (mask)
9684 {
9685 use = gen_rtx_USE (VOIDmode, mask);
9686 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9687 emit_insn (gen_rtx_PARALLEL (VOIDmode,
9688 gen_rtvec (3, set, use, clob)));
9689 }
9690 else
9691 emit_insn (set);
9692 }
9693
9694 if (dst != operands[0])
9695 emit_move_insn (operands[0], dst);
9696 }
9697
9698 /* Expand a copysign operation. Special case operand 0 being a constant. */
9699
9700 void
9701 ix86_expand_copysign (rtx operands[])
9702 {
9703 enum machine_mode mode, vmode;
9704 rtx dest, op0, op1, mask, nmask;
9705
9706 dest = operands[0];
9707 op0 = operands[1];
9708 op1 = operands[2];
9709
9710 mode = GET_MODE (dest);
9711 vmode = mode == SFmode ? V4SFmode : V2DFmode;
9712
9713 if (GET_CODE (op0) == CONST_DOUBLE)
9714 {
9715 rtvec v;
9716
9717 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
9718 op0 = simplify_unary_operation (ABS, mode, op0, mode);
9719
9720 if (op0 == CONST0_RTX (mode))
9721 op0 = CONST0_RTX (vmode);
9722 else
9723 {
9724 if (mode == SFmode)
9725 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
9726 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
9727 else
9728 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
9729 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
9730 }
9731
9732 mask = ix86_build_signbit_mask (mode, 0, 0);
9733
9734 if (mode == SFmode)
9735 emit_insn (gen_copysignsf3_const (dest, op0, op1, mask));
9736 else
9737 emit_insn (gen_copysigndf3_const (dest, op0, op1, mask));
9738 }
9739 else
9740 {
9741 nmask = ix86_build_signbit_mask (mode, 0, 1);
9742 mask = ix86_build_signbit_mask (mode, 0, 0);
9743
9744 if (mode == SFmode)
9745 emit_insn (gen_copysignsf3_var (dest, NULL, op0, op1, nmask, mask));
9746 else
9747 emit_insn (gen_copysigndf3_var (dest, NULL, op0, op1, nmask, mask));
9748 }
9749 }
9750
9751 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
9752 be a constant, and so has already been expanded into a vector constant. */
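/* Assuming the insn pattern ties the sign operand OP1 to DEST, the split
   computes (OP1 & sign-bit mask) | OP0, where OP0 already has its sign
   bit cleared, i.e. |OP0| with the sign of OP1.  */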
9753
9754 void
9755 ix86_split_copysign_const (rtx operands[])
9756 {
9757 enum machine_mode mode, vmode;
9758 rtx dest, op0, op1, mask, x;
9759
9760 dest = operands[0];
9761 op0 = operands[1];
9762 op1 = operands[2];
9763 mask = operands[3];
9764
9765 mode = GET_MODE (dest);
9766 vmode = GET_MODE (mask);
9767
9768 dest = simplify_gen_subreg (vmode, dest, mode, 0);
9769 x = gen_rtx_AND (vmode, dest, mask);
9770 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9771
9772 if (op0 != CONST0_RTX (vmode))
9773 {
9774 x = gen_rtx_IOR (vmode, dest, op0);
9775 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9776 }
9777 }
9778
9779 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
9780 so we have to do two masks. */
9781
9782 void
9783 ix86_split_copysign_var (rtx operands[])
9784 {
9785 enum machine_mode mode, vmode;
9786 rtx dest, scratch, op0, op1, mask, nmask, x;
9787
9788 dest = operands[0];
9789 scratch = operands[1];
9790 op0 = operands[2];
9791 op1 = operands[3];
9792 nmask = operands[4];
9793 mask = operands[5];
9794
9795 mode = GET_MODE (dest);
9796 vmode = GET_MODE (mask);
9797
9798 if (rtx_equal_p (op0, op1))
9799 {
9800 /* Shouldn't happen often (it's useless, obviously), but when it does
9801 we'd generate incorrect code if we continue below. */
9802 emit_move_insn (dest, op0);
9803 return;
9804 }
9805
9806 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
9807 {
9808 gcc_assert (REGNO (op1) == REGNO (scratch));
9809
9810 x = gen_rtx_AND (vmode, scratch, mask);
9811 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9812
9813 dest = mask;
9814 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9815 x = gen_rtx_NOT (vmode, dest);
9816 x = gen_rtx_AND (vmode, x, op0);
9817 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9818 }
9819 else
9820 {
9821 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
9822 {
9823 x = gen_rtx_AND (vmode, scratch, mask);
9824 }
9825 else /* alternative 2,4 */
9826 {
9827 gcc_assert (REGNO (mask) == REGNO (scratch));
9828 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
9829 x = gen_rtx_AND (vmode, scratch, op1);
9830 }
9831 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9832
9833 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
9834 {
9835 dest = simplify_gen_subreg (vmode, op0, mode, 0);
9836 x = gen_rtx_AND (vmode, dest, nmask);
9837 }
9838 else /* alternative 3,4 */
9839 {
9840 gcc_assert (REGNO (nmask) == REGNO (dest));
9841 dest = nmask;
9842 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9843 x = gen_rtx_AND (vmode, dest, op0);
9844 }
9845 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9846 }
9847
9848 x = gen_rtx_IOR (vmode, dest, scratch);
9849 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9850 }
9851
9852 /* Return TRUE or FALSE depending on whether the first SET in INSN
9853 has source and destination with matching CC modes, and that the
9854 CC mode is at least as constrained as REQ_MODE. */
9855
9856 int
9857 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
9858 {
9859 rtx set;
9860 enum machine_mode set_mode;
9861
9862 set = PATTERN (insn);
9863 if (GET_CODE (set) == PARALLEL)
9864 set = XVECEXP (set, 0, 0);
9865 gcc_assert (GET_CODE (set) == SET);
9866 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
9867
9868 set_mode = GET_MODE (SET_DEST (set));
9869 switch (set_mode)
9870 {
9871 case CCNOmode:
9872 if (req_mode != CCNOmode
9873 && (req_mode != CCmode
9874 || XEXP (SET_SRC (set), 1) != const0_rtx))
9875 return 0;
9876 break;
9877 case CCmode:
9878 if (req_mode == CCGCmode)
9879 return 0;
9880 /* FALLTHRU */
9881 case CCGCmode:
9882 if (req_mode == CCGOCmode || req_mode == CCNOmode)
9883 return 0;
9884 /* FALLTHRU */
9885 case CCGOCmode:
9886 if (req_mode == CCZmode)
9887 return 0;
9888 /* FALLTHRU */
9889 case CCZmode:
9890 break;
9891
9892 default:
9893 gcc_unreachable ();
9894 }
9895
9896 return (GET_MODE (SET_SRC (set)) == set_mode);
9897 }
9898
9899 /* Generate insn patterns to do an integer compare of OPERANDS. */
9900
9901 static rtx
9902 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
9903 {
9904 enum machine_mode cmpmode;
9905 rtx tmp, flags;
9906
9907 cmpmode = SELECT_CC_MODE (code, op0, op1);
9908 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
9909
9910 /* This is very simple, but making the interface the same as in the
9911 FP case makes the rest of the code easier. */
9912 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
9913 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
9914
9915 /* Return the test that should be put into the flags user, i.e.
9916 the bcc, scc, or cmov instruction. */
9917 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
9918 }
9919
9920 /* Figure out whether to use ordered or unordered fp comparisons.
9921 Return the appropriate mode to use. */
9922
9923 enum machine_mode
9924 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
9925 {
9926 /* ??? In order to make all comparisons reversible, we do all comparisons
9927 non-trapping when compiling for IEEE. Once gcc is able to distinguish
9928 all forms of trapping and nontrapping comparisons, we can make inequality
9929 comparisons trapping again, since it results in better code when using
9930 FCOM based compares. */
9931 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
9932 }
9933
9934 enum machine_mode
9935 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
9936 {
9937 if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
9938 return ix86_fp_compare_mode (code);
9939 switch (code)
9940 {
9941 /* Only zero flag is needed. */
9942 case EQ: /* ZF=0 */
9943 case NE: /* ZF!=0 */
9944 return CCZmode;
9945 /* Codes needing carry flag. */
9946 case GEU: /* CF=0 */
9947 case GTU: /* CF=0 & ZF=0 */
9948 case LTU: /* CF=1 */
9949 case LEU: /* CF=1 | ZF=1 */
9950 return CCmode;
9951 /* Codes possibly doable only with sign flag when
9952 comparing against zero. */
9953 case GE: /* SF=OF or SF=0 */
9954 case LT: /* SF<>OF or SF=1 */
9955 if (op1 == const0_rtx)
9956 return CCGOCmode;
9957 else
9958 /* For other cases Carry flag is not required. */
9959 return CCGCmode;
9960 /* Codes doable only with the sign flag when comparing
9961 against zero, but we lack a jump instruction for them,
9962 so we need to use relational tests against the overflow
9963 flag, which thus needs to be zero. */
9964 case GT: /* ZF=0 & SF=OF */
9965 case LE: /* ZF=1 | SF<>OF */
9966 if (op1 == const0_rtx)
9967 return CCNOmode;
9968 else
9969 return CCGCmode;
9970 /* The strcmp pattern does (use flags), and combine may ask us for a
9971 proper mode. */
9972 case USE:
9973 return CCmode;
9974 default:
9975 gcc_unreachable ();
9976 }
9977 }
9978
9979 /* Return the fixed registers used for condition codes. */
9980
9981 static bool
9982 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9983 {
9984 *p1 = FLAGS_REG;
9985 *p2 = FPSR_REG;
9986 return true;
9987 }
9988
9989 /* If two condition code modes are compatible, return a condition code
9990 mode which is compatible with both. Otherwise, return
9991 VOIDmode. */
9992
9993 static enum machine_mode
9994 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
9995 {
9996 if (m1 == m2)
9997 return m1;
9998
9999 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
10000 return VOIDmode;
10001
10002 if ((m1 == CCGCmode && m2 == CCGOCmode)
10003 || (m1 == CCGOCmode && m2 == CCGCmode))
10004 return CCGCmode;
10005
10006 switch (m1)
10007 {
10008 default:
10009 gcc_unreachable ();
10010
10011 case CCmode:
10012 case CCGCmode:
10013 case CCGOCmode:
10014 case CCNOmode:
10015 case CCZmode:
10016 switch (m2)
10017 {
10018 default:
10019 return VOIDmode;
10020
10021 case CCmode:
10022 case CCGCmode:
10023 case CCGOCmode:
10024 case CCNOmode:
10025 case CCZmode:
10026 return CCmode;
10027 }
10028
10029 case CCFPmode:
10030 case CCFPUmode:
10031 /* These are only compatible with themselves, which we already
10032 checked above. */
10033 return VOIDmode;
10034 }
10035 }
10036
10037 /* Return true if we should use an FCOMI instruction for this fp comparison. */
10038
10039 int
10040 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
10041 {
10042 enum rtx_code swapped_code = swap_condition (code);
10043 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
10044 || (ix86_fp_comparison_cost (swapped_code)
10045 == ix86_fp_comparison_fcomi_cost (swapped_code)));
10046 }
10047
10048 /* Swap, force into registers, or otherwise massage the two operands
10049 to a fp comparison. The operands are updated in place; the new
10050 comparison code is returned. */
10051
10052 static enum rtx_code
10053 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
10054 {
10055 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
10056 rtx op0 = *pop0, op1 = *pop1;
10057 enum machine_mode op_mode = GET_MODE (op0);
10058 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
10059
10060 /* All of the unordered compare instructions only work on registers.
10061 The same is true of the fcomi compare instructions. The XFmode
10062 compare instructions require registers except when comparing
10063 against zero or when converting operand 1 from fixed point to
10064 floating point. */
10065
10066 if (!is_sse
10067 && (fpcmp_mode == CCFPUmode
10068 || (op_mode == XFmode
10069 && ! (standard_80387_constant_p (op0) == 1
10070 || standard_80387_constant_p (op1) == 1)
10071 && GET_CODE (op1) != FLOAT)
10072 || ix86_use_fcomi_compare (code)))
10073 {
10074 op0 = force_reg (op_mode, op0);
10075 op1 = force_reg (op_mode, op1);
10076 }
10077 else
10078 {
10079 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
10080 things around if they appear profitable, otherwise force op0
10081 into a register. */
10082
10083 if (standard_80387_constant_p (op0) == 0
10084 || (GET_CODE (op0) == MEM
10085 && ! (standard_80387_constant_p (op1) == 0
10086 || GET_CODE (op1) == MEM)))
10087 {
10088 rtx tmp;
10089 tmp = op0, op0 = op1, op1 = tmp;
10090 code = swap_condition (code);
10091 }
10092
10093 if (GET_CODE (op0) != REG)
10094 op0 = force_reg (op_mode, op0);
10095
10096 if (CONSTANT_P (op1))
10097 {
10098 int tmp = standard_80387_constant_p (op1);
10099 if (tmp == 0)
10100 op1 = validize_mem (force_const_mem (op_mode, op1));
10101 else if (tmp == 1)
10102 {
10103 if (TARGET_CMOVE)
10104 op1 = force_reg (op_mode, op1);
10105 }
10106 else
10107 op1 = force_reg (op_mode, op1);
10108 }
10109 }
10110
10111 /* Try to rearrange the comparison to make it cheaper. */
10112 if (ix86_fp_comparison_cost (code)
10113 > ix86_fp_comparison_cost (swap_condition (code))
10114 && (GET_CODE (op1) == REG || !no_new_pseudos))
10115 {
10116 rtx tmp;
10117 tmp = op0, op0 = op1, op1 = tmp;
10118 code = swap_condition (code);
10119 if (GET_CODE (op0) != REG)
10120 op0 = force_reg (op_mode, op0);
10121 }
10122
10123 *pop0 = op0;
10124 *pop1 = op1;
10125 return code;
10126 }
10127
10128 /* Convert a comparison code we use to represent an FP comparison into the
10129 integer code that will result in a proper branch. Return UNKNOWN if no
10130 such code is available. */
10131
10132 enum rtx_code
10133 ix86_fp_compare_code_to_integer (enum rtx_code code)
10134 {
10135 switch (code)
10136 {
10137 case GT:
10138 return GTU;
10139 case GE:
10140 return GEU;
10141 case ORDERED:
10142 case UNORDERED:
10143 return code;
10144 break;
10145 case UNEQ:
10146 return EQ;
10147 break;
10148 case UNLT:
10149 return LTU;
10150 break;
10151 case UNLE:
10152 return LEU;
10153 break;
10154 case LTGT:
10155 return NE;
10156 break;
10157 default:
10158 return UNKNOWN;
10159 }
10160 }
10161
10162 /* Split comparison code CODE into comparisons we can do using branch
10163 instructions. BYPASS_CODE is the comparison code for the branch that
10164 will branch around FIRST_CODE and SECOND_CODE. If one of the branches
10165 is not required, its code is set to UNKNOWN.
10166 We never require more than two branches. */
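/* For example, with IEEE math an EQ test becomes an UNORDERED branch
   (the bypass) around a single UNEQ branch, while NE needs an LTGT
   branch plus a second UNORDERED branch to the same target.  */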
10167
10168 void
10169 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
10170 enum rtx_code *first_code,
10171 enum rtx_code *second_code)
10172 {
10173 *first_code = code;
10174 *bypass_code = UNKNOWN;
10175 *second_code = UNKNOWN;
10176
10177 /* The fcomi comparison sets flags as follows:
10178
10179    cmp   ZF  PF  CF
10180    >     0   0   0
10181    <     0   0   1
10182    =     1   0   0
10183    un    1   1   1  */
10184
10185 switch (code)
10186 {
10187 case GT: /* GTU - CF=0 & ZF=0 */
10188 case GE: /* GEU - CF=0 */
10189 case ORDERED: /* PF=0 */
10190 case UNORDERED: /* PF=1 */
10191 case UNEQ: /* EQ - ZF=1 */
10192 case UNLT: /* LTU - CF=1 */
10193 case UNLE: /* LEU - CF=1 | ZF=1 */
10194 case LTGT: /* EQ - ZF=0 */
10195 break;
10196 case LT: /* LTU - CF=1 - fails on unordered */
10197 *first_code = UNLT;
10198 *bypass_code = UNORDERED;
10199 break;
10200 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
10201 *first_code = UNLE;
10202 *bypass_code = UNORDERED;
10203 break;
10204 case EQ: /* EQ - ZF=1 - fails on unordered */
10205 *first_code = UNEQ;
10206 *bypass_code = UNORDERED;
10207 break;
10208 case NE: /* NE - ZF=0 - fails on unordered */
10209 *first_code = LTGT;
10210 *second_code = UNORDERED;
10211 break;
10212 case UNGE: /* GEU - CF=0 - fails on unordered */
10213 *first_code = GE;
10214 *second_code = UNORDERED;
10215 break;
10216 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
10217 *first_code = GT;
10218 *second_code = UNORDERED;
10219 break;
10220 default:
10221 gcc_unreachable ();
10222 }
10223 if (!TARGET_IEEE_FP)
10224 {
10225 *second_code = UNKNOWN;
10226 *bypass_code = UNKNOWN;
10227 }
10228 }
10229
10230 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
10231 All the following functions use the number of instructions as the cost metric.
10232 In the future this should be tweaked to compute bytes for optimize_size and
10233 take into account the performance of various instructions on various CPUs. */
10234 static int
10235 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
10236 {
10237 if (!TARGET_IEEE_FP)
10238 return 4;
10239 /* The cost of code output by ix86_expand_fp_compare. */
10240 switch (code)
10241 {
10242 case UNLE:
10243 case UNLT:
10244 case LTGT:
10245 case GT:
10246 case GE:
10247 case UNORDERED:
10248 case ORDERED:
10249 case UNEQ:
10250 return 4;
10251 break;
10252 case LT:
10253 case NE:
10254 case EQ:
10255 case UNGE:
10256 return 5;
10257 break;
10258 case LE:
10259 case UNGT:
10260 return 6;
10261 break;
10262 default:
10263 gcc_unreachable ();
10264 }
10265 }
10266
10267 /* Return cost of comparison done using fcomi operation.
10268 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10269 static int
10270 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
10271 {
10272 enum rtx_code bypass_code, first_code, second_code;
10273 /* Return arbitrarily high cost when instruction is not supported - this
10274 prevents gcc from using it. */
10275 if (!TARGET_CMOVE)
10276 return 1024;
10277 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10278 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
10279 }
10280
10281 /* Return cost of comparison done using sahf operation.
10282 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10283 static int
10284 ix86_fp_comparison_sahf_cost (enum rtx_code code)
10285 {
10286 enum rtx_code bypass_code, first_code, second_code;
10287 /* Return arbitrarily high cost when instruction is not preferred - this
10288 prevents gcc from using it. */
10289 if (!TARGET_USE_SAHF && !optimize_size)
10290 return 1024;
10291 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10292 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
10293 }
10294
10295 /* Compute cost of the comparison done using any method.
10296 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10297 static int
10298 ix86_fp_comparison_cost (enum rtx_code code)
10299 {
10300 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
10301 int min;
10302
10303 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
10304 sahf_cost = ix86_fp_comparison_sahf_cost (code);
10305
10306 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
10307 if (min > sahf_cost)
10308 min = sahf_cost;
10309 if (min > fcomi_cost)
10310 min = fcomi_cost;
10311 return min;
10312 }
10313
10314 /* Generate insn patterns to do a floating point compare of OPERANDS. */
10315
10316 static rtx
10317 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
10318 rtx *second_test, rtx *bypass_test)
10319 {
10320 enum machine_mode fpcmp_mode, intcmp_mode;
10321 rtx tmp, tmp2;
10322 int cost = ix86_fp_comparison_cost (code);
10323 enum rtx_code bypass_code, first_code, second_code;
10324
10325 fpcmp_mode = ix86_fp_compare_mode (code);
10326 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
10327
10328 if (second_test)
10329 *second_test = NULL_RTX;
10330 if (bypass_test)
10331 *bypass_test = NULL_RTX;
10332
10333 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10334
10335 /* Do fcomi/sahf based test when profitable. */
10336 if ((bypass_code == UNKNOWN || bypass_test)
10337 && (second_code == UNKNOWN || second_test)
10338 && ix86_fp_comparison_arithmetics_cost (code) > cost)
10339 {
10340 if (TARGET_CMOVE)
10341 {
10342 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10343 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
10344 tmp);
10345 emit_insn (tmp);
10346 }
10347 else
10348 {
10349 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10350 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
10351 if (!scratch)
10352 scratch = gen_reg_rtx (HImode);
10353 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
10354 emit_insn (gen_x86_sahf_1 (scratch));
10355 }
10356
10357 /* The FP codes work out to act like unsigned. */
10358 intcmp_mode = fpcmp_mode;
10359 code = first_code;
10360 if (bypass_code != UNKNOWN)
10361 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
10362 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10363 const0_rtx);
10364 if (second_code != UNKNOWN)
10365 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
10366 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10367 const0_rtx);
10368 }
10369 else
10370 {
10371 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
10372 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10373 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
10374 if (!scratch)
10375 scratch = gen_reg_rtx (HImode);
10376 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
10377
10378 /* In the unordered case, we have to check C2 for NaN's, which
10379 doesn't happen to work out to anything nice combination-wise.
10380 So do some bit twiddling on the value we've got in AH to come
10381 up with an appropriate set of condition codes. */
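/* After fnstsw, AH holds C0 in bit 0 (0x01), C2 in bit 2 (0x04) and C3
   in bit 6 (0x40), so e.g. 0x45 tests C3|C2|C0 and 0x40 tests C3
   alone.  */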
10382
10383 intcmp_mode = CCNOmode;
10384 switch (code)
10385 {
10386 case GT:
10387 case UNGT:
10388 if (code == GT || !TARGET_IEEE_FP)
10389 {
10390 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
10391 code = EQ;
10392 }
10393 else
10394 {
10395 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10396 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
10397 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
10398 intcmp_mode = CCmode;
10399 code = GEU;
10400 }
10401 break;
10402 case LT:
10403 case UNLT:
10404 if (code == LT && TARGET_IEEE_FP)
10405 {
10406 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10407 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
10408 intcmp_mode = CCmode;
10409 code = EQ;
10410 }
10411 else
10412 {
10413 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
10414 code = NE;
10415 }
10416 break;
10417 case GE:
10418 case UNGE:
10419 if (code == GE || !TARGET_IEEE_FP)
10420 {
10421 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
10422 code = EQ;
10423 }
10424 else
10425 {
10426 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10427 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
10428 GEN_INT (0x01)));
10429 code = NE;
10430 }
10431 break;
10432 case LE:
10433 case UNLE:
10434 if (code == LE && TARGET_IEEE_FP)
10435 {
10436 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10437 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
10438 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
10439 intcmp_mode = CCmode;
10440 code = LTU;
10441 }
10442 else
10443 {
10444 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
10445 code = NE;
10446 }
10447 break;
10448 case EQ:
10449 case UNEQ:
10450 if (code == EQ && TARGET_IEEE_FP)
10451 {
10452 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10453 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
10454 intcmp_mode = CCmode;
10455 code = EQ;
10456 }
10457 else
10458 {
10459 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
10460 code = NE;
10461 break;
10462 }
10463 break;
10464 case NE:
10465 case LTGT:
10466 if (code == NE && TARGET_IEEE_FP)
10467 {
10468 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10469 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
10470 GEN_INT (0x40)));
10471 code = NE;
10472 }
10473 else
10474 {
10475 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
10476 code = EQ;
10477 }
10478 break;
10479
10480 case UNORDERED:
10481 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
10482 code = NE;
10483 break;
10484 case ORDERED:
10485 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
10486 code = EQ;
10487 break;
10488
10489 default:
10490 gcc_unreachable ();
10491 }
10492 }
10493
10494 /* Return the test that should be put into the flags user, i.e.
10495 the bcc, scc, or cmov instruction. */
10496 return gen_rtx_fmt_ee (code, VOIDmode,
10497 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10498 const0_rtx);
10499 }
10500
10501 rtx
10502 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
10503 {
10504 rtx op0, op1, ret;
10505 op0 = ix86_compare_op0;
10506 op1 = ix86_compare_op1;
10507
10508 if (second_test)
10509 *second_test = NULL_RTX;
10510 if (bypass_test)
10511 *bypass_test = NULL_RTX;
10512
10513 if (ix86_compare_emitted)
10514 {
10515 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
10516 ix86_compare_emitted = NULL_RTX;
10517 }
10518 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
10519 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
10520 second_test, bypass_test);
10521 else
10522 ret = ix86_expand_int_compare (code, op0, op1);
10523
10524 return ret;
10525 }
10526
10527 /* Return true if the CODE will result in nontrivial jump sequence. */
10528 bool
10529 ix86_fp_jump_nontrivial_p (enum rtx_code code)
10530 {
10531 enum rtx_code bypass_code, first_code, second_code;
10532 if (!TARGET_CMOVE)
10533 return true;
10534 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10535 return bypass_code != UNKNOWN || second_code != UNKNOWN;
10536 }
10537
10538 void
10539 ix86_expand_branch (enum rtx_code code, rtx label)
10540 {
10541 rtx tmp;
10542
10543 /* If we have emitted a compare insn, go straight to simple.
10544 ix86_expand_compare won't emit anything if ix86_compare_emitted
10545 is non NULL. */
10546 if (ix86_compare_emitted)
10547 goto simple;
10548
10549 switch (GET_MODE (ix86_compare_op0))
10550 {
10551 case QImode:
10552 case HImode:
10553 case SImode:
10554 simple:
10555 tmp = ix86_expand_compare (code, NULL, NULL);
10556 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
10557 gen_rtx_LABEL_REF (VOIDmode, label),
10558 pc_rtx);
10559 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
10560 return;
10561
10562 case SFmode:
10563 case DFmode:
10564 case XFmode:
10565 {
10566 rtvec vec;
10567 int use_fcomi;
10568 enum rtx_code bypass_code, first_code, second_code;
10569
10570 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
10571 &ix86_compare_op1);
10572
10573 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10574
10575 /* Check whether we will use the natural sequence with one jump. If
10576 so, we can expand jump early. Otherwise delay expansion by
10577 creating compound insn to not confuse optimizers. */
10578 if (bypass_code == UNKNOWN && second_code == UNKNOWN
10579 && TARGET_CMOVE)
10580 {
10581 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
10582 gen_rtx_LABEL_REF (VOIDmode, label),
10583 pc_rtx, NULL_RTX, NULL_RTX);
10584 }
10585 else
10586 {
10587 tmp = gen_rtx_fmt_ee (code, VOIDmode,
10588 ix86_compare_op0, ix86_compare_op1);
10589 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
10590 gen_rtx_LABEL_REF (VOIDmode, label),
10591 pc_rtx);
10592 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
10593
10594 use_fcomi = ix86_use_fcomi_compare (code);
10595 vec = rtvec_alloc (3 + !use_fcomi);
10596 RTVEC_ELT (vec, 0) = tmp;
10597 RTVEC_ELT (vec, 1)
10598 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
10599 RTVEC_ELT (vec, 2)
10600 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
10601 if (! use_fcomi)
10602 RTVEC_ELT (vec, 3)
10603 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
10604
10605 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
10606 }
10607 return;
10608 }
10609
10610 case DImode:
10611 if (TARGET_64BIT)
10612 goto simple;
10613 case TImode:
10614 /* Expand DImode branch into multiple compare+branch. */
10615 {
10616 rtx lo[2], hi[2], label2;
10617 enum rtx_code code1, code2, code3;
10618 enum machine_mode submode;
10619
10620 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
10621 {
10622 tmp = ix86_compare_op0;
10623 ix86_compare_op0 = ix86_compare_op1;
10624 ix86_compare_op1 = tmp;
10625 code = swap_condition (code);
10626 }
10627 if (GET_MODE (ix86_compare_op0) == DImode)
10628 {
10629 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
10630 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
10631 submode = SImode;
10632 }
10633 else
10634 {
10635 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
10636 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
10637 submode = DImode;
10638 }
10639
10640 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
10641 avoid two branches. This costs one extra insn, so disable when
10642 optimizing for size. */
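/* That is, a == b exactly when ((hi0 ^ hi1) | (lo0 ^ lo1)) == 0, so a
   single compare of the OR result against zero suffices.  */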
10643
10644 if ((code == EQ || code == NE)
10645 && (!optimize_size
10646 || hi[1] == const0_rtx || lo[1] == const0_rtx))
10647 {
10648 rtx xor0, xor1;
10649
10650 xor1 = hi[0];
10651 if (hi[1] != const0_rtx)
10652 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
10653 NULL_RTX, 0, OPTAB_WIDEN);
10654
10655 xor0 = lo[0];
10656 if (lo[1] != const0_rtx)
10657 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
10658 NULL_RTX, 0, OPTAB_WIDEN);
10659
10660 tmp = expand_binop (submode, ior_optab, xor1, xor0,
10661 NULL_RTX, 0, OPTAB_WIDEN);
10662
10663 ix86_compare_op0 = tmp;
10664 ix86_compare_op1 = const0_rtx;
10665 ix86_expand_branch (code, label);
10666 return;
10667 }
10668
10669 /* Otherwise, if we are doing a less-than or greater-or-equal comparison,
10670 op1 is a constant and the low word is zero, then we can just
10671 examine the high word. */
10672
10673 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
10674 switch (code)
10675 {
10676 case LT: case LTU: case GE: case GEU:
10677 ix86_compare_op0 = hi[0];
10678 ix86_compare_op1 = hi[1];
10679 ix86_expand_branch (code, label);
10680 return;
10681 default:
10682 break;
10683 }
10684
10685 /* Otherwise, we need two or three jumps. */
10686
10687 label2 = gen_label_rtx ();
10688
10689 code1 = code;
10690 code2 = swap_condition (code);
10691 code3 = unsigned_condition (code);
10692
10693 switch (code)
10694 {
10695 case LT: case GT: case LTU: case GTU:
10696 break;
10697
10698 case LE: code1 = LT; code2 = GT; break;
10699 case GE: code1 = GT; code2 = LT; break;
10700 case LEU: code1 = LTU; code2 = GTU; break;
10701 case GEU: code1 = GTU; code2 = LTU; break;
10702
10703 case EQ: code1 = UNKNOWN; code2 = NE; break;
10704 case NE: code2 = UNKNOWN; break;
10705
10706 default:
10707 gcc_unreachable ();
10708 }
10709
10710 /*
10711 * a < b =>
10712 * if (hi(a) < hi(b)) goto true;
10713 * if (hi(a) > hi(b)) goto false;
10714 * if (lo(a) < lo(b)) goto true;
10715 * false:
10716 */
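/* For example, LE splits into code1 = LT, code2 = GT and code3 = LEU:
   if (hi(a) < hi(b)) goto true; if (hi(a) > hi(b)) goto false;
   if (lo(a) <= lo(b)) (unsigned) goto true.  */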
10717
10718 ix86_compare_op0 = hi[0];
10719 ix86_compare_op1 = hi[1];
10720
10721 if (code1 != UNKNOWN)
10722 ix86_expand_branch (code1, label);
10723 if (code2 != UNKNOWN)
10724 ix86_expand_branch (code2, label2);
10725
10726 ix86_compare_op0 = lo[0];
10727 ix86_compare_op1 = lo[1];
10728 ix86_expand_branch (code3, label);
10729
10730 if (code2 != UNKNOWN)
10731 emit_label (label2);
10732 return;
10733 }
10734
10735 default:
10736 gcc_unreachable ();
10737 }
10738 }
10739
10740 /* Split branch based on floating point condition. */
10741 void
10742 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
10743 rtx target1, rtx target2, rtx tmp, rtx pushed)
10744 {
10745 rtx second, bypass;
10746 rtx label = NULL_RTX;
10747 rtx condition;
10748 int bypass_probability = -1, second_probability = -1, probability = -1;
10749 rtx i;
10750
10751 if (target2 != pc_rtx)
10752 {
10753 rtx tmp = target2;
10754 code = reverse_condition_maybe_unordered (code);
10755 target2 = target1;
10756 target1 = tmp;
10757 }
10758
10759 condition = ix86_expand_fp_compare (code, op1, op2,
10760 tmp, &second, &bypass);
10761
10762 /* Remove pushed operand from stack. */
10763 if (pushed)
10764 ix86_free_from_memory (GET_MODE (pushed));
10765
10766 if (split_branch_probability >= 0)
10767 {
10768 /* Distribute the probabilities across the jumps.
10769 Assume that BYPASS and SECOND always test
10770 for UNORDERED. */
10771 probability = split_branch_probability;
10772
10773 /* A value of 1 is low enough that the probability does not need
10774 to be updated. Later we may run some experiments and see
10775 whether unordered values are more frequent in practice. */
10776 if (bypass)
10777 bypass_probability = 1;
10778 if (second)
10779 second_probability = 1;
10780 }
10781 if (bypass != NULL_RTX)
10782 {
10783 label = gen_label_rtx ();
10784 i = emit_jump_insn (gen_rtx_SET
10785 (VOIDmode, pc_rtx,
10786 gen_rtx_IF_THEN_ELSE (VOIDmode,
10787 bypass,
10788 gen_rtx_LABEL_REF (VOIDmode,
10789 label),
10790 pc_rtx)));
10791 if (bypass_probability >= 0)
10792 REG_NOTES (i)
10793 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10794 GEN_INT (bypass_probability),
10795 REG_NOTES (i));
10796 }
10797 i = emit_jump_insn (gen_rtx_SET
10798 (VOIDmode, pc_rtx,
10799 gen_rtx_IF_THEN_ELSE (VOIDmode,
10800 condition, target1, target2)));
10801 if (probability >= 0)
10802 REG_NOTES (i)
10803 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10804 GEN_INT (probability),
10805 REG_NOTES (i));
10806 if (second != NULL_RTX)
10807 {
10808 i = emit_jump_insn (gen_rtx_SET
10809 (VOIDmode, pc_rtx,
10810 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
10811 target2)));
10812 if (second_probability >= 0)
10813 REG_NOTES (i)
10814 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10815 GEN_INT (second_probability),
10816 REG_NOTES (i));
10817 }
10818 if (label != NULL_RTX)
10819 emit_label (label);
10820 }
10821
10822 int
10823 ix86_expand_setcc (enum rtx_code code, rtx dest)
10824 {
10825 rtx ret, tmp, tmpreg, equiv;
10826 rtx second_test, bypass_test;
10827
10828 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
10829 return 0; /* FAIL */
10830
10831 gcc_assert (GET_MODE (dest) == QImode);
10832
10833 ret = ix86_expand_compare (code, &second_test, &bypass_test);
10834 PUT_MODE (ret, QImode);
10835
10836 tmp = dest;
10837 tmpreg = dest;
10838
10839 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
10840 if (bypass_test || second_test)
10841 {
10842 rtx test = second_test;
10843 int bypass = 0;
10844 rtx tmp2 = gen_reg_rtx (QImode);
10845 if (bypass_test)
10846 {
10847 gcc_assert (!second_test);
10848 test = bypass_test;
10849 bypass = 1;
10850 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
10851 }
10852 PUT_MODE (test, QImode);
10853 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
10854
10855 if (bypass)
10856 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
10857 else
10858 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
10859 }
10860
10861 /* Attach a REG_EQUAL note describing the comparison result. */
10862 if (ix86_compare_op0 && ix86_compare_op1)
10863 {
10864 equiv = simplify_gen_relational (code, QImode,
10865 GET_MODE (ix86_compare_op0),
10866 ix86_compare_op0, ix86_compare_op1);
10867 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
10868 }
10869
10870 return 1; /* DONE */
10871 }
10872
10873 /* Expand a comparison setting or clearing the carry flag. Return true
10874 when successful and set *POP to the comparison operation. */
10875 static bool
10876 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
10877 {
10878 enum machine_mode mode =
10879 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
10880
10881 /* Do not handle double-word compares, which go through a special path.
10882 FP compares are handled below only when they reduce to a carry-flag test. */
10883 if (mode == (TARGET_64BIT ? TImode : DImode))
10884 return false;
10885 if (FLOAT_MODE_P (mode))
10886 {
10887 rtx second_test = NULL, bypass_test = NULL;
10888 rtx compare_op, compare_seq;
10889
10890 /* Shortcut: the following common codes never translate into carry-flag compares. */
10891 if (code == EQ || code == NE || code == UNEQ || code == LTGT
10892 || code == ORDERED || code == UNORDERED)
10893 return false;
10894
10895 /* These comparisons require the zero flag; swap the operands so they won't. */
10896 if ((code == GT || code == UNLE || code == LE || code == UNGT)
10897 && !TARGET_IEEE_FP)
10898 {
10899 rtx tmp = op0;
10900 op0 = op1;
10901 op1 = tmp;
10902 code = swap_condition (code);
10903 }
10904
10905 /* Try to expand the comparison and verify that we end up with a carry-flag
10906 based comparison. This fails to be true only when we decide to expand
10907 the comparison using arithmetic, which is not a common scenario. */
10908 start_sequence ();
10909 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
10910 &second_test, &bypass_test);
10911 compare_seq = get_insns ();
10912 end_sequence ();
10913
10914 if (second_test || bypass_test)
10915 return false;
10916 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10917 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10918 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
10919 else
10920 code = GET_CODE (compare_op);
10921 if (code != LTU && code != GEU)
10922 return false;
10923 emit_insn (compare_seq);
10924 *pop = compare_op;
10925 return true;
10926 }
10927 if (!INTEGRAL_MODE_P (mode))
10928 return false;
10929 switch (code)
10930 {
10931 case LTU:
10932 case GEU:
10933 break;
10934
10935 /* Convert a==0 into (unsigned)a<1. */
10936 case EQ:
10937 case NE:
10938 if (op1 != const0_rtx)
10939 return false;
10940 op1 = const1_rtx;
10941 code = (code == EQ ? LTU : GEU);
10942 break;
10943
10944 /* Convert a>b into b<a or a>=b+1. */
10945 case GTU:
10946 case LEU:
10947 if (GET_CODE (op1) == CONST_INT)
10948 {
10949 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
10950 /* Bail out on overflow. We still can swap operands but that
10951 would force loading of the constant into a register. */
10952 if (op1 == const0_rtx
10953 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
10954 return false;
10955 code = (code == GTU ? GEU : LTU);
10956 }
10957 else
10958 {
10959 rtx tmp = op1;
10960 op1 = op0;
10961 op0 = tmp;
10962 code = (code == GTU ? LTU : GEU);
10963 }
10964 break;
10965
10966 /* Convert a>=0 into (unsigned)a<0x80000000. */
10967 case LT:
10968 case GE:
10969 if (mode == DImode || op1 != const0_rtx)
10970 return false;
10971 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10972 code = (code == LT ? GEU : LTU);
10973 break;
10974 case LE:
10975 case GT:
10976 if (mode == DImode || op1 != constm1_rtx)
10977 return false;
10978 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10979 code = (code == LE ? GEU : LTU);
10980 break;
10981
10982 default:
10983 return false;
10984 }
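/* At this point the comparison has been rewritten as a pure carry-flag test;
   e.g. in SImode x >= 0 has become the unsigned test x < 0x80000000.  */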
10985 /* Swapping operands may cause a constant to appear as the first operand. */
10986 if (!nonimmediate_operand (op0, VOIDmode))
10987 {
10988 if (no_new_pseudos)
10989 return false;
10990 op0 = force_reg (mode, op0);
10991 }
10992 ix86_compare_op0 = op0;
10993 ix86_compare_op1 = op1;
10994 *pop = ix86_expand_compare (code, NULL, NULL);
10995 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
10996 return true;
10997 }
10998
10999 int
11000 ix86_expand_int_movcc (rtx operands[])
11001 {
11002 enum rtx_code code = GET_CODE (operands[1]), compare_code;
11003 rtx compare_seq, compare_op;
11004 rtx second_test, bypass_test;
11005 enum machine_mode mode = GET_MODE (operands[0]);
11006 bool sign_bit_compare_p = false;
11007
11008 start_sequence ();
11009 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11010 compare_seq = get_insns ();
11011 end_sequence ();
11012
11013 compare_code = GET_CODE (compare_op);
11014
11015 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
11016 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
11017 sign_bit_compare_p = true;
11018
11019 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
11020 HImode insns, we'd be swallowed in word prefix ops. */
11021
11022 if ((mode != HImode || TARGET_FAST_PREFIX)
11023 && (mode != (TARGET_64BIT ? TImode : DImode))
11024 && GET_CODE (operands[2]) == CONST_INT
11025 && GET_CODE (operands[3]) == CONST_INT)
11026 {
11027 rtx out = operands[0];
11028 HOST_WIDE_INT ct = INTVAL (operands[2]);
11029 HOST_WIDE_INT cf = INTVAL (operands[3]);
11030 HOST_WIDE_INT diff;
11031
11032 diff = ct - cf;
11033 /* Sign-bit compares are better done using shifts than
11034 using sbb. */
11035 if (sign_bit_compare_p
11036 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11037 ix86_compare_op1, &compare_op))
11038 {
11039 /* Detect overlap between destination and compare sources. */
11040 rtx tmp = out;
11041
11042 if (!sign_bit_compare_p)
11043 {
11044 bool fpcmp = false;
11045
11046 compare_code = GET_CODE (compare_op);
11047
11048 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11049 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11050 {
11051 fpcmp = true;
11052 compare_code = ix86_fp_compare_code_to_integer (compare_code);
11053 }
11054
11055 /* To simplify the rest of the code, restrict to the GEU case. */
11056 if (compare_code == LTU)
11057 {
11058 HOST_WIDE_INT tmp = ct;
11059 ct = cf;
11060 cf = tmp;
11061 compare_code = reverse_condition (compare_code);
11062 code = reverse_condition (code);
11063 }
11064 else
11065 {
11066 if (fpcmp)
11067 PUT_CODE (compare_op,
11068 reverse_condition_maybe_unordered
11069 (GET_CODE (compare_op)));
11070 else
11071 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11072 }
11073 diff = ct - cf;
11074
11075 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
11076 || reg_overlap_mentioned_p (out, ix86_compare_op1))
11077 tmp = gen_reg_rtx (mode);
11078
11079 if (mode == DImode)
11080 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
11081 else
11082 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
11083 }
11084 else
11085 {
11086 if (code == GT || code == GE)
11087 code = reverse_condition (code);
11088 else
11089 {
11090 HOST_WIDE_INT tmp = ct;
11091 ct = cf;
11092 cf = tmp;
11093 diff = ct - cf;
11094 }
11095 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
11096 ix86_compare_op1, VOIDmode, 0, -1);
11097 }
11098
11099 if (diff == 1)
11100 {
11101 /*
11102 * cmpl op0,op1
11103 * sbbl dest,dest
11104 * [addl dest, ct]
11105 *
11106 * Size 5 - 8.
11107 */
11108 if (ct)
11109 tmp = expand_simple_binop (mode, PLUS,
11110 tmp, GEN_INT (ct),
11111 copy_rtx (tmp), 1, OPTAB_DIRECT);
11112 }
11113 else if (cf == -1)
11114 {
11115 /*
11116 * cmpl op0,op1
11117 * sbbl dest,dest
11118 * orl $ct, dest
11119 *
11120 * Size 8.
11121 */
11122 tmp = expand_simple_binop (mode, IOR,
11123 tmp, GEN_INT (ct),
11124 copy_rtx (tmp), 1, OPTAB_DIRECT);
11125 }
11126 else if (diff == -1 && ct)
11127 {
11128 /*
11129 * cmpl op0,op1
11130 * sbbl dest,dest
11131 * notl dest
11132 * [addl dest, cf]
11133 *
11134 * Size 8 - 11.
11135 */
11136 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
11137 if (cf)
11138 tmp = expand_simple_binop (mode, PLUS,
11139 copy_rtx (tmp), GEN_INT (cf),
11140 copy_rtx (tmp), 1, OPTAB_DIRECT);
11141 }
11142 else
11143 {
11144 /*
11145 * cmpl op0,op1
11146 * sbbl dest,dest
11147 * [notl dest]
11148 * andl cf - ct, dest
11149 * [addl dest, ct]
11150 *
11151 * Size 8 - 11.
11152 */
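/* I.e. an all-ones mask yields cf ((-1 & (cf - ct)) + ct == cf), while a
   zero mask yields ct.  */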
11153
11154 if (cf == 0)
11155 {
11156 cf = ct;
11157 ct = 0;
11158 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
11159 }
11160
11161 tmp = expand_simple_binop (mode, AND,
11162 copy_rtx (tmp),
11163 gen_int_mode (cf - ct, mode),
11164 copy_rtx (tmp), 1, OPTAB_DIRECT);
11165 if (ct)
11166 tmp = expand_simple_binop (mode, PLUS,
11167 copy_rtx (tmp), GEN_INT (ct),
11168 copy_rtx (tmp), 1, OPTAB_DIRECT);
11169 }
11170
11171 if (!rtx_equal_p (tmp, out))
11172 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
11173
11174 return 1; /* DONE */
11175 }
11176
11177 if (diff < 0)
11178 {
11179 HOST_WIDE_INT tmp;
11180 tmp = ct, ct = cf, cf = tmp;
11181 diff = -diff;
11182 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
11183 {
11184 /* We may be reversing an unordered compare to a normal compare, which
11185 is not valid in general (we may convert a non-trapping condition
11186 into a trapping one); however, on i386 we currently emit all
11187 comparisons unordered. */
11188 compare_code = reverse_condition_maybe_unordered (compare_code);
11189 code = reverse_condition_maybe_unordered (code);
11190 }
11191 else
11192 {
11193 compare_code = reverse_condition (compare_code);
11194 code = reverse_condition (code);
11195 }
11196 }
11197
11198 compare_code = UNKNOWN;
11199 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
11200 && GET_CODE (ix86_compare_op1) == CONST_INT)
11201 {
11202 if (ix86_compare_op1 == const0_rtx
11203 && (code == LT || code == GE))
11204 compare_code = code;
11205 else if (ix86_compare_op1 == constm1_rtx)
11206 {
11207 if (code == LE)
11208 compare_code = LT;
11209 else if (code == GT)
11210 compare_code = GE;
11211 }
11212 }
11213
11214 /* Optimize dest = (op0 < 0) ? -1 : cf. */
11215 if (compare_code != UNKNOWN
11216 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
11217 && (cf == -1 || ct == -1))
11218 {
11219 /* If the lea code below could be used, only optimize
11220 if it results in a two-insn sequence. */
11221
11222 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
11223 || diff == 3 || diff == 5 || diff == 9)
11224 || (compare_code == LT && ct == -1)
11225 || (compare_code == GE && cf == -1))
11226 {
11227 /*
11228 * notl op1 (if necessary)
11229 * sarl $31, op1
11230 * orl cf, op1
11231 */
11232 if (ct != -1)
11233 {
11234 cf = ct;
11235 ct = -1;
11236 code = reverse_condition (code);
11237 }
11238
11239 out = emit_store_flag (out, code, ix86_compare_op0,
11240 ix86_compare_op1, VOIDmode, 0, -1);
11241
11242 out = expand_simple_binop (mode, IOR,
11243 out, GEN_INT (cf),
11244 out, 1, OPTAB_DIRECT);
11245 if (out != operands[0])
11246 emit_move_insn (operands[0], out);
11247
11248 return 1; /* DONE */
11249 }
11250 }
11251
11252
11253 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
11254 || diff == 3 || diff == 5 || diff == 9)
11255 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
11256 && (mode != DImode
11257 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
11258 {
11259 /*
11260 * xorl dest,dest
11261 * cmpl op1,op2
11262 * setcc dest
11263 * lea cf(dest*(ct-cf)),dest
11264 *
11265 * Size 14.
11266 *
11267 * This also catches the degenerate setcc-only case.
11268 */
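/* E.g. with ct = 5 and cf = 3 (diff = 2), the 0/1 flag in dest is turned
   into 3 + dest*2, selecting 3 or 5 without a branch.  */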
11269
11270 rtx tmp;
11271 int nops;
11272
11273 out = emit_store_flag (out, code, ix86_compare_op0,
11274 ix86_compare_op1, VOIDmode, 0, 1);
11275
11276 nops = 0;
11277 /* On x86_64 the lea instruction operates on Pmode, so we need
11278 the arithmetic done in the proper mode to match. */
11279 if (diff == 1)
11280 tmp = copy_rtx (out);
11281 else
11282 {
11283 rtx out1;
11284 out1 = copy_rtx (out);
11285 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
11286 nops++;
11287 if (diff & 1)
11288 {
11289 tmp = gen_rtx_PLUS (mode, tmp, out1);
11290 nops++;
11291 }
11292 }
11293 if (cf != 0)
11294 {
11295 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
11296 nops++;
11297 }
11298 if (!rtx_equal_p (tmp, out))
11299 {
11300 if (nops == 1)
11301 out = force_operand (tmp, copy_rtx (out));
11302 else
11303 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
11304 }
11305 if (!rtx_equal_p (out, operands[0]))
11306 emit_move_insn (operands[0], copy_rtx (out));
11307
11308 return 1; /* DONE */
11309 }
11310
11311 /*
11312 * General case: Jumpful:
11313 * xorl dest,dest cmpl op1, op2
11314 * cmpl op1, op2 movl ct, dest
11315 * setcc dest jcc 1f
11316 * decl dest movl cf, dest
11317 * andl (cf-ct),dest 1:
11318 * addl ct,dest
11319 *
11320 * Size 20. Size 14.
11321 *
11322 * This is reasonably steep, but branch mispredict costs are
11323 * high on modern cpus, so consider failing only if optimizing
11324 * for space.
11325 */
11326
11327 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
11328 && BRANCH_COST >= 2)
11329 {
11330 if (cf == 0)
11331 {
11332 cf = ct;
11333 ct = 0;
11334 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
11335 /* We may be reversing an unordered compare to a normal compare,
11336 which is not valid in general (we may convert a non-trapping
11337 condition into a trapping one); however, on i386 we currently
11338 emit all comparisons unordered. */
11339 code = reverse_condition_maybe_unordered (code);
11340 else
11341 {
11342 code = reverse_condition (code);
11343 if (compare_code != UNKNOWN)
11344 compare_code = reverse_condition (compare_code);
11345 }
11346 }
11347
11348 if (compare_code != UNKNOWN)
11349 {
11350 /* notl op1 (if needed)
11351 sarl $31, op1
11352 andl (cf-ct), op1
11353 addl ct, op1
11354
11355 For x < 0 (resp. x <= -1) there will be no notl,
11356 so if possible swap the constants to get rid of the
11357 complement.
11358 True/false will be -1/0 while code below (store flag
11359 followed by decrement) is 0/-1, so the constants need
11360 to be exchanged once more. */
11361
11362 if (compare_code == GE || !cf)
11363 {
11364 code = reverse_condition (code);
11365 compare_code = LT;
11366 }
11367 else
11368 {
11369 HOST_WIDE_INT tmp = cf;
11370 cf = ct;
11371 ct = tmp;
11372 }
11373
11374 out = emit_store_flag (out, code, ix86_compare_op0,
11375 ix86_compare_op1, VOIDmode, 0, -1);
11376 }
11377 else
11378 {
11379 out = emit_store_flag (out, code, ix86_compare_op0,
11380 ix86_compare_op1, VOIDmode, 0, 1);
11381
11382 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
11383 copy_rtx (out), 1, OPTAB_DIRECT);
11384 }
11385
11386 out = expand_simple_binop (mode, AND, copy_rtx (out),
11387 gen_int_mode (cf - ct, mode),
11388 copy_rtx (out), 1, OPTAB_DIRECT);
11389 if (ct)
11390 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
11391 copy_rtx (out), 1, OPTAB_DIRECT);
11392 if (!rtx_equal_p (out, operands[0]))
11393 emit_move_insn (operands[0], copy_rtx (out));
11394
11395 return 1; /* DONE */
11396 }
11397 }
11398
11399 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
11400 {
11401 /* Try a few things more with specific constants and a variable. */
11402
11403 optab op;
11404 rtx var, orig_out, out, tmp;
11405
11406 if (BRANCH_COST <= 2)
11407 return 0; /* FAIL */
11408
11409 /* If one of the two operands is an interesting constant (0 or -1), produce
11410 a 0/-1 mask with a recursive call and mask the variable in with a logical operation. */
11411
11412 if (GET_CODE (operands[2]) == CONST_INT)
11413 {
11414 var = operands[3];
11415 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
11416 operands[3] = constm1_rtx, op = and_optab;
11417 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
11418 operands[3] = const0_rtx, op = ior_optab;
11419 else
11420 return 0; /* FAIL */
11421 }
11422 else if (GET_CODE (operands[3]) == CONST_INT)
11423 {
11424 var = operands[2];
11425 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
11426 operands[2] = constm1_rtx, op = and_optab;
11427 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
11428 operands[2] = const0_rtx, op = ior_optab;
11429 else
11430 return 0; /* FAIL */
11431 }
11432 else
11433 return 0; /* FAIL */
11434
11435 orig_out = operands[0];
11436 tmp = gen_reg_rtx (mode);
11437 operands[0] = tmp;
11438
11439 /* Recurse to get the constant loaded. */
11440 if (ix86_expand_int_movcc (operands) == 0)
11441 return 0; /* FAIL */
11442
11443 /* Mask in the interesting variable. */
11444 out = expand_binop (mode, op, var, tmp, orig_out, 0,
11445 OPTAB_WIDEN);
11446 if (!rtx_equal_p (out, orig_out))
11447 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
11448
11449 return 1; /* DONE */
11450 }
11451
11452 /*
11453 * For comparison with above,
11454 *
11455 * movl cf,dest
11456 * movl ct,tmp
11457 * cmpl op1,op2
11458 * cmovcc tmp,dest
11459 *
11460 * Size 15.
11461 */
11462
11463 if (! nonimmediate_operand (operands[2], mode))
11464 operands[2] = force_reg (mode, operands[2]);
11465 if (! nonimmediate_operand (operands[3], mode))
11466 operands[3] = force_reg (mode, operands[3]);
11467
11468 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
11469 {
11470 rtx tmp = gen_reg_rtx (mode);
11471 emit_move_insn (tmp, operands[3]);
11472 operands[3] = tmp;
11473 }
11474 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
11475 {
11476 rtx tmp = gen_reg_rtx (mode);
11477 emit_move_insn (tmp, operands[2]);
11478 operands[2] = tmp;
11479 }
11480
11481 if (! register_operand (operands[2], VOIDmode)
11482 && (mode == QImode
11483 || ! register_operand (operands[3], VOIDmode)))
11484 operands[2] = force_reg (mode, operands[2]);
11485
11486 if (mode == QImode
11487 && ! register_operand (operands[3], VOIDmode))
11488 operands[3] = force_reg (mode, operands[3]);
11489
11490 emit_insn (compare_seq);
11491 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11492 gen_rtx_IF_THEN_ELSE (mode,
11493 compare_op, operands[2],
11494 operands[3])));
11495 if (bypass_test)
11496 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
11497 gen_rtx_IF_THEN_ELSE (mode,
11498 bypass_test,
11499 copy_rtx (operands[3]),
11500 copy_rtx (operands[0]))));
11501 if (second_test)
11502 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
11503 gen_rtx_IF_THEN_ELSE (mode,
11504 second_test,
11505 copy_rtx (operands[2]),
11506 copy_rtx (operands[0]))));
11507
11508 return 1; /* DONE */
11509 }
11510
11511 /* Swap, force into registers, or otherwise massage the two operands
11512 to an sse comparison with a mask result. Thus we differ a bit from
11513 ix86_prepare_fp_compare_args which expects to produce a flags result.
11514
11515 The DEST operand exists to help determine whether to commute commutative
11516 operators. The POP0/POP1 operands are updated in place. The new
11517 comparison code is returned, or UNKNOWN if not implementable. */
11518
11519 static enum rtx_code
11520 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
11521 rtx *pop0, rtx *pop1)
11522 {
11523 rtx tmp;
11524
11525 switch (code)
11526 {
11527 case LTGT:
11528 case UNEQ:
11529 /* We have no LTGT as an operator. We could implement it with
11530 NE & ORDERED, but this requires an extra temporary. It's
11531 not clear that it's worth it. */
11532 return UNKNOWN;
11533
11534 case LT:
11535 case LE:
11536 case UNGT:
11537 case UNGE:
11538 /* These are supported directly. */
11539 break;
11540
11541 case EQ:
11542 case NE:
11543 case UNORDERED:
11544 case ORDERED:
11545 /* For commutative operators, try to canonicalize the destination
11546 operand to be first in the comparison - this helps reload to
11547 avoid extra moves. */
11548 if (!dest || !rtx_equal_p (dest, *pop1))
11549 break;
11550 /* FALLTHRU */
11551
11552 case GE:
11553 case GT:
11554 case UNLE:
11555 case UNLT:
11556 /* These are not supported directly. Swap the comparison operands
11557 to transform into something that is supported. */
11558 tmp = *pop0;
11559 *pop0 = *pop1;
11560 *pop1 = tmp;
11561 code = swap_condition (code);
11562 break;
11563
11564 default:
11565 gcc_unreachable ();
11566 }
11567
11568 return code;
11569 }
11570
11571 /* Detect conditional moves that exactly match min/max operational
11572 semantics. Note that this is IEEE safe, as long as we don't
11573 interchange the operands.
11574
11575 Returns FALSE if this conditional move doesn't match a MIN/MAX,
11576 and TRUE if the operation is successful and instructions are emitted. */
11577
11578 static bool
11579 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
11580 rtx cmp_op1, rtx if_true, rtx if_false)
11581 {
11582 enum machine_mode mode;
11583 bool is_min;
11584 rtx tmp;
11585
11586 if (code == LT)
11587 ;
11588 else if (code == UNGE)
11589 {
11590 tmp = if_true;
11591 if_true = if_false;
11592 if_false = tmp;
11593 }
11594 else
11595 return false;
11596
11597 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
11598 is_min = true;
11599 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
11600 is_min = false;
11601 else
11602 return false;
11603
11604 mode = GET_MODE (dest);
11605
11606 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
11607 but MODE may be a vector mode and thus not appropriate. */
11608 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
11609 {
11610 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
11611 rtvec v;
11612
11613 if_true = force_reg (mode, if_true);
11614 v = gen_rtvec (2, if_true, if_false);
11615 tmp = gen_rtx_UNSPEC (mode, v, u);
11616 }
11617 else
11618 {
11619 code = is_min ? SMIN : SMAX;
11620 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
11621 }
11622
11623 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
11624 return true;
11625 }
11626
11627 /* Expand an sse vector comparison. Return the register with the result. */
11628
11629 static rtx
11630 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
11631 rtx op_true, rtx op_false)
11632 {
11633 enum machine_mode mode = GET_MODE (dest);
11634 rtx x;
11635
11636 cmp_op0 = force_reg (mode, cmp_op0);
11637 if (!nonimmediate_operand (cmp_op1, mode))
11638 cmp_op1 = force_reg (mode, cmp_op1);
11639
11640 if (optimize
11641 || reg_overlap_mentioned_p (dest, op_true)
11642 || reg_overlap_mentioned_p (dest, op_false))
11643 dest = gen_reg_rtx (mode);
11644
11645 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
11646 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11647
11648 return dest;
11649 }
11650
11651 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
11652 operations. This is used for both scalar and vector conditional moves. */
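/* In the general case this computes DEST = (CMP & OP_TRUE) | (~CMP & OP_FALSE),
   where CMP is expected to be an element-wise mask of all ones or all zeros.  */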
11653
11654 static void
11655 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
11656 {
11657 enum machine_mode mode = GET_MODE (dest);
11658 rtx t2, t3, x;
11659
11660 if (op_false == CONST0_RTX (mode))
11661 {
11662 op_true = force_reg (mode, op_true);
11663 x = gen_rtx_AND (mode, cmp, op_true);
11664 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11665 }
11666 else if (op_true == CONST0_RTX (mode))
11667 {
11668 op_false = force_reg (mode, op_false);
11669 x = gen_rtx_NOT (mode, cmp);
11670 x = gen_rtx_AND (mode, x, op_false);
11671 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11672 }
11673 else
11674 {
11675 op_true = force_reg (mode, op_true);
11676 op_false = force_reg (mode, op_false);
11677
11678 t2 = gen_reg_rtx (mode);
11679 if (optimize)
11680 t3 = gen_reg_rtx (mode);
11681 else
11682 t3 = dest;
11683
11684 x = gen_rtx_AND (mode, op_true, cmp);
11685 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
11686
11687 x = gen_rtx_NOT (mode, cmp);
11688 x = gen_rtx_AND (mode, x, op_false);
11689 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
11690
11691 x = gen_rtx_IOR (mode, t3, t2);
11692 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11693 }
11694 }
11695
11696 /* Expand a floating-point conditional move. Return true if successful. */
11697
11698 int
11699 ix86_expand_fp_movcc (rtx operands[])
11700 {
11701 enum machine_mode mode = GET_MODE (operands[0]);
11702 enum rtx_code code = GET_CODE (operands[1]);
11703 rtx tmp, compare_op, second_test, bypass_test;
11704
11705 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
11706 {
11707 enum machine_mode cmode;
11708
11709 /* Since we have no cmove for sse registers, don't force bad register
11710 allocation just to gain access to it. Deny movcc when the
11711 comparison mode doesn't match the move mode. */
11712 cmode = GET_MODE (ix86_compare_op0);
11713 if (cmode == VOIDmode)
11714 cmode = GET_MODE (ix86_compare_op1);
11715 if (cmode != mode)
11716 return 0;
11717
11718 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
11719 &ix86_compare_op0,
11720 &ix86_compare_op1);
11721 if (code == UNKNOWN)
11722 return 0;
11723
11724 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
11725 ix86_compare_op1, operands[2],
11726 operands[3]))
11727 return 1;
11728
11729 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
11730 ix86_compare_op1, operands[2], operands[3]);
11731 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
11732 return 1;
11733 }
11734
11735 /* The floating point conditional move instructions don't directly
11736 support conditions resulting from a signed integer comparison. */
11737
11738 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11739
11743 if (!fcmov_comparison_operator (compare_op, VOIDmode))
11744 {
11745 gcc_assert (!second_test && !bypass_test);
11746 tmp = gen_reg_rtx (QImode);
11747 ix86_expand_setcc (code, tmp);
11748 code = NE;
11749 ix86_compare_op0 = tmp;
11750 ix86_compare_op1 = const0_rtx;
11751 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11752 }
11753 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
11754 {
11755 tmp = gen_reg_rtx (mode);
11756 emit_move_insn (tmp, operands[3]);
11757 operands[3] = tmp;
11758 }
11759 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
11760 {
11761 tmp = gen_reg_rtx (mode);
11762 emit_move_insn (tmp, operands[2]);
11763 operands[2] = tmp;
11764 }
11765
11766 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11767 gen_rtx_IF_THEN_ELSE (mode, compare_op,
11768 operands[2], operands[3])));
11769 if (bypass_test)
11770 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11771 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
11772 operands[3], operands[0])));
11773 if (second_test)
11774 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11775 gen_rtx_IF_THEN_ELSE (mode, second_test,
11776 operands[2], operands[0])));
11777
11778 return 1;
11779 }
11780
11781 /* Expand a floating-point vector conditional move; a vcond operation
11782 rather than a movcc operation. */
11783
11784 bool
11785 ix86_expand_fp_vcond (rtx operands[])
11786 {
11787 enum rtx_code code = GET_CODE (operands[3]);
11788 rtx cmp;
11789
11790 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
11791 &operands[4], &operands[5]);
11792 if (code == UNKNOWN)
11793 return false;
11794
11795 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
11796 operands[5], operands[1], operands[2]))
11797 return true;
11798
11799 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
11800 operands[1], operands[2]);
11801 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
11802 return true;
11803 }
11804
11805 /* Expand a signed integral vector conditional move. */
11806
11807 bool
11808 ix86_expand_int_vcond (rtx operands[])
11809 {
11810 enum machine_mode mode = GET_MODE (operands[0]);
11811 enum rtx_code code = GET_CODE (operands[3]);
11812 bool negate = false;
11813 rtx x, cop0, cop1;
11814
11815 cop0 = operands[4];
11816 cop1 = operands[5];
11817
11818 /* Canonicalize the comparison to EQ, GT, GTU. */
11819 switch (code)
11820 {
11821 case EQ:
11822 case GT:
11823 case GTU:
11824 break;
11825
11826 case NE:
11827 case LE:
11828 case LEU:
11829 code = reverse_condition (code);
11830 negate = true;
11831 break;
11832
11833 case GE:
11834 case GEU:
11835 code = reverse_condition (code);
11836 negate = true;
11837 /* FALLTHRU */
11838
11839 case LT:
11840 case LTU:
11841 code = swap_condition (code);
11842 x = cop0, cop0 = cop1, cop1 = x;
11843 break;
11844
11845 default:
11846 gcc_unreachable ();
11847 }
11848
11849 /* Unsigned parallel compare is not supported by the hardware. Play some
11850 tricks to turn this into a signed comparison against 0. */
11851 if (code == GTU)
11852 {
11853 cop0 = force_reg (mode, cop0);
11854
11855 switch (mode)
11856 {
11857 case V4SImode:
11858 {
11859 rtx t1, t2, mask;
11860
11861 /* Perform a parallel modulo subtraction. */
11862 t1 = gen_reg_rtx (mode);
11863 emit_insn (gen_subv4si3 (t1, cop0, cop1));
11864
11865 /* Extract the original sign bit of op0. */
11866 mask = GEN_INT (-0x80000000);
11867 mask = gen_rtx_CONST_VECTOR (mode,
11868 gen_rtvec (4, mask, mask, mask, mask));
11869 mask = force_reg (mode, mask);
11870 t2 = gen_reg_rtx (mode);
11871 emit_insn (gen_andv4si3 (t2, cop0, mask));
11872
11873 /* XOR it back into the result of the subtraction. This results
11874 in the sign bit set iff we saw unsigned underflow. */
11875 x = gen_reg_rtx (mode);
11876 emit_insn (gen_xorv4si3 (x, t1, t2));
11877
11878 code = GT;
11879 }
11880 break;
11881
11882 case V16QImode:
11883 case V8HImode:
11884 /* Perform a parallel unsigned saturating subtraction. */
11885 x = gen_reg_rtx (mode);
11886 emit_insn (gen_rtx_SET (VOIDmode, x,
11887 gen_rtx_US_MINUS (mode, cop0, cop1)));
11888
11889 code = EQ;
11890 negate = !negate;
11891 break;
11892
11893 default:
11894 gcc_unreachable ();
11895 }
11896
11897 cop0 = x;
11898 cop1 = CONST0_RTX (mode);
11899 }
11900
11901 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
11902 operands[1+negate], operands[2-negate]);
11903
11904 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
11905 operands[2-negate]);
11906 return true;
11907 }
11908
11909 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
11910 true if we should do zero extension, else sign extension. HIGH_P is
11911 true if we want the N/2 high elements, else the low elements. */
11912
11913 void
11914 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
11915 {
11916 enum machine_mode imode = GET_MODE (operands[1]);
11917 rtx (*unpack)(rtx, rtx, rtx);
11918 rtx se, dest;
11919
11920 switch (imode)
11921 {
11922 case V16QImode:
11923 if (high_p)
11924 unpack = gen_vec_interleave_highv16qi;
11925 else
11926 unpack = gen_vec_interleave_lowv16qi;
11927 break;
11928 case V8HImode:
11929 if (high_p)
11930 unpack = gen_vec_interleave_highv8hi;
11931 else
11932 unpack = gen_vec_interleave_lowv8hi;
11933 break;
11934 case V4SImode:
11935 if (high_p)
11936 unpack = gen_vec_interleave_highv4si;
11937 else
11938 unpack = gen_vec_interleave_lowv4si;
11939 break;
11940 default:
11941 gcc_unreachable ();
11942 }
11943
11944 dest = gen_lowpart (imode, operands[0]);
11945
11946 if (unsigned_p)
11947 se = force_reg (imode, CONST0_RTX (imode));
11948 else
11949 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
11950 operands[1], pc_rtx, pc_rtx);
11951
11952 emit_insn (unpack (dest, operands[1], se));
11953 }
11954
11955 /* Expand conditional increment or decrement using adc/sbb instructions.
11956 The default case using setcc followed by the conditional move can be
11957 done by generic code. */
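/* E.g. a conditional increment x = y + ((unsigned) a < b) becomes a compare
   of a and b followed by an adc of zero, since the carry flag is set exactly
   when a < b as unsigned values.  */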
11958 int
11959 ix86_expand_int_addcc (rtx operands[])
11960 {
11961 enum rtx_code code = GET_CODE (operands[1]);
11962 rtx compare_op;
11963 rtx val = const0_rtx;
11964 bool fpcmp = false;
11965 enum machine_mode mode = GET_MODE (operands[0]);
11966
11967 if (operands[3] != const1_rtx
11968 && operands[3] != constm1_rtx)
11969 return 0;
11970 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11971 ix86_compare_op1, &compare_op))
11972 return 0;
11973 code = GET_CODE (compare_op);
11974
11975 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11976 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11977 {
11978 fpcmp = true;
11979 code = ix86_fp_compare_code_to_integer (code);
11980 }
11981
11982 if (code != LTU)
11983 {
11984 val = constm1_rtx;
11985 if (fpcmp)
11986 PUT_CODE (compare_op,
11987 reverse_condition_maybe_unordered
11988 (GET_CODE (compare_op)));
11989 else
11990 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11991 }
11992 PUT_MODE (compare_op, mode);
11993
11994 /* Construct either adc or sbb insn. */
11995 if ((code == LTU) == (operands[3] == constm1_rtx))
11996 {
11997 switch (GET_MODE (operands[0]))
11998 {
11999 case QImode:
12000 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
12001 break;
12002 case HImode:
12003 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
12004 break;
12005 case SImode:
12006 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
12007 break;
12008 case DImode:
12009 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
12010 break;
12011 default:
12012 gcc_unreachable ();
12013 }
12014 }
12015 else
12016 {
12017 switch (GET_MODE (operands[0]))
12018 {
12019 case QImode:
12020 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
12021 break;
12022 case HImode:
12023 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
12024 break;
12025 case SImode:
12026 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
12027 break;
12028 case DImode:
12029 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
12030 break;
12031 default:
12032 gcc_unreachable ();
12033 }
12034 }
12035 return 1; /* DONE */
12036 }
12037
12038
12039 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
12040 works for floating point parameters and nonoffsettable memories.
12041 For pushes, it returns just stack offsets; the values will be saved
12042 in the right order. At most three parts are generated. */
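/* For example, on a 32-bit target a DFmode operand is returned as two SImode
   parts and an XFmode operand as three, while on a 64-bit target XFmode and
   TFmode operands are returned as two parts.  */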
12043
12044 static int
12045 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
12046 {
12047 int size;
12048
12049 if (!TARGET_64BIT)
12050 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
12051 else
12052 size = (GET_MODE_SIZE (mode) + 4) / 8;
12053
12054 gcc_assert (GET_CODE (operand) != REG || !MMX_REGNO_P (REGNO (operand)));
12055 gcc_assert (size >= 2 && size <= 3);
12056
12057 /* Optimize constant pool references to immediates. This is used by fp
12058 moves, which force all constants to memory to allow combining. */
12059 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
12060 {
12061 rtx tmp = maybe_get_pool_constant (operand);
12062 if (tmp)
12063 operand = tmp;
12064 }
12065
12066 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
12067 {
12068 /* The only non-offsettable memories we handle are pushes. */
12069 int ok = push_operand (operand, VOIDmode);
12070
12071 gcc_assert (ok);
12072
12073 operand = copy_rtx (operand);
12074 PUT_MODE (operand, Pmode);
12075 parts[0] = parts[1] = parts[2] = operand;
12076 return size;
12077 }
12078
12079 if (GET_CODE (operand) == CONST_VECTOR)
12080 {
12081 enum machine_mode imode = int_mode_for_mode (mode);
12082 /* Caution: if we looked through a constant pool memory above,
12083 the operand may actually have a different mode now. That's
12084 ok, since we want to pun this all the way back to an integer. */
12085 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
12086 gcc_assert (operand != NULL);
12087 mode = imode;
12088 }
12089
12090 if (!TARGET_64BIT)
12091 {
12092 if (mode == DImode)
12093 split_di (&operand, 1, &parts[0], &parts[1]);
12094 else
12095 {
12096 if (REG_P (operand))
12097 {
12098 gcc_assert (reload_completed);
12099 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
12100 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
12101 if (size == 3)
12102 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
12103 }
12104 else if (offsettable_memref_p (operand))
12105 {
12106 operand = adjust_address (operand, SImode, 0);
12107 parts[0] = operand;
12108 parts[1] = adjust_address (operand, SImode, 4);
12109 if (size == 3)
12110 parts[2] = adjust_address (operand, SImode, 8);
12111 }
12112 else if (GET_CODE (operand) == CONST_DOUBLE)
12113 {
12114 REAL_VALUE_TYPE r;
12115 long l[4];
12116
12117 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
12118 switch (mode)
12119 {
12120 case XFmode:
12121 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
12122 parts[2] = gen_int_mode (l[2], SImode);
12123 break;
12124 case DFmode:
12125 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
12126 break;
12127 default:
12128 gcc_unreachable ();
12129 }
12130 parts[1] = gen_int_mode (l[1], SImode);
12131 parts[0] = gen_int_mode (l[0], SImode);
12132 }
12133 else
12134 gcc_unreachable ();
12135 }
12136 }
12137 else
12138 {
12139 if (mode == TImode)
12140 split_ti (&operand, 1, &parts[0], &parts[1]);
12141 if (mode == XFmode || mode == TFmode)
12142 {
12143 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
12144 if (REG_P (operand))
12145 {
12146 gcc_assert (reload_completed);
12147 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
12148 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
12149 }
12150 else if (offsettable_memref_p (operand))
12151 {
12152 operand = adjust_address (operand, DImode, 0);
12153 parts[0] = operand;
12154 parts[1] = adjust_address (operand, upper_mode, 8);
12155 }
12156 else if (GET_CODE (operand) == CONST_DOUBLE)
12157 {
12158 REAL_VALUE_TYPE r;
12159 long l[4];
12160
12161 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
12162 real_to_target (l, &r, mode);
12163
12164 /* Do not use shift by 32 to avoid warning on 32bit systems. */
12165 if (HOST_BITS_PER_WIDE_INT >= 64)
12166 parts[0]
12167 = gen_int_mode
12168 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
12169 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
12170 DImode);
12171 else
12172 parts[0] = immed_double_const (l[0], l[1], DImode);
12173
12174 if (upper_mode == SImode)
12175 parts[1] = gen_int_mode (l[2], SImode);
12176 else if (HOST_BITS_PER_WIDE_INT >= 64)
12177 parts[1]
12178 = gen_int_mode
12179 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
12180 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
12181 DImode);
12182 else
12183 parts[1] = immed_double_const (l[2], l[3], DImode);
12184 }
12185 else
12186 gcc_unreachable ();
12187 }
12188 }
12189
12190 return size;
12191 }
12192
12193 /* Emit insns to perform a move or push of DI, DF, and XF values.
12194 All required insns are emitted here; nothing is returned to the caller.
12195 Operands 2-4 receive the destination parts in the correct order;
12196 operands 5-7 receive the corresponding source parts. */
12197
12198 void
12199 ix86_split_long_move (rtx operands[])
12200 {
12201 rtx part[2][3];
12202 int nparts;
12203 int push = 0;
12204 int collisions = 0;
12205 enum machine_mode mode = GET_MODE (operands[0]);
12206
12207 /* The DFmode expanders may ask us to move a double.
12208 For a 64-bit target this is a single move. By hiding the fact
12209 here we simplify the i386.md splitters. */
12210 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
12211 {
12212 /* Optimize constant pool references to immediates. This is used by
12213 fp moves, which force all constants to memory to allow combining. */
12214
12215 if (GET_CODE (operands[1]) == MEM
12216 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
12217 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
12218 operands[1] = get_pool_constant (XEXP (operands[1], 0));
12219 if (push_operand (operands[0], VOIDmode))
12220 {
12221 operands[0] = copy_rtx (operands[0]);
12222 PUT_MODE (operands[0], Pmode);
12223 }
12224 else
12225 operands[0] = gen_lowpart (DImode, operands[0]);
12226 operands[1] = gen_lowpart (DImode, operands[1]);
12227 emit_move_insn (operands[0], operands[1]);
12228 return;
12229 }
12230
12231 /* The only non-offsettable memory we handle is push. */
12232 if (push_operand (operands[0], VOIDmode))
12233 push = 1;
12234 else
12235 gcc_assert (GET_CODE (operands[0]) != MEM
12236 || offsettable_memref_p (operands[0]));
12237
12238 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
12239 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
12240
12241 /* When emitting a push, be careful with source operands on the stack. */
12242 if (push && GET_CODE (operands[1]) == MEM
12243 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
12244 {
12245 if (nparts == 3)
12246 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
12247 XEXP (part[1][2], 0));
12248 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
12249 XEXP (part[1][1], 0));
12250 }
12251
12252 /* We need to do the copy in the right order in case an address register
12253 of the source overlaps the destination. */
12254 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
12255 {
12256 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
12257 collisions++;
12258 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
12259 collisions++;
12260 if (nparts == 3
12261 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
12262 collisions++;
12263
12264 /* Collision in the middle part can be handled by reordering. */
12265 if (collisions == 1 && nparts == 3
12266 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
12267 {
12268 rtx tmp;
12269 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
12270 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
12271 }
12272
12273 /* If there are more collisions, we can't handle them by reordering.
12274 Do an lea into the last part and use only one colliding move. */
12275 else if (collisions > 1)
12276 {
12277 rtx base;
12278
12279 collisions = 1;
12280
12281 base = part[0][nparts - 1];
12282
12283 /* Handle the case when the last part isn't valid for lea.
12284 Happens in 64-bit mode storing the 12-byte XFmode. */
12285 if (GET_MODE (base) != Pmode)
12286 base = gen_rtx_REG (Pmode, REGNO (base));
12287
12288 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
12289 part[1][0] = replace_equiv_address (part[1][0], base);
12290 part[1][1] = replace_equiv_address (part[1][1],
12291 plus_constant (base, UNITS_PER_WORD));
12292 if (nparts == 3)
12293 part[1][2] = replace_equiv_address (part[1][2],
12294 plus_constant (base, 8));
12295 }
12296 }
12297
12298 if (push)
12299 {
12300 if (!TARGET_64BIT)
12301 {
12302 if (nparts == 3)
12303 {
12304 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
12305 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
12306 emit_move_insn (part[0][2], part[1][2]);
12307 }
12308 }
12309 else
12310 {
12311 /* In 64-bit mode we don't have a 32-bit push available. In case this is a
12312 register, it is OK - we will just use the larger counterpart. We also
12313 retype memory - this comes from an attempt to avoid a REX prefix when
12314 moving the second half of a TFmode value. */
12315 if (GET_MODE (part[1][1]) == SImode)
12316 {
12317 switch (GET_CODE (part[1][1]))
12318 {
12319 case MEM:
12320 part[1][1] = adjust_address (part[1][1], DImode, 0);
12321 break;
12322
12323 case REG:
12324 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
12325 break;
12326
12327 default:
12328 gcc_unreachable ();
12329 }
12330
12331 if (GET_MODE (part[1][0]) == SImode)
12332 part[1][0] = part[1][1];
12333 }
12334 }
12335 emit_move_insn (part[0][1], part[1][1]);
12336 emit_move_insn (part[0][0], part[1][0]);
12337 return;
12338 }
12339
12340 /* Choose the correct order so as not to overwrite the source before it is copied. */
12341 if ((REG_P (part[0][0])
12342 && REG_P (part[1][1])
12343 && (REGNO (part[0][0]) == REGNO (part[1][1])
12344 || (nparts == 3
12345 && REGNO (part[0][0]) == REGNO (part[1][2]))))
12346 || (collisions > 0
12347 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
12348 {
12349 if (nparts == 3)
12350 {
12351 operands[2] = part[0][2];
12352 operands[3] = part[0][1];
12353 operands[4] = part[0][0];
12354 operands[5] = part[1][2];
12355 operands[6] = part[1][1];
12356 operands[7] = part[1][0];
12357 }
12358 else
12359 {
12360 operands[2] = part[0][1];
12361 operands[3] = part[0][0];
12362 operands[5] = part[1][1];
12363 operands[6] = part[1][0];
12364 }
12365 }
12366 else
12367 {
12368 if (nparts == 3)
12369 {
12370 operands[2] = part[0][0];
12371 operands[3] = part[0][1];
12372 operands[4] = part[0][2];
12373 operands[5] = part[1][0];
12374 operands[6] = part[1][1];
12375 operands[7] = part[1][2];
12376 }
12377 else
12378 {
12379 operands[2] = part[0][0];
12380 operands[3] = part[0][1];
12381 operands[5] = part[1][0];
12382 operands[6] = part[1][1];
12383 }
12384 }
12385
12386 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
12387 if (optimize_size)
12388 {
12389 if (GET_CODE (operands[5]) == CONST_INT
12390 && operands[5] != const0_rtx
12391 && REG_P (operands[2]))
12392 {
12393 if (GET_CODE (operands[6]) == CONST_INT
12394 && INTVAL (operands[6]) == INTVAL (operands[5]))
12395 operands[6] = operands[2];
12396
12397 if (nparts == 3
12398 && GET_CODE (operands[7]) == CONST_INT
12399 && INTVAL (operands[7]) == INTVAL (operands[5]))
12400 operands[7] = operands[2];
12401 }
12402
12403 if (nparts == 3
12404 && GET_CODE (operands[6]) == CONST_INT
12405 && operands[6] != const0_rtx
12406 && REG_P (operands[3])
12407 && GET_CODE (operands[7]) == CONST_INT
12408 && INTVAL (operands[7]) == INTVAL (operands[6]))
12409 operands[7] = operands[3];
12410 }
12411
12412 emit_move_insn (operands[2], operands[5]);
12413 emit_move_insn (operands[3], operands[6]);
12414 if (nparts == 3)
12415 emit_move_insn (operands[4], operands[7]);
12416
12417 return;
12418 }
12419
12420 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
12421 left shift by a constant, either using a single shift or
12422 a sequence of add instructions. */
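/* For example, when not optimizing for size, a shift left by 2 may be emitted
   as two self-additions, provided twice the add cost does not exceed the
   constant-shift cost.  */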
12423
12424 static void
12425 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
12426 {
12427 if (count == 1)
12428 {
12429 emit_insn ((mode == DImode
12430 ? gen_addsi3
12431 : gen_adddi3) (operand, operand, operand));
12432 }
12433 else if (!optimize_size
12434 && count * ix86_cost->add <= ix86_cost->shift_const)
12435 {
12436 int i;
12437 for (i=0; i<count; i++)
12438 {
12439 emit_insn ((mode == DImode
12440 ? gen_addsi3
12441 : gen_adddi3) (operand, operand, operand));
12442 }
12443 }
12444 else
12445 emit_insn ((mode == DImode
12446 ? gen_ashlsi3
12447 : gen_ashldi3) (operand, operand, GEN_INT (count)));
12448 }
12449
12450 void
12451 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
12452 {
12453 rtx low[2], high[2];
12454 int count;
12455 const int single_width = mode == DImode ? 32 : 64;
12456
12457 if (GET_CODE (operands[2]) == CONST_INT)
12458 {
12459 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12460 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12461
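/* If the count covers at least the whole low word, the low part becomes zero
   and the high part is the old low word shifted by the remainder; e.g. a
   DImode shift by 40 on a 32-bit target gives high = low << 8, low = 0.  */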
12462 if (count >= single_width)
12463 {
12464 emit_move_insn (high[0], low[1]);
12465 emit_move_insn (low[0], const0_rtx);
12466
12467 if (count > single_width)
12468 ix86_expand_ashl_const (high[0], count - single_width, mode);
12469 }
12470 else
12471 {
12472 if (!rtx_equal_p (operands[0], operands[1]))
12473 emit_move_insn (operands[0], operands[1]);
12474 emit_insn ((mode == DImode
12475 ? gen_x86_shld_1
12476 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
12477 ix86_expand_ashl_const (low[0], count, mode);
12478 }
12479 return;
12480 }
12481
12482 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12483
12484 if (operands[1] == const1_rtx)
12485 {
12486 /* Assuming we've chosen QImode-capable registers, 1 << N
12487 can be done with two 32/64-bit shifts, no branches, no cmoves. */
12488 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
12489 {
12490 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
12491
12492 ix86_expand_clear (low[0]);
12493 ix86_expand_clear (high[0]);
12494 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
12495
12496 d = gen_lowpart (QImode, low[0]);
12497 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
12498 s = gen_rtx_EQ (QImode, flags, const0_rtx);
12499 emit_insn (gen_rtx_SET (VOIDmode, d, s));
12500
12501 d = gen_lowpart (QImode, high[0]);
12502 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
12503 s = gen_rtx_NE (QImode, flags, const0_rtx);
12504 emit_insn (gen_rtx_SET (VOIDmode, d, s));
12505 }
12506
12507 /* Otherwise, we can get the same results by manually performing
12508 a bit extract operation on bit 5/6, and then performing the two
12509 shifts. The two methods of getting 0/1 into low/high are exactly
12510 the same size. Avoiding the shift in the bit extract case helps
12511 pentium4 a bit; no one else seems to care much either way. */
12512 else
12513 {
12514 rtx x;
12515
12516 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
12517 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
12518 else
12519 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
12520 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
12521
12522 emit_insn ((mode == DImode
12523 ? gen_lshrsi3
12524 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
12525 emit_insn ((mode == DImode
12526 ? gen_andsi3
12527 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
12528 emit_move_insn (low[0], high[0]);
12529 emit_insn ((mode == DImode
12530 ? gen_xorsi3
12531 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
12532 }
12533
12534 emit_insn ((mode == DImode
12535 ? gen_ashlsi3
12536 : gen_ashldi3) (low[0], low[0], operands[2]));
12537 emit_insn ((mode == DImode
12538 ? gen_ashlsi3
12539 : gen_ashldi3) (high[0], high[0], operands[2]));
12540 return;
12541 }
12542
12543 if (operands[1] == constm1_rtx)
12544 {
12545 /* For -1 << N, we can avoid the shld instruction, because we
12546 know that we're shifting 0...31/63 ones into a -1. */
12547 emit_move_insn (low[0], constm1_rtx);
12548 if (optimize_size)
12549 emit_move_insn (high[0], low[0]);
12550 else
12551 emit_move_insn (high[0], constm1_rtx);
12552 }
12553 else
12554 {
12555 if (!rtx_equal_p (operands[0], operands[1]))
12556 emit_move_insn (operands[0], operands[1]);
12557
12558 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12559 emit_insn ((mode == DImode
12560 ? gen_x86_shld_1
12561 : gen_x86_64_shld) (high[0], low[0], operands[2]));
12562 }
12563
12564 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
12565
12566 if (TARGET_CMOVE && scratch)
12567 {
12568 ix86_expand_clear (scratch);
12569 emit_insn ((mode == DImode
12570 ? gen_x86_shift_adj_1
12571 : gen_x86_64_shift_adj) (high[0], low[0], operands[2], scratch));
12572 }
12573 else
12574 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
12575 }
12576
12577 void
12578 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
12579 {
12580 rtx low[2], high[2];
12581 int count;
12582 const int single_width = mode == DImode ? 32 : 64;
12583
12584 if (GET_CODE (operands[2]) == CONST_INT)
12585 {
12586 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12587 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12588
12589 if (count == single_width * 2 - 1)
12590 {
12591 emit_move_insn (high[0], high[1]);
12592 emit_insn ((mode == DImode
12593 ? gen_ashrsi3
12594 : gen_ashrdi3) (high[0], high[0],
12595 GEN_INT (single_width - 1)));
12596 emit_move_insn (low[0], high[0]);
12597
12598 }
12599 else if (count >= single_width)
12600 {
12601 emit_move_insn (low[0], high[1]);
12602 emit_move_insn (high[0], low[0]);
12603 emit_insn ((mode == DImode
12604 ? gen_ashrsi3
12605 : gen_ashrdi3) (high[0], high[0],
12606 GEN_INT (single_width - 1)));
12607 if (count > single_width)
12608 emit_insn ((mode == DImode
12609 ? gen_ashrsi3
12610 : gen_ashrdi3) (low[0], low[0],
12611 GEN_INT (count - single_width)));
12612 }
12613 else
12614 {
12615 if (!rtx_equal_p (operands[0], operands[1]))
12616 emit_move_insn (operands[0], operands[1]);
12617 emit_insn ((mode == DImode
12618 ? gen_x86_shrd_1
12619 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
12620 emit_insn ((mode == DImode
12621 ? gen_ashrsi3
12622 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
12623 }
12624 }
12625 else
12626 {
12627 if (!rtx_equal_p (operands[0], operands[1]))
12628 emit_move_insn (operands[0], operands[1]);
12629
12630 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12631
12632 emit_insn ((mode == DImode
12633 ? gen_x86_shrd_1
12634 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
12635 emit_insn ((mode == DImode
12636 ? gen_ashrsi3
12637 : gen_ashrdi3) (high[0], high[0], operands[2]));
12638
12639 if (TARGET_CMOVE && scratch)
12640 {
12641 emit_move_insn (scratch, high[0]);
12642 emit_insn ((mode == DImode
12643 ? gen_ashrsi3
12644 : gen_ashrdi3) (scratch, scratch,
12645 GEN_INT (single_width - 1)));
12646 emit_insn ((mode == DImode
12647 ? gen_x86_shift_adj_1
12648 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
12649 scratch));
12650 }
12651 else
12652 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
12653 }
12654 }
12655
12656 void
12657 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
12658 {
12659 rtx low[2], high[2];
12660 int count;
12661 const int single_width = mode == DImode ? 32 : 64;
12662
12663 if (GET_CODE (operands[2]) == CONST_INT)
12664 {
12665 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12666 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12667
12668 if (count >= single_width)
12669 {
12670 emit_move_insn (low[0], high[1]);
12671 ix86_expand_clear (high[0]);
12672
12673 if (count > single_width)
12674 emit_insn ((mode == DImode
12675 ? gen_lshrsi3
12676 : gen_lshrdi3) (low[0], low[0],
12677 GEN_INT (count - single_width)));
12678 }
12679 else
12680 {
12681 if (!rtx_equal_p (operands[0], operands[1]))
12682 emit_move_insn (operands[0], operands[1]);
12683 emit_insn ((mode == DImode
12684 ? gen_x86_shrd_1
12685 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
12686 emit_insn ((mode == DImode
12687 ? gen_lshrsi3
12688 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
12689 }
12690 }
12691 else
12692 {
12693 if (!rtx_equal_p (operands[0], operands[1]))
12694 emit_move_insn (operands[0], operands[1]);
12695
12696 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12697
12698 emit_insn ((mode == DImode
12699 ? gen_x86_shrd_1
12700 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
12701 emit_insn ((mode == DImode
12702 ? gen_lshrsi3
12703 : gen_lshrdi3) (high[0], high[0], operands[2]));
12704
12705 /* Heh. By reversing the arguments, we can reuse this pattern. */
12706 if (TARGET_CMOVE && scratch)
12707 {
12708 ix86_expand_clear (scratch);
12709 emit_insn ((mode == DImode
12710 ? gen_x86_shift_adj_1
12711 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
12712 scratch));
12713 }
12714 else
12715 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
12716 }
12717 }
12718
12719 /* Predict just emitted jump instruction to be taken with probability PROB. */
12720 static void
12721 predict_jump (int prob)
12722 {
12723 rtx insn = get_last_insn ();
12724 gcc_assert (GET_CODE (insn) == JUMP_INSN);
12725 REG_NOTES (insn)
12726 = gen_rtx_EXPR_LIST (REG_BR_PROB,
12727 GEN_INT (prob),
12728 REG_NOTES (insn));
12729 }
12730
12731 /* Helper function for the string operations below. Test VARIABLE whether
12732 it is aligned to VALUE bytes. If true, jump to the label. */
12733 static rtx
12734 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
12735 {
12736 rtx label = gen_label_rtx ();
12737 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
12738 if (GET_MODE (variable) == DImode)
12739 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
12740 else
12741 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
12742 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
12743 1, label);
12744 if (epilogue)
12745 predict_jump (REG_BR_PROB_BASE * 50 / 100);
12746 else
12747 predict_jump (REG_BR_PROB_BASE * 90 / 100);
12748 return label;
12749 }
12750
12751 /* Decrease COUNTREG by VALUE. */
12752 static void
12753 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
12754 {
12755 if (GET_MODE (countreg) == DImode)
12756 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
12757 else
12758 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
12759 }
12760
12761 /* Zero extend possibly SImode EXP to Pmode register. */
12762 rtx
12763 ix86_zero_extend_to_Pmode (rtx exp)
12764 {
12765 rtx r;
12766 if (GET_MODE (exp) == VOIDmode)
12767 return force_reg (Pmode, exp);
12768 if (GET_MODE (exp) == Pmode)
12769 return copy_to_mode_reg (Pmode, exp);
12770 r = gen_reg_rtx (Pmode);
12771 emit_insn (gen_zero_extendsidi2 (r, exp));
12772 return r;
12773 }
12774
12775 /* Divide COUNTREG by SCALE. */
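/* In this file SCALE is always GET_MODE_SIZE of the chunk mode, hence a
   power of two, so the register case below is handled with a logical shift
   right by exact_log2 (SCALE). */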
12776 static rtx
12777 scale_counter (rtx countreg, int scale)
12778 {
12779 rtx sc;
12780 rtx piece_size_mask;
12781
12782 if (scale == 1)
12783 return countreg;
12784 if (GET_CODE (countreg) == CONST_INT)
12785 return GEN_INT (INTVAL (countreg) / scale);
12786 gcc_assert (REG_P (countreg));
12787
12788 piece_size_mask = GEN_INT (scale - 1);
12789 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
12790 GEN_INT (exact_log2 (scale)),
12791 NULL, 1, OPTAB_DIRECT);
12792 return sc;
12793 }
12794
12795 /* When SRCPTR is non-NULL, output a simple loop to move memory
12796 pointed to by SRCPTR to DESTPTR via chunks of MODE unrolled UNROLL times;
12797 the overall size is COUNT, specified in bytes. When SRCPTR is NULL, output the
12798 equivalent loop to set memory by VALUE (supposed to be in MODE).
12799
12800 The size is rounded down to a whole number of chunks moved at once.
12801 SRCMEM and DESTMEM provide MEMrtx to feed proper aliasing info. */
12802
12803
12804 static void
12805 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
12806 rtx destptr, rtx srcptr, rtx value,
12807 rtx count, enum machine_mode mode, int unroll,
12808 int expected_size)
12809 {
12810 rtx out_label, top_label, iter, tmp;
12811 enum machine_mode iter_mode;
12812 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
12813 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
12814 rtx size;
12815 rtx x_addr;
12816 rtx y_addr;
12817 int i;
12818
12819 iter_mode = GET_MODE (count);
12820 if (iter_mode == VOIDmode)
12821 iter_mode = word_mode;
12822
12823 top_label = gen_label_rtx ();
12824 out_label = gen_label_rtx ();
12825 iter = gen_reg_rtx (iter_mode);
12826
12827 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
12828 NULL, 1, OPTAB_DIRECT);
12829 /* Those two should combine. */
12830 if (piece_size == const1_rtx)
12831 {
12832 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
12833 true, out_label);
12834 predict_jump (REG_BR_PROB_BASE * 10 / 100);
12835 }
12836 emit_move_insn (iter, const0_rtx);
12837
12838 emit_label (top_label);
12839
12840 tmp = convert_modes (Pmode, iter_mode, iter, true);
12841 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
12842 destmem = change_address (destmem, mode, x_addr);
12843
12844 if (srcmem)
12845 {
12846 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
12847 srcmem = change_address (srcmem, mode, y_addr);
12848
12849 /* When unrolling for chips that reorder memory reads and writes,
12850 we can save registers by using a single temporary.
12851 Also, using 4 temporaries is overkill in 32-bit mode. */
12852 if (!TARGET_64BIT && 0)
12853 {
12854 for (i = 0; i < unroll; i++)
12855 {
12856 if (i)
12857 {
12858 destmem =
12859 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12860 srcmem =
12861 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
12862 }
12863 emit_move_insn (destmem, srcmem);
12864 }
12865 }
12866 else
12867 {
12868 rtx tmpreg[4];
12869 gcc_assert (unroll <= 4);
12870 for (i = 0; i < unroll; i++)
12871 {
12872 tmpreg[i] = gen_reg_rtx (mode);
12873 if (i)
12874 {
12875 srcmem =
12876 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
12877 }
12878 emit_move_insn (tmpreg[i], srcmem);
12879 }
12880 for (i = 0; i < unroll; i++)
12881 {
12882 if (i)
12883 {
12884 destmem =
12885 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12886 }
12887 emit_move_insn (destmem, tmpreg[i]);
12888 }
12889 }
12890 }
12891 else
12892 for (i = 0; i < unroll; i++)
12893 {
12894 if (i)
12895 destmem =
12896 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12897 emit_move_insn (destmem, value);
12898 }
12899
12900 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
12901 true, OPTAB_LIB_WIDEN);
12902 if (tmp != iter)
12903 emit_move_insn (iter, tmp);
12904
12905 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
12906 true, top_label);
12907 if (expected_size != -1)
12908 {
12909 expected_size /= GET_MODE_SIZE (mode) * unroll;
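/* EXPECTED_SIZE now approximates the number of loop iterations; predict
   the backward branch to TOP_LABEL taken with probability roughly
   1 - 1/EXPECTED_SIZE (scaled by REG_BR_PROB_BASE and rounded). */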
12910 if (expected_size == 0)
12911 predict_jump (0);
12912 else if (expected_size > REG_BR_PROB_BASE)
12913 predict_jump (REG_BR_PROB_BASE - 1);
12914 else
12915 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
12916 }
12917 else
12918 predict_jump (REG_BR_PROB_BASE * 80 / 100);
12919 iter = ix86_zero_extend_to_Pmode (iter);
12920 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
12921 true, OPTAB_LIB_WIDEN);
12922 if (tmp != destptr)
12923 emit_move_insn (destptr, tmp);
12924 if (srcptr)
12925 {
12926 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
12927 true, OPTAB_LIB_WIDEN);
12928 if (tmp != srcptr)
12929 emit_move_insn (srcptr, tmp);
12930 }
12931 emit_label (out_label);
12932 }
12933
12934 /* Output "rep; mov" instruction.
12935 Arguments have the same meaning as for the previous function. */
12936 static void
12937 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
12938 rtx destptr, rtx srcptr,
12939 rtx count,
12940 enum machine_mode mode)
12941 {
12942 rtx destexp;
12943 rtx srcexp;
12944 rtx countreg;
12945
12946 /* If the size is known, it is shorter to use rep movs. */
12947 if (mode == QImode && GET_CODE (count) == CONST_INT
12948 && !(INTVAL (count) & 3))
12949 mode = SImode;
12950
12951 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
12952 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
12953 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
12954 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
12955 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
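/* DESTEXP and SRCEXP give the values the destination and source pointers
   will have after the copy, i.e. pointer plus number of bytes moved, as
   expected by the rep_mov pattern. */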
12956 if (mode != QImode)
12957 {
12958 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12959 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
12960 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
12961 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
12962 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
12963 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
12964 }
12965 else
12966 {
12967 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
12968 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
12969 }
12970 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
12971 destexp, srcexp));
12972 }
12973
12974 /* Output "rep; stos" instruction.
12975 Arguments have the same meaning as for the previous function. */
12976 static void
12977 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
12978 rtx count,
12979 enum machine_mode mode)
12980 {
12981 rtx destexp;
12982 rtx countreg;
12983
12984 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
12985 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
12986 value = force_reg (mode, gen_lowpart (mode, value));
12987 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
12988 if (mode != QImode)
12989 {
12990 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12991 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
12992 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
12993 }
12994 else
12995 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
12996 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
12997 }
12998
12999 static void
13000 emit_strmov (rtx destmem, rtx srcmem,
13001 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
13002 {
13003 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
13004 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
13005 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13006 }
13007
13008 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
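/* When COUNT is a compile-time constant this emits a straight-line sequence
   of moves covering the 16-, 8-, 4-, 2- and 1-byte chunks present in the
   count; otherwise the relevant bits of COUNT are tested at run time. */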
13009 static void
13010 expand_movmem_epilogue (rtx destmem, rtx srcmem,
13011 rtx destptr, rtx srcptr, rtx count, int max_size)
13012 {
13013 rtx src, dest;
13014 if (GET_CODE (count) == CONST_INT)
13015 {
13016 HOST_WIDE_INT countval = INTVAL (count);
13017 int offset = 0;
13018
13019 if ((countval & 0x10) && max_size > 16)
13020 {
13021 if (TARGET_64BIT)
13022 {
13023 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13024 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
13025 }
13026 else
13027 gcc_unreachable ();
13028 offset += 16;
13029 }
13030 if ((countval & 0x08) && max_size > 8)
13031 {
13032 if (TARGET_64BIT)
13033 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13034 else
13035 {
13036 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13037 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 4);
13038 }
13039 offset += 8;
13040 }
13041 if ((countval & 0x04) && max_size > 4)
13042 {
13043 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
13044 offset += 4;
13045 }
13046 if ((countval & 0x02) && max_size > 2)
13047 {
13048 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
13049 offset += 2;
13050 }
13051 if ((countval & 0x01) && max_size > 1)
13052 {
13053 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
13054 offset += 1;
13055 }
13056 return;
13057 }
13058 if (max_size > 8)
13059 {
13060 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
13061 count, 1, OPTAB_DIRECT);
13062 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
13063 count, QImode, 1, 4);
13064 return;
13065 }
13066
13067 /* When stringops are available, we can cheaply advance the dest and src
13068 pointers. Otherwise we save code size by maintaining an offset (zero is
13069 readily available from the preceding rep operation) and using x86 addressing modes.
13070 */
13071 if (TARGET_SINGLE_STRINGOP)
13072 {
13073 if (max_size > 4)
13074 {
13075 rtx label = ix86_expand_aligntest (count, 4, true);
13076 src = change_address (srcmem, SImode, srcptr);
13077 dest = change_address (destmem, SImode, destptr);
13078 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13079 emit_label (label);
13080 LABEL_NUSES (label) = 1;
13081 }
13082 if (max_size > 2)
13083 {
13084 rtx label = ix86_expand_aligntest (count, 2, true);
13085 src = change_address (srcmem, HImode, srcptr);
13086 dest = change_address (destmem, HImode, destptr);
13087 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13088 emit_label (label);
13089 LABEL_NUSES (label) = 1;
13090 }
13091 if (max_size > 1)
13092 {
13093 rtx label = ix86_expand_aligntest (count, 1, true);
13094 src = change_address (srcmem, QImode, srcptr);
13095 dest = change_address (destmem, QImode, destptr);
13096 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13097 emit_label (label);
13098 LABEL_NUSES (label) = 1;
13099 }
13100 }
13101 else
13102 {
13103 rtx offset = force_reg (Pmode, const0_rtx);
13104 rtx tmp;
13105
13106 if (max_size > 4)
13107 {
13108 rtx label = ix86_expand_aligntest (count, 4, true);
13109 src = change_address (srcmem, SImode, srcptr);
13110 dest = change_address (destmem, SImode, destptr);
13111 emit_move_insn (dest, src);
13112 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
13113 true, OPTAB_LIB_WIDEN);
13114 if (tmp != offset)
13115 emit_move_insn (offset, tmp);
13116 emit_label (label);
13117 LABEL_NUSES (label) = 1;
13118 }
13119 if (max_size > 2)
13120 {
13121 rtx label = ix86_expand_aligntest (count, 2, true);
13122 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
13123 src = change_address (srcmem, HImode, tmp);
13124 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
13125 dest = change_address (destmem, HImode, tmp);
13126 emit_move_insn (dest, src);
13127 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
13128 true, OPTAB_LIB_WIDEN);
13129 if (tmp != offset)
13130 emit_move_insn (offset, tmp);
13131 emit_label (label);
13132 LABEL_NUSES (label) = 1;
13133 }
13134 if (max_size > 1)
13135 {
13136 rtx label = ix86_expand_aligntest (count, 1, true);
13137 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
13138 src = change_address (srcmem, QImode, tmp);
13139 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
13140 dest = change_address (destmem, QImode, tmp);
13141 emit_move_insn (dest, src);
13142 emit_label (label);
13143 LABEL_NUSES (label) = 1;
13144 }
13145 }
13146 }
13147
13148 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
13149 static void
13150 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
13151 rtx count, int max_size)
13152 {
13153 count =
13154 expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
13155 count, 1, OPTAB_DIRECT);
13156 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
13157 gen_lowpart (QImode, value), count, QImode,
13158 1, max_size / 2);
13159 }
13160
13161 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
13162 static void
13163 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
13164 {
13165 rtx dest;
13166
13167 if (GET_CODE (count) == CONST_INT)
13168 {
13169 HOST_WIDE_INT countval = INTVAL (count);
13170 int offset = 0;
13171
13172 if ((countval & 0x10) && max_size > 16)
13173 {
13174 if (TARGET_64BIT)
13175 {
13176 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
13177 emit_insn (gen_strset (destptr, dest, value));
13178 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
13179 emit_insn (gen_strset (destptr, dest, value));
13180 }
13181 else
13182 gcc_unreachable ();
13183 offset += 16;
13184 }
13185 if ((countval & 0x08) && max_size > 8)
13186 {
13187 if (TARGET_64BIT)
13188 {
13189 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
13190 emit_insn (gen_strset (destptr, dest, value));
13191 }
13192 else
13193 {
13194 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
13195 emit_insn (gen_strset (destptr, dest, value));
13196 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
13197 emit_insn (gen_strset (destptr, dest, value));
13198 }
13199 offset += 8;
13200 }
13201 if ((countval & 0x04) && max_size > 4)
13202 {
13203 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
13204 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
13205 offset += 4;
13206 }
13207 if ((countval & 0x02) && max_size > 2)
13208 {
13209 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
13210 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
13211 offset += 2;
13212 }
13213 if ((countval & 0x01) && max_size > 1)
13214 {
13215 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
13216 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
13217 offset += 1;
13218 }
13219 return;
13220 }
13221 if (max_size > 32)
13222 {
13223 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
13224 return;
13225 }
13226 if (max_size > 16)
13227 {
13228 rtx label = ix86_expand_aligntest (count, 16, true);
13229 if (TARGET_64BIT)
13230 {
13231 dest = change_address (destmem, DImode, destptr);
13232 emit_insn (gen_strset (destptr, dest, value));
13233 emit_insn (gen_strset (destptr, dest, value));
13234 }
13235 else
13236 {
13237 dest = change_address (destmem, SImode, destptr);
13238 emit_insn (gen_strset (destptr, dest, value));
13239 emit_insn (gen_strset (destptr, dest, value));
13240 emit_insn (gen_strset (destptr, dest, value));
13241 emit_insn (gen_strset (destptr, dest, value));
13242 }
13243 emit_label (label);
13244 LABEL_NUSES (label) = 1;
13245 }
13246 if (max_size > 8)
13247 {
13248 rtx label = ix86_expand_aligntest (count, 8, true);
13249 if (TARGET_64BIT)
13250 {
13251 dest = change_address (destmem, DImode, destptr);
13252 emit_insn (gen_strset (destptr, dest, value));
13253 }
13254 else
13255 {
13256 dest = change_address (destmem, SImode, destptr);
13257 emit_insn (gen_strset (destptr, dest, value));
13258 emit_insn (gen_strset (destptr, dest, value));
13259 }
13260 emit_label (label);
13261 LABEL_NUSES (label) = 1;
13262 }
13263 if (max_size > 4)
13264 {
13265 rtx label = ix86_expand_aligntest (count, 4, true);
13266 dest = change_address (destmem, SImode, destptr);
13267 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
13268 emit_label (label);
13269 LABEL_NUSES (label) = 1;
13270 }
13271 if (max_size > 2)
13272 {
13273 rtx label = ix86_expand_aligntest (count, 2, true);
13274 dest = change_address (destmem, HImode, destptr);
13275 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
13276 emit_label (label);
13277 LABEL_NUSES (label) = 1;
13278 }
13279 if (max_size > 1)
13280 {
13281 rtx label = ix86_expand_aligntest (count, 1, true);
13282 dest = change_address (destmem, QImode, destptr);
13283 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
13284 emit_label (label);
13285 LABEL_NUSES (label) = 1;
13286 }
13287 }
13288
13289 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN, to
13290 DESIRED_ALIGNMENT. */
13291 static void
13292 expand_movmem_prologue (rtx destmem, rtx srcmem,
13293 rtx destptr, rtx srcptr, rtx count,
13294 int align, int desired_alignment)
13295 {
13296 if (align <= 1 && desired_alignment > 1)
13297 {
13298 rtx label = ix86_expand_aligntest (destptr, 1, false);
13299 srcmem = change_address (srcmem, QImode, srcptr);
13300 destmem = change_address (destmem, QImode, destptr);
13301 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13302 ix86_adjust_counter (count, 1);
13303 emit_label (label);
13304 LABEL_NUSES (label) = 1;
13305 }
13306 if (align <= 2 && desired_alignment > 2)
13307 {
13308 rtx label = ix86_expand_aligntest (destptr, 2, false);
13309 srcmem = change_address (srcmem, HImode, srcptr);
13310 destmem = change_address (destmem, HImode, destptr);
13311 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13312 ix86_adjust_counter (count, 2);
13313 emit_label (label);
13314 LABEL_NUSES (label) = 1;
13315 }
13316 if (align <= 4 && desired_alignment > 4)
13317 {
13318 rtx label = ix86_expand_aligntest (destptr, 4, false);
13319 srcmem = change_address (srcmem, SImode, srcptr);
13320 destmem = change_address (destmem, SImode, destptr);
13321 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13322 ix86_adjust_counter (count, 4);
13323 emit_label (label);
13324 LABEL_NUSES (label) = 1;
13325 }
13326 gcc_assert (desired_alignment <= 8);
13327 }
13328
13329 /* Set enough of DEST to align DEST, known to be aligned by ALIGN, to
13330 DESIRED_ALIGNMENT. */
13331 static void
13332 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
13333 int align, int desired_alignment)
13334 {
13335 if (align <= 1 && desired_alignment > 1)
13336 {
13337 rtx label = ix86_expand_aligntest (destptr, 1, false);
13338 destmem = change_address (destmem, QImode, destptr);
13339 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
13340 ix86_adjust_counter (count, 1);
13341 emit_label (label);
13342 LABEL_NUSES (label) = 1;
13343 }
13344 if (align <= 2 && desired_alignment > 2)
13345 {
13346 rtx label = ix86_expand_aligntest (destptr, 2, false);
13347 destmem = change_address (destmem, HImode, destptr);
13348 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
13349 ix86_adjust_counter (count, 2);
13350 emit_label (label);
13351 LABEL_NUSES (label) = 1;
13352 }
13353 if (align <= 4 && desired_alignment > 4)
13354 {
13355 rtx label = ix86_expand_aligntest (destptr, 4, false);
13356 destmem = change_address (destmem, SImode, destptr);
13357 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
13358 ix86_adjust_counter (count, 4);
13359 emit_label (label);
13360 LABEL_NUSES (label) = 1;
13361 }
13362 gcc_assert (desired_alignment <= 8);
13363 }
13364
13365 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
13366 static enum stringop_alg
13367 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
13368 int *dynamic_check)
13369 {
13370 const struct stringop_algs * algs;
13371
13372 *dynamic_check = -1;
13373 if (memset)
13374 algs = &ix86_cost->memset[TARGET_64BIT != 0];
13375 else
13376 algs = &ix86_cost->memcpy[TARGET_64BIT != 0];
13377 if (stringop_alg != no_stringop)
13378 return stringop_alg;
13379 /* rep; movq or rep; movl is the smallest variant. */
13380 else if (optimize_size)
13381 {
13382 if (!count || (count & 3))
13383 return rep_prefix_1_byte;
13384 else
13385 return rep_prefix_4_byte;
13386 }
13387 /* Very tiny blocks are best handled via the loop; REP is expensive to set up.
13388 */
13389 else if (expected_size != -1 && expected_size < 4)
13390 return loop_1_byte;
13391 else if (expected_size != -1)
13392 {
13393 unsigned int i;
13394 enum stringop_alg alg = libcall;
13395 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
13396 {
13397 gcc_assert (algs->size[i].max);
13398 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
13399 {
13400 if (algs->size[i].alg != libcall)
13401 alg = algs->size[i].alg;
13402 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
13403 last non-libcall inline algorithm. */
13404 if (TARGET_INLINE_ALL_STRINGOPS)
13405 {
13406 /* When the current size is best copied by a libcall, but we are
13407 still forced to inline, run the heuristic below that will pick
13408 code for medium-sized blocks. */
13409 if (alg != libcall)
13410 return alg;
13411 break;
13412 }
13413 else
13414 return algs->size[i].alg;
13415 }
13416 }
13417 gcc_assert (TARGET_INLINE_ALL_STRINGOPS);
13418 }
13419 /* When asked to inline the call anyway, try to pick a meaningful choice.
13420 We look for the maximal size of block that is faster to copy by hand and
13421 take blocks of at most that size, guessing that the average size will
13422 be roughly half of the block.
13423
13424 If this turns out to be bad, we might simply specify the preferred
13425 choice in ix86_costs. */
13426 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
13427 && algs->unknown_size == libcall)
13428 {
13429 int max = -1;
13430 enum stringop_alg alg;
13431 int i;
13432
13433 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
13434 if (algs->size[i].alg != libcall && algs->size[i].alg)
13435 max = algs->size[i].max;
13436 if (max == -1)
13437 max = 4096;
13438 alg = decide_alg (count, max / 2, memset, dynamic_check);
13439 gcc_assert (*dynamic_check == -1);
13440 gcc_assert (alg != libcall);
13441 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
13442 *dynamic_check = max;
13443 return alg;
13444 }
13445 return algs->unknown_size;
13446 }
13447
13448 /* Decide on alignment. We know that the operand is already aligned to ALIGN
13449 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
13450 static int
13451 decide_alignment (int align,
13452 enum stringop_alg alg,
13453 int expected_size)
13454 {
13455 int desired_align = 0;
13456 switch (alg)
13457 {
13458 case no_stringop:
13459 gcc_unreachable ();
13460 case loop:
13461 case unrolled_loop:
13462 desired_align = GET_MODE_SIZE (Pmode);
13463 break;
13464 case rep_prefix_8_byte:
13465 desired_align = 8;
13466 break;
13467 case rep_prefix_4_byte:
13468 /* PentiumPro has special logic triggering for 8-byte aligned blocks,
13469 copying a whole cacheline at once. */
13470 if (TARGET_PENTIUMPRO)
13471 desired_align = 8;
13472 else
13473 desired_align = 4;
13474 break;
13475 case rep_prefix_1_byte:
13476 /* PentiumPro has special logic triggering for 8-byte aligned blocks,
13477 copying a whole cacheline at once. */
13478 if (TARGET_PENTIUMPRO)
13479 desired_align = 8;
13480 else
13481 desired_align = 1;
13482 break;
13483 case loop_1_byte:
13484 desired_align = 1;
13485 break;
13486 case libcall:
13487 return 0;
13488 }
13489
13490 if (optimize_size)
13491 desired_align = 1;
13492 if (desired_align < align)
13493 desired_align = align;
13494 if (expected_size != -1 && expected_size < 4)
13495 desired_align = align;
13496 return desired_align;
13497 }
13498
13499 /* Return the smallest power of 2 greater than VAL. */
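/* For example, smallest_pow2_greater_than (3) returns 4 and
   smallest_pow2_greater_than (16) returns 32. */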
13500 static int
13501 smallest_pow2_greater_than (int val)
13502 {
13503 int ret = 1;
13504 while (ret <= val)
13505 ret <<= 1;
13506 return ret;
13507 }
13508
13509 /* Expand string move (memcpy) operation. Use i386 string operations when
13510 profitable. ix86_expand_setmem contains similar code. The code depends upon
13511 architecture, block size and alignment, but always has the same
13512 overall structure:
13513
13514 1) Prologue guard: Conditional that jumps up to epilogues for small
13515 blocks that can be handled by epilogue alone. This is faster but
13516 also needed for correctness, since the prologue assumes the block is larger
13517 than the desired alignment.
13518
13519 Optional dynamic check for size and libcall for large
13520 blocks is emitted here too, with -minline-stringops-dynamically.
13521
13522 2) Prologue: copy first few bytes in order to get destination aligned
13523 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
13524 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
13525 We emit either a jump tree on power of two sized blocks, or a byte loop.
13526
13527 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
13528 with specified algorithm.
13529
13530 4) Epilogue: code copying tail of the block that is too small to be
13531 handled by main body (or up to size guarded by prologue guard). */
13532
13533 int
13534 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
13535 rtx expected_align_exp, rtx expected_size_exp)
13536 {
13537 rtx destreg;
13538 rtx srcreg;
13539 rtx label = NULL;
13540 rtx tmp;
13541 rtx jump_around_label = NULL;
13542 HOST_WIDE_INT align = 1;
13543 unsigned HOST_WIDE_INT count = 0;
13544 HOST_WIDE_INT expected_size = -1;
13545 int size_needed = 0, epilogue_size_needed;
13546 int desired_align = 0;
13547 enum stringop_alg alg;
13548 int dynamic_check;
13549
13550 if (GET_CODE (align_exp) == CONST_INT)
13551 align = INTVAL (align_exp);
13552 /* i386 can do misaligned access at a reasonably increased cost. */
13553 if (GET_CODE (expected_align_exp) == CONST_INT
13554 && INTVAL (expected_align_exp) > align)
13555 align = INTVAL (expected_align_exp);
13556 if (GET_CODE (count_exp) == CONST_INT)
13557 count = expected_size = INTVAL (count_exp);
13558 if (GET_CODE (expected_size_exp) == CONST_INT && count == 0)
13559 expected_size = INTVAL (expected_size_exp);
13560
13561 /* Step 0: Decide on preferred algorithm, desired alignment and
13562 size of chunks to be copied by main loop. */
13563
13564 alg = decide_alg (count, expected_size, false, &dynamic_check);
13565 desired_align = decide_alignment (align, alg, expected_size);
13566
13567 if (!TARGET_ALIGN_STRINGOPS)
13568 align = desired_align;
13569
13570 if (alg == libcall)
13571 return 0;
13572 gcc_assert (alg != no_stringop);
13573 if (!count)
13574 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
13575 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
13576 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
13577 switch (alg)
13578 {
13579 case libcall:
13580 case no_stringop:
13581 gcc_unreachable ();
13582 case loop:
13583 size_needed = GET_MODE_SIZE (Pmode);
13584 break;
13585 case unrolled_loop:
13586 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
13587 break;
13588 case rep_prefix_8_byte:
13589 size_needed = 8;
13590 break;
13591 case rep_prefix_4_byte:
13592 size_needed = 4;
13593 break;
13594 case rep_prefix_1_byte:
13595 case loop_1_byte:
13596 size_needed = 1;
13597 break;
13598 }
13599
13600 epilogue_size_needed = size_needed;
13601
13602 /* Step 1: Prologue guard. */
13603
13604 /* Alignment code needs count to be in register. */
13605 if (GET_CODE (count_exp) == CONST_INT && desired_align > align)
13606 {
13607 enum machine_mode mode = SImode;
13608 if (TARGET_64BIT && (count & ~0xffffffff))
13609 mode = DImode;
13610 count_exp = force_reg (mode, count_exp);
13611 }
13612 gcc_assert (desired_align >= 1 && align >= 1);
13613
13614 /* Ensure that alignment prologue won't copy past end of block. */
13615 if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
13616 && !count)
13617 {
13618 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
13619
13620 /* Epilogue always copies COUNT_EXP & EPILOGUE_SIZE_NEEDED bytes.
13621 Make sure it is power of 2. */
13622 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
13623
13624 label = gen_label_rtx ();
13625 emit_cmp_and_jump_insns (count_exp,
13626 GEN_INT (epilogue_size_needed),
13627 LTU, 0, GET_MODE (count_exp), 1, label);
13628 if (expected_size == -1 || expected_size < epilogue_size_needed)
13629 predict_jump (REG_BR_PROB_BASE * 60 / 100);
13630 else
13631 predict_jump (REG_BR_PROB_BASE * 20 / 100);
13632 }
13633 /* Emit code to decide on runtime whether library call or inline should be
13634 used. */
13635 if (dynamic_check != -1)
13636 {
13637 rtx hot_label = gen_label_rtx ();
13638 jump_around_label = gen_label_rtx ();
13639 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
13640 LEU, 0, GET_MODE (count_exp), 1, hot_label);
13641 predict_jump (REG_BR_PROB_BASE * 90 / 100);
13642 emit_block_move_via_libcall (dst, src, count_exp, false);
13643 emit_jump (jump_around_label);
13644 emit_label (hot_label);
13645 }
13646
13647 /* Step 2: Alignment prologue. */
13648
13649 if (desired_align > align)
13650 {
13651 /* Except for the first move in epilogue, we no longer know
13652 constant offset in aliasing info. It doesn't seem worth
13653 the pain to maintain it for the first move, so throw away
13654 the info early. */
13655 src = change_address (src, BLKmode, srcreg);
13656 dst = change_address (dst, BLKmode, destreg);
13657 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
13658 desired_align);
13659 }
13660 if (label && size_needed == 1)
13661 {
13662 emit_label (label);
13663 LABEL_NUSES (label) = 1;
13664 label = NULL;
13665 }
13666
13667 /* Step 3: Main loop. */
13668
13669 switch (alg)
13670 {
13671 case libcall:
13672 case no_stringop:
13673 gcc_unreachable ();
13674 case loop_1_byte:
13675 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13676 count_exp, QImode, 1, expected_size);
13677 break;
13678 case loop:
13679 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13680 count_exp, Pmode, 1, expected_size);
13681 break;
13682 case unrolled_loop:
13683 /* Unroll only by a factor of 2 in 32-bit mode, since we don't have enough
13684 registers for 4 temporaries anyway. */
13685 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13686 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
13687 expected_size);
13688 break;
13689 case rep_prefix_8_byte:
13690 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13691 DImode);
13692 break;
13693 case rep_prefix_4_byte:
13694 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13695 SImode);
13696 break;
13697 case rep_prefix_1_byte:
13698 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13699 QImode);
13700 break;
13701 }
13702 /* Properly adjust the offsets of the src and dest memory for aliasing. */
13703 if (GET_CODE (count_exp) == CONST_INT)
13704 {
13705 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
13706 (count / size_needed) * size_needed);
13707 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
13708 (count / size_needed) * size_needed);
13709 }
13710 else
13711 {
13712 src = change_address (src, BLKmode, srcreg);
13713 dst = change_address (dst, BLKmode, destreg);
13714 }
13715
13716 /* Step 4: Epilogue to copy the remaining bytes. */
13717
13718 if (label)
13719 {
13720 /* When the main loop is done, COUNT_EXP might hold original count,
13721 while we want to copy only COUNT_EXP & SIZE_NEEDED bytes.
13722 Epilogue code will actually copy COUNT_EXP & EPILOGUE_SIZE_NEEDED
13723 bytes. Compensate if needed. */
13724
13725 if (size_needed < epilogue_size_needed)
13726 {
13727 tmp =
13728 expand_simple_binop (GET_MODE (count_exp), AND, count_exp,
13729 GEN_INT (size_needed - 1), count_exp, 1,
13730 OPTAB_DIRECT);
13731 if (tmp != count_exp)
13732 emit_move_insn (count_exp, tmp);
13733 }
13734 emit_label (label);
13735 LABEL_NUSES (label) = 1;
13736 }
13737
13738 if (count_exp != const0_rtx && epilogue_size_needed > 1)
13739 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
13740 epilogue_size_needed);
13741 if (jump_around_label)
13742 emit_label (jump_around_label);
13743 return 1;
13744 }
13745
13746 /* Helper function for memset. For QImode value 0xXY produce
13747 0xXYXYXYXY of the width specified by MODE. This is essentially
13748 a * 0x10101010, but we can do slightly better than
13749 synth_mult by unwinding the sequence by hand on CPUs with
13750 slow multiply. */
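/* For example, promoting the QImode value 0x12 yields 0x12121212 in SImode
   and 0x1212121212121212 in DImode. */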
13751 static rtx
13752 promote_duplicated_reg (enum machine_mode mode, rtx val)
13753 {
13754 enum machine_mode valmode = GET_MODE (val);
13755 rtx tmp;
13756 int nops = mode == DImode ? 3 : 2;
13757
13758 gcc_assert (mode == SImode || mode == DImode);
13759 if (val == const0_rtx)
13760 return copy_to_mode_reg (mode, const0_rtx);
13761 if (GET_CODE (val) == CONST_INT)
13762 {
13763 HOST_WIDE_INT v = INTVAL (val) & 255;
13764
13765 v |= v << 8;
13766 v |= v << 16;
13767 if (mode == DImode)
13768 v |= (v << 16) << 16;
13769 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
13770 }
13771
13772 if (valmode == VOIDmode)
13773 valmode = QImode;
13774 if (valmode != QImode)
13775 val = gen_lowpart (QImode, val);
13776 if (mode == QImode)
13777 return val;
13778 if (!TARGET_PARTIAL_REG_STALL)
13779 nops--;
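/* Compare the cost of synthesizing the value with a multiply by
   0x01010101 (0x0101010101010101 for DImode) against the cost of the
   shift-and-or sequence emitted in the else branch below. */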
13780 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
13781 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
13782 <= (ix86_cost->shift_const + ix86_cost->add) * nops
13783 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
13784 {
13785 rtx reg = convert_modes (mode, QImode, val, true);
13786 tmp = promote_duplicated_reg (mode, const1_rtx);
13787 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
13788 OPTAB_DIRECT);
13789 }
13790 else
13791 {
13792 rtx reg = convert_modes (mode, QImode, val, true);
13793
13794 if (!TARGET_PARTIAL_REG_STALL)
13795 if (mode == SImode)
13796 emit_insn (gen_movsi_insv_1 (reg, reg));
13797 else
13798 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
13799 else
13800 {
13801 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
13802 NULL, 1, OPTAB_DIRECT);
13803 reg =
13804 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13805 }
13806 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
13807 NULL, 1, OPTAB_DIRECT);
13808 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13809 if (mode == SImode)
13810 return reg;
13811 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
13812 NULL, 1, OPTAB_DIRECT);
13813 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13814 return reg;
13815 }
13816 }
13817
13818 /* Duplicate value VAL using promote_duplicated_reg into maximal size that will
13819 be needed by main loop copying SIZE_NEEDED chunks and prologue getting
13820 alignment from ALIGN to DESIRED_ALIGN. */
13821 static rtx
13822 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
13823 {
13824 rtx promoted_val;
13825
13826 if (TARGET_64BIT
13827 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
13828 promoted_val = promote_duplicated_reg (DImode, val);
13829 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
13830 promoted_val = promote_duplicated_reg (SImode, val);
13831 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
13832 promoted_val = promote_duplicated_reg (HImode, val);
13833 else
13834 promoted_val = val;
13835
13836 return promoted_val;
13837 }
13838
13839 /* Expand string set operation (memset). Use i386 string operations when
13840 profitable. See the ix86_expand_movmem comment for an explanation of the individual
13841 steps performed. */
13842 int
13843 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
13844 rtx expected_align_exp, rtx expected_size_exp)
13845 {
13846 rtx destreg;
13847 rtx label = NULL;
13848 rtx tmp;
13849 rtx jump_around_label = NULL;
13850 HOST_WIDE_INT align = 1;
13851 unsigned HOST_WIDE_INT count = 0;
13852 HOST_WIDE_INT expected_size = -1;
13853 int size_needed = 0, epilogue_size_needed;
13854 int desired_align = 0;
13855 enum stringop_alg alg;
13856 rtx promoted_val = NULL;
13857 bool force_loopy_epilogue = false;
13858 int dynamic_check;
13859
13860 if (GET_CODE (align_exp) == CONST_INT)
13861 align = INTVAL (align_exp);
13862 /* i386 can do misaligned access at a reasonably increased cost. */
13863 if (GET_CODE (expected_align_exp) == CONST_INT
13864 && INTVAL (expected_align_exp) > align)
13865 align = INTVAL (expected_align_exp);
13866 if (GET_CODE (count_exp) == CONST_INT)
13867 count = expected_size = INTVAL (count_exp);
13868 if (GET_CODE (expected_size_exp) == CONST_INT && count == 0)
13869 expected_size = INTVAL (expected_size_exp);
13870
13871 /* Step 0: Decide on preferred algorithm, desired alignment and
13872 size of chunks to be copied by main loop. */
13873
13874 alg = decide_alg (count, expected_size, true, &dynamic_check);
13875 desired_align = decide_alignment (align, alg, expected_size);
13876
13877 if (!TARGET_ALIGN_STRINGOPS)
13878 align = desired_align;
13879
13880 if (alg == libcall)
13881 return 0;
13882 gcc_assert (alg != no_stringop);
13883 if (!count)
13884 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
13885 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
13886 switch (alg)
13887 {
13888 case libcall:
13889 case no_stringop:
13890 gcc_unreachable ();
13891 case loop:
13892 size_needed = GET_MODE_SIZE (Pmode);
13893 break;
13894 case unrolled_loop:
13895 size_needed = GET_MODE_SIZE (Pmode) * 4;
13896 break;
13897 case rep_prefix_8_byte:
13898 size_needed = 8;
13899 break;
13900 case rep_prefix_4_byte:
13901 size_needed = 4;
13902 break;
13903 case rep_prefix_1_byte:
13904 case loop_1_byte:
13905 size_needed = 1;
13906 break;
13907 }
13908 epilogue_size_needed = size_needed;
13909
13910 /* Step 1: Prologue guard. */
13911
13912 /* Alignment code needs count to be in register. */
13913 if (GET_CODE (count_exp) == CONST_INT && desired_align > align)
13914 {
13915 enum machine_mode mode = SImode;
13916 if (TARGET_64BIT && (count & ~0xffffffff))
13917 mode = DImode;
13918 count_exp = force_reg (mode, count_exp);
13919 }
13920 /* Do the cheap promotion to allow better CSE across the
13921 main loop and epilogue (i.e. one load of the big constant in
13922 front of all the code). */
13923 if (GET_CODE (val_exp) == CONST_INT)
13924 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
13925 desired_align, align);
13926 /* Ensure that alignment prologue won't copy past end of block. */
13927 if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
13928 && !count)
13929 {
13930 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
13931
13932 /* Epilogue always copies COUNT_EXP & EPILOGUE_SIZE_NEEDED bytes.
13933 Make sure it is power of 2. */
13934 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
13935
13936 /* To improve performance of small blocks, we jump around the code
13937 promoting VAL. This means that if the promoted VAL is not constant,
13938 we might not use it in the epilogue and have to use the byte
13939 loop variant. */
13940 if (epilogue_size_needed > 2 && !promoted_val)
13941 force_loopy_epilogue = true;
13942 label = gen_label_rtx ();
13943 emit_cmp_and_jump_insns (count_exp,
13944 GEN_INT (epilogue_size_needed),
13945 LTU, 0, GET_MODE (count_exp), 1, label);
13946 if (expected_size == -1 || expected_size <= epilogue_size_needed)
13947 predict_jump (REG_BR_PROB_BASE * 60 / 100);
13948 else
13949 predict_jump (REG_BR_PROB_BASE * 20 / 100);
13950 }
13951 if (dynamic_check != -1)
13952 {
13953 rtx hot_label = gen_label_rtx ();
13954 jump_around_label = gen_label_rtx ();
13955 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
13956 LEU, 0, GET_MODE (count_exp), 1, hot_label);
13957 predict_jump (REG_BR_PROB_BASE * 90 / 100);
13958 set_storage_via_libcall (dst, count_exp, val_exp, false);
13959 emit_jump (jump_around_label);
13960 emit_label (hot_label);
13961 }
13962
13963 /* Step 2: Alignment prologue. */
13964
13965 /* Do the expensive promotion once we branched off the small blocks. */
13966 if (!promoted_val)
13967 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
13968 desired_align, align);
13969 gcc_assert (desired_align >= 1 && align >= 1);
13970
13971 if (desired_align > align)
13972 {
13973 /* Except for the first move in epilogue, we no longer know
13974 constant offset in aliasing info. It doesn't seem worth
13975 the pain to maintain it for the first move, so throw away
13976 the info early. */
13977 dst = change_address (dst, BLKmode, destreg);
13978 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
13979 desired_align);
13980 }
13981 if (label && size_needed == 1)
13982 {
13983 emit_label (label);
13984 LABEL_NUSES (label) = 1;
13985 label = NULL;
13986 }
13987
13988 /* Step 3: Main loop. */
13989
13990 switch (alg)
13991 {
13992 case libcall:
13993 case no_stringop:
13994 gcc_unreachable ();
13995 case loop_1_byte:
13996 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
13997 count_exp, QImode, 1, expected_size);
13998 break;
13999 case loop:
14000 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
14001 count_exp, Pmode, 1, expected_size);
14002 break;
14003 case unrolled_loop:
14004 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
14005 count_exp, Pmode, 4, expected_size);
14006 break;
14007 case rep_prefix_8_byte:
14008 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
14009 DImode);
14010 break;
14011 case rep_prefix_4_byte:
14012 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
14013 SImode);
14014 break;
14015 case rep_prefix_1_byte:
14016 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
14017 QImode);
14018 break;
14019 }
14020 /* Properly adjust the offset of the dest memory for aliasing. */
14021 if (GET_CODE (count_exp) == CONST_INT)
14022 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
14023 (count / size_needed) * size_needed);
14024 else
14025 dst = change_address (dst, BLKmode, destreg);
14026
14027 /* Step 4: Epilogue to copy the remaining bytes. */
14028
14029 if (label)
14030 {
14031 /* When the main loop is done, COUNT_EXP might hold original count,
14032 while we want to copy only COUNT_EXP & SIZE_NEEDED bytes.
14033 Epilogue code will actually copy COUNT_EXP & EPILOGUE_SIZE_NEEDED
14034 bytes. Compensate if needed. */
14035
14036 if (size_needed < desired_align - align)
14037 {
14038 tmp =
14039 expand_simple_binop (GET_MODE (count_exp), AND, count_exp,
14040 GEN_INT (size_needed - 1), count_exp, 1,
14041 OPTAB_DIRECT);
14042 size_needed = desired_align - align + 1;
14043 if (tmp != count_exp)
14044 emit_move_insn (count_exp, tmp);
14045 }
14046 emit_label (label);
14047 LABEL_NUSES (label) = 1;
14048 }
14049 if (count_exp != const0_rtx && epilogue_size_needed > 1)
14050 {
14051 if (force_loopy_epilogue)
14052 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
14053 size_needed);
14054 else
14055 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
14056 size_needed);
14057 }
14058 if (jump_around_label)
14059 emit_label (jump_around_label);
14060 return 1;
14061 }
14062
14063 /* Expand strlen. */
14064 int
14065 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
14066 {
14067 rtx addr, scratch1, scratch2, scratch3, scratch4;
14068
14069 /* The generic case of the strlen expander is long. Avoid expanding
14070 it unless TARGET_INLINE_ALL_STRINGOPS. */
14071
14072 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
14073 && !TARGET_INLINE_ALL_STRINGOPS
14074 && !optimize_size
14075 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
14076 return 0;
14077
14078 addr = force_reg (Pmode, XEXP (src, 0));
14079 scratch1 = gen_reg_rtx (Pmode);
14080
14081 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
14082 && !optimize_size)
14083 {
14084 /* Well it seems that some optimizer does not combine a call like
14085 foo(strlen(bar), strlen(bar));
14086 when the move and the subtraction are done here. It does calculate
14087 the length just once when these instructions are done inside
14088 output_strlen_unroll(). But I think since &bar[strlen(bar)] is
14089 often used and I use one fewer register for the lifetime of
14090 output_strlen_unroll() this is better. */
14091
14092 emit_move_insn (out, addr);
14093
14094 ix86_expand_strlensi_unroll_1 (out, src, align);
14095
14096 /* strlensi_unroll_1 returns the address of the zero at the end of
14097 the string, like memchr(), so compute the length by subtracting
14098 the start address. */
14099 if (TARGET_64BIT)
14100 emit_insn (gen_subdi3 (out, out, addr));
14101 else
14102 emit_insn (gen_subsi3 (out, out, addr));
14103 }
14104 else
14105 {
14106 rtx unspec;
14107 scratch2 = gen_reg_rtx (Pmode);
14108 scratch3 = gen_reg_rtx (Pmode);
14109 scratch4 = force_reg (Pmode, constm1_rtx);
14110
14111 emit_move_insn (scratch3, addr);
14112 eoschar = force_reg (QImode, eoschar);
14113
14114 src = replace_equiv_address_nv (src, scratch3);
14115
14116 /* If .md starts supporting :P, this can be done in .md. */
14117 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
14118 scratch4), UNSPEC_SCAS);
14119 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
14120 if (TARGET_64BIT)
14121 {
14122 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
14123 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
14124 }
14125 else
14126 {
14127 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
14128 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
14129 }
14130 }
14131 return 1;
14132 }
14133
14134 /* Expand the appropriate insns for doing strlen if not just doing
14135 repnz; scasb
14136
14137 out = result, initialized with the start address
14138 align_rtx = alignment of the address.
14139 scratch = scratch register, initialized with the start address when
14140 not aligned, otherwise undefined
14141
14142 This is just the body. It needs the initializations mentioned above and
14143 some address computing at the end. These things are done in i386.md. */
14144
14145 static void
14146 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
14147 {
14148 int align;
14149 rtx tmp;
14150 rtx align_2_label = NULL_RTX;
14151 rtx align_3_label = NULL_RTX;
14152 rtx align_4_label = gen_label_rtx ();
14153 rtx end_0_label = gen_label_rtx ();
14154 rtx mem;
14155 rtx tmpreg = gen_reg_rtx (SImode);
14156 rtx scratch = gen_reg_rtx (SImode);
14157 rtx cmp;
14158
14159 align = 0;
14160 if (GET_CODE (align_rtx) == CONST_INT)
14161 align = INTVAL (align_rtx);
14162
14163 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
14164
14165 /* Is there a known alignment and is it less than 4? */
14166 if (align < 4)
14167 {
14168 rtx scratch1 = gen_reg_rtx (Pmode);
14169 emit_move_insn (scratch1, out);
14170 /* Is there a known alignment and is it not 2? */
14171 if (align != 2)
14172 {
14173 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
14174 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
14175
14176 /* Leave just the 3 lower bits. */
14177 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
14178 NULL_RTX, 0, OPTAB_WIDEN);
14179
14180 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
14181 Pmode, 1, align_4_label);
14182 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
14183 Pmode, 1, align_2_label);
14184 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
14185 Pmode, 1, align_3_label);
14186 }
14187 else
14188 {
14189 /* Since the alignment is 2, we have to check 2 or 0 bytes;
14190 check whether it is aligned to a 4-byte boundary. */
14191
14192 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
14193 NULL_RTX, 0, OPTAB_WIDEN);
14194
14195 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
14196 Pmode, 1, align_4_label);
14197 }
14198
14199 mem = change_address (src, QImode, out);
14200
14201 /* Now compare the bytes. */
14202
14203 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
14204 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
14205 QImode, 1, end_0_label);
14206
14207 /* Increment the address. */
14208 if (TARGET_64BIT)
14209 emit_insn (gen_adddi3 (out, out, const1_rtx));
14210 else
14211 emit_insn (gen_addsi3 (out, out, const1_rtx));
14212
14213 /* Not needed with an alignment of 2 */
14214 if (align != 2)
14215 {
14216 emit_label (align_2_label);
14217
14218 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
14219 end_0_label);
14220
14221 if (TARGET_64BIT)
14222 emit_insn (gen_adddi3 (out, out, const1_rtx));
14223 else
14224 emit_insn (gen_addsi3 (out, out, const1_rtx));
14225
14226 emit_label (align_3_label);
14227 }
14228
14229 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
14230 end_0_label);
14231
14232 if (TARGET_64BIT)
14233 emit_insn (gen_adddi3 (out, out, const1_rtx));
14234 else
14235 emit_insn (gen_addsi3 (out, out, const1_rtx));
14236 }
14237
14238 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
14239 align this loop; it only makes the program larger and does not help to
14240 speed it up. */
14241 emit_label (align_4_label);
14242
14243 mem = change_address (src, SImode, out);
14244 emit_move_insn (scratch, mem);
14245 if (TARGET_64BIT)
14246 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
14247 else
14248 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
14249
14250 /* This formula yields a nonzero result iff one of the bytes is zero.
14251 This saves three branches inside the loop and many cycles. */
14252
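/* Concretely, the insns below compute
   tmpreg = (scratch - 0x01010101) & ~scratch & 0x80808080.
   For example, scratch = 0x41410041 gives tmpreg = 0x00008000: the 0x80
   bit of the lowest zero byte is always set, so tmpreg is nonzero exactly
   when some byte of scratch is zero. */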
14253 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
14254 emit_insn (gen_one_cmplsi2 (scratch, scratch));
14255 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
14256 emit_insn (gen_andsi3 (tmpreg, tmpreg,
14257 gen_int_mode (0x80808080, SImode)));
14258 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
14259 align_4_label);
14260
14261 if (TARGET_CMOVE)
14262 {
14263 rtx reg = gen_reg_rtx (SImode);
14264 rtx reg2 = gen_reg_rtx (Pmode);
14265 emit_move_insn (reg, tmpreg);
14266 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
14267
14268 /* If zero is not in the first two bytes, move two bytes forward. */
14269 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
14270 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14271 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
14272 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
14273 gen_rtx_IF_THEN_ELSE (SImode, tmp,
14274 reg,
14275 tmpreg)));
14276 /* Emit lea manually to avoid clobbering of flags. */
14277 emit_insn (gen_rtx_SET (SImode, reg2,
14278 gen_rtx_PLUS (Pmode, out, const2_rtx)));
14279
14280 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14281 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
14282 emit_insn (gen_rtx_SET (VOIDmode, out,
14283 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
14284 reg2,
14285 out)));
14286
14287 }
14288 else
14289 {
14290 rtx end_2_label = gen_label_rtx ();
14291 /* Is zero in the first two bytes? */
14292
14293 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
14294 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14295 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
14296 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
14297 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
14298 pc_rtx);
14299 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
14300 JUMP_LABEL (tmp) = end_2_label;
14301
14302 /* Not in the first two. Move two bytes forward. */
14303 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
14304 if (TARGET_64BIT)
14305 emit_insn (gen_adddi3 (out, out, const2_rtx));
14306 else
14307 emit_insn (gen_addsi3 (out, out, const2_rtx));
14308
14309 emit_label (end_2_label);
14310
14311 }
14312
14313 /* Avoid branch in fixing the byte. */
14314 tmpreg = gen_lowpart (QImode, tmpreg);
14315 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
14316 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
14317 if (TARGET_64BIT)
14318 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
14319 else
14320 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
14321
14322 emit_label (end_0_label);
14323 }
14324
14325 void
14326 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
14327 rtx callarg2 ATTRIBUTE_UNUSED,
14328 rtx pop, int sibcall)
14329 {
14330 rtx use = NULL, call;
14331
14332 if (pop == const0_rtx)
14333 pop = NULL;
14334 gcc_assert (!TARGET_64BIT || !pop);
14335
14336 if (TARGET_MACHO && !TARGET_64BIT)
14337 {
14338 #if TARGET_MACHO
14339 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
14340 fnaddr = machopic_indirect_call_target (fnaddr);
14341 #endif
14342 }
14343 else
14344 {
14345 /* Static functions and indirect calls don't need the pic register. */
14346 if (! TARGET_64BIT && flag_pic
14347 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
14348 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
14349 use_reg (&use, pic_offset_table_rtx);
14350 }
14351
14352 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
14353 {
14354 rtx al = gen_rtx_REG (QImode, 0);
14355 emit_move_insn (al, callarg2);
14356 use_reg (&use, al);
14357 }
14358
14359 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
14360 {
14361 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
14362 fnaddr = gen_rtx_MEM (QImode, fnaddr);
14363 }
14364 if (sibcall && TARGET_64BIT
14365 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
14366 {
14367 rtx addr;
14368 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
14369 fnaddr = gen_rtx_REG (Pmode, R11_REG);
14370 emit_move_insn (fnaddr, addr);
14371 fnaddr = gen_rtx_MEM (QImode, fnaddr);
14372 }
14373
14374 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
14375 if (retval)
14376 call = gen_rtx_SET (VOIDmode, retval, call);
14377 if (pop)
14378 {
14379 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
14380 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
14381 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
14382 }
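/* Illustrative only: for a callee-pop call such as a 32-bit stdcall
   "int foo (int)" returning in %eax, the pattern built above looks
   roughly like

      (parallel [(set (reg:SI ax)
                      (call (mem:QI (symbol_ref "foo")) (const_int 4)))
                 (set (reg:SI sp)
                      (plus:SI (reg:SI sp) (const_int 4)))])

   i.e. the call and the stack-pointer adjustment form a single insn.  */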
14383
14384 call = emit_call_insn (call);
14385 if (use)
14386 CALL_INSN_FUNCTION_USAGE (call) = use;
14387 }
14388
14389 \f
14390 /* Clear stack slot assignments remembered from previous functions.
14391 This is called from INIT_EXPANDERS once before RTL is emitted for each
14392 function. */
14393
14394 static struct machine_function *
14395 ix86_init_machine_status (void)
14396 {
14397 struct machine_function *f;
14398
14399 f = ggc_alloc_cleared (sizeof (struct machine_function));
14400 f->use_fast_prologue_epilogue_nregs = -1;
14401 f->tls_descriptor_call_expanded_p = 0;
14402
14403 return f;
14404 }
14405
14406 /* Return a MEM corresponding to a stack slot with mode MODE.
14407 Allocate a new slot if necessary.
14408
14409 The RTL for a function can have several slots available: N is
14410 which slot to use. */
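/* An illustrative usage note: repeated calls with the same MODE and N hand
   back (a copy of) the same MEM, so e.g. assign_386_stack_local (SImode,
   SLOT_TEMP) always names the one SImode scratch word of the current
   function (assuming the usual SLOT_TEMP member of enum ix86_stack_slot).  */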
14411
14412 rtx
14413 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
14414 {
14415 struct stack_local_entry *s;
14416
14417 gcc_assert (n < MAX_386_STACK_LOCALS);
14418
14419 for (s = ix86_stack_locals; s; s = s->next)
14420 if (s->mode == mode && s->n == n)
14421 return copy_rtx (s->rtl);
14422
14423 s = (struct stack_local_entry *)
14424 ggc_alloc (sizeof (struct stack_local_entry));
14425 s->n = n;
14426 s->mode = mode;
14427 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
14428
14429 s->next = ix86_stack_locals;
14430 ix86_stack_locals = s;
14431 return s->rtl;
14432 }
14433
14434 /* Construct the SYMBOL_REF for the tls_get_addr function. */
14435
14436 static GTY(()) rtx ix86_tls_symbol;
14437 rtx
14438 ix86_tls_get_addr (void)
14439 {
14440
14441 if (!ix86_tls_symbol)
14442 {
14443 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
14444 (TARGET_ANY_GNU_TLS
14445 && !TARGET_64BIT)
14446 ? "___tls_get_addr"
14447 : "__tls_get_addr");
14448 }
14449
14450 return ix86_tls_symbol;
14451 }
14452
14453 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
14454
14455 static GTY(()) rtx ix86_tls_module_base_symbol;
14456 rtx
14457 ix86_tls_module_base (void)
14458 {
14459
14460 if (!ix86_tls_module_base_symbol)
14461 {
14462 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
14463 "_TLS_MODULE_BASE_");
14464 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
14465 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
14466 }
14467
14468 return ix86_tls_module_base_symbol;
14469 }
14470 \f
14471 /* Calculate the length of the memory address in the instruction
14472 encoding. Does not include the one-byte modrm, opcode, or prefix. */
14473
14474 int
14475 memory_address_length (rtx addr)
14476 {
14477 struct ix86_address parts;
14478 rtx base, index, disp;
14479 int len;
14480 int ok;
14481
14482 if (GET_CODE (addr) == PRE_DEC
14483 || GET_CODE (addr) == POST_INC
14484 || GET_CODE (addr) == PRE_MODIFY
14485 || GET_CODE (addr) == POST_MODIFY)
14486 return 0;
14487
14488 ok = ix86_decompose_address (addr, &parts);
14489 gcc_assert (ok);
14490
14491 if (parts.base && GET_CODE (parts.base) == SUBREG)
14492 parts.base = SUBREG_REG (parts.base);
14493 if (parts.index && GET_CODE (parts.index) == SUBREG)
14494 parts.index = SUBREG_REG (parts.index);
14495
14496 base = parts.base;
14497 index = parts.index;
14498 disp = parts.disp;
14499 len = 0;
14500
14501 /* Rule of thumb:
14502 - esp as the base always wants an index,
14503 - ebp as the base always wants a displacement. */
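/* A worked example (illustrative): "4(%ebx)" decomposes into base == ebx
   and disp == 4; the displacement fits in 8 bits (constraint K), so
   len == 1, and with no index no further byte is added.  "(%esp)" also
   gets len == 1, for the SIB byte that esp as a base forces.  */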
14504
14505 /* Register Indirect. */
14506 if (base && !index && !disp)
14507 {
14508 /* esp (for its index) and ebp (for its displacement) need
14509 the two-byte modrm form. */
14510 if (addr == stack_pointer_rtx
14511 || addr == arg_pointer_rtx
14512 || addr == frame_pointer_rtx
14513 || addr == hard_frame_pointer_rtx)
14514 len = 1;
14515 }
14516
14517 /* Direct Addressing. */
14518 else if (disp && !base && !index)
14519 len = 4;
14520
14521 else
14522 {
14523 /* Find the length of the displacement constant. */
14524 if (disp)
14525 {
14526 if (base && satisfies_constraint_K (disp))
14527 len = 1;
14528 else
14529 len = 4;
14530 }
14531 /* ebp always wants a displacement. */
14532 else if (base == hard_frame_pointer_rtx)
14533 len = 1;
14534
14535 /* An index requires the two-byte modrm form.... */
14536 if (index
14537 /* ...like esp, which always wants an index. */
14538 || base == stack_pointer_rtx
14539 || base == arg_pointer_rtx
14540 || base == frame_pointer_rtx)
14541 len += 1;
14542 }
14543
14544 return len;
14545 }
14546
14547 /* Compute default value for "length_immediate" attribute. When SHORTFORM
14548 is set, expect that the insn has an 8-bit immediate alternative. */
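/* An illustrative example: for "addl $8, %eax" with SHORTFORM set, the
   constant satisfies constraint K and the immediate costs 1 byte; for
   "addl $100000, %eax" it does not, so in MODE_SI the immediate costs
   4 bytes.  */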
14549 int
14550 ix86_attr_length_immediate_default (rtx insn, int shortform)
14551 {
14552 int len = 0;
14553 int i;
14554 extract_insn_cached (insn);
14555 for (i = recog_data.n_operands - 1; i >= 0; --i)
14556 if (CONSTANT_P (recog_data.operand[i]))
14557 {
14558 gcc_assert (!len);
14559 if (shortform && satisfies_constraint_K (recog_data.operand[i]))
14560 len = 1;
14561 else
14562 {
14563 switch (get_attr_mode (insn))
14564 {
14565 case MODE_QI:
14566 len+=1;
14567 break;
14568 case MODE_HI:
14569 len+=2;
14570 break;
14571 case MODE_SI:
14572 len+=4;
14573 break;
14574 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
14575 case MODE_DI:
14576 len+=4;
14577 break;
14578 default:
14579 fatal_insn ("unknown insn mode", insn);
14580 }
14581 }
14582 }
14583 return len;
14584 }
14585 /* Compute default value for "length_address" attribute. */
14586 int
14587 ix86_attr_length_address_default (rtx insn)
14588 {
14589 int i;
14590
14591 if (get_attr_type (insn) == TYPE_LEA)
14592 {
14593 rtx set = PATTERN (insn);
14594
14595 if (GET_CODE (set) == PARALLEL)
14596 set = XVECEXP (set, 0, 0);
14597
14598 gcc_assert (GET_CODE (set) == SET);
14599
14600 return memory_address_length (SET_SRC (set));
14601 }
14602
14603 extract_insn_cached (insn);
14604 for (i = recog_data.n_operands - 1; i >= 0; --i)
14605 if (GET_CODE (recog_data.operand[i]) == MEM)
14606 {
14607 return memory_address_length (XEXP (recog_data.operand[i], 0));
14609 }
14610 return 0;
14611 }
14612 \f
14613 /* Return the maximum number of instructions a cpu can issue. */
14614
14615 static int
14616 ix86_issue_rate (void)
14617 {
14618 switch (ix86_tune)
14619 {
14620 case PROCESSOR_PENTIUM:
14621 case PROCESSOR_K6:
14622 return 2;
14623
14624 case PROCESSOR_PENTIUMPRO:
14625 case PROCESSOR_PENTIUM4:
14626 case PROCESSOR_ATHLON:
14627 case PROCESSOR_K8:
14628 case PROCESSOR_NOCONA:
14629 case PROCESSOR_GENERIC32:
14630 case PROCESSOR_GENERIC64:
14631 return 3;
14632
14633 case PROCESSOR_CORE2:
14634 return 4;
14635
14636 default:
14637 return 1;
14638 }
14639 }
14640
14641 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
14642 by DEP_INSN and nothing else set by DEP_INSN. */
14643
14644 static int
14645 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
14646 {
14647 rtx set, set2;
14648
14649 /* Simplify the test for uninteresting insns. */
14650 if (insn_type != TYPE_SETCC
14651 && insn_type != TYPE_ICMOV
14652 && insn_type != TYPE_FCMOV
14653 && insn_type != TYPE_IBR)
14654 return 0;
14655
14656 if ((set = single_set (dep_insn)) != 0)
14657 {
14658 set = SET_DEST (set);
14659 set2 = NULL_RTX;
14660 }
14661 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
14662 && XVECLEN (PATTERN (dep_insn), 0) == 2
14663 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
14664 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
14665 {
14666 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
14667 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
14668 }
14669 else
14670 return 0;
14671
14672 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
14673 return 0;
14674
14675 /* This test is true if the dependent insn reads the flags but
14676 not any other potentially set register. */
14677 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
14678 return 0;
14679
14680 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
14681 return 0;
14682
14683 return 1;
14684 }
14685
14686 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
14687 address with operands set by DEP_INSN. */
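/* An illustrative example: on the original Pentium an address-generation
   interlock (AGI) stall occurs when e.g. "movl (%ebx), %eax" immediately
   follows an instruction that writes %ebx; ix86_adjust_cost uses this
   predicate to charge that extra cycle.  */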
14688
14689 static int
14690 ix86_agi_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
14691 {
14692 rtx addr;
14693
14694 if (insn_type == TYPE_LEA
14695 && TARGET_PENTIUM)
14696 {
14697 addr = PATTERN (insn);
14698
14699 if (GET_CODE (addr) == PARALLEL)
14700 addr = XVECEXP (addr, 0, 0);
14701
14702 gcc_assert (GET_CODE (addr) == SET);
14703
14704 addr = SET_SRC (addr);
14705 }
14706 else
14707 {
14708 int i;
14709 extract_insn_cached (insn);
14710 for (i = recog_data.n_operands - 1; i >= 0; --i)
14711 if (GET_CODE (recog_data.operand[i]) == MEM)
14712 {
14713 addr = XEXP (recog_data.operand[i], 0);
14714 goto found;
14715 }
14716 return 0;
14717 found:;
14718 }
14719
14720 return modified_in_p (addr, dep_insn);
14721 }
14722
14723 static int
14724 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
14725 {
14726 enum attr_type insn_type, dep_insn_type;
14727 enum attr_memory memory;
14728 rtx set, set2;
14729 int dep_insn_code_number;
14730
14731 /* Anti and output dependencies have zero cost on all CPUs. */
14732 if (REG_NOTE_KIND (link) != 0)
14733 return 0;
14734
14735 dep_insn_code_number = recog_memoized (dep_insn);
14736
14737 /* If we can't recognize the insns, we can't really do anything. */
14738 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
14739 return cost;
14740
14741 insn_type = get_attr_type (insn);
14742 dep_insn_type = get_attr_type (dep_insn);
14743
14744 switch (ix86_tune)
14745 {
14746 case PROCESSOR_PENTIUM:
14747 /* Address Generation Interlock adds a cycle of latency. */
14748 if (ix86_agi_dependent (insn, dep_insn, insn_type))
14749 cost += 1;
14750
14751 /* ??? Compares pair with jump/setcc. */
14752 if (ix86_flags_dependent (insn, dep_insn, insn_type))
14753 cost = 0;
14754
14755 /* Floating point stores require the value to be ready one cycle earlier. */
14756 if (insn_type == TYPE_FMOV
14757 && get_attr_memory (insn) == MEMORY_STORE
14758 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14759 cost += 1;
14760 break;
14761
14762 case PROCESSOR_PENTIUMPRO:
14763 memory = get_attr_memory (insn);
14764
14765 /* INT->FP conversion is expensive. */
14766 if (get_attr_fp_int_src (dep_insn))
14767 cost += 5;
14768
14769 /* There is one cycle extra latency between an FP op and a store. */
14770 if (insn_type == TYPE_FMOV
14771 && (set = single_set (dep_insn)) != NULL_RTX
14772 && (set2 = single_set (insn)) != NULL_RTX
14773 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
14774 && GET_CODE (SET_DEST (set2)) == MEM)
14775 cost += 1;
14776
14777 /* Show the ability of the reorder buffer to hide the latency of a load
14778 by executing it in parallel with the previous instruction when that
14779 instruction is not needed to compute the address. */
14780 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14781 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14782 {
14783 /* Claim moves to take one cycle, as the core can issue one load
14784 at a time and the next load can start a cycle later. */
14785 if (dep_insn_type == TYPE_IMOV
14786 || dep_insn_type == TYPE_FMOV)
14787 cost = 1;
14788 else if (cost > 1)
14789 cost--;
14790 }
14791 break;
14792
14793 case PROCESSOR_K6:
14794 memory = get_attr_memory (insn);
14795
14796 /* The esp dependency is resolved before the instruction is really
14797 finished. */
14798 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
14799 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
14800 return 1;
14801
14802 /* INT->FP conversion is expensive. */
14803 if (get_attr_fp_int_src (dep_insn))
14804 cost += 5;
14805
14806 /* Show the ability of the reorder buffer to hide the latency of a load
14807 by executing it in parallel with the previous instruction when that
14808 instruction is not needed to compute the address. */
14809 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14810 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14811 {
14812 /* Claim moves to take one cycle, as the core can issue one load
14813 at a time and the next load can start a cycle later. */
14814 if (dep_insn_type == TYPE_IMOV
14815 || dep_insn_type == TYPE_FMOV)
14816 cost = 1;
14817 else if (cost > 2)
14818 cost -= 2;
14819 else
14820 cost = 1;
14821 }
14822 break;
14823
14824 case PROCESSOR_ATHLON:
14825 case PROCESSOR_K8:
14826 case PROCESSOR_GENERIC32:
14827 case PROCESSOR_GENERIC64:
14828 memory = get_attr_memory (insn);
14829
14830 /* Show the ability of the reorder buffer to hide the latency of a load
14831 by executing it in parallel with the previous instruction when that
14832 instruction is not needed to compute the address. */
14833 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14834 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14835 {
14836 enum attr_unit unit = get_attr_unit (insn);
14837 int loadcost = 3;
14838
14839 /* Because of the difference between the length of integer and
14840 floating unit pipeline preparation stages, the memory operands
14841 for floating point are cheaper.
14842
14843 ??? For Athlon the difference is most probably 2. */
14844 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
14845 loadcost = 3;
14846 else
14847 loadcost = TARGET_ATHLON ? 2 : 0;
14848
14849 if (cost >= loadcost)
14850 cost -= loadcost;
14851 else
14852 cost = 0;
14853 }
14854
14855 default:
14856 break;
14857 }
14858
14859 return cost;
14860 }
14861
14862 /* How many alternative schedules to try. This should be as wide as the
14863 scheduling freedom in the DFA, but no wider. Making this value too
14864 large results extra work for the scheduler. */
14865
14866 static int
14867 ia32_multipass_dfa_lookahead (void)
14868 {
14869 if (ix86_tune == PROCESSOR_PENTIUM)
14870 return 2;
14871
14872 if (ix86_tune == PROCESSOR_PENTIUMPRO
14873 || ix86_tune == PROCESSOR_K6)
14874 return 1;
14875
14876 else
14877 return 0;
14878 }
14879
14880 \f
14881 /* Compute the alignment given to a constant that is being placed in memory.
14882 EXP is the constant and ALIGN is the alignment that the object would
14883 ordinarily have.
14884 The value of this function is used instead of that alignment to align
14885 the object. */
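/* For example (illustrative), a DFmode constant that would otherwise get only
   32-bit alignment is bumped to 64 bits below, and string constants of 31
   characters or more are word-aligned (when not optimizing for size) so that
   string operations can use full-word accesses.  */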
14886
14887 int
14888 ix86_constant_alignment (tree exp, int align)
14889 {
14890 if (TREE_CODE (exp) == REAL_CST)
14891 {
14892 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
14893 return 64;
14894 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
14895 return 128;
14896 }
14897 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
14898 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
14899 return BITS_PER_WORD;
14900
14901 return align;
14902 }
14903
14904 /* Compute the alignment for a static variable.
14905 TYPE is the data type, and ALIGN is the alignment that
14906 the object would ordinarily have. The value of this function is used
14907 instead of that alignment to align the object. */
14908
14909 int
14910 ix86_data_alignment (tree type, int align)
14911 {
14912 int max_align = optimize_size ? BITS_PER_WORD : 256;
14913
14914 if (AGGREGATE_TYPE_P (type)
14915 && TYPE_SIZE (type)
14916 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
14917 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
14918 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
14919 && align < max_align)
14920 align = max_align;
14921
14922 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
14923 to a 16-byte boundary. */
14924 if (TARGET_64BIT)
14925 {
14926 if (AGGREGATE_TYPE_P (type)
14927 && TYPE_SIZE (type)
14928 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
14929 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
14930 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
14931 return 128;
14932 }
14933
14934 if (TREE_CODE (type) == ARRAY_TYPE)
14935 {
14936 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
14937 return 64;
14938 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
14939 return 128;
14940 }
14941 else if (TREE_CODE (type) == COMPLEX_TYPE)
14942 {
14943
14944 if (TYPE_MODE (type) == DCmode && align < 64)
14945 return 64;
14946 if (TYPE_MODE (type) == XCmode && align < 128)
14947 return 128;
14948 }
14949 else if ((TREE_CODE (type) == RECORD_TYPE
14950 || TREE_CODE (type) == UNION_TYPE
14951 || TREE_CODE (type) == QUAL_UNION_TYPE)
14952 && TYPE_FIELDS (type))
14953 {
14954 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
14955 return 64;
14956 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
14957 return 128;
14958 }
14959 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
14960 || TREE_CODE (type) == INTEGER_TYPE)
14961 {
14962 if (TYPE_MODE (type) == DFmode && align < 64)
14963 return 64;
14964 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
14965 return 128;
14966 }
14967
14968 return align;
14969 }
14970
14971 /* Compute the alignment for a local variable.
14972 TYPE is the data type, and ALIGN is the alignment that
14973 the object would ordinarily have. The value of this macro is used
14974 instead of that alignment to align the object. */
14975
14976 int
14977 ix86_local_alignment (tree type, int align)
14978 {
14979 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
14980 to a 16-byte boundary. */
14981 if (TARGET_64BIT)
14982 {
14983 if (AGGREGATE_TYPE_P (type)
14984 && TYPE_SIZE (type)
14985 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
14986 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
14987 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
14988 return 128;
14989 }
14990 if (TREE_CODE (type) == ARRAY_TYPE)
14991 {
14992 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
14993 return 64;
14994 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
14995 return 128;
14996 }
14997 else if (TREE_CODE (type) == COMPLEX_TYPE)
14998 {
14999 if (TYPE_MODE (type) == DCmode && align < 64)
15000 return 64;
15001 if (TYPE_MODE (type) == XCmode && align < 128)
15002 return 128;
15003 }
15004 else if ((TREE_CODE (type) == RECORD_TYPE
15005 || TREE_CODE (type) == UNION_TYPE
15006 || TREE_CODE (type) == QUAL_UNION_TYPE)
15007 && TYPE_FIELDS (type))
15008 {
15009 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
15010 return 64;
15011 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
15012 return 128;
15013 }
15014 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
15015 || TREE_CODE (type) == INTEGER_TYPE)
15016 {
15017
15018 if (TYPE_MODE (type) == DFmode && align < 64)
15019 return 64;
15020 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
15021 return 128;
15022 }
15023 return align;
15024 }
15025 \f
15026 /* Emit RTL insns to initialize the variable parts of a trampoline.
15027 FNADDR is an RTX for the address of the function's pure code.
15028 CXT is an RTX for the static chain value for the function. */
15029 void
15030 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
15031 {
15032 if (!TARGET_64BIT)
15033 {
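/* The 10-byte 32-bit trampoline emitted below is, schematically
   (illustrative):

      offset 0:  b9 <cxt:4>     movl  $CXT, %ecx
      offset 5:  e9 <disp:4>    jmp   FNADDR

   where <disp> is FNADDR relative to the end of the jmp (tramp + 10).  */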
15034 /* Compute offset from the end of the jmp to the target function. */
15035 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
15036 plus_constant (tramp, 10),
15037 NULL_RTX, 1, OPTAB_DIRECT);
15038 emit_move_insn (gen_rtx_MEM (QImode, tramp),
15039 gen_int_mode (0xb9, QImode));
15040 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
15041 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
15042 gen_int_mode (0xe9, QImode));
15043 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
15044 }
15045 else
15046 {
15047 int offset = 0;
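/* Schematically (illustrative), the 64-bit trampoline built below is

      49 bb <fnaddr:8>   movabs $FNADDR, %r11   (or 41 bb <fnaddr:4>, a
                                                 movl, when FNADDR fits)
      49 ba <cxt:8>      movabs $CXT, %r10
      49 ff e3           jmpq   *%r11  */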
15048 /* Try to load address using shorter movl instead of movabs.
15049 We may want to support movq for kernel mode, but the kernel does not use
15050 trampolines at the moment. */
15051 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
15052 {
15053 fnaddr = copy_to_mode_reg (DImode, fnaddr);
15054 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15055 gen_int_mode (0xbb41, HImode));
15056 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
15057 gen_lowpart (SImode, fnaddr));
15058 offset += 6;
15059 }
15060 else
15061 {
15062 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15063 gen_int_mode (0xbb49, HImode));
15064 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
15065 fnaddr);
15066 offset += 10;
15067 }
15068 /* Load static chain using movabs to r10. */
15069 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15070 gen_int_mode (0xba49, HImode));
15071 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
15072 cxt);
15073 offset += 10;
15074 /* Jump to r11. */
15075 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15076 gen_int_mode (0xff49, HImode));
15077 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
15078 gen_int_mode (0xe3, QImode));
15079 offset += 3;
15080 gcc_assert (offset <= TRAMPOLINE_SIZE);
15081 }
15082
15083 #ifdef ENABLE_EXECUTE_STACK
15084 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
15085 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
15086 #endif
15087 }
15088 \f
15089 /* Codes for all the SSE/MMX builtins. */
15090 enum ix86_builtins
15091 {
15092 IX86_BUILTIN_ADDPS,
15093 IX86_BUILTIN_ADDSS,
15094 IX86_BUILTIN_DIVPS,
15095 IX86_BUILTIN_DIVSS,
15096 IX86_BUILTIN_MULPS,
15097 IX86_BUILTIN_MULSS,
15098 IX86_BUILTIN_SUBPS,
15099 IX86_BUILTIN_SUBSS,
15100
15101 IX86_BUILTIN_CMPEQPS,
15102 IX86_BUILTIN_CMPLTPS,
15103 IX86_BUILTIN_CMPLEPS,
15104 IX86_BUILTIN_CMPGTPS,
15105 IX86_BUILTIN_CMPGEPS,
15106 IX86_BUILTIN_CMPNEQPS,
15107 IX86_BUILTIN_CMPNLTPS,
15108 IX86_BUILTIN_CMPNLEPS,
15109 IX86_BUILTIN_CMPNGTPS,
15110 IX86_BUILTIN_CMPNGEPS,
15111 IX86_BUILTIN_CMPORDPS,
15112 IX86_BUILTIN_CMPUNORDPS,
15113 IX86_BUILTIN_CMPEQSS,
15114 IX86_BUILTIN_CMPLTSS,
15115 IX86_BUILTIN_CMPLESS,
15116 IX86_BUILTIN_CMPNEQSS,
15117 IX86_BUILTIN_CMPNLTSS,
15118 IX86_BUILTIN_CMPNLESS,
15119 IX86_BUILTIN_CMPNGTSS,
15120 IX86_BUILTIN_CMPNGESS,
15121 IX86_BUILTIN_CMPORDSS,
15122 IX86_BUILTIN_CMPUNORDSS,
15123
15124 IX86_BUILTIN_COMIEQSS,
15125 IX86_BUILTIN_COMILTSS,
15126 IX86_BUILTIN_COMILESS,
15127 IX86_BUILTIN_COMIGTSS,
15128 IX86_BUILTIN_COMIGESS,
15129 IX86_BUILTIN_COMINEQSS,
15130 IX86_BUILTIN_UCOMIEQSS,
15131 IX86_BUILTIN_UCOMILTSS,
15132 IX86_BUILTIN_UCOMILESS,
15133 IX86_BUILTIN_UCOMIGTSS,
15134 IX86_BUILTIN_UCOMIGESS,
15135 IX86_BUILTIN_UCOMINEQSS,
15136
15137 IX86_BUILTIN_CVTPI2PS,
15138 IX86_BUILTIN_CVTPS2PI,
15139 IX86_BUILTIN_CVTSI2SS,
15140 IX86_BUILTIN_CVTSI642SS,
15141 IX86_BUILTIN_CVTSS2SI,
15142 IX86_BUILTIN_CVTSS2SI64,
15143 IX86_BUILTIN_CVTTPS2PI,
15144 IX86_BUILTIN_CVTTSS2SI,
15145 IX86_BUILTIN_CVTTSS2SI64,
15146
15147 IX86_BUILTIN_MAXPS,
15148 IX86_BUILTIN_MAXSS,
15149 IX86_BUILTIN_MINPS,
15150 IX86_BUILTIN_MINSS,
15151
15152 IX86_BUILTIN_LOADUPS,
15153 IX86_BUILTIN_STOREUPS,
15154 IX86_BUILTIN_MOVSS,
15155
15156 IX86_BUILTIN_MOVHLPS,
15157 IX86_BUILTIN_MOVLHPS,
15158 IX86_BUILTIN_LOADHPS,
15159 IX86_BUILTIN_LOADLPS,
15160 IX86_BUILTIN_STOREHPS,
15161 IX86_BUILTIN_STORELPS,
15162
15163 IX86_BUILTIN_MASKMOVQ,
15164 IX86_BUILTIN_MOVMSKPS,
15165 IX86_BUILTIN_PMOVMSKB,
15166
15167 IX86_BUILTIN_MOVNTPS,
15168 IX86_BUILTIN_MOVNTQ,
15169
15170 IX86_BUILTIN_LOADDQU,
15171 IX86_BUILTIN_STOREDQU,
15172
15173 IX86_BUILTIN_PACKSSWB,
15174 IX86_BUILTIN_PACKSSDW,
15175 IX86_BUILTIN_PACKUSWB,
15176
15177 IX86_BUILTIN_PADDB,
15178 IX86_BUILTIN_PADDW,
15179 IX86_BUILTIN_PADDD,
15180 IX86_BUILTIN_PADDQ,
15181 IX86_BUILTIN_PADDSB,
15182 IX86_BUILTIN_PADDSW,
15183 IX86_BUILTIN_PADDUSB,
15184 IX86_BUILTIN_PADDUSW,
15185 IX86_BUILTIN_PSUBB,
15186 IX86_BUILTIN_PSUBW,
15187 IX86_BUILTIN_PSUBD,
15188 IX86_BUILTIN_PSUBQ,
15189 IX86_BUILTIN_PSUBSB,
15190 IX86_BUILTIN_PSUBSW,
15191 IX86_BUILTIN_PSUBUSB,
15192 IX86_BUILTIN_PSUBUSW,
15193
15194 IX86_BUILTIN_PAND,
15195 IX86_BUILTIN_PANDN,
15196 IX86_BUILTIN_POR,
15197 IX86_BUILTIN_PXOR,
15198
15199 IX86_BUILTIN_PAVGB,
15200 IX86_BUILTIN_PAVGW,
15201
15202 IX86_BUILTIN_PCMPEQB,
15203 IX86_BUILTIN_PCMPEQW,
15204 IX86_BUILTIN_PCMPEQD,
15205 IX86_BUILTIN_PCMPGTB,
15206 IX86_BUILTIN_PCMPGTW,
15207 IX86_BUILTIN_PCMPGTD,
15208
15209 IX86_BUILTIN_PMADDWD,
15210
15211 IX86_BUILTIN_PMAXSW,
15212 IX86_BUILTIN_PMAXUB,
15213 IX86_BUILTIN_PMINSW,
15214 IX86_BUILTIN_PMINUB,
15215
15216 IX86_BUILTIN_PMULHUW,
15217 IX86_BUILTIN_PMULHW,
15218 IX86_BUILTIN_PMULLW,
15219
15220 IX86_BUILTIN_PSADBW,
15221 IX86_BUILTIN_PSHUFW,
15222
15223 IX86_BUILTIN_PSLLW,
15224 IX86_BUILTIN_PSLLD,
15225 IX86_BUILTIN_PSLLQ,
15226 IX86_BUILTIN_PSRAW,
15227 IX86_BUILTIN_PSRAD,
15228 IX86_BUILTIN_PSRLW,
15229 IX86_BUILTIN_PSRLD,
15230 IX86_BUILTIN_PSRLQ,
15231 IX86_BUILTIN_PSLLWI,
15232 IX86_BUILTIN_PSLLDI,
15233 IX86_BUILTIN_PSLLQI,
15234 IX86_BUILTIN_PSRAWI,
15235 IX86_BUILTIN_PSRADI,
15236 IX86_BUILTIN_PSRLWI,
15237 IX86_BUILTIN_PSRLDI,
15238 IX86_BUILTIN_PSRLQI,
15239
15240 IX86_BUILTIN_PUNPCKHBW,
15241 IX86_BUILTIN_PUNPCKHWD,
15242 IX86_BUILTIN_PUNPCKHDQ,
15243 IX86_BUILTIN_PUNPCKLBW,
15244 IX86_BUILTIN_PUNPCKLWD,
15245 IX86_BUILTIN_PUNPCKLDQ,
15246
15247 IX86_BUILTIN_SHUFPS,
15248
15249 IX86_BUILTIN_RCPPS,
15250 IX86_BUILTIN_RCPSS,
15251 IX86_BUILTIN_RSQRTPS,
15252 IX86_BUILTIN_RSQRTSS,
15253 IX86_BUILTIN_SQRTPS,
15254 IX86_BUILTIN_SQRTSS,
15255
15256 IX86_BUILTIN_UNPCKHPS,
15257 IX86_BUILTIN_UNPCKLPS,
15258
15259 IX86_BUILTIN_ANDPS,
15260 IX86_BUILTIN_ANDNPS,
15261 IX86_BUILTIN_ORPS,
15262 IX86_BUILTIN_XORPS,
15263
15264 IX86_BUILTIN_EMMS,
15265 IX86_BUILTIN_LDMXCSR,
15266 IX86_BUILTIN_STMXCSR,
15267 IX86_BUILTIN_SFENCE,
15268
15269 /* 3DNow! Original */
15270 IX86_BUILTIN_FEMMS,
15271 IX86_BUILTIN_PAVGUSB,
15272 IX86_BUILTIN_PF2ID,
15273 IX86_BUILTIN_PFACC,
15274 IX86_BUILTIN_PFADD,
15275 IX86_BUILTIN_PFCMPEQ,
15276 IX86_BUILTIN_PFCMPGE,
15277 IX86_BUILTIN_PFCMPGT,
15278 IX86_BUILTIN_PFMAX,
15279 IX86_BUILTIN_PFMIN,
15280 IX86_BUILTIN_PFMUL,
15281 IX86_BUILTIN_PFRCP,
15282 IX86_BUILTIN_PFRCPIT1,
15283 IX86_BUILTIN_PFRCPIT2,
15284 IX86_BUILTIN_PFRSQIT1,
15285 IX86_BUILTIN_PFRSQRT,
15286 IX86_BUILTIN_PFSUB,
15287 IX86_BUILTIN_PFSUBR,
15288 IX86_BUILTIN_PI2FD,
15289 IX86_BUILTIN_PMULHRW,
15290
15291 /* 3DNow! Athlon Extensions */
15292 IX86_BUILTIN_PF2IW,
15293 IX86_BUILTIN_PFNACC,
15294 IX86_BUILTIN_PFPNACC,
15295 IX86_BUILTIN_PI2FW,
15296 IX86_BUILTIN_PSWAPDSI,
15297 IX86_BUILTIN_PSWAPDSF,
15298
15299 /* SSE2 */
15300 IX86_BUILTIN_ADDPD,
15301 IX86_BUILTIN_ADDSD,
15302 IX86_BUILTIN_DIVPD,
15303 IX86_BUILTIN_DIVSD,
15304 IX86_BUILTIN_MULPD,
15305 IX86_BUILTIN_MULSD,
15306 IX86_BUILTIN_SUBPD,
15307 IX86_BUILTIN_SUBSD,
15308
15309 IX86_BUILTIN_CMPEQPD,
15310 IX86_BUILTIN_CMPLTPD,
15311 IX86_BUILTIN_CMPLEPD,
15312 IX86_BUILTIN_CMPGTPD,
15313 IX86_BUILTIN_CMPGEPD,
15314 IX86_BUILTIN_CMPNEQPD,
15315 IX86_BUILTIN_CMPNLTPD,
15316 IX86_BUILTIN_CMPNLEPD,
15317 IX86_BUILTIN_CMPNGTPD,
15318 IX86_BUILTIN_CMPNGEPD,
15319 IX86_BUILTIN_CMPORDPD,
15320 IX86_BUILTIN_CMPUNORDPD,
15321 IX86_BUILTIN_CMPNEPD,
15322 IX86_BUILTIN_CMPEQSD,
15323 IX86_BUILTIN_CMPLTSD,
15324 IX86_BUILTIN_CMPLESD,
15325 IX86_BUILTIN_CMPNEQSD,
15326 IX86_BUILTIN_CMPNLTSD,
15327 IX86_BUILTIN_CMPNLESD,
15328 IX86_BUILTIN_CMPORDSD,
15329 IX86_BUILTIN_CMPUNORDSD,
15330 IX86_BUILTIN_CMPNESD,
15331
15332 IX86_BUILTIN_COMIEQSD,
15333 IX86_BUILTIN_COMILTSD,
15334 IX86_BUILTIN_COMILESD,
15335 IX86_BUILTIN_COMIGTSD,
15336 IX86_BUILTIN_COMIGESD,
15337 IX86_BUILTIN_COMINEQSD,
15338 IX86_BUILTIN_UCOMIEQSD,
15339 IX86_BUILTIN_UCOMILTSD,
15340 IX86_BUILTIN_UCOMILESD,
15341 IX86_BUILTIN_UCOMIGTSD,
15342 IX86_BUILTIN_UCOMIGESD,
15343 IX86_BUILTIN_UCOMINEQSD,
15344
15345 IX86_BUILTIN_MAXPD,
15346 IX86_BUILTIN_MAXSD,
15347 IX86_BUILTIN_MINPD,
15348 IX86_BUILTIN_MINSD,
15349
15350 IX86_BUILTIN_ANDPD,
15351 IX86_BUILTIN_ANDNPD,
15352 IX86_BUILTIN_ORPD,
15353 IX86_BUILTIN_XORPD,
15354
15355 IX86_BUILTIN_SQRTPD,
15356 IX86_BUILTIN_SQRTSD,
15357
15358 IX86_BUILTIN_UNPCKHPD,
15359 IX86_BUILTIN_UNPCKLPD,
15360
15361 IX86_BUILTIN_SHUFPD,
15362
15363 IX86_BUILTIN_LOADUPD,
15364 IX86_BUILTIN_STOREUPD,
15365 IX86_BUILTIN_MOVSD,
15366
15367 IX86_BUILTIN_LOADHPD,
15368 IX86_BUILTIN_LOADLPD,
15369
15370 IX86_BUILTIN_CVTDQ2PD,
15371 IX86_BUILTIN_CVTDQ2PS,
15372
15373 IX86_BUILTIN_CVTPD2DQ,
15374 IX86_BUILTIN_CVTPD2PI,
15375 IX86_BUILTIN_CVTPD2PS,
15376 IX86_BUILTIN_CVTTPD2DQ,
15377 IX86_BUILTIN_CVTTPD2PI,
15378
15379 IX86_BUILTIN_CVTPI2PD,
15380 IX86_BUILTIN_CVTSI2SD,
15381 IX86_BUILTIN_CVTSI642SD,
15382
15383 IX86_BUILTIN_CVTSD2SI,
15384 IX86_BUILTIN_CVTSD2SI64,
15385 IX86_BUILTIN_CVTSD2SS,
15386 IX86_BUILTIN_CVTSS2SD,
15387 IX86_BUILTIN_CVTTSD2SI,
15388 IX86_BUILTIN_CVTTSD2SI64,
15389
15390 IX86_BUILTIN_CVTPS2DQ,
15391 IX86_BUILTIN_CVTPS2PD,
15392 IX86_BUILTIN_CVTTPS2DQ,
15393
15394 IX86_BUILTIN_MOVNTI,
15395 IX86_BUILTIN_MOVNTPD,
15396 IX86_BUILTIN_MOVNTDQ,
15397
15398 /* SSE2 MMX */
15399 IX86_BUILTIN_MASKMOVDQU,
15400 IX86_BUILTIN_MOVMSKPD,
15401 IX86_BUILTIN_PMOVMSKB128,
15402
15403 IX86_BUILTIN_PACKSSWB128,
15404 IX86_BUILTIN_PACKSSDW128,
15405 IX86_BUILTIN_PACKUSWB128,
15406
15407 IX86_BUILTIN_PADDB128,
15408 IX86_BUILTIN_PADDW128,
15409 IX86_BUILTIN_PADDD128,
15410 IX86_BUILTIN_PADDQ128,
15411 IX86_BUILTIN_PADDSB128,
15412 IX86_BUILTIN_PADDSW128,
15413 IX86_BUILTIN_PADDUSB128,
15414 IX86_BUILTIN_PADDUSW128,
15415 IX86_BUILTIN_PSUBB128,
15416 IX86_BUILTIN_PSUBW128,
15417 IX86_BUILTIN_PSUBD128,
15418 IX86_BUILTIN_PSUBQ128,
15419 IX86_BUILTIN_PSUBSB128,
15420 IX86_BUILTIN_PSUBSW128,
15421 IX86_BUILTIN_PSUBUSB128,
15422 IX86_BUILTIN_PSUBUSW128,
15423
15424 IX86_BUILTIN_PAND128,
15425 IX86_BUILTIN_PANDN128,
15426 IX86_BUILTIN_POR128,
15427 IX86_BUILTIN_PXOR128,
15428
15429 IX86_BUILTIN_PAVGB128,
15430 IX86_BUILTIN_PAVGW128,
15431
15432 IX86_BUILTIN_PCMPEQB128,
15433 IX86_BUILTIN_PCMPEQW128,
15434 IX86_BUILTIN_PCMPEQD128,
15435 IX86_BUILTIN_PCMPGTB128,
15436 IX86_BUILTIN_PCMPGTW128,
15437 IX86_BUILTIN_PCMPGTD128,
15438
15439 IX86_BUILTIN_PMADDWD128,
15440
15441 IX86_BUILTIN_PMAXSW128,
15442 IX86_BUILTIN_PMAXUB128,
15443 IX86_BUILTIN_PMINSW128,
15444 IX86_BUILTIN_PMINUB128,
15445
15446 IX86_BUILTIN_PMULUDQ,
15447 IX86_BUILTIN_PMULUDQ128,
15448 IX86_BUILTIN_PMULHUW128,
15449 IX86_BUILTIN_PMULHW128,
15450 IX86_BUILTIN_PMULLW128,
15451
15452 IX86_BUILTIN_PSADBW128,
15453 IX86_BUILTIN_PSHUFHW,
15454 IX86_BUILTIN_PSHUFLW,
15455 IX86_BUILTIN_PSHUFD,
15456
15457 IX86_BUILTIN_PSLLW128,
15458 IX86_BUILTIN_PSLLD128,
15459 IX86_BUILTIN_PSLLQ128,
15460 IX86_BUILTIN_PSRAW128,
15461 IX86_BUILTIN_PSRAD128,
15462 IX86_BUILTIN_PSRLW128,
15463 IX86_BUILTIN_PSRLD128,
15464 IX86_BUILTIN_PSRLQ128,
15465 IX86_BUILTIN_PSLLDQI128,
15466 IX86_BUILTIN_PSLLWI128,
15467 IX86_BUILTIN_PSLLDI128,
15468 IX86_BUILTIN_PSLLQI128,
15469 IX86_BUILTIN_PSRAWI128,
15470 IX86_BUILTIN_PSRADI128,
15471 IX86_BUILTIN_PSRLDQI128,
15472 IX86_BUILTIN_PSRLWI128,
15473 IX86_BUILTIN_PSRLDI128,
15474 IX86_BUILTIN_PSRLQI128,
15475
15476 IX86_BUILTIN_PUNPCKHBW128,
15477 IX86_BUILTIN_PUNPCKHWD128,
15478 IX86_BUILTIN_PUNPCKHDQ128,
15479 IX86_BUILTIN_PUNPCKHQDQ128,
15480 IX86_BUILTIN_PUNPCKLBW128,
15481 IX86_BUILTIN_PUNPCKLWD128,
15482 IX86_BUILTIN_PUNPCKLDQ128,
15483 IX86_BUILTIN_PUNPCKLQDQ128,
15484
15485 IX86_BUILTIN_CLFLUSH,
15486 IX86_BUILTIN_MFENCE,
15487 IX86_BUILTIN_LFENCE,
15488
15489 /* Prescott New Instructions. */
15490 IX86_BUILTIN_ADDSUBPS,
15491 IX86_BUILTIN_HADDPS,
15492 IX86_BUILTIN_HSUBPS,
15493 IX86_BUILTIN_MOVSHDUP,
15494 IX86_BUILTIN_MOVSLDUP,
15495 IX86_BUILTIN_ADDSUBPD,
15496 IX86_BUILTIN_HADDPD,
15497 IX86_BUILTIN_HSUBPD,
15498 IX86_BUILTIN_LDDQU,
15499
15500 IX86_BUILTIN_MONITOR,
15501 IX86_BUILTIN_MWAIT,
15502
15503 /* SSSE3. */
15504 IX86_BUILTIN_PHADDW,
15505 IX86_BUILTIN_PHADDD,
15506 IX86_BUILTIN_PHADDSW,
15507 IX86_BUILTIN_PHSUBW,
15508 IX86_BUILTIN_PHSUBD,
15509 IX86_BUILTIN_PHSUBSW,
15510 IX86_BUILTIN_PMADDUBSW,
15511 IX86_BUILTIN_PMULHRSW,
15512 IX86_BUILTIN_PSHUFB,
15513 IX86_BUILTIN_PSIGNB,
15514 IX86_BUILTIN_PSIGNW,
15515 IX86_BUILTIN_PSIGND,
15516 IX86_BUILTIN_PALIGNR,
15517 IX86_BUILTIN_PABSB,
15518 IX86_BUILTIN_PABSW,
15519 IX86_BUILTIN_PABSD,
15520
15521 IX86_BUILTIN_PHADDW128,
15522 IX86_BUILTIN_PHADDD128,
15523 IX86_BUILTIN_PHADDSW128,
15524 IX86_BUILTIN_PHSUBW128,
15525 IX86_BUILTIN_PHSUBD128,
15526 IX86_BUILTIN_PHSUBSW128,
15527 IX86_BUILTIN_PMADDUBSW128,
15528 IX86_BUILTIN_PMULHRSW128,
15529 IX86_BUILTIN_PSHUFB128,
15530 IX86_BUILTIN_PSIGNB128,
15531 IX86_BUILTIN_PSIGNW128,
15532 IX86_BUILTIN_PSIGND128,
15533 IX86_BUILTIN_PALIGNR128,
15534 IX86_BUILTIN_PABSB128,
15535 IX86_BUILTIN_PABSW128,
15536 IX86_BUILTIN_PABSD128,
15537
15538 IX86_BUILTIN_VEC_INIT_V2SI,
15539 IX86_BUILTIN_VEC_INIT_V4HI,
15540 IX86_BUILTIN_VEC_INIT_V8QI,
15541 IX86_BUILTIN_VEC_EXT_V2DF,
15542 IX86_BUILTIN_VEC_EXT_V2DI,
15543 IX86_BUILTIN_VEC_EXT_V4SF,
15544 IX86_BUILTIN_VEC_EXT_V4SI,
15545 IX86_BUILTIN_VEC_EXT_V8HI,
15546 IX86_BUILTIN_VEC_EXT_V2SI,
15547 IX86_BUILTIN_VEC_EXT_V4HI,
15548 IX86_BUILTIN_VEC_SET_V8HI,
15549 IX86_BUILTIN_VEC_SET_V4HI,
15550
15551 IX86_BUILTIN_MAX
15552 };
15553
15554 /* Table for the ix86 builtin decls. */
15555 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
15556
15557 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Do so
15558 * only if the target_flags include one of MASK. Store the function decl
15559 * in the ix86_builtins array.
15560 * Return the function decl, or NULL_TREE if the builtin was not added. */
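/* For instance (illustrative; the FUNCTION_TYPE node name below is only a
   placeholder), the SSE addps builtin would be registered as

      def_builtin (MASK_SSE, "__builtin_ia32_addps",
                   v4sf_ftype_v4sf_v4sf, IX86_BUILTIN_ADDPS);

   which creates the decl only when MASK_SSE is set in target_flags.  */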
15561
15562 static inline tree
15563 def_builtin (int mask, const char *name, tree type, enum ix86_builtins code)
15564 {
15565 tree decl = NULL_TREE;
15566
15567 if (mask & target_flags
15568 && (!(mask & MASK_64BIT) || TARGET_64BIT))
15569 {
15570 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
15571 NULL, NULL_TREE);
15572 ix86_builtins[(int) code] = decl;
15573 }
15574
15575 return decl;
15576 }
15577
15578 /* Like def_builtin, but also marks the function decl "const". */
15579
15580 static inline tree
15581 def_builtin_const (int mask, const char *name, tree type,
15582 enum ix86_builtins code)
15583 {
15584 tree decl = def_builtin (mask, name, type, code);
15585 if (decl)
15586 TREE_READONLY (decl) = 1;
15587 return decl;
15588 }
15589
15590 /* Bits for builtin_description.flag. */
15591
15592 /* Set when we don't support the comparison natively, and should
15593 swap_comparison in order to support it. */
15594 #define BUILTIN_DESC_SWAP_OPERANDS 1
15595
15596 struct builtin_description
15597 {
15598 const unsigned int mask;
15599 const enum insn_code icode;
15600 const char *const name;
15601 const enum ix86_builtins code;
15602 const enum rtx_code comparison;
15603 const unsigned int flag;
15604 };
15605
15606 static const struct builtin_description bdesc_comi[] =
15607 {
15608 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
15609 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
15610 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
15611 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
15612 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
15613 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
15614 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
15615 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
15616 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
15617 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
15618 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
15619 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
15620 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
15621 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
15622 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
15623 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
15624 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
15625 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
15626 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
15627 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
15628 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
15629 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
15630 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
15631 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
15632 };
15633
15634 static const struct builtin_description bdesc_2arg[] =
15635 {
15636 /* SSE */
15637 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
15638 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
15639 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
15640 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
15641 { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
15642 { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
15643 { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
15644 { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
15645
15646 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
15647 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
15648 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
15649 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
15650 BUILTIN_DESC_SWAP_OPERANDS },
15651 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
15652 BUILTIN_DESC_SWAP_OPERANDS },
15653 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
15654 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
15655 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
15656 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
15657 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
15658 BUILTIN_DESC_SWAP_OPERANDS },
15659 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
15660 BUILTIN_DESC_SWAP_OPERANDS },
15661 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
15662 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
15663 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
15664 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
15665 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
15666 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
15667 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
15668 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
15669 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
15670 BUILTIN_DESC_SWAP_OPERANDS },
15671 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
15672 BUILTIN_DESC_SWAP_OPERANDS },
15673 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
15674
15675 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
15676 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
15677 { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
15678 { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
15679
15680 { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
15681 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
15682 { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
15683 { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
15684
15685 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
15686 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
15687 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
15688 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
15689 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
15690
15691 /* MMX */
15692 { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
15693 { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
15694 { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
15695 { MASK_SSE2, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
15696 { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
15697 { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
15698 { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
15699 { MASK_SSE2, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
15700
15701 { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
15702 { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
15703 { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
15704 { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
15705 { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
15706 { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
15707 { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
15708 { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
15709
15710 { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
15711 { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
15712 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
15713
15714 { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
15715 { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
15716 { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
15717 { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
15718
15719 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
15720 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
15721
15722 { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
15723 { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
15724 { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
15725 { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
15726 { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
15727 { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
15728
15729 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
15730 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
15731 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
15732 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
15733
15734 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
15735 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
15736 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
15737 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
15738 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
15739 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
15740
15741 /* Special. */
15742 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
15743 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
15744 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
15745
15746 { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
15747 { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
15748 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
15749
15750 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
15751 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
15752 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
15753 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
15754 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
15755 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
15756
15757 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
15758 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
15759 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
15760 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
15761 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
15762 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
15763
15764 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
15765 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
15766 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
15767 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
15768
15769 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
15770 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
15771
15772 /* SSE2 */
15773 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
15774 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
15775 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
15776 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
15777 { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
15778 { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
15779 { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
15780 { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
15781
15782 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
15783 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
15784 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
15785 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
15786 BUILTIN_DESC_SWAP_OPERANDS },
15787 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
15788 BUILTIN_DESC_SWAP_OPERANDS },
15789 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
15790 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
15791 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
15792 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
15793 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
15794 BUILTIN_DESC_SWAP_OPERANDS },
15795 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
15796 BUILTIN_DESC_SWAP_OPERANDS },
15797 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
15798 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
15799 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
15800 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
15801 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
15802 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
15803 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
15804 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
15805 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
15806
15807 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
15808 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
15809 { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
15810 { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
15811
15812 { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
15813 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
15814 { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
15815 { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
15816
15817 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
15818 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
15819 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
15820
15821 /* SSE2 MMX */
15822 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
15823 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
15824 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
15825 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
15826 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
15827 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
15828 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
15829 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
15830
15831 { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
15832 { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
15833 { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
15834 { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
15835 { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
15836 { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
15837 { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
15838 { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
15839
15840 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
15841 { MASK_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
15842
15843 { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
15844 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
15845 { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
15846 { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
15847
15848 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
15849 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
15850
15851 { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
15852 { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
15853 { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
15854 { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
15855 { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
15856 { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
15857
15858 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
15859 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
15860 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
15861 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
15862
15863 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
15864 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
15865 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
15866 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
15867 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
15868 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
15869 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
15870 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
15871
15872 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
15873 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
15874 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
15875
15876 { MASK_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
15877 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
15878
15879 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
15880 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
15881
15882 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
15883 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
15884 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
15885
15886 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
15887 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
15888 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
15889
15890 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
15891 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
15892
15893 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
15894
15895 { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
15896 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
15897 { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
15898 { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
15899
15900 /* SSE3 MMX */
15901 { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
15902 { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
15903 { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
15904 { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
15905 { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
15906 { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 },
15907
15908 /* SSSE3 */
15909 { MASK_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, 0, 0 },
15910 { MASK_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, 0, 0 },
15911 { MASK_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, 0, 0 },
15912 { MASK_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, 0, 0 },
15913 { MASK_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, 0, 0 },
15914 { MASK_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, 0, 0 },
15915 { MASK_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, 0, 0 },
15916 { MASK_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, 0, 0 },
15917 { MASK_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, 0, 0 },
15918 { MASK_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, 0, 0 },
15919 { MASK_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, 0, 0 },
15920 { MASK_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, 0, 0 },
15921 { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv8hi3, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, 0, 0 },
15922 { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv4hi3, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, 0, 0 },
15923 { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, 0, 0 },
15924 { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, 0, 0 },
15925 { MASK_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, 0, 0 },
15926 { MASK_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, 0, 0 },
15927 { MASK_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, 0, 0 },
15928 { MASK_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, 0, 0 },
15929 { MASK_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, 0, 0 },
15930 { MASK_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, 0, 0 },
15931 { MASK_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, 0, 0 },
15932 { MASK_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, 0, 0 }
15933 };
15934
15935 static const struct builtin_description bdesc_1arg[] =
15936 {
15937 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
15938 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
15939
15940 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
15941 { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
15942 { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
15943
15944 { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
15945 { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
15946 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
15947 { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
15948 { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
15949 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
15950
15951 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
15952 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
15953
15954 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
15955
15956 { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
15957 { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
15958
15959 { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
15960 { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
15961 { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
15962 { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
15963 { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
15964
15965 { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
15966
15967 { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
15968 { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
15969 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
15970 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
15971
15972 { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
15973 { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
15974 { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
15975
15976 /* SSE3 */
15977 { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
15978 { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
15979
15980 /* SSSE3 */
15981 { MASK_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, 0, 0 },
15982 { MASK_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, 0, 0 },
15983 { MASK_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, 0, 0 },
15984 { MASK_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, 0, 0 },
15985 { MASK_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, 0, 0 },
15986 { MASK_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, 0, 0 },
15987 };
15988
15989 static void
15990 ix86_init_builtins (void)
15991 {
15992 if (TARGET_MMX)
15993 ix86_init_mmx_sse_builtins ();
15994 }
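/* A note on availability: the mask field in each builtin description is
   checked against the active target flags when the builtin is registered,
   so only the builtins enabled by the -m options in effect are visible.
   A rough user-level sketch, assuming GCC's emmintrin.h maps
   _mm_adds_epi16 onto __builtin_ia32_paddsw128 and the file is compiled
   with -msse2:

       #include <emmintrin.h>

       __m128i
       add_saturating (__m128i a, __m128i b)
       {
         return _mm_adds_epi16 (a, b);
       }

   Without -msse2 the builtin is never defined and the call is rejected.  */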
15995
15996 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
15997 is zero. Otherwise, if TARGET_SSE is not set, only define the MMX
15998 builtins. */
15999 static void
16000 ix86_init_mmx_sse_builtins (void)
16001 {
16002 const struct builtin_description * d;
16003 size_t i;
16004
16005 tree V16QI_type_node = build_vector_type_for_mode (char_type_node, V16QImode);
16006 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
16007 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
16008 tree V2DI_type_node
16009 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
16010 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
16011 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
16012 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
16013 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
16014 tree V8QI_type_node = build_vector_type_for_mode (char_type_node, V8QImode);
16015 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
16016
16017 tree pchar_type_node = build_pointer_type (char_type_node);
16018 tree pcchar_type_node = build_pointer_type (
16019 build_type_variant (char_type_node, 1, 0));
16020 tree pfloat_type_node = build_pointer_type (float_type_node);
16021 tree pcfloat_type_node = build_pointer_type (
16022 build_type_variant (float_type_node, 1, 0));
16023 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
16024 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
16025 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
16026
16027 /* Comparisons. */
16028 tree int_ftype_v4sf_v4sf
16029 = build_function_type_list (integer_type_node,
16030 V4SF_type_node, V4SF_type_node, NULL_TREE);
16031 tree v4si_ftype_v4sf_v4sf
16032 = build_function_type_list (V4SI_type_node,
16033 V4SF_type_node, V4SF_type_node, NULL_TREE);
16034 /* MMX/SSE/integer conversions. */
16035 tree int_ftype_v4sf
16036 = build_function_type_list (integer_type_node,
16037 V4SF_type_node, NULL_TREE);
16038 tree int64_ftype_v4sf
16039 = build_function_type_list (long_long_integer_type_node,
16040 V4SF_type_node, NULL_TREE);
16041 tree int_ftype_v8qi
16042 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
16043 tree v4sf_ftype_v4sf_int
16044 = build_function_type_list (V4SF_type_node,
16045 V4SF_type_node, integer_type_node, NULL_TREE);
16046 tree v4sf_ftype_v4sf_int64
16047 = build_function_type_list (V4SF_type_node,
16048 V4SF_type_node, long_long_integer_type_node,
16049 NULL_TREE);
16050 tree v4sf_ftype_v4sf_v2si
16051 = build_function_type_list (V4SF_type_node,
16052 V4SF_type_node, V2SI_type_node, NULL_TREE);
16053
16054 /* Miscellaneous. */
16055 tree v8qi_ftype_v4hi_v4hi
16056 = build_function_type_list (V8QI_type_node,
16057 V4HI_type_node, V4HI_type_node, NULL_TREE);
16058 tree v4hi_ftype_v2si_v2si
16059 = build_function_type_list (V4HI_type_node,
16060 V2SI_type_node, V2SI_type_node, NULL_TREE);
16061 tree v4sf_ftype_v4sf_v4sf_int
16062 = build_function_type_list (V4SF_type_node,
16063 V4SF_type_node, V4SF_type_node,
16064 integer_type_node, NULL_TREE);
16065 tree v2si_ftype_v4hi_v4hi
16066 = build_function_type_list (V2SI_type_node,
16067 V4HI_type_node, V4HI_type_node, NULL_TREE);
16068 tree v4hi_ftype_v4hi_int
16069 = build_function_type_list (V4HI_type_node,
16070 V4HI_type_node, integer_type_node, NULL_TREE);
16071 tree v4hi_ftype_v4hi_di
16072 = build_function_type_list (V4HI_type_node,
16073 V4HI_type_node, long_long_unsigned_type_node,
16074 NULL_TREE);
16075 tree v2si_ftype_v2si_di
16076 = build_function_type_list (V2SI_type_node,
16077 V2SI_type_node, long_long_unsigned_type_node,
16078 NULL_TREE);
16079 tree void_ftype_void
16080 = build_function_type (void_type_node, void_list_node);
16081 tree void_ftype_unsigned
16082 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
16083 tree void_ftype_unsigned_unsigned
16084 = build_function_type_list (void_type_node, unsigned_type_node,
16085 unsigned_type_node, NULL_TREE);
16086 tree void_ftype_pcvoid_unsigned_unsigned
16087 = build_function_type_list (void_type_node, const_ptr_type_node,
16088 unsigned_type_node, unsigned_type_node,
16089 NULL_TREE);
16090 tree unsigned_ftype_void
16091 = build_function_type (unsigned_type_node, void_list_node);
16092 tree v2si_ftype_v4sf
16093 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
16094 /* Loads/stores. */
16095 tree void_ftype_v8qi_v8qi_pchar
16096 = build_function_type_list (void_type_node,
16097 V8QI_type_node, V8QI_type_node,
16098 pchar_type_node, NULL_TREE);
16099 tree v4sf_ftype_pcfloat
16100 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
16101 /* @@@ the type is bogus */
16102 tree v4sf_ftype_v4sf_pv2si
16103 = build_function_type_list (V4SF_type_node,
16104 V4SF_type_node, pv2si_type_node, NULL_TREE);
16105 tree void_ftype_pv2si_v4sf
16106 = build_function_type_list (void_type_node,
16107 pv2si_type_node, V4SF_type_node, NULL_TREE);
16108 tree void_ftype_pfloat_v4sf
16109 = build_function_type_list (void_type_node,
16110 pfloat_type_node, V4SF_type_node, NULL_TREE);
16111 tree void_ftype_pdi_di
16112 = build_function_type_list (void_type_node,
16113 pdi_type_node, long_long_unsigned_type_node,
16114 NULL_TREE);
16115 tree void_ftype_pv2di_v2di
16116 = build_function_type_list (void_type_node,
16117 pv2di_type_node, V2DI_type_node, NULL_TREE);
16118 /* Normal vector unops. */
16119 tree v4sf_ftype_v4sf
16120 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16121 tree v16qi_ftype_v16qi
16122 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16123 tree v8hi_ftype_v8hi
16124 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16125 tree v4si_ftype_v4si
16126 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16127 tree v8qi_ftype_v8qi
16128 = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
16129 tree v4hi_ftype_v4hi
16130 = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
16131
16132 /* Normal vector binops. */
16133 tree v4sf_ftype_v4sf_v4sf
16134 = build_function_type_list (V4SF_type_node,
16135 V4SF_type_node, V4SF_type_node, NULL_TREE);
16136 tree v8qi_ftype_v8qi_v8qi
16137 = build_function_type_list (V8QI_type_node,
16138 V8QI_type_node, V8QI_type_node, NULL_TREE);
16139 tree v4hi_ftype_v4hi_v4hi
16140 = build_function_type_list (V4HI_type_node,
16141 V4HI_type_node, V4HI_type_node, NULL_TREE);
16142 tree v2si_ftype_v2si_v2si
16143 = build_function_type_list (V2SI_type_node,
16144 V2SI_type_node, V2SI_type_node, NULL_TREE);
16145 tree di_ftype_di_di
16146 = build_function_type_list (long_long_unsigned_type_node,
16147 long_long_unsigned_type_node,
16148 long_long_unsigned_type_node, NULL_TREE);
16149
16150 tree di_ftype_di_di_int
16151 = build_function_type_list (long_long_unsigned_type_node,
16152 long_long_unsigned_type_node,
16153 long_long_unsigned_type_node,
16154 integer_type_node, NULL_TREE);
16155
16156 tree v2si_ftype_v2sf
16157 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
16158 tree v2sf_ftype_v2si
16159 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
16160 tree v2si_ftype_v2si
16161 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
16162 tree v2sf_ftype_v2sf
16163 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
16164 tree v2sf_ftype_v2sf_v2sf
16165 = build_function_type_list (V2SF_type_node,
16166 V2SF_type_node, V2SF_type_node, NULL_TREE);
16167 tree v2si_ftype_v2sf_v2sf
16168 = build_function_type_list (V2SI_type_node,
16169 V2SF_type_node, V2SF_type_node, NULL_TREE);
16170 tree pint_type_node = build_pointer_type (integer_type_node);
16171 tree pdouble_type_node = build_pointer_type (double_type_node);
16172 tree pcdouble_type_node = build_pointer_type (
16173 build_type_variant (double_type_node, 1, 0));
16174 tree int_ftype_v2df_v2df
16175 = build_function_type_list (integer_type_node,
16176 V2DF_type_node, V2DF_type_node, NULL_TREE);
16177
16178 tree void_ftype_pcvoid
16179 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
16180 tree v4sf_ftype_v4si
16181 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
16182 tree v4si_ftype_v4sf
16183 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
16184 tree v2df_ftype_v4si
16185 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
16186 tree v4si_ftype_v2df
16187 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
16188 tree v2si_ftype_v2df
16189 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
16190 tree v4sf_ftype_v2df
16191 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
16192 tree v2df_ftype_v2si
16193 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
16194 tree v2df_ftype_v4sf
16195 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
16196 tree int_ftype_v2df
16197 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
16198 tree int64_ftype_v2df
16199 = build_function_type_list (long_long_integer_type_node,
16200 V2DF_type_node, NULL_TREE);
16201 tree v2df_ftype_v2df_int
16202 = build_function_type_list (V2DF_type_node,
16203 V2DF_type_node, integer_type_node, NULL_TREE);
16204 tree v2df_ftype_v2df_int64
16205 = build_function_type_list (V2DF_type_node,
16206 V2DF_type_node, long_long_integer_type_node,
16207 NULL_TREE);
16208 tree v4sf_ftype_v4sf_v2df
16209 = build_function_type_list (V4SF_type_node,
16210 V4SF_type_node, V2DF_type_node, NULL_TREE);
16211 tree v2df_ftype_v2df_v4sf
16212 = build_function_type_list (V2DF_type_node,
16213 V2DF_type_node, V4SF_type_node, NULL_TREE);
16214 tree v2df_ftype_v2df_v2df_int
16215 = build_function_type_list (V2DF_type_node,
16216 V2DF_type_node, V2DF_type_node,
16217 integer_type_node,
16218 NULL_TREE);
16219 tree v2df_ftype_v2df_pcdouble
16220 = build_function_type_list (V2DF_type_node,
16221 V2DF_type_node, pcdouble_type_node, NULL_TREE);
16222 tree void_ftype_pdouble_v2df
16223 = build_function_type_list (void_type_node,
16224 pdouble_type_node, V2DF_type_node, NULL_TREE);
16225 tree void_ftype_pint_int
16226 = build_function_type_list (void_type_node,
16227 pint_type_node, integer_type_node, NULL_TREE);
16228 tree void_ftype_v16qi_v16qi_pchar
16229 = build_function_type_list (void_type_node,
16230 V16QI_type_node, V16QI_type_node,
16231 pchar_type_node, NULL_TREE);
16232 tree v2df_ftype_pcdouble
16233 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
16234 tree v2df_ftype_v2df_v2df
16235 = build_function_type_list (V2DF_type_node,
16236 V2DF_type_node, V2DF_type_node, NULL_TREE);
16237 tree v16qi_ftype_v16qi_v16qi
16238 = build_function_type_list (V16QI_type_node,
16239 V16QI_type_node, V16QI_type_node, NULL_TREE);
16240 tree v8hi_ftype_v8hi_v8hi
16241 = build_function_type_list (V8HI_type_node,
16242 V8HI_type_node, V8HI_type_node, NULL_TREE);
16243 tree v4si_ftype_v4si_v4si
16244 = build_function_type_list (V4SI_type_node,
16245 V4SI_type_node, V4SI_type_node, NULL_TREE);
16246 tree v2di_ftype_v2di_v2di
16247 = build_function_type_list (V2DI_type_node,
16248 V2DI_type_node, V2DI_type_node, NULL_TREE);
16249 tree v2di_ftype_v2df_v2df
16250 = build_function_type_list (V2DI_type_node,
16251 V2DF_type_node, V2DF_type_node, NULL_TREE);
16252 tree v2df_ftype_v2df
16253 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16254 tree v2di_ftype_v2di_int
16255 = build_function_type_list (V2DI_type_node,
16256 V2DI_type_node, integer_type_node, NULL_TREE);
16257 tree v2di_ftype_v2di_v2di_int
16258 = build_function_type_list (V2DI_type_node, V2DI_type_node,
16259 V2DI_type_node, integer_type_node, NULL_TREE);
16260 tree v4si_ftype_v4si_int
16261 = build_function_type_list (V4SI_type_node,
16262 V4SI_type_node, integer_type_node, NULL_TREE);
16263 tree v8hi_ftype_v8hi_int
16264 = build_function_type_list (V8HI_type_node,
16265 V8HI_type_node, integer_type_node, NULL_TREE);
16266 tree v8hi_ftype_v8hi_v2di
16267 = build_function_type_list (V8HI_type_node,
16268 V8HI_type_node, V2DI_type_node, NULL_TREE);
16269 tree v4si_ftype_v4si_v2di
16270 = build_function_type_list (V4SI_type_node,
16271 V4SI_type_node, V2DI_type_node, NULL_TREE);
16272 tree v4si_ftype_v8hi_v8hi
16273 = build_function_type_list (V4SI_type_node,
16274 V8HI_type_node, V8HI_type_node, NULL_TREE);
16275 tree di_ftype_v8qi_v8qi
16276 = build_function_type_list (long_long_unsigned_type_node,
16277 V8QI_type_node, V8QI_type_node, NULL_TREE);
16278 tree di_ftype_v2si_v2si
16279 = build_function_type_list (long_long_unsigned_type_node,
16280 V2SI_type_node, V2SI_type_node, NULL_TREE);
16281 tree v2di_ftype_v16qi_v16qi
16282 = build_function_type_list (V2DI_type_node,
16283 V16QI_type_node, V16QI_type_node, NULL_TREE);
16284 tree v2di_ftype_v4si_v4si
16285 = build_function_type_list (V2DI_type_node,
16286 V4SI_type_node, V4SI_type_node, NULL_TREE);
16287 tree int_ftype_v16qi
16288 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
16289 tree v16qi_ftype_pcchar
16290 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
16291 tree void_ftype_pchar_v16qi
16292 = build_function_type_list (void_type_node,
16293 pchar_type_node, V16QI_type_node, NULL_TREE);
16294
16295 tree float80_type;
16296 tree float128_type;
16297 tree ftype;
16298
16299 /* The __float80 type. */
16300 if (TYPE_MODE (long_double_type_node) == XFmode)
16301 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
16302 "__float80");
16303 else
16304 {
16305 /* Create a distinct 80-bit type, since long double is not XFmode here. */
16306 float80_type = make_node (REAL_TYPE);
16307 TYPE_PRECISION (float80_type) = 80;
16308 layout_type (float80_type);
16309 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
16310 }
16311
16312 if (TARGET_64BIT)
16313 {
16314 float128_type = make_node (REAL_TYPE);
16315 TYPE_PRECISION (float128_type) = 128;
16316 layout_type (float128_type);
16317 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
16318 }
16319
16320 /* Add all builtins that are more or less simple operations on two
16321 operands. */
16322 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16323 {
16324 /* Use one of the operands; the target can have a different mode for
16325 mask-generating compares. */
16326 enum machine_mode mode;
16327 tree type;
16328
16329 if (d->name == 0)
16330 continue;
16331 mode = insn_data[d->icode].operand[1].mode;
16332
16333 switch (mode)
16334 {
16335 case V16QImode:
16336 type = v16qi_ftype_v16qi_v16qi;
16337 break;
16338 case V8HImode:
16339 type = v8hi_ftype_v8hi_v8hi;
16340 break;
16341 case V4SImode:
16342 type = v4si_ftype_v4si_v4si;
16343 break;
16344 case V2DImode:
16345 type = v2di_ftype_v2di_v2di;
16346 break;
16347 case V2DFmode:
16348 type = v2df_ftype_v2df_v2df;
16349 break;
16350 case V4SFmode:
16351 type = v4sf_ftype_v4sf_v4sf;
16352 break;
16353 case V8QImode:
16354 type = v8qi_ftype_v8qi_v8qi;
16355 break;
16356 case V4HImode:
16357 type = v4hi_ftype_v4hi_v4hi;
16358 break;
16359 case V2SImode:
16360 type = v2si_ftype_v2si_v2si;
16361 break;
16362 case DImode:
16363 type = di_ftype_di_di;
16364 break;
16365
16366 default:
16367 gcc_unreachable ();
16368 }
16369
16370 /* Override for comparisons. */
16371 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
16372 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
16373 type = v4si_ftype_v4sf_v4sf;
16374
16375 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
16376 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
16377 type = v2di_ftype_v2df_v2df;
16378
16379 def_builtin (d->mask, d->name, type, d->code);
16380 }
16381
16382 /* Add all builtins that are more or less simple operations on 1 operand. */
16383 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16384 {
16385 enum machine_mode mode;
16386 tree type;
16387
16388 if (d->name == 0)
16389 continue;
16390 mode = insn_data[d->icode].operand[1].mode;
16391
16392 switch (mode)
16393 {
16394 case V16QImode:
16395 type = v16qi_ftype_v16qi;
16396 break;
16397 case V8HImode:
16398 type = v8hi_ftype_v8hi;
16399 break;
16400 case V4SImode:
16401 type = v4si_ftype_v4si;
16402 break;
16403 case V2DFmode:
16404 type = v2df_ftype_v2df;
16405 break;
16406 case V4SFmode:
16407 type = v4sf_ftype_v4sf;
16408 break;
16409 case V8QImode:
16410 type = v8qi_ftype_v8qi;
16411 break;
16412 case V4HImode:
16413 type = v4hi_ftype_v4hi;
16414 break;
16415 case V2SImode:
16416 type = v2si_ftype_v2si;
16417 break;
16418
16419 default:
16420 gcc_unreachable ();
16421 }
16422
16423 def_builtin (d->mask, d->name, type, d->code);
16424 }
16425
16426 /* Add the remaining MMX insns with somewhat more complicated types. */
16427 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
16428 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
16429 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
16430 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
16431
16432 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
16433 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
16434 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
16435
16436 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
16437 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
16438
16439 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
16440 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
16441
16442 /* comi/ucomi insns. */
16443 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
16444 if (d->mask == MASK_SSE2)
16445 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
16446 else
16447 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
16448
16449 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
16450 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
16451 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
16452
16453 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
16454 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
16455 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
16456 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
16457 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
16458 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
16459 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
16460 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
16461 def_builtin_const (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
16462 def_builtin_const (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
16463 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
16464
16465 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
16466
16467 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
16468 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
16469
16470 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
16471 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
16472 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
16473 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
16474
16475 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
16476 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
16477 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
16478 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
16479
16480 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
16481
16482 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
16483
16484 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
16485 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
16486 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
16487 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
16488 def_builtin_const (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
16489 def_builtin_const (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
16490
16491 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
16492
16493 /* Original 3DNow! */
16494 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
16495 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
16496 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
16497 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
16498 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
16499 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
16500 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
16501 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
16502 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
16503 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
16504 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
16505 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
16506 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
16507 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
16508 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
16509 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
16510 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
16511 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
16512 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
16513 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
16514
16515 /* 3DNow! extension as used in the Athlon CPU. */
16516 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
16517 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
16518 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
16519 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
16520 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
16521 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
16522
16523 /* SSE2 */
16524 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
16525
16526 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
16527 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
16528
16529 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
16530 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
16531
16532 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
16533 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
16534 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
16535 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
16536 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
16537
16538 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
16539 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
16540 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
16541 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
16542
16543 def_builtin_const (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
16544 def_builtin_const (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
16545
16546 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
16547
16548 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
16549 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
16550
16551 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
16552 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
16553 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
16554 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
16555 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
16556
16557 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
16558
16559 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
16560 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
16561 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
16562 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
16563
16564 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
16565 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
16566 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
16567
16568 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
16569 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
16570 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
16571 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
16572
16573 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
16574 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
16575 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
16576
16577 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
16578 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
16579
16580 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
16581 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
16582
16583 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
16584 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
16585 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
16586
16587 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
16588 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
16589 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
16590
16591 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
16592 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
16593
16594 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
16595 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
16596 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
16597 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
16598
16599 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
16600 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
16601 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
16602 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
16603
16604 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
16605 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
16606
16607 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
16608
16609 /* Prescott New Instructions. */
16610 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
16611 void_ftype_pcvoid_unsigned_unsigned,
16612 IX86_BUILTIN_MONITOR);
16613 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
16614 void_ftype_unsigned_unsigned,
16615 IX86_BUILTIN_MWAIT);
16616 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
16617 v4sf_ftype_v4sf,
16618 IX86_BUILTIN_MOVSHDUP);
16619 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
16620 v4sf_ftype_v4sf,
16621 IX86_BUILTIN_MOVSLDUP);
16622 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
16623 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
16624
16625 /* SSSE3. */
16626 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr128",
16627 v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PALIGNR128);
16628 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr", di_ftype_di_di_int,
16629 IX86_BUILTIN_PALIGNR);
16630
16631 /* Access to the vec_init patterns. */
16632 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
16633 integer_type_node, NULL_TREE);
16634 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
16635 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
16636
16637 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
16638 short_integer_type_node,
16639 short_integer_type_node,
16640 short_integer_type_node, NULL_TREE);
16641 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
16642 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
16643
16644 ftype = build_function_type_list (V8QI_type_node, char_type_node,
16645 char_type_node, char_type_node,
16646 char_type_node, char_type_node,
16647 char_type_node, char_type_node,
16648 char_type_node, NULL_TREE);
16649 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
16650 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
16651
16652 /* Access to the vec_extract patterns. */
16653 ftype = build_function_type_list (double_type_node, V2DF_type_node,
16654 integer_type_node, NULL_TREE);
16655 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
16656 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
16657
16658 ftype = build_function_type_list (long_long_integer_type_node,
16659 V2DI_type_node, integer_type_node,
16660 NULL_TREE);
16661 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
16662 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
16663
16664 ftype = build_function_type_list (float_type_node, V4SF_type_node,
16665 integer_type_node, NULL_TREE);
16666 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
16667 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
16668
16669 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
16670 integer_type_node, NULL_TREE);
16671 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
16672 ftype, IX86_BUILTIN_VEC_EXT_V4SI);
16673
16674 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
16675 integer_type_node, NULL_TREE);
16676 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
16677 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
16678
16679 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
16680 integer_type_node, NULL_TREE);
16681 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
16682 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
16683
16684 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
16685 integer_type_node, NULL_TREE);
16686 def_builtin (MASK_MMX, "__builtin_ia32_vec_ext_v2si",
16687 ftype, IX86_BUILTIN_VEC_EXT_V2SI);
16688
16689 /* Access to the vec_set patterns. */
16690 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
16691 intHI_type_node,
16692 integer_type_node, NULL_TREE);
16693 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
16694 ftype, IX86_BUILTIN_VEC_SET_V8HI);
16695
16696 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
16697 intHI_type_node,
16698 integer_type_node, NULL_TREE);
16699 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
16700 ftype, IX86_BUILTIN_VEC_SET_V4HI);
16701 }
16702
16703 /* Errors in the source file can cause expand_expr to return const0_rtx
16704 where we expect a vector. To avoid crashing, use one of the vector
16705 clear instructions. */
16706 static rtx
16707 safe_vector_operand (rtx x, enum machine_mode mode)
16708 {
16709 if (x == const0_rtx)
16710 x = CONST0_RTX (mode);
16711 return x;
16712 }
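/* For a vector mode, CONST0_RTX is the shared all-zero CONST_VECTOR;
   roughly, for V4SFmode:

       (const_vector:V4SF [ (const_double:SF 0.0) (const_double:SF 0.0)
                            (const_double:SF 0.0) (const_double:SF 0.0) ])

   Unlike const0_rtx, this satisfies the vector operand predicates, so the
   expanders below do not crash on operands produced from already-diagnosed
   source errors.  */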
16713
16714 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
16715
16716 static rtx
16717 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
16718 {
16719 rtx pat, xops[3];
16720 tree arg0 = TREE_VALUE (arglist);
16721 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16722 rtx op0 = expand_normal (arg0);
16723 rtx op1 = expand_normal (arg1);
16724 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16725 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16726 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
16727
16728 if (VECTOR_MODE_P (mode0))
16729 op0 = safe_vector_operand (op0, mode0);
16730 if (VECTOR_MODE_P (mode1))
16731 op1 = safe_vector_operand (op1, mode1);
16732
16733 if (optimize || !target
16734 || GET_MODE (target) != tmode
16735 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16736 target = gen_reg_rtx (tmode);
16737
16738 if (GET_MODE (op1) == SImode && mode1 == TImode)
16739 {
16740 rtx x = gen_reg_rtx (V4SImode);
16741 emit_insn (gen_sse2_loadd (x, op1));
16742 op1 = gen_lowpart (TImode, x);
16743 }
16744
16745 /* The expanded operands must already have the modes the insn expects
16746 (VOIDmode covers constants with no inherent mode). */
16747 gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
16748 && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));
16749
16750 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
16751 op0 = copy_to_mode_reg (mode0, op0);
16752 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
16753 op1 = copy_to_mode_reg (mode1, op1);
16754
16755 /* ??? Using ix86_fixup_binary_operands is problematic when
16756 we've got mismatched modes. Fake it. */
16757
16758 xops[0] = target;
16759 xops[1] = op0;
16760 xops[2] = op1;
16761
16762 if (tmode == mode0 && tmode == mode1)
16763 {
16764 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
16765 op0 = xops[1];
16766 op1 = xops[2];
16767 }
16768 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
16769 {
16770 op0 = force_reg (mode0, op0);
16771 op1 = force_reg (mode1, op1);
16772 target = gen_reg_rtx (tmode);
16773 }
16774
16775 pat = GEN_FCN (icode) (target, op0, op1);
16776 if (! pat)
16777 return 0;
16778 emit_insn (pat);
16779 return target;
16780 }
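/* A rough sketch of a two-operand builtin reaching this helper, assuming
   GCC's emmintrin.h maps _mm_mullo_epi16 onto __builtin_ia32_pmullw128
   (table entry CODE_FOR_mulv8hi3 in bdesc_2arg):

       #include <emmintrin.h>

       __m128i
       mul_low (__m128i a, __m128i b)
       {
         return _mm_mullo_epi16 (a, b);
       }

   Both operands arrive in V8HImode, a fresh V8HImode pseudo is picked as
   the target, and a single mulv8hi3 pattern is emitted.  */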
16781
16782 /* Subroutine of ix86_expand_builtin to take care of stores. */
16783
16784 static rtx
16785 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
16786 {
16787 rtx pat;
16788 tree arg0 = TREE_VALUE (arglist);
16789 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16790 rtx op0 = expand_normal (arg0);
16791 rtx op1 = expand_normal (arg1);
16792 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
16793 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
16794
16795 if (VECTOR_MODE_P (mode1))
16796 op1 = safe_vector_operand (op1, mode1);
16797
16798 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
16799 op1 = copy_to_mode_reg (mode1, op1);
16800
16801 pat = GEN_FCN (icode) (op0, op1);
16802 if (pat)
16803 emit_insn (pat);
16804 return 0;
16805 }
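/* A rough sketch of a store builtin reaching this helper, assuming GCC's
   xmmintrin.h maps _mm_storeu_ps onto __builtin_ia32_storeups
   (expanded below with CODE_FOR_sse_movups):

       #include <xmmintrin.h>

       void
       store_unaligned (float *p, __m128 v)
       {
         _mm_storeu_ps (p, v);
       }

   The pointer argument becomes a MEM in the vector mode and the unaligned
   store pattern is emitted; a store produces no value, hence the constant
   zero return.  */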
16806
16807 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
16808
16809 static rtx
16810 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
16811 rtx target, int do_load)
16812 {
16813 rtx pat;
16814 tree arg0 = TREE_VALUE (arglist);
16815 rtx op0 = expand_normal (arg0);
16816 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16817 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16818
16819 if (optimize || !target
16820 || GET_MODE (target) != tmode
16821 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16822 target = gen_reg_rtx (tmode);
16823 if (do_load)
16824 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
16825 else
16826 {
16827 if (VECTOR_MODE_P (mode0))
16828 op0 = safe_vector_operand (op0, mode0);
16829
16830 if ((optimize && !register_operand (op0, mode0))
16831 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16832 op0 = copy_to_mode_reg (mode0, op0);
16833 }
16834
16835 pat = GEN_FCN (icode) (target, op0);
16836 if (! pat)
16837 return 0;
16838 emit_insn (pat);
16839 return target;
16840 }
16841
16842 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
16843 sqrtss, rsqrtss, rcpss. */
16844
16845 static rtx
16846 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
16847 {
16848 rtx pat;
16849 tree arg0 = TREE_VALUE (arglist);
16850 rtx op1, op0 = expand_normal (arg0);
16851 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16852 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16853
16854 if (optimize || !target
16855 || GET_MODE (target) != tmode
16856 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16857 target = gen_reg_rtx (tmode);
16858
16859 if (VECTOR_MODE_P (mode0))
16860 op0 = safe_vector_operand (op0, mode0);
16861
16862 if ((optimize && !register_operand (op0, mode0))
16863 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16864 op0 = copy_to_mode_reg (mode0, op0);
16865
16866 op1 = op0;
16867 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
16868 op1 = copy_to_mode_reg (mode0, op1);
16869
16870 pat = GEN_FCN (icode) (target, op0, op1);
16871 if (! pat)
16872 return 0;
16873 emit_insn (pat);
16874 return target;
16875 }
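/* The three patterns handled here merge a scalar result into the upper
   elements of their second operand, which is why op1 is made a copy of
   op0.  A rough user-level sketch, assuming GCC's xmmintrin.h maps
   _mm_sqrt_ss onto __builtin_ia32_sqrtss:

       #include <xmmintrin.h>

       __m128
       sqrt_low (__m128 x)
       {
         return _mm_sqrt_ss (x);
       }

   Element 0 of the result is the square root of element 0 of x; elements
   1-3 are copied unchanged from x.  */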
16876
16877 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
16878
16879 static rtx
16880 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
16881 rtx target)
16882 {
16883 rtx pat;
16884 tree arg0 = TREE_VALUE (arglist);
16885 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16886 rtx op0 = expand_normal (arg0);
16887 rtx op1 = expand_normal (arg1);
16888 rtx op2;
16889 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
16890 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
16891 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
16892 enum rtx_code comparison = d->comparison;
16893
16894 if (VECTOR_MODE_P (mode0))
16895 op0 = safe_vector_operand (op0, mode0);
16896 if (VECTOR_MODE_P (mode1))
16897 op1 = safe_vector_operand (op1, mode1);
16898
16899 /* Swap operands if we have a comparison that isn't available in
16900 hardware. */
16901 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
16902 {
16903 rtx tmp = gen_reg_rtx (mode1);
16904 emit_move_insn (tmp, op1);
16905 op1 = op0;
16906 op0 = tmp;
16907 }
16908
16909 if (optimize || !target
16910 || GET_MODE (target) != tmode
16911 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
16912 target = gen_reg_rtx (tmode);
16913
16914 if ((optimize && !register_operand (op0, mode0))
16915 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
16916 op0 = copy_to_mode_reg (mode0, op0);
16917 if ((optimize && !register_operand (op1, mode1))
16918 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
16919 op1 = copy_to_mode_reg (mode1, op1);
16920
16921 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
16922 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
16923 if (! pat)
16924 return 0;
16925 emit_insn (pat);
16926 return target;
16927 }
16928
16929 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
16930
16931 static rtx
16932 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
16933 rtx target)
16934 {
16935 rtx pat;
16936 tree arg0 = TREE_VALUE (arglist);
16937 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16938 rtx op0 = expand_normal (arg0);
16939 rtx op1 = expand_normal (arg1);
16940 rtx op2;
16941 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
16942 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
16943 enum rtx_code comparison = d->comparison;
16944
16945 if (VECTOR_MODE_P (mode0))
16946 op0 = safe_vector_operand (op0, mode0);
16947 if (VECTOR_MODE_P (mode1))
16948 op1 = safe_vector_operand (op1, mode1);
16949
16950 /* Swap operands if we have a comparison that isn't available in
16951 hardware. */
16952 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
16953 {
16954 rtx tmp = op1;
16955 op1 = op0;
16956 op0 = tmp;
16957 }
16958
16959 target = gen_reg_rtx (SImode);
16960 emit_move_insn (target, const0_rtx);
16961 target = gen_rtx_SUBREG (QImode, target, 0);
16962
16963 if ((optimize && !register_operand (op0, mode0))
16964 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
16965 op0 = copy_to_mode_reg (mode0, op0);
16966 if ((optimize && !register_operand (op1, mode1))
16967 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
16968 op1 = copy_to_mode_reg (mode1, op1);
16969
16970 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
16971 pat = GEN_FCN (d->icode) (op0, op1);
16972 if (! pat)
16973 return 0;
16974 emit_insn (pat);
16975 emit_insn (gen_rtx_SET (VOIDmode,
16976 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
16977 gen_rtx_fmt_ee (comparison, QImode,
16978 SET_DEST (pat),
16979 const0_rtx)));
16980
16981 return SUBREG_REG (target);
16982 }
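/* A rough sketch of a comi builtin reaching this helper, assuming GCC's
   xmmintrin.h maps _mm_comige_ss onto __builtin_ia32_comige:

       #include <xmmintrin.h>

       int
       scalar_ge (__m128 a, __m128 b)
       {
         return _mm_comige_ss (a, b);
       }

   The comi pattern only sets the flags register; the extra SET emitted
   above stores the comparison outcome into the low byte of a fresh SImode
   pseudo, which is what the builtin returns (roughly a comiss followed by
   a setcc at the assembly level).  */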
16983
16984 /* Return the integer constant in ARG. Constrain it to be in the range
16985 of the subparts of VEC_TYPE; issue an error if not. */
16986
16987 static int
16988 get_element_number (tree vec_type, tree arg)
16989 {
16990 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
16991
16992 if (!host_integerp (arg, 1)
16993 || (elt = tree_low_cst (arg, 1), elt > max))
16994 {
16995 error ("selector must be an integer constant in the range 0..%wi", max);
16996 return 0;
16997 }
16998
16999 return elt;
17000 }
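/* A rough sketch of the diagnostic, using the __builtin_ia32_vec_ext_v4sf
   builtin registered above (the cast through __v4sf mirrors how GCC's own
   headers call it):

       #include <xmmintrin.h>

       float
       bad_selector (__m128 v)
       {
         return __builtin_ia32_vec_ext_v4sf ((__v4sf) v, 7);
       }

   V4SF has four subparts, so the selector 7 draws
   "selector must be an integer constant in the range 0..3"; element 0 is
   then used so that compilation can continue.  */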
17001
17002 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17003 ix86_expand_vector_init. We DO have language-level syntax for this, in
17004 the form of (type){ init-list }. Except that since we can't place emms
17005 instructions from inside the compiler, we can't allow the use of MMX
17006 registers unless the user explicitly asks for it. So we do *not* define
17007 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
17008 we have builtins invoked by mmintrin.h that gives us license to emit
17009 these sorts of instructions. */
17010
17011 static rtx
17012 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
17013 {
17014 enum machine_mode tmode = TYPE_MODE (type);
17015 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
17016 int i, n_elt = GET_MODE_NUNITS (tmode);
17017 rtvec v = rtvec_alloc (n_elt);
17018
17019 gcc_assert (VECTOR_MODE_P (tmode));
17020
17021 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
17022 {
17023 rtx x = expand_normal (TREE_VALUE (arglist));
17024 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
17025 }
17026
17027 gcc_assert (arglist == NULL);
17028
17029 if (!target || !register_operand (target, tmode))
17030 target = gen_reg_rtx (tmode);
17031
17032 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
17033 return target;
17034 }
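/* A rough sketch of how mmintrin.h reaches this helper, assuming it maps
   _mm_setr_pi16 onto __builtin_ia32_vec_init_v4hi (the exact header text
   may differ):

       #include <mmintrin.h>

       __m64
       make_v4hi (short a, short b, short c, short d)
       {
         return _mm_setr_pi16 (a, b, c, d);
       }

   Each argument is expanded, narrowed to the inner HImode with
   gen_lowpart, and the four values are passed to ix86_expand_vector_init
   as a PARALLEL.  */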
17035
17036 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17037 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
17038 had a language-level syntax for referencing vector elements. */
17039
17040 static rtx
17041 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
17042 {
17043 enum machine_mode tmode, mode0;
17044 tree arg0, arg1;
17045 int elt;
17046 rtx op0;
17047
17048 arg0 = TREE_VALUE (arglist);
17049 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17050
17051 op0 = expand_normal (arg0);
17052 elt = get_element_number (TREE_TYPE (arg0), arg1);
17053
17054 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
17055 mode0 = TYPE_MODE (TREE_TYPE (arg0));
17056 gcc_assert (VECTOR_MODE_P (mode0));
17057
17058 op0 = force_reg (mode0, op0);
17059
17060 if (optimize || !target || !register_operand (target, tmode))
17061 target = gen_reg_rtx (tmode);
17062
17063 ix86_expand_vector_extract (true, target, op0, elt);
17064
17065 return target;
17066 }
17067
17068 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17069 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
17070 a language-level syntax for referencing vector elements. */
17071
17072 static rtx
17073 ix86_expand_vec_set_builtin (tree arglist)
17074 {
17075 enum machine_mode tmode, mode1;
17076 tree arg0, arg1, arg2;
17077 int elt;
17078 rtx op0, op1;
17079
17080 arg0 = TREE_VALUE (arglist);
17081 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17082 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17083
17084 tmode = TYPE_MODE (TREE_TYPE (arg0));
17085 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
17086 gcc_assert (VECTOR_MODE_P (tmode));
17087
17088 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
17089 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
17090 elt = get_element_number (TREE_TYPE (arg0), arg2);
17091
17092 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
17093 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
17094
17095 op0 = force_reg (tmode, op0);
17096 op1 = force_reg (mode1, op1);
17097
17098 ix86_expand_vector_set (true, op0, op1, elt);
17099
17100 return op0;
17101 }
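/* A rough sketch of a vec_set builtin reaching this helper, assuming
   GCC's xmmintrin.h maps _mm_insert_pi16 onto __builtin_ia32_vec_set_v4hi
   (the exact header text may differ):

       #include <xmmintrin.h>

       __m64
       replace_element_2 (__m64 v, int x)
       {
         return _mm_insert_pi16 (v, x, 2);
       }

   The vector and the new value are forced into registers, the constant
   selector is range-checked by get_element_number, and
   ix86_expand_vector_set stores the new value into element 2 before the
   updated vector is returned.  */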
17102
17103 /* Expand an expression EXP that calls a built-in function,
17104 with result going to TARGET if that's convenient
17105 (and in mode MODE if that's convenient).
17106 SUBTARGET may be used as the target for computing one of EXP's operands.
17107 IGNORE is nonzero if the value is to be ignored. */
17108
17109 static rtx
17110 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
17111 enum machine_mode mode ATTRIBUTE_UNUSED,
17112 int ignore ATTRIBUTE_UNUSED)
17113 {
17114 const struct builtin_description *d;
17115 size_t i;
17116 enum insn_code icode;
17117 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
17118 tree arglist = TREE_OPERAND (exp, 1);
17119 tree arg0, arg1, arg2;
17120 rtx op0, op1, op2, pat;
17121 enum machine_mode tmode, mode0, mode1, mode2, mode3;
17122 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
17123
17124 switch (fcode)
17125 {
17126 case IX86_BUILTIN_EMMS:
17127 emit_insn (gen_mmx_emms ());
17128 return 0;
17129
17130 case IX86_BUILTIN_SFENCE:
17131 emit_insn (gen_sse_sfence ());
17132 return 0;
17133
17134 case IX86_BUILTIN_MASKMOVQ:
17135 case IX86_BUILTIN_MASKMOVDQU:
17136 icode = (fcode == IX86_BUILTIN_MASKMOVQ
17137 ? CODE_FOR_mmx_maskmovq
17138 : CODE_FOR_sse2_maskmovdqu);
17139 /* Note the arg order is different from the operand order. */
17140 arg1 = TREE_VALUE (arglist);
17141 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
17142 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17143 op0 = expand_normal (arg0);
17144 op1 = expand_normal (arg1);
17145 op2 = expand_normal (arg2);
17146 mode0 = insn_data[icode].operand[0].mode;
17147 mode1 = insn_data[icode].operand[1].mode;
17148 mode2 = insn_data[icode].operand[2].mode;
17149
17150 op0 = force_reg (Pmode, op0);
17151 op0 = gen_rtx_MEM (mode1, op0);
17152
17153 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
17154 op0 = copy_to_mode_reg (mode0, op0);
17155 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
17156 op1 = copy_to_mode_reg (mode1, op1);
17157 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
17158 op2 = copy_to_mode_reg (mode2, op2);
17159 pat = GEN_FCN (icode) (op0, op1, op2);
17160 if (! pat)
17161 return 0;
17162 emit_insn (pat);
17163 return 0;
17164
17165 case IX86_BUILTIN_SQRTSS:
17166 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
17167 case IX86_BUILTIN_RSQRTSS:
17168 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
17169 case IX86_BUILTIN_RCPSS:
17170 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
17171
17172 case IX86_BUILTIN_LOADUPS:
17173 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
17174
17175 case IX86_BUILTIN_STOREUPS:
17176 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
17177
17178 case IX86_BUILTIN_LOADHPS:
17179 case IX86_BUILTIN_LOADLPS:
17180 case IX86_BUILTIN_LOADHPD:
17181 case IX86_BUILTIN_LOADLPD:
17182 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
17183 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
17184 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
17185 : CODE_FOR_sse2_loadlpd);
17186 arg0 = TREE_VALUE (arglist);
17187 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17188 op0 = expand_normal (arg0);
17189 op1 = expand_normal (arg1);
17190 tmode = insn_data[icode].operand[0].mode;
17191 mode0 = insn_data[icode].operand[1].mode;
17192 mode1 = insn_data[icode].operand[2].mode;
17193
17194 op0 = force_reg (mode0, op0);
17195 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
17196 if (optimize || target == 0
17197 || GET_MODE (target) != tmode
17198 || !register_operand (target, tmode))
17199 target = gen_reg_rtx (tmode);
17200 pat = GEN_FCN (icode) (target, op0, op1);
17201 if (! pat)
17202 return 0;
17203 emit_insn (pat);
17204 return target;
17205
17206 case IX86_BUILTIN_STOREHPS:
17207 case IX86_BUILTIN_STORELPS:
17208 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
17209 : CODE_FOR_sse_storelps);
17210 arg0 = TREE_VALUE (arglist);
17211 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17212 op0 = expand_normal (arg0);
17213 op1 = expand_normal (arg1);
17214 mode0 = insn_data[icode].operand[0].mode;
17215 mode1 = insn_data[icode].operand[1].mode;
17216
17217 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
17218 op1 = force_reg (mode1, op1);
17219
17220 pat = GEN_FCN (icode) (op0, op1);
17221 if (! pat)
17222 return 0;
17223 emit_insn (pat);
17224 return const0_rtx;
17225
17226 case IX86_BUILTIN_MOVNTPS:
17227 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
17228 case IX86_BUILTIN_MOVNTQ:
17229 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
17230
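    /* The ldmxcsr and stmxcsr instructions operate on a 32-bit memory
       operand, so in both cases below the value is bounced through the
       SLOT_TEMP stack temporary rather than a register.  */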
17231 case IX86_BUILTIN_LDMXCSR:
17232 op0 = expand_normal (TREE_VALUE (arglist));
17233 target = assign_386_stack_local (SImode, SLOT_TEMP);
17234 emit_move_insn (target, op0);
17235 emit_insn (gen_sse_ldmxcsr (target));
17236 return 0;
17237
17238 case IX86_BUILTIN_STMXCSR:
17239 target = assign_386_stack_local (SImode, SLOT_TEMP);
17240 emit_insn (gen_sse_stmxcsr (target));
17241 return copy_to_mode_reg (SImode, target);
17242
17243 case IX86_BUILTIN_SHUFPS:
17244 case IX86_BUILTIN_SHUFPD:
17245 icode = (fcode == IX86_BUILTIN_SHUFPS
17246 ? CODE_FOR_sse_shufps
17247 : CODE_FOR_sse2_shufpd);
17248 arg0 = TREE_VALUE (arglist);
17249 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17250 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17251 op0 = expand_normal (arg0);
17252 op1 = expand_normal (arg1);
17253 op2 = expand_normal (arg2);
17254 tmode = insn_data[icode].operand[0].mode;
17255 mode0 = insn_data[icode].operand[1].mode;
17256 mode1 = insn_data[icode].operand[2].mode;
17257 mode2 = insn_data[icode].operand[3].mode;
17258
17259 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
17260 op0 = copy_to_mode_reg (mode0, op0);
17261 if ((optimize && !register_operand (op1, mode1))
17262 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
17263 op1 = copy_to_mode_reg (mode1, op1);
17264 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
17265 {
17266 /* @@@ better error message */
17267 error ("mask must be an immediate");
17268 return gen_reg_rtx (tmode);
17269 }
17270 if (optimize || target == 0
17271 || GET_MODE (target) != tmode
17272 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17273 target = gen_reg_rtx (tmode);
17274 pat = GEN_FCN (icode) (target, op0, op1, op2);
17275 if (! pat)
17276 return 0;
17277 emit_insn (pat);
17278 return target;
17279
17280 case IX86_BUILTIN_PSHUFW:
17281 case IX86_BUILTIN_PSHUFD:
17282 case IX86_BUILTIN_PSHUFHW:
17283 case IX86_BUILTIN_PSHUFLW:
17284 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
17285 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
17286 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
17287 : CODE_FOR_mmx_pshufw);
17288 arg0 = TREE_VALUE (arglist);
17289 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17290 op0 = expand_normal (arg0);
17291 op1 = expand_normal (arg1);
17292 tmode = insn_data[icode].operand[0].mode;
17293 mode1 = insn_data[icode].operand[1].mode;
17294 mode2 = insn_data[icode].operand[2].mode;
17295
17296 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17297 op0 = copy_to_mode_reg (mode1, op0);
17298 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17299 {
17300 /* @@@ better error message */
17301 error ("mask must be an immediate");
17302 return const0_rtx;
17303 }
17304 if (target == 0
17305 || GET_MODE (target) != tmode
17306 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17307 target = gen_reg_rtx (tmode);
17308 pat = GEN_FCN (icode) (target, op0, op1);
17309 if (! pat)
17310 return 0;
17311 emit_insn (pat);
17312 return target;
17313
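    /* Note that the shift operand of these builtins is a bit count: the
       sse2_ashlti3/sse2_lshrti3 patterns shift the whole TImode value, so
       the byte-oriented intrinsics are expected to scale their argument,
       roughly as in

	   #define _mm_slli_si128(A, N) \
	     ((__m128i) __builtin_ia32_pslldqi128 ((__m128i)(A), (N) * 8))

       (a sketch of the emmintrin.h wrapper; the exact text may differ).  */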
17314 case IX86_BUILTIN_PSLLDQI128:
17315 case IX86_BUILTIN_PSRLDQI128:
17316 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
17317 : CODE_FOR_sse2_lshrti3);
17318 arg0 = TREE_VALUE (arglist);
17319 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17320 op0 = expand_normal (arg0);
17321 op1 = expand_normal (arg1);
17322 tmode = insn_data[icode].operand[0].mode;
17323 mode1 = insn_data[icode].operand[1].mode;
17324 mode2 = insn_data[icode].operand[2].mode;
17325
17326 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17327 {
17328 op0 = copy_to_reg (op0);
17329 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
17330 }
17331 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17332 {
17333 error ("shift must be an immediate");
17334 return const0_rtx;
17335 }
17336 target = gen_reg_rtx (V2DImode);
17337 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
17338 if (! pat)
17339 return 0;
17340 emit_insn (pat);
17341 return target;
17342
17343 case IX86_BUILTIN_FEMMS:
17344 emit_insn (gen_mmx_femms ());
17345 return NULL_RTX;
17346
17347 case IX86_BUILTIN_PAVGUSB:
17348 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
17349
17350 case IX86_BUILTIN_PF2ID:
17351 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
17352
17353 case IX86_BUILTIN_PFACC:
17354 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
17355
17356 case IX86_BUILTIN_PFADD:
17357 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
17358
17359 case IX86_BUILTIN_PFCMPEQ:
17360 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
17361
17362 case IX86_BUILTIN_PFCMPGE:
17363 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
17364
17365 case IX86_BUILTIN_PFCMPGT:
17366 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
17367
17368 case IX86_BUILTIN_PFMAX:
17369 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
17370
17371 case IX86_BUILTIN_PFMIN:
17372 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
17373
17374 case IX86_BUILTIN_PFMUL:
17375 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
17376
17377 case IX86_BUILTIN_PFRCP:
17378 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
17379
17380 case IX86_BUILTIN_PFRCPIT1:
17381 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
17382
17383 case IX86_BUILTIN_PFRCPIT2:
17384 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
17385
17386 case IX86_BUILTIN_PFRSQIT1:
17387 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
17388
17389 case IX86_BUILTIN_PFRSQRT:
17390 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
17391
17392 case IX86_BUILTIN_PFSUB:
17393 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
17394
17395 case IX86_BUILTIN_PFSUBR:
17396 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
17397
17398 case IX86_BUILTIN_PI2FD:
17399 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
17400
17401 case IX86_BUILTIN_PMULHRW:
17402 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
17403
17404 case IX86_BUILTIN_PF2IW:
17405 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
17406
17407 case IX86_BUILTIN_PFNACC:
17408 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
17409
17410 case IX86_BUILTIN_PFPNACC:
17411 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
17412
17413 case IX86_BUILTIN_PI2FW:
17414 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
17415
17416 case IX86_BUILTIN_PSWAPDSI:
17417 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
17418
17419 case IX86_BUILTIN_PSWAPDSF:
17420 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
17421
17422 case IX86_BUILTIN_SQRTSD:
17423 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
17424 case IX86_BUILTIN_LOADUPD:
17425 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
17426 case IX86_BUILTIN_STOREUPD:
17427 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
17428
17429 case IX86_BUILTIN_MFENCE:
17430 emit_insn (gen_sse2_mfence ());
17431 return 0;
17432 case IX86_BUILTIN_LFENCE:
17433 emit_insn (gen_sse2_lfence ());
17434 return 0;
17435
17436 case IX86_BUILTIN_CLFLUSH:
17437 arg0 = TREE_VALUE (arglist);
17438 op0 = expand_normal (arg0);
17439 icode = CODE_FOR_sse2_clflush;
17440 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
17441 op0 = copy_to_mode_reg (Pmode, op0);
17442
17443 emit_insn (gen_sse2_clflush (op0));
17444 return 0;
17445
17446 case IX86_BUILTIN_MOVNTPD:
17447 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
17448 case IX86_BUILTIN_MOVNTDQ:
17449 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
17450 case IX86_BUILTIN_MOVNTI:
17451 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
17452
17453 case IX86_BUILTIN_LOADDQU:
17454 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
17455 case IX86_BUILTIN_STOREDQU:
17456 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
17457
17458 case IX86_BUILTIN_MONITOR:
17459 arg0 = TREE_VALUE (arglist);
17460 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17461 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17462 op0 = expand_normal (arg0);
17463 op1 = expand_normal (arg1);
17464 op2 = expand_normal (arg2);
17465 if (!REG_P (op0))
17466 op0 = copy_to_mode_reg (Pmode, op0);
17467 if (!REG_P (op1))
17468 op1 = copy_to_mode_reg (SImode, op1);
17469 if (!REG_P (op2))
17470 op2 = copy_to_mode_reg (SImode, op2);
17471 if (!TARGET_64BIT)
17472 emit_insn (gen_sse3_monitor (op0, op1, op2));
17473 else
17474 emit_insn (gen_sse3_monitor64 (op0, op1, op2));
17475 return 0;
17476
17477 case IX86_BUILTIN_MWAIT:
17478 arg0 = TREE_VALUE (arglist);
17479 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17480 op0 = expand_normal (arg0);
17481 op1 = expand_normal (arg1);
17482 if (!REG_P (op0))
17483 op0 = copy_to_mode_reg (SImode, op0);
17484 if (!REG_P (op1))
17485 op1 = copy_to_mode_reg (SImode, op1);
17486 emit_insn (gen_sse3_mwait (op0, op1));
17487 return 0;
17488
17489 case IX86_BUILTIN_LDDQU:
17490 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
17491 target, 1);
17492
17493 case IX86_BUILTIN_PALIGNR:
17494 case IX86_BUILTIN_PALIGNR128:
17495 if (fcode == IX86_BUILTIN_PALIGNR)
17496 {
17497 icode = CODE_FOR_ssse3_palignrdi;
17498 mode = DImode;
17499 }
17500 else
17501 {
17502 icode = CODE_FOR_ssse3_palignrti;
17503 mode = V2DImode;
17504 }
17505 arg0 = TREE_VALUE (arglist);
17506 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17507 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17508 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
17509 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
17510 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
17511 tmode = insn_data[icode].operand[0].mode;
17512 mode1 = insn_data[icode].operand[1].mode;
17513 mode2 = insn_data[icode].operand[2].mode;
17514 mode3 = insn_data[icode].operand[3].mode;
17515
17516 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17517 {
17518 op0 = copy_to_reg (op0);
17519 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
17520 }
17521 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17522 {
17523 op1 = copy_to_reg (op1);
17524 op1 = simplify_gen_subreg (mode2, op1, GET_MODE (op1), 0);
17525 }
17526 if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
17527 {
17528 error ("shift must be an immediate");
17529 return const0_rtx;
17530 }
17531 target = gen_reg_rtx (mode);
17532 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, mode, 0),
17533 op0, op1, op2);
17534 if (! pat)
17535 return 0;
17536 emit_insn (pat);
17537 return target;
17538
17539 case IX86_BUILTIN_VEC_INIT_V2SI:
17540 case IX86_BUILTIN_VEC_INIT_V4HI:
17541 case IX86_BUILTIN_VEC_INIT_V8QI:
17542 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
17543
17544 case IX86_BUILTIN_VEC_EXT_V2DF:
17545 case IX86_BUILTIN_VEC_EXT_V2DI:
17546 case IX86_BUILTIN_VEC_EXT_V4SF:
17547 case IX86_BUILTIN_VEC_EXT_V4SI:
17548 case IX86_BUILTIN_VEC_EXT_V8HI:
17549 case IX86_BUILTIN_VEC_EXT_V2SI:
17550 case IX86_BUILTIN_VEC_EXT_V4HI:
17551 return ix86_expand_vec_ext_builtin (arglist, target);
17552
17553 case IX86_BUILTIN_VEC_SET_V8HI:
17554 case IX86_BUILTIN_VEC_SET_V4HI:
17555 return ix86_expand_vec_set_builtin (arglist);
17556
17557 default:
17558 break;
17559 }
17560
17561 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17562 if (d->code == fcode)
17563 {
17564 /* Compares are treated specially. */
17565 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
17566 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
17567 || d->icode == CODE_FOR_sse2_maskcmpv2df3
17568 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
17569 return ix86_expand_sse_compare (d, arglist, target);
17570
17571 return ix86_expand_binop_builtin (d->icode, arglist, target);
17572 }
17573
17574 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17575 if (d->code == fcode)
17576 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
17577
17578 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
17579 if (d->code == fcode)
17580 return ix86_expand_sse_comi (d, arglist, target);
17581
17582 gcc_unreachable ();
17583 }
17584
17585 /* Returns a function decl for a vectorized version of the builtin function
17586 with builtin function code FN and the result vector type TYPE, or NULL_TREE
17587 if it is not available. */
17588
17589 static tree
17590 ix86_builtin_vectorized_function (enum built_in_function fn, tree type)
17591 {
17592 enum machine_mode el_mode;
17593 int n;
17594
17595 if (TREE_CODE (type) != VECTOR_TYPE)
17596 return NULL_TREE;
17597
17598 el_mode = TYPE_MODE (TREE_TYPE (type));
17599 n = TYPE_VECTOR_SUBPARTS (type);
17600
17601 switch (fn)
17602 {
17603 case BUILT_IN_SQRT:
17604 if (el_mode == DFmode && n == 2)
17605 return ix86_builtins[IX86_BUILTIN_SQRTPD];
17606 return NULL_TREE;
17607
17608 case BUILT_IN_SQRTF:
17609 if (el_mode == SFmode && n == 4)
17610 return ix86_builtins[IX86_BUILTIN_SQRTPS];
17611 return NULL_TREE;
17612
17613 default:
17614 ;
17615 }
17616
17617 return NULL_TREE;
17618 }
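/* For example, when the loop vectorizer sees calls to sqrt () on DFmode
   data and the vector type is V2DF, the hook above lets it substitute the
   IX86_BUILTIN_SQRTPD builtin for the scalar calls; likewise sqrtf () on
   V4SF data maps to IX86_BUILTIN_SQRTPS.  */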
17619
17620 /* Store OPERAND to the memory after reload is completed. This means
17621 that we can't easily use assign_stack_local. */
17622 rtx
17623 ix86_force_to_memory (enum machine_mode mode, rtx operand)
17624 {
17625 rtx result;
17626
17627 gcc_assert (reload_completed);
17628 if (TARGET_RED_ZONE)
17629 {
17630 result = gen_rtx_MEM (mode,
17631 gen_rtx_PLUS (Pmode,
17632 stack_pointer_rtx,
17633 GEN_INT (-RED_ZONE_SIZE)));
17634 emit_move_insn (result, operand);
17635 }
17636 else if (!TARGET_RED_ZONE && TARGET_64BIT)
17637 {
17638 switch (mode)
17639 {
17640 case HImode:
17641 case SImode:
17642 operand = gen_lowpart (DImode, operand);
17643 /* FALLTHRU */
17644 case DImode:
17645 emit_insn (
17646 gen_rtx_SET (VOIDmode,
17647 gen_rtx_MEM (DImode,
17648 gen_rtx_PRE_DEC (DImode,
17649 stack_pointer_rtx)),
17650 operand));
17651 break;
17652 default:
17653 gcc_unreachable ();
17654 }
17655 result = gen_rtx_MEM (mode, stack_pointer_rtx);
17656 }
17657 else
17658 {
17659 switch (mode)
17660 {
17661 case DImode:
17662 {
17663 rtx operands[2];
17664 split_di (&operand, 1, operands, operands + 1);
17665 emit_insn (
17666 gen_rtx_SET (VOIDmode,
17667 gen_rtx_MEM (SImode,
17668 gen_rtx_PRE_DEC (Pmode,
17669 stack_pointer_rtx)),
17670 operands[1]));
17671 emit_insn (
17672 gen_rtx_SET (VOIDmode,
17673 gen_rtx_MEM (SImode,
17674 gen_rtx_PRE_DEC (Pmode,
17675 stack_pointer_rtx)),
17676 operands[0]));
17677 }
17678 break;
17679 case HImode:
17680 /* Store HImodes as SImodes. */
17681 operand = gen_lowpart (SImode, operand);
17682 /* FALLTHRU */
17683 case SImode:
17684 emit_insn (
17685 gen_rtx_SET (VOIDmode,
17686 gen_rtx_MEM (GET_MODE (operand),
17687 gen_rtx_PRE_DEC (SImode,
17688 stack_pointer_rtx)),
17689 operand));
17690 break;
17691 default:
17692 gcc_unreachable ();
17693 }
17694 result = gen_rtx_MEM (mode, stack_pointer_rtx);
17695 }
17696 return result;
17697 }
17698
17699 /* Free operand from the memory. */
17700 void
17701 ix86_free_from_memory (enum machine_mode mode)
17702 {
17703 if (!TARGET_RED_ZONE)
17704 {
17705 int size;
17706
17707 if (mode == DImode || TARGET_64BIT)
17708 size = 8;
17709 else
17710 size = 4;
17711 /* Use LEA to deallocate stack space. In peephole2 it will be converted
17712 to a pop or add instruction if registers are available. */
17713 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
17714 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
17715 GEN_INT (size))));
17716 }
17717 }
17718
17719 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
17720 QImode must go into class Q_REGS.
17721 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
17722 movdf to do mem-to-mem moves through integer regs. */
17723 enum reg_class
17724 ix86_preferred_reload_class (rtx x, enum reg_class class)
17725 {
17726 enum machine_mode mode = GET_MODE (x);
17727
17728 /* We're only allowed to return a subclass of CLASS. Many of the
17729 following checks fail for NO_REGS, so eliminate that early. */
17730 if (class == NO_REGS)
17731 return NO_REGS;
17732
17733 /* All classes can load zeros. */
17734 if (x == CONST0_RTX (mode))
17735 return class;
17736
17737 /* Force constants into memory if we are loading a (nonzero) constant into
17738 an MMX or SSE register. This is because there are no MMX/SSE instructions
17739 to load from a constant. */
17740 if (CONSTANT_P (x)
17741 && (MAYBE_MMX_CLASS_P (class) || MAYBE_SSE_CLASS_P (class)))
17742 return NO_REGS;
17743
17744 /* Prefer SSE regs only if we can use them for math. */
17745 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
17746 return SSE_CLASS_P (class) ? class : NO_REGS;
17747
17748 /* Floating-point constants need more complex checks. */
17749 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
17750 {
17751 /* General regs can load everything. */
17752 if (reg_class_subset_p (class, GENERAL_REGS))
17753 return class;
17754
17755 /* Floats can load 0 and 1 plus some others. Note that we eliminated
17756 zero above. We only want to wind up preferring 80387 registers if
17757 we plan on doing computation with them. */
17758 if (TARGET_80387
17759 && standard_80387_constant_p (x))
17760 {
17761 /* Limit class to non-sse. */
17762 if (class == FLOAT_SSE_REGS)
17763 return FLOAT_REGS;
17764 if (class == FP_TOP_SSE_REGS)
17765 return FP_TOP_REG;
17766 if (class == FP_SECOND_SSE_REGS)
17767 return FP_SECOND_REG;
17768 if (class == FLOAT_INT_REGS || class == FLOAT_REGS)
17769 return class;
17770 }
17771
17772 return NO_REGS;
17773 }
17774
17775 /* Generally when we see PLUS here, it's the function invariant
17776 (plus soft-fp const_int), which can only be computed into general
17777 regs. */
17778 if (GET_CODE (x) == PLUS)
17779 return reg_class_subset_p (class, GENERAL_REGS) ? class : NO_REGS;
17780
17781 /* QImode constants are easy to load, but non-constant QImode data
17782 must go into Q_REGS. */
17783 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
17784 {
17785 if (reg_class_subset_p (class, Q_REGS))
17786 return class;
17787 if (reg_class_subset_p (Q_REGS, class))
17788 return Q_REGS;
17789 return NO_REGS;
17790 }
17791
17792 return class;
17793 }
17794
17795 /* Discourage putting floating-point values in SSE registers unless
17796 SSE math is being used, and likewise for the 387 registers. */
17797 enum reg_class
17798 ix86_preferred_output_reload_class (rtx x, enum reg_class class)
17799 {
17800 enum machine_mode mode = GET_MODE (x);
17801
17802 /* Restrict the output reload class to the register bank that we are doing
17803 math on. If we would like not to return a subset of CLASS, reject this
17804 alternative: if reload cannot do this, it will still use its choice. */
17805 mode = GET_MODE (x);
17806 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
17807 return MAYBE_SSE_CLASS_P (class) ? SSE_REGS : NO_REGS;
17808
17809 if (TARGET_80387 && SCALAR_FLOAT_MODE_P (mode))
17810 {
17811 if (class == FP_TOP_SSE_REGS)
17812 return FP_TOP_REG;
17813 else if (class == FP_SECOND_SSE_REGS)
17814 return FP_SECOND_REG;
17815 else
17816 return FLOAT_CLASS_P (class) ? class : NO_REGS;
17817 }
17818
17819 return class;
17820 }
17821
17822 /* If we are copying between general and FP registers, we need a memory
17823 location. The same is true for SSE and MMX registers.
17824
17825 The macro can't work reliably when one of the CLASSES is a class containing
17826 registers from multiple units (SSE, MMX, integer). We avoid this by never
17827 combining those units in single alternative in the machine description.
17828 Ensure that this constraint holds to avoid unexpected surprises.
17829
17830 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
17831 enforce these sanity checks. */
17832
17833 int
17834 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
17835 enum machine_mode mode, int strict)
17836 {
17837 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
17838 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
17839 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
17840 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
17841 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
17842 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
17843 {
17844 gcc_assert (!strict);
17845 return true;
17846 }
17847
17848 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
17849 return true;
17850
17851 /* ??? This is a lie. We do have moves between mmx/general, and between
17852 mmx/sse2. But by saying we need secondary memory we discourage the
17853 register allocator from using the mmx registers unless needed. */
17854 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
17855 return true;
17856
17857 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
17858 {
17859 /* SSE1 doesn't have any direct moves from other classes. */
17860 if (!TARGET_SSE2)
17861 return true;
17862
17863 /* If the target says that inter-unit moves are more expensive
17864 than moving through memory, then don't generate them. */
17865 if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
17866 return true;
17867
17868 /* Between SSE and general, we have moves no larger than word size. */
17869 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
17870 return true;
17871
17872 /* ??? For the cost of one register reformat penalty, we could use
17873 the same instructions to move SFmode and DFmode data, but the
17874 relevant move patterns don't support those alternatives. */
17875 if (mode == SFmode || mode == DFmode)
17876 return true;
17877 }
17878
17879 return false;
17880 }
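/* For example, moving a V4SFmode value between SSE_REGS and GENERAL_REGS
   needs secondary memory on a 32-bit target: the two classes differ in
   SSE-ness and GET_MODE_SIZE (V4SFmode) exceeds UNITS_PER_WORD.  */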
17881
17882 /* Return true if the registers in CLASS cannot represent the change from
17883 modes FROM to TO. */
17884
17885 bool
17886 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
17887 enum reg_class class)
17888 {
17889 if (from == to)
17890 return false;
17891
17892 /* x87 registers can't do subreg at all, as all values are reformatted
17893 to extended precision. */
17894 if (MAYBE_FLOAT_CLASS_P (class))
17895 return true;
17896
17897 if (MAYBE_SSE_CLASS_P (class) || MAYBE_MMX_CLASS_P (class))
17898 {
17899 /* Vector registers do not support QI or HImode loads. If we don't
17900 disallow a change to these modes, reload will assume it's ok to
17901 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
17902 the vec_dupv4hi pattern. */
17903 if (GET_MODE_SIZE (from) < 4)
17904 return true;
17905
17906 /* Vector registers do not support subreg with nonzero offsets, which
17907 are otherwise valid for integer registers. Since we can't see
17908 whether we have a nonzero offset from here, prohibit all
17909 nonparadoxical subregs changing size. */
17910 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
17911 return true;
17912 }
17913
17914 return false;
17915 }
17916
17917 /* Return the cost of moving data from a register in class CLASS1 to
17918 one in class CLASS2.
17919
17920 It is not required that the cost always equal 2 when FROM is the same as TO;
17921 on some machines it is expensive to move between registers if they are not
17922 general registers. */
17923
17924 int
17925 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
17926 enum reg_class class2)
17927 {
17928 /* In case we require secondary memory, compute the cost of the store followed
17929 by a load. In order to avoid bad register allocation choices, we need
17930 this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
17931
17932 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
17933 {
17934 int cost = 1;
17935
17936 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
17937 MEMORY_MOVE_COST (mode, class1, 1));
17938 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
17939 MEMORY_MOVE_COST (mode, class2, 1));
17940
17941 /* When copying from a general purpose register we may emit multiple
17942 stores followed by a single load, causing a memory size mismatch stall.
17943 Count this as an arbitrarily high cost of 20. */
17944 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
17945 cost += 20;
17946
17947 /* In the case of FP/MMX moves, the registers actually overlap, and we
17948 have to switch modes in order to treat them differently. */
17949 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
17950 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
17951 cost += 20;
17952
17953 return cost;
17954 }
17955
17956 /* Moves between SSE/MMX and integer unit are expensive. */
17957 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
17958 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
17959 return ix86_cost->mmxsse_to_integer;
17960 if (MAYBE_FLOAT_CLASS_P (class1))
17961 return ix86_cost->fp_move;
17962 if (MAYBE_SSE_CLASS_P (class1))
17963 return ix86_cost->sse_move;
17964 if (MAYBE_MMX_CLASS_P (class1))
17965 return ix86_cost->mmx_move;
17966 return 2;
17967 }
17968
17969 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
17970
17971 bool
17972 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
17973 {
17974 /* Only the flags register can hold CCmode values, and it can hold nothing else. */
17975 if (CC_REGNO_P (regno))
17976 return GET_MODE_CLASS (mode) == MODE_CC;
17977 if (GET_MODE_CLASS (mode) == MODE_CC
17978 || GET_MODE_CLASS (mode) == MODE_RANDOM
17979 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
17980 return 0;
17981 if (FP_REGNO_P (regno))
17982 return VALID_FP_MODE_P (mode);
17983 if (SSE_REGNO_P (regno))
17984 {
17985 /* We implement the move patterns for all vector modes into and
17986 out of SSE registers, even when no operation instructions
17987 are available. */
17988 return (VALID_SSE_REG_MODE (mode)
17989 || VALID_SSE2_REG_MODE (mode)
17990 || VALID_MMX_REG_MODE (mode)
17991 || VALID_MMX_REG_MODE_3DNOW (mode));
17992 }
17993 if (MMX_REGNO_P (regno))
17994 {
17995 /* We implement the move patterns for 3DNOW modes even in MMX mode,
17996 so if the register is available at all, then we can move data of
17997 the given mode into or out of it. */
17998 return (VALID_MMX_REG_MODE (mode)
17999 || VALID_MMX_REG_MODE_3DNOW (mode));
18000 }
18001
18002 if (mode == QImode)
18003 {
18004 /* Take care for QImode values - they can be in non-QI regs,
18005 but then they do cause partial register stalls. */
18006 if (regno < 4 || TARGET_64BIT)
18007 return 1;
18008 if (!TARGET_PARTIAL_REG_STALL)
18009 return 1;
18010 return reload_in_progress || reload_completed;
18011 }
18012 /* We handle both integers and floats in the general purpose registers. */
18013 else if (VALID_INT_MODE_P (mode))
18014 return 1;
18015 else if (VALID_FP_MODE_P (mode))
18016 return 1;
18017 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
18018 on to use that value in smaller contexts, this can easily force a
18019 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
18020 supporting DImode, allow it. */
18021 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
18022 return 1;
18023
18024 return 0;
18025 }
18026
18027 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
18028 tieable integer mode. */
18029
18030 static bool
18031 ix86_tieable_integer_mode_p (enum machine_mode mode)
18032 {
18033 switch (mode)
18034 {
18035 case HImode:
18036 case SImode:
18037 return true;
18038
18039 case QImode:
18040 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
18041
18042 case DImode:
18043 return TARGET_64BIT;
18044
18045 default:
18046 return false;
18047 }
18048 }
18049
18050 /* Return true if MODE1 is accessible in a register that can hold MODE2
18051 without copying. That is, all register classes that can hold MODE2
18052 can also hold MODE1. */
18053
18054 bool
18055 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
18056 {
18057 if (mode1 == mode2)
18058 return true;
18059
18060 if (ix86_tieable_integer_mode_p (mode1)
18061 && ix86_tieable_integer_mode_p (mode2))
18062 return true;
18063
18064 /* MODE2 being XFmode implies fp stack or general regs, which means we
18065 can tie any smaller floating point modes to it. Note that we do not
18066 tie this with TFmode. */
18067 if (mode2 == XFmode)
18068 return mode1 == SFmode || mode1 == DFmode;
18069
18070 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
18071 that we can tie it with SFmode. */
18072 if (mode2 == DFmode)
18073 return mode1 == SFmode;
18074
18075 /* If MODE2 is only appropriate for an SSE register, then tie with
18076 any other mode acceptable to SSE registers. */
18077 if (GET_MODE_SIZE (mode2) >= 8
18078 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
18079 return ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1);
18080
18081 /* If MODE2 is appropriate for an MMX (or SSE) register, then tie
18082 with any other mode acceptable to MMX registers. */
18083 if (GET_MODE_SIZE (mode2) == 8
18084 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
18085 return ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1);
18086
18087 return false;
18088 }
18089
18090 /* Return the cost of moving data of mode M between a
18091 register and memory. A value of 2 is the default; this cost is
18092 relative to those in `REGISTER_MOVE_COST'.
18093
18094 If moving between registers and memory is more expensive than
18095 between two registers, you should define this macro to express the
18096 relative cost.
18097
18098 Also model the increased cost of moving QImode registers in non-Q_REGS
18099 classes.
18100 */
18101 int
18102 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
18103 {
18104 if (FLOAT_CLASS_P (class))
18105 {
18106 int index;
18107 switch (mode)
18108 {
18109 case SFmode:
18110 index = 0;
18111 break;
18112 case DFmode:
18113 index = 1;
18114 break;
18115 case XFmode:
18116 index = 2;
18117 break;
18118 default:
18119 return 100;
18120 }
18121 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
18122 }
18123 if (SSE_CLASS_P (class))
18124 {
18125 int index;
18126 switch (GET_MODE_SIZE (mode))
18127 {
18128 case 4:
18129 index = 0;
18130 break;
18131 case 8:
18132 index = 1;
18133 break;
18134 case 16:
18135 index = 2;
18136 break;
18137 default:
18138 return 100;
18139 }
18140 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
18141 }
18142 if (MMX_CLASS_P (class))
18143 {
18144 int index;
18145 switch (GET_MODE_SIZE (mode))
18146 {
18147 case 4:
18148 index = 0;
18149 break;
18150 case 8:
18151 index = 1;
18152 break;
18153 default:
18154 return 100;
18155 }
18156 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
18157 }
18158 switch (GET_MODE_SIZE (mode))
18159 {
18160 case 1:
18161 if (in)
18162 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
18163 : ix86_cost->movzbl_load);
18164 else
18165 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
18166 : ix86_cost->int_store[0] + 4);
18167 break;
18168 case 2:
18169 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
18170 default:
18171 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
18172 if (mode == TFmode)
18173 mode = XFmode;
18174 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
18175 * (((int) GET_MODE_SIZE (mode)
18176 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
18177 }
18178 }
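/* Worked example (derived from the code above): on a 32-bit target
   (UNITS_PER_WORD == 4), loading a DImode value into GENERAL_REGS falls
   into the default case and costs ix86_cost->int_load[2] * 2, i.e. two
   SImode loads; storing it costs ix86_cost->int_store[2] * 2.  */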
18179
18180 /* Compute a (partial) cost for rtx X. Return true if the complete
18181 cost has been computed, and false if subexpressions should be
18182 scanned. In either case, *TOTAL contains the cost result. */
18183
18184 static bool
18185 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
18186 {
18187 enum machine_mode mode = GET_MODE (x);
18188
18189 switch (code)
18190 {
18191 case CONST_INT:
18192 case CONST:
18193 case LABEL_REF:
18194 case SYMBOL_REF:
18195 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
18196 *total = 3;
18197 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
18198 *total = 2;
18199 else if (flag_pic && SYMBOLIC_CONST (x)
18200 && (!TARGET_64BIT
18201 || (GET_CODE (x) != LABEL_REF
18202 && (GET_CODE (x) != SYMBOL_REF
18203 || !SYMBOL_REF_LOCAL_P (x)))))
18204 *total = 1;
18205 else
18206 *total = 0;
18207 return true;
18208
18209 case CONST_DOUBLE:
18210 if (mode == VOIDmode)
18211 *total = 0;
18212 else
18213 switch (standard_80387_constant_p (x))
18214 {
18215 case 1: /* 0.0 */
18216 *total = 1;
18217 break;
18218 default: /* Other constants */
18219 *total = 2;
18220 break;
18221 case 0:
18222 case -1:
18223 /* Start with (MEM (SYMBOL_REF)), since that's where
18224 it'll probably end up. Add a penalty for size. */
18225 *total = (COSTS_N_INSNS (1)
18226 + (flag_pic != 0 && !TARGET_64BIT)
18227 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
18228 break;
18229 }
18230 return true;
18231
18232 case ZERO_EXTEND:
18233 /* The zero extension is often completely free on x86_64, so make
18234 it as cheap as possible. */
18235 if (TARGET_64BIT && mode == DImode
18236 && GET_MODE (XEXP (x, 0)) == SImode)
18237 *total = 1;
18238 else if (TARGET_ZERO_EXTEND_WITH_AND)
18239 *total = ix86_cost->add;
18240 else
18241 *total = ix86_cost->movzx;
18242 return false;
18243
18244 case SIGN_EXTEND:
18245 *total = ix86_cost->movsx;
18246 return false;
18247
18248 case ASHIFT:
18249 if (GET_CODE (XEXP (x, 1)) == CONST_INT
18250 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
18251 {
18252 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
18253 if (value == 1)
18254 {
18255 *total = ix86_cost->add;
18256 return false;
18257 }
18258 if ((value == 2 || value == 3)
18259 && ix86_cost->lea <= ix86_cost->shift_const)
18260 {
18261 *total = ix86_cost->lea;
18262 return false;
18263 }
18264 }
18265 /* FALLTHRU */
18266
18267 case ROTATE:
18268 case ASHIFTRT:
18269 case LSHIFTRT:
18270 case ROTATERT:
18271 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
18272 {
18273 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18274 {
18275 if (INTVAL (XEXP (x, 1)) > 32)
18276 *total = ix86_cost->shift_const + COSTS_N_INSNS (2);
18277 else
18278 *total = ix86_cost->shift_const * 2;
18279 }
18280 else
18281 {
18282 if (GET_CODE (XEXP (x, 1)) == AND)
18283 *total = ix86_cost->shift_var * 2;
18284 else
18285 *total = ix86_cost->shift_var * 6 + COSTS_N_INSNS (2);
18286 }
18287 }
18288 else
18289 {
18290 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18291 *total = ix86_cost->shift_const;
18292 else
18293 *total = ix86_cost->shift_var;
18294 }
18295 return false;
18296
18297 case MULT:
18298 if (FLOAT_MODE_P (mode))
18299 {
18300 *total = ix86_cost->fmul;
18301 return false;
18302 }
18303 else
18304 {
18305 rtx op0 = XEXP (x, 0);
18306 rtx op1 = XEXP (x, 1);
18307 int nbits;
18308 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18309 {
18310 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
18311 for (nbits = 0; value != 0; value &= value - 1)
18312 nbits++;
18313 }
18314 else
18315 /* This is arbitrary. */
18316 nbits = 7;
18317
18318 /* Compute costs correctly for widening multiplication. */
18319 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
18320 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
18321 == GET_MODE_SIZE (mode))
18322 {
18323 int is_mulwiden = 0;
18324 enum machine_mode inner_mode = GET_MODE (op0);
18325
18326 if (GET_CODE (op0) == GET_CODE (op1))
18327 is_mulwiden = 1, op1 = XEXP (op1, 0);
18328 else if (GET_CODE (op1) == CONST_INT)
18329 {
18330 if (GET_CODE (op0) == SIGN_EXTEND)
18331 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
18332 == INTVAL (op1);
18333 else
18334 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
18335 }
18336
18337 if (is_mulwiden)
18338 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
18339 }
18340
18341 *total = (ix86_cost->mult_init[MODE_INDEX (mode)]
18342 + nbits * ix86_cost->mult_bit
18343 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code));
18344
18345 return true;
18346 }
18347
18348 case DIV:
18349 case UDIV:
18350 case MOD:
18351 case UMOD:
18352 if (FLOAT_MODE_P (mode))
18353 *total = ix86_cost->fdiv;
18354 else
18355 *total = ix86_cost->divide[MODE_INDEX (mode)];
18356 return false;
18357
18358 case PLUS:
18359 if (FLOAT_MODE_P (mode))
18360 *total = ix86_cost->fadd;
18361 else if (GET_MODE_CLASS (mode) == MODE_INT
18362 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
18363 {
18364 if (GET_CODE (XEXP (x, 0)) == PLUS
18365 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
18366 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
18367 && CONSTANT_P (XEXP (x, 1)))
18368 {
18369 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
18370 if (val == 2 || val == 4 || val == 8)
18371 {
18372 *total = ix86_cost->lea;
18373 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
18374 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
18375 outer_code);
18376 *total += rtx_cost (XEXP (x, 1), outer_code);
18377 return true;
18378 }
18379 }
18380 else if (GET_CODE (XEXP (x, 0)) == MULT
18381 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
18382 {
18383 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
18384 if (val == 2 || val == 4 || val == 8)
18385 {
18386 *total = ix86_cost->lea;
18387 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
18388 *total += rtx_cost (XEXP (x, 1), outer_code);
18389 return true;
18390 }
18391 }
18392 else if (GET_CODE (XEXP (x, 0)) == PLUS)
18393 {
18394 *total = ix86_cost->lea;
18395 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
18396 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
18397 *total += rtx_cost (XEXP (x, 1), outer_code);
18398 return true;
18399 }
18400 }
18401 /* FALLTHRU */
18402
18403 case MINUS:
18404 if (FLOAT_MODE_P (mode))
18405 {
18406 *total = ix86_cost->fadd;
18407 return false;
18408 }
18409 /* FALLTHRU */
18410
18411 case AND:
18412 case IOR:
18413 case XOR:
18414 if (!TARGET_64BIT && mode == DImode)
18415 {
18416 *total = (ix86_cost->add * 2
18417 + (rtx_cost (XEXP (x, 0), outer_code)
18418 << (GET_MODE (XEXP (x, 0)) != DImode))
18419 + (rtx_cost (XEXP (x, 1), outer_code)
18420 << (GET_MODE (XEXP (x, 1)) != DImode)));
18421 return true;
18422 }
18423 /* FALLTHRU */
18424
18425 case NEG:
18426 if (FLOAT_MODE_P (mode))
18427 {
18428 *total = ix86_cost->fchs;
18429 return false;
18430 }
18431 /* FALLTHRU */
18432
18433 case NOT:
18434 if (!TARGET_64BIT && mode == DImode)
18435 *total = ix86_cost->add * 2;
18436 else
18437 *total = ix86_cost->add;
18438 return false;
18439
18440 case COMPARE:
18441 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
18442 && XEXP (XEXP (x, 0), 1) == const1_rtx
18443 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
18444 && XEXP (x, 1) == const0_rtx)
18445 {
18446 /* This kind of construct is implemented using test[bwl].
18447 Treat it as if we had an AND. */
18448 *total = (ix86_cost->add
18449 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
18450 + rtx_cost (const1_rtx, outer_code));
18451 return true;
18452 }
18453 return false;
18454
18455 case FLOAT_EXTEND:
18456 if (!TARGET_SSE_MATH
18457 || mode == XFmode
18458 || (mode == DFmode && !TARGET_SSE2))
18459 *total = 0;
18460 return false;
18461
18462 case ABS:
18463 if (FLOAT_MODE_P (mode))
18464 *total = ix86_cost->fabs;
18465 return false;
18466
18467 case SQRT:
18468 if (FLOAT_MODE_P (mode))
18469 *total = ix86_cost->fsqrt;
18470 return false;
18471
18472 case UNSPEC:
18473 if (XINT (x, 1) == UNSPEC_TP)
18474 *total = 0;
18475 return false;
18476
18477 default:
18478 return false;
18479 }
18480 }
18481
18482 #if TARGET_MACHO
18483
18484 static int current_machopic_label_num;
18485
18486 /* Given a symbol name and its associated stub, write out the
18487 definition of the stub. */
18488
18489 void
18490 machopic_output_stub (FILE *file, const char *symb, const char *stub)
18491 {
18492 unsigned int length;
18493 char *binder_name, *symbol_name, lazy_ptr_name[32];
18494 int label = ++current_machopic_label_num;
18495
18496 /* For 64-bit we shouldn't get here. */
18497 gcc_assert (!TARGET_64BIT);
18498
18499 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
18500 symb = (*targetm.strip_name_encoding) (symb);
18501
18502 length = strlen (stub);
18503 binder_name = alloca (length + 32);
18504 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
18505
18506 length = strlen (symb);
18507 symbol_name = alloca (length + 32);
18508 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
18509
18510 sprintf (lazy_ptr_name, "L%d$lz", label);
18511
18512 if (MACHOPIC_PURE)
18513 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
18514 else
18515 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
18516
18517 fprintf (file, "%s:\n", stub);
18518 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
18519
18520 if (MACHOPIC_PURE)
18521 {
18522 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
18523 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
18524 fprintf (file, "\tjmp\t*%%edx\n");
18525 }
18526 else
18527 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
18528
18529 fprintf (file, "%s:\n", binder_name);
18530
18531 if (MACHOPIC_PURE)
18532 {
18533 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
18534 fprintf (file, "\tpushl\t%%eax\n");
18535 }
18536 else
18537 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
18538
18539 fprintf (file, "\tjmp\tdyld_stub_binding_helper\n");
18540
18541 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
18542 fprintf (file, "%s:\n", lazy_ptr_name);
18543 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
18544 fprintf (file, "\t.long %s\n", binder_name);
18545 }
18546
18547 void
18548 darwin_x86_file_end (void)
18549 {
18550 darwin_file_end ();
18551 ix86_file_end ();
18552 }
18553 #endif /* TARGET_MACHO */
18554
18555 /* Order the registers for register allocator. */
18556
18557 void
18558 x86_order_regs_for_local_alloc (void)
18559 {
18560 int pos = 0;
18561 int i;
18562
18563 /* First allocate the local general purpose registers. */
18564 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
18565 if (GENERAL_REGNO_P (i) && call_used_regs[i])
18566 reg_alloc_order [pos++] = i;
18567
18568 /* Global general purpose registers. */
18569 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
18570 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
18571 reg_alloc_order [pos++] = i;
18572
18573 /* x87 registers come first in case we are doing FP math
18574 using them. */
18575 if (!TARGET_SSE_MATH)
18576 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
18577 reg_alloc_order [pos++] = i;
18578
18579 /* SSE registers. */
18580 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
18581 reg_alloc_order [pos++] = i;
18582 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
18583 reg_alloc_order [pos++] = i;
18584
18585 /* x87 registers. */
18586 if (TARGET_SSE_MATH)
18587 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
18588 reg_alloc_order [pos++] = i;
18589
18590 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
18591 reg_alloc_order [pos++] = i;
18592
18593 /* Initialize the rest of the array, as we do not allocate some registers
18594 at all. */
18595 while (pos < FIRST_PSEUDO_REGISTER)
18596 reg_alloc_order [pos++] = 0;
18597 }
18598
18599 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
18600 struct attribute_spec.handler. */
18601 static tree
18602 ix86_handle_struct_attribute (tree *node, tree name,
18603 tree args ATTRIBUTE_UNUSED,
18604 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
18605 {
18606 tree *type = NULL;
18607 if (DECL_P (*node))
18608 {
18609 if (TREE_CODE (*node) == TYPE_DECL)
18610 type = &TREE_TYPE (*node);
18611 }
18612 else
18613 type = node;
18614
18615 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
18616 || TREE_CODE (*type) == UNION_TYPE)))
18617 {
18618 warning (OPT_Wattributes, "%qs attribute ignored",
18619 IDENTIFIER_POINTER (name));
18620 *no_add_attrs = true;
18621 }
18622
18623 else if ((is_attribute_p ("ms_struct", name)
18624 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
18625 || ((is_attribute_p ("gcc_struct", name)
18626 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
18627 {
18628 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
18629 IDENTIFIER_POINTER (name));
18630 *no_add_attrs = true;
18631 }
18632
18633 return NULL_TREE;
18634 }
18635
18636 static bool
18637 ix86_ms_bitfield_layout_p (tree record_type)
18638 {
18639 return ((TARGET_MS_BITFIELD_LAYOUT
18640 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
18641 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
18642 }
18643
18644 /* Returns an expression indicating where the this parameter is
18645 located on entry to the FUNCTION. */
18646
18647 static rtx
18648 x86_this_parameter (tree function)
18649 {
18650 tree type = TREE_TYPE (function);
18651
18652 if (TARGET_64BIT)
18653 {
18654 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
18655 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
18656 }
18657
18658 if (ix86_function_regparm (type, function) > 0)
18659 {
18660 tree parm;
18661
18662 parm = TYPE_ARG_TYPES (type);
18663 /* Figure out whether or not the function has a variable number of
18664 arguments. */
18665 for (; parm; parm = TREE_CHAIN (parm))
18666 if (TREE_VALUE (parm) == void_type_node)
18667 break;
18668 /* If not, the this parameter is in the first argument. */
18669 if (parm)
18670 {
18671 int regno = 0;
18672 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
18673 regno = 2;
18674 return gen_rtx_REG (SImode, regno);
18675 }
18676 }
18677
18678 if (aggregate_value_p (TREE_TYPE (type), type))
18679 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
18680 else
18681 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
18682 }
18683
18684 /* Determine whether x86_output_mi_thunk can succeed. */
18685
18686 static bool
18687 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
18688 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
18689 HOST_WIDE_INT vcall_offset, tree function)
18690 {
18691 /* 64-bit can handle anything. */
18692 if (TARGET_64BIT)
18693 return true;
18694
18695 /* For 32-bit, everything's fine if we have one free register. */
18696 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
18697 return true;
18698
18699 /* Need a free register for vcall_offset. */
18700 if (vcall_offset)
18701 return false;
18702
18703 /* Need a free register for GOT references. */
18704 if (flag_pic && !(*targetm.binds_local_p) (function))
18705 return false;
18706
18707 /* Otherwise ok. */
18708 return true;
18709 }
18710
18711 /* Output the assembler code for a thunk function. THUNK_DECL is the
18712 declaration for the thunk function itself, FUNCTION is the decl for
18713 the target function. DELTA is an immediate constant offset to be
18714 added to THIS. If VCALL_OFFSET is nonzero, the word at
18715 *(*this + vcall_offset) should be added to THIS. */
18716
18717 static void
18718 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
18719 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
18720 HOST_WIDE_INT vcall_offset, tree function)
18721 {
18722 rtx xops[3];
18723 rtx this = x86_this_parameter (function);
18724 rtx this_reg, tmp;
18725
18726 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
18727 pull it in now and let DELTA benefit. */
18728 if (REG_P (this))
18729 this_reg = this;
18730 else if (vcall_offset)
18731 {
18732 /* Put the this parameter into %eax. */
18733 xops[0] = this;
18734 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
18735 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18736 }
18737 else
18738 this_reg = NULL_RTX;
18739
18740 /* Adjust the this parameter by a fixed constant. */
18741 if (delta)
18742 {
18743 xops[0] = GEN_INT (delta);
18744 xops[1] = this_reg ? this_reg : this;
18745 if (TARGET_64BIT)
18746 {
18747 if (!x86_64_general_operand (xops[0], DImode))
18748 {
18749 tmp = gen_rtx_REG (DImode, R10_REG);
18750 xops[1] = tmp;
18751 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
18752 xops[0] = tmp;
18753 xops[1] = this;
18754 }
18755 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
18756 }
18757 else
18758 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
18759 }
18760
18761 /* Adjust the this parameter by a value stored in the vtable. */
18762 if (vcall_offset)
18763 {
18764 if (TARGET_64BIT)
18765 tmp = gen_rtx_REG (DImode, R10_REG);
18766 else
18767 {
18768 int tmp_regno = 2 /* ECX */;
18769 if (lookup_attribute ("fastcall",
18770 TYPE_ATTRIBUTES (TREE_TYPE (function))))
18771 tmp_regno = 0 /* EAX */;
18772 tmp = gen_rtx_REG (SImode, tmp_regno);
18773 }
18774
18775 xops[0] = gen_rtx_MEM (Pmode, this_reg);
18776 xops[1] = tmp;
18777 if (TARGET_64BIT)
18778 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
18779 else
18780 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18781
18782 /* Adjust the this parameter. */
18783 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
18784 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
18785 {
18786 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
18787 xops[0] = GEN_INT (vcall_offset);
18788 xops[1] = tmp2;
18789 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
18790 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
18791 }
18792 xops[1] = this_reg;
18793 if (TARGET_64BIT)
18794 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
18795 else
18796 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
18797 }
18798
18799 /* If necessary, drop THIS back to its stack slot. */
18800 if (this_reg && this_reg != this)
18801 {
18802 xops[0] = this_reg;
18803 xops[1] = this;
18804 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18805 }
18806
18807 xops[0] = XEXP (DECL_RTL (function), 0);
18808 if (TARGET_64BIT)
18809 {
18810 if (!flag_pic || (*targetm.binds_local_p) (function))
18811 output_asm_insn ("jmp\t%P0", xops);
18812 else
18813 {
18814 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
18815 tmp = gen_rtx_CONST (Pmode, tmp);
18816 tmp = gen_rtx_MEM (QImode, tmp);
18817 xops[0] = tmp;
18818 output_asm_insn ("jmp\t%A0", xops);
18819 }
18820 }
18821 else
18822 {
18823 if (!flag_pic || (*targetm.binds_local_p) (function))
18824 output_asm_insn ("jmp\t%P0", xops);
18825 else
18826 #if TARGET_MACHO
18827 if (TARGET_MACHO)
18828 {
18829 rtx sym_ref = XEXP (DECL_RTL (function), 0);
18830 tmp = (gen_rtx_SYMBOL_REF
18831 (Pmode,
18832 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
18833 tmp = gen_rtx_MEM (QImode, tmp);
18834 xops[0] = tmp;
18835 output_asm_insn ("jmp\t%0", xops);
18836 }
18837 else
18838 #endif /* TARGET_MACHO */
18839 {
18840 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
18841 output_set_got (tmp, NULL_RTX);
18842
18843 xops[1] = tmp;
18844 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
18845 output_asm_insn ("jmp\t{*}%1", xops);
18846 }
18847 }
18848 }
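/* As an illustration of the common 32-bit case (no vcall offset, THIS in
   its stack slot, locally bound target), the code above emits roughly

       addl	$delta, 4(%esp)
       jmp	target

   i.e. the this pointer is adjusted in place and control transfers to the
   real function with the original return address left untouched.  */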
18849
18850 static void
18851 x86_file_start (void)
18852 {
18853 default_file_start ();
18854 #if TARGET_MACHO
18855 darwin_file_start ();
18856 #endif
18857 if (X86_FILE_START_VERSION_DIRECTIVE)
18858 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
18859 if (X86_FILE_START_FLTUSED)
18860 fputs ("\t.global\t__fltused\n", asm_out_file);
18861 if (ix86_asm_dialect == ASM_INTEL)
18862 fputs ("\t.intel_syntax\n", asm_out_file);
18863 }
18864
18865 int
18866 x86_field_alignment (tree field, int computed)
18867 {
18868 enum machine_mode mode;
18869 tree type = TREE_TYPE (field);
18870
18871 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
18872 return computed;
18873 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
18874 ? get_inner_array_type (type) : type);
18875 if (mode == DFmode || mode == DCmode
18876 || GET_MODE_CLASS (mode) == MODE_INT
18877 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
18878 return MIN (32, computed);
18879 return computed;
18880 }
18881
18882 /* Output assembler code to FILE to increment profiler label # LABELNO
18883 for profiling a function entry. */
18884 void
18885 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
18886 {
18887 if (TARGET_64BIT)
18888 if (flag_pic)
18889 {
18890 #ifndef NO_PROFILE_COUNTERS
18891 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
18892 #endif
18893 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
18894 }
18895 else
18896 {
18897 #ifndef NO_PROFILE_COUNTERS
18898 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
18899 #endif
18900 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
18901 }
18902 else if (flag_pic)
18903 {
18904 #ifndef NO_PROFILE_COUNTERS
18905 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
18906 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
18907 #endif
18908 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
18909 }
18910 else
18911 {
18912 #ifndef NO_PROFILE_COUNTERS
18913 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
18914 PROFILE_COUNT_REGISTER);
18915 #endif
18916 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
18917 }
18918 }
18919
18920 /* We don't have exact information about insn sizes, but we may quite
18921 safely assume that we are informed about all 1 byte insns and about
18922 memory address sizes. This is enough to eliminate unnecessary padding in
18923 99% of cases. */
18924
18925 static int
18926 min_insn_size (rtx insn)
18927 {
18928 int l = 0;
18929
18930 if (!INSN_P (insn) || !active_insn_p (insn))
18931 return 0;
18932
18933 /* Discard the alignments we have emitted, and jump instructions. */
18934 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
18935 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
18936 return 0;
18937 if (GET_CODE (insn) == JUMP_INSN
18938 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
18939 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
18940 return 0;
18941
18942 /* Important case - calls are always 5 bytes.
18943 It is common to have many calls in a row. */
18944 if (GET_CODE (insn) == CALL_INSN
18945 && symbolic_reference_mentioned_p (PATTERN (insn))
18946 && !SIBLING_CALL_P (insn))
18947 return 5;
18948 if (get_attr_length (insn) <= 1)
18949 return 1;
18950
18951 /* For normal instructions we may rely on the sizes of addresses
18952 and on the presence of a symbol to require 4 bytes of encoding.
18953 This is not the case for jumps, where references are PC relative. */
18954 if (GET_CODE (insn) != JUMP_INSN)
18955 {
18956 l = get_attr_length_address (insn);
18957 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
18958 l = 4;
18959 }
18960 if (l)
18961 return 1+l;
18962 else
18963 return 2;
18964 }
18965
18966 /* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a
18967 16 byte window. */
18968
18969 static void
18970 ix86_avoid_jump_misspredicts (void)
18971 {
18972 rtx insn, start = get_insns ();
18973 int nbytes = 0, njumps = 0;
18974 int isjump = 0;
18975
18976 /* Look for all minimal intervals of instructions containing 4 jumps.
18977 The intervals are bounded by START and INSN. NBYTES is the total
18978 size of the instructions in the interval, including INSN and not
18979 including START. When NBYTES is smaller than 16 bytes, it is possible
18980 that the end of START and the end of INSN fall in the same 16 byte page.
18981
18982 The smallest offset in the page at which INSN can start is the case where
18983 START ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
18984 We add a p2align to the 16 byte window with maxskip 17 - NBYTES + sizeof (INSN).
18985 */
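  /* Illustrative example (not from the original sources): if the fourth
     jump ends an interval with NBYTES == 12 and min_insn_size (INSN) == 2,
     the loop below emits an align of 15 - 12 + 2 = 5 bytes, enough to push
     INSN out of the 16 byte window it would otherwise share with the three
     earlier jumps.  */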
18986 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
18987 {
18988
18989 nbytes += min_insn_size (insn);
18990 if (dump_file)
18991 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
18992 INSN_UID (insn), min_insn_size (insn));
18993 if ((GET_CODE (insn) == JUMP_INSN
18994 && GET_CODE (PATTERN (insn)) != ADDR_VEC
18995 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
18996 || GET_CODE (insn) == CALL_INSN)
18997 njumps++;
18998 else
18999 continue;
19000
19001 while (njumps > 3)
19002 {
19003 start = NEXT_INSN (start);
19004 if ((GET_CODE (start) == JUMP_INSN
19005 && GET_CODE (PATTERN (start)) != ADDR_VEC
19006 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
19007 || GET_CODE (start) == CALL_INSN)
19008 njumps--, isjump = 1;
19009 else
19010 isjump = 0;
19011 nbytes -= min_insn_size (start);
19012 }
19013 gcc_assert (njumps >= 0);
19014 if (dump_file)
19015 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
19016 INSN_UID (start), INSN_UID (insn), nbytes);
19017
19018 if (njumps == 3 && isjump && nbytes < 16)
19019 {
19020 int padsize = 15 - nbytes + min_insn_size (insn);
19021
19022 if (dump_file)
19023 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
19024 INSN_UID (insn), padsize);
19025 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
19026 }
19027 }
19028 }
19029
19030 /* The AMD Athlon works faster
19031 when RET is not the destination of a conditional jump and is not directly
19032 preceded by another jump instruction. We avoid the penalty by inserting a
19033 NOP just before the RET instruction in such cases. */
19034 static void
19035 ix86_pad_returns (void)
19036 {
19037 edge e;
19038 edge_iterator ei;
19039
19040 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
19041 {
19042 basic_block bb = e->src;
19043 rtx ret = BB_END (bb);
19044 rtx prev;
19045 bool replace = false;
19046
19047 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
19048 || !maybe_hot_bb_p (bb))
19049 continue;
19050 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
19051 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
19052 break;
19053 if (prev && GET_CODE (prev) == CODE_LABEL)
19054 {
19055 edge e;
19056 edge_iterator ei;
19057
19058 FOR_EACH_EDGE (e, ei, bb->preds)
19059 if (EDGE_FREQUENCY (e) && e->src->index >= 0
19060 && !(e->flags & EDGE_FALLTHRU))
19061 replace = true;
19062 }
19063 if (!replace)
19064 {
19065 prev = prev_active_insn (ret);
19066 if (prev
19067 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
19068 || GET_CODE (prev) == CALL_INSN))
19069 replace = true;
19070 /* Empty functions get a branch mispredict even when the jump destination
19071 is not visible to us. */
19072 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
19073 replace = true;
19074 }
19075 if (replace)
19076 {
19077 emit_insn_before (gen_return_internal_long (), ret);
19078 delete_insn (ret);
19079 }
19080 }
19081 }
19082
19083 /* Implement machine specific optimizations. We implement padding of returns
19084 for K8 CPUs and a pass to avoid 4 jumps in a single 16 byte window. */
19085 static void
19086 ix86_reorg (void)
19087 {
19088 if (TARGET_PAD_RETURNS && optimize && !optimize_size)
19089 ix86_pad_returns ();
19090 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
19091 ix86_avoid_jump_misspredicts ();
19092 }
19093
19094 /* Return nonzero when a QImode register that must be represented via a REX
19095 prefix is used. */
19096 bool
19097 x86_extended_QIreg_mentioned_p (rtx insn)
19098 {
19099 int i;
19100 extract_insn_cached (insn);
19101 for (i = 0; i < recog_data.n_operands; i++)
19102 if (REG_P (recog_data.operand[i])
19103 && REGNO (recog_data.operand[i]) >= 4)
19104 return true;
19105 return false;
19106 }
19107
19108 /* Return nonzero when P points to a register encoded via a REX prefix.
19109 Called via for_each_rtx. */
19110 static int
19111 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
19112 {
19113 unsigned int regno;
19114 if (!REG_P (*p))
19115 return 0;
19116 regno = REGNO (*p);
19117 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
19118 }
19119
19120 /* Return true when INSN mentions a register that must be encoded using a
19121 REX prefix. */
19122 bool
19123 x86_extended_reg_mentioned_p (rtx insn)
19124 {
19125 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
19126 }
19127
19128 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
19129 optabs would emit if we didn't have TFmode patterns. */
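19129a Roughly, the C equivalent of the sequence emitted below (an illustrative
 sketch only, not taken from the original sources):

 /*
      if ((signed_input_type) x >= 0)
        result = (fp_type) x;
      else
        result = (fp_type) ((x >> 1) | (x & 1)) * 2.0;

    The shift-and-or keeps the low bit sticky so that the doubled result is
    still rounded correctly.  */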
19130
19131 void
19132 x86_emit_floatuns (rtx operands[2])
19133 {
19134 rtx neglab, donelab, i0, i1, f0, in, out;
19135 enum machine_mode mode, inmode;
19136
19137 inmode = GET_MODE (operands[1]);
19138 gcc_assert (inmode == SImode || inmode == DImode);
19139
19140 out = operands[0];
19141 in = force_reg (inmode, operands[1]);
19142 mode = GET_MODE (out);
19143 neglab = gen_label_rtx ();
19144 donelab = gen_label_rtx ();
19145 i1 = gen_reg_rtx (Pmode);
19146 f0 = gen_reg_rtx (mode);
19147
19148 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
19149
19150 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
19151 emit_jump_insn (gen_jump (donelab));
19152 emit_barrier ();
19153
19154 emit_label (neglab);
19155
19156 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
19157 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
19158 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
19159 expand_float (f0, i0, 0);
19160 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
19161
19162 emit_label (donelab);
19163 }
19164 \f
19165 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19166 with all elements equal to VAL. Return true if successful. */
19167
19168 static bool
19169 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
19170 rtx target, rtx val)
19171 {
19172 enum machine_mode smode, wsmode, wvmode;
19173 rtx x;
19174
19175 switch (mode)
19176 {
19177 case V2SImode:
19178 case V2SFmode:
19179 if (!mmx_ok)
19180 return false;
19181 /* FALLTHRU */
19182
19183 case V2DFmode:
19184 case V2DImode:
19185 case V4SFmode:
19186 case V4SImode:
19187 val = force_reg (GET_MODE_INNER (mode), val);
19188 x = gen_rtx_VEC_DUPLICATE (mode, val);
19189 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19190 return true;
19191
19192 case V4HImode:
19193 if (!mmx_ok)
19194 return false;
19195 if (TARGET_SSE || TARGET_3DNOW_A)
19196 {
19197 val = gen_lowpart (SImode, val);
19198 x = gen_rtx_TRUNCATE (HImode, val);
19199 x = gen_rtx_VEC_DUPLICATE (mode, x);
19200 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19201 return true;
19202 }
19203 else
19204 {
19205 smode = HImode;
19206 wsmode = SImode;
19207 wvmode = V2SImode;
19208 goto widen;
19209 }
19210
19211 case V8QImode:
19212 if (!mmx_ok)
19213 return false;
19214 smode = QImode;
19215 wsmode = HImode;
19216 wvmode = V4HImode;
19217 goto widen;
19218 case V8HImode:
19219 if (TARGET_SSE2)
19220 {
19221 rtx tmp1, tmp2;
19222 /* Extend HImode to SImode using a paradoxical SUBREG. */
19223 tmp1 = gen_reg_rtx (SImode);
19224 emit_move_insn (tmp1, gen_lowpart (SImode, val));
19225 /* Insert the SImode value as low element of V4SImode vector. */
19226 tmp2 = gen_reg_rtx (V4SImode);
19227 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
19228 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
19229 CONST0_RTX (V4SImode),
19230 const1_rtx);
19231 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
19232 /* Cast the V4SImode vector back to a V8HImode vector. */
19233 tmp1 = gen_reg_rtx (V8HImode);
19234 emit_move_insn (tmp1, gen_lowpart (V8HImode, tmp2));
19235 /* Duplicate the low short through the whole low SImode word. */
19236 emit_insn (gen_sse2_punpcklwd (tmp1, tmp1, tmp1));
19237 /* Cast the V8HImode vector back to a V4SImode vector. */
19238 tmp2 = gen_reg_rtx (V4SImode);
19239 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
19240 /* Replicate the low element of the V4SImode vector. */
19241 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
19242 /* Cast the V4SImode vector back to V8HImode, and store in target. */
19243 emit_move_insn (target, gen_lowpart (V8HImode, tmp2));
19244 return true;
19245 }
19246 smode = HImode;
19247 wsmode = SImode;
19248 wvmode = V4SImode;
19249 goto widen;
19250 case V16QImode:
19251 if (TARGET_SSE2)
19252 {
19253 rtx tmp1, tmp2;
19254 /* Extend QImode to SImode using a paradoxical SUBREG. */
19255 tmp1 = gen_reg_rtx (SImode);
19256 emit_move_insn (tmp1, gen_lowpart (SImode, val));
19257 /* Insert the SImode value as low element of V4SImode vector. */
19258 tmp2 = gen_reg_rtx (V4SImode);
19259 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
19260 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
19261 CONST0_RTX (V4SImode),
19262 const1_rtx);
19263 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
19264 /* Cast the V4SImode vector back to a V16QImode vector. */
19265 tmp1 = gen_reg_rtx (V16QImode);
19266 emit_move_insn (tmp1, gen_lowpart (V16QImode, tmp2));
19267 /* Duplicate the low byte through the whole low SImode word. */
19268 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
19269 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
19270 /* Cast the V16QImode vector back to a V4SImode vector. */
19271 tmp2 = gen_reg_rtx (V4SImode);
19272 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
19273 /* Replicate the low element of the V4SImode vector. */
19274 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
19275 /* Cast the V4SImode vector back to V16QImode, and store in target. */
19276 emit_move_insn (target, gen_lowpart (V16QImode, tmp2));
19277 return true;
19278 }
19279 smode = QImode;
19280 wsmode = HImode;
19281 wvmode = V8HImode;
19282 goto widen;
19283 widen:
19284 /* Replicate the value once into the next wider mode and recurse. */
19285 val = convert_modes (wsmode, smode, val, true);
19286 x = expand_simple_binop (wsmode, ASHIFT, val,
19287 GEN_INT (GET_MODE_BITSIZE (smode)),
19288 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19289 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
19290
19291 x = gen_reg_rtx (wvmode);
19292 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
19293 gcc_unreachable ();
19294 emit_move_insn (target, gen_lowpart (mode, x));
19295 return true;
19296
19297 default:
19298 return false;
19299 }
19300 }
19301
19302 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19303 whose ONE_VAR element is VAR, and other elements are zero. Return true
19304 if successful. */
19305
19306 static bool
19307 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
19308 rtx target, rtx var, int one_var)
19309 {
19310 enum machine_mode vsimode;
19311 rtx new_target;
19312 rtx x, tmp;
19313
19314 switch (mode)
19315 {
19316 case V2SFmode:
19317 case V2SImode:
19318 if (!mmx_ok)
19319 return false;
19320 /* FALLTHRU */
19321
19322 case V2DFmode:
19323 case V2DImode:
19324 if (one_var != 0)
19325 return false;
19326 var = force_reg (GET_MODE_INNER (mode), var);
19327 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
19328 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19329 return true;
19330
19331 case V4SFmode:
19332 case V4SImode:
19333 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
19334 new_target = gen_reg_rtx (mode);
19335 else
19336 new_target = target;
19337 var = force_reg (GET_MODE_INNER (mode), var);
19338 x = gen_rtx_VEC_DUPLICATE (mode, var);
19339 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
19340 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
19341 if (one_var != 0)
19342 {
19343 /* We need to shuffle the value to the correct position, so
19344 create a new pseudo to store the intermediate result. */
19345
19346 /* With SSE2, we can use the integer shuffle insns. */
19347 if (mode != V4SFmode && TARGET_SSE2)
19348 {
19349 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
19350 GEN_INT (1),
19351 GEN_INT (one_var == 1 ? 0 : 1),
19352 GEN_INT (one_var == 2 ? 0 : 1),
19353 GEN_INT (one_var == 3 ? 0 : 1)));
19354 if (target != new_target)
19355 emit_move_insn (target, new_target);
19356 return true;
19357 }
19358
19359 /* Otherwise convert the intermediate result to V4SFmode and
19360 use the SSE1 shuffle instructions. */
19361 if (mode != V4SFmode)
19362 {
19363 tmp = gen_reg_rtx (V4SFmode);
19364 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
19365 }
19366 else
19367 tmp = new_target;
19368
19369 emit_insn (gen_sse_shufps_1 (tmp, tmp, tmp,
19370 GEN_INT (1),
19371 GEN_INT (one_var == 1 ? 0 : 1),
19372 GEN_INT (one_var == 2 ? 0+4 : 1+4),
19373 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
19374
19375 if (mode != V4SFmode)
19376 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
19377 else if (tmp != target)
19378 emit_move_insn (target, tmp);
19379 }
19380 else if (target != new_target)
19381 emit_move_insn (target, new_target);
19382 return true;
19383
19384 case V8HImode:
19385 case V16QImode:
19386 vsimode = V4SImode;
19387 goto widen;
19388 case V4HImode:
19389 case V8QImode:
19390 if (!mmx_ok)
19391 return false;
19392 vsimode = V2SImode;
19393 goto widen;
19394 widen:
19395 if (one_var != 0)
19396 return false;
19397
19398 /* Zero extend the variable element to SImode and recurse. */
19399 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
19400
19401 x = gen_reg_rtx (vsimode);
19402 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
19403 var, one_var))
19404 gcc_unreachable ();
19405
19406 emit_move_insn (target, gen_lowpart (mode, x));
19407 return true;
19408
19409 default:
19410 return false;
19411 }
19412 }
19413
19414 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19415 consisting of the values in VALS. It is known that all elements
19416 except ONE_VAR are constants. Return true if successful. */
19417
19418 static bool
19419 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
19420 rtx target, rtx vals, int one_var)
19421 {
19422 rtx var = XVECEXP (vals, 0, one_var);
19423 enum machine_mode wmode;
19424 rtx const_vec, x;
19425
19426 const_vec = copy_rtx (vals);
19427 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
19428 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
19429
19430 switch (mode)
19431 {
19432 case V2DFmode:
19433 case V2DImode:
19434 case V2SFmode:
19435 case V2SImode:
19436 /* For the two element vectors, it's just as easy to use
19437 the general case. */
19438 return false;
19439
19440 case V4SFmode:
19441 case V4SImode:
19442 case V8HImode:
19443 case V4HImode:
19444 break;
19445
19446 case V16QImode:
19447 wmode = V8HImode;
19448 goto widen;
19449 case V8QImode:
19450 wmode = V4HImode;
19451 goto widen;
19452 widen:
19453 /* There's no way to set one QImode entry easily. Combine
19454 the variable value with its adjacent constant value, and
19455 promote to an HImode set. */
19456 x = XVECEXP (vals, 0, one_var ^ 1);
19457 if (one_var & 1)
19458 {
19459 var = convert_modes (HImode, QImode, var, true);
19460 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
19461 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19462 x = GEN_INT (INTVAL (x) & 0xff);
19463 }
19464 else
19465 {
19466 var = convert_modes (HImode, QImode, var, true);
19467 x = gen_int_mode (INTVAL (x) << 8, HImode);
19468 }
19469 if (x != const0_rtx)
19470 var = expand_simple_binop (HImode, IOR, var, x, var,
19471 1, OPTAB_LIB_WIDEN);
19472
19473 x = gen_reg_rtx (wmode);
19474 emit_move_insn (x, gen_lowpart (wmode, const_vec));
19475 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
19476
19477 emit_move_insn (target, gen_lowpart (mode, x));
19478 return true;
19479
19480 default:
19481 return false;
19482 }
19483
19484 emit_move_insn (target, const_vec);
19485 ix86_expand_vector_set (mmx_ok, target, var, one_var);
19486 return true;
19487 }
19488
19489 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
19490 all values variable, and none identical. */
19491
19492 static void
19493 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
19494 rtx target, rtx vals)
19495 {
19496 enum machine_mode half_mode = GET_MODE_INNER (mode);
19497 rtx op0 = NULL, op1 = NULL;
19498 bool use_vec_concat = false;
19499
19500 switch (mode)
19501 {
19502 case V2SFmode:
19503 case V2SImode:
19504 if (!mmx_ok && !TARGET_SSE)
19505 break;
19506 /* FALLTHRU */
19507
19508 case V2DFmode:
19509 case V2DImode:
19510 /* For the two element vectors, we always implement VEC_CONCAT. */
19511 op0 = XVECEXP (vals, 0, 0);
19512 op1 = XVECEXP (vals, 0, 1);
19513 use_vec_concat = true;
19514 break;
19515
19516 case V4SFmode:
19517 half_mode = V2SFmode;
19518 goto half;
19519 case V4SImode:
19520 half_mode = V2SImode;
19521 goto half;
19522 half:
19523 {
19524 rtvec v;
19525
19526 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
19527 Recurse to load the two halves. */
19528
19529 op0 = gen_reg_rtx (half_mode);
19530 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
19531 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
19532
19533 op1 = gen_reg_rtx (half_mode);
19534 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
19535 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
19536
19537 use_vec_concat = true;
19538 }
19539 break;
19540
19541 case V8HImode:
19542 case V16QImode:
19543 case V4HImode:
19544 case V8QImode:
19545 break;
19546
19547 default:
19548 gcc_unreachable ();
19549 }
19550
19551 if (use_vec_concat)
19552 {
19553 if (!register_operand (op0, half_mode))
19554 op0 = force_reg (half_mode, op0);
19555 if (!register_operand (op1, half_mode))
19556 op1 = force_reg (half_mode, op1);
19557
19558 emit_insn (gen_rtx_SET (VOIDmode, target,
19559 gen_rtx_VEC_CONCAT (mode, op0, op1)));
19560 }
19561 else
19562 {
19563 int i, j, n_elts, n_words, n_elt_per_word;
19564 enum machine_mode inner_mode;
19565 rtx words[4], shift;
19566
19567 inner_mode = GET_MODE_INNER (mode);
19568 n_elts = GET_MODE_NUNITS (mode);
19569 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
19570 n_elt_per_word = n_elts / n_words;
19571 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
19572
19573 for (i = 0; i < n_words; ++i)
19574 {
19575 rtx word = NULL_RTX;
19576
19577 for (j = 0; j < n_elt_per_word; ++j)
19578 {
19579 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
19580 elt = convert_modes (word_mode, inner_mode, elt, true);
19581
19582 if (j == 0)
19583 word = elt;
19584 else
19585 {
19586 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
19587 word, 1, OPTAB_LIB_WIDEN);
19588 word = expand_simple_binop (word_mode, IOR, word, elt,
19589 word, 1, OPTAB_LIB_WIDEN);
19590 }
19591 }
19592
19593 words[i] = word;
19594 }
19595
19596 if (n_words == 1)
19597 emit_move_insn (target, gen_lowpart (mode, words[0]));
19598 else if (n_words == 2)
19599 {
19600 rtx tmp = gen_reg_rtx (mode);
19601 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
19602 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
19603 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
19604 emit_move_insn (target, tmp);
19605 }
19606 else if (n_words == 4)
19607 {
19608 rtx tmp = gen_reg_rtx (V4SImode);
19609 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
19610 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
19611 emit_move_insn (target, gen_lowpart (mode, tmp));
19612 }
19613 else
19614 gcc_unreachable ();
19615 }
19616 }
19617
19618 /* Initialize vector TARGET via VALS. Suppress the use of MMX
19619 instructions unless MMX_OK is true. */
19620
19621 void
19622 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
19623 {
19624 enum machine_mode mode = GET_MODE (target);
19625 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19626 int n_elts = GET_MODE_NUNITS (mode);
19627 int n_var = 0, one_var = -1;
19628 bool all_same = true, all_const_zero = true;
19629 int i;
19630 rtx x;
19631
19632 for (i = 0; i < n_elts; ++i)
19633 {
19634 x = XVECEXP (vals, 0, i);
19635 if (!CONSTANT_P (x))
19636 n_var++, one_var = i;
19637 else if (x != CONST0_RTX (inner_mode))
19638 all_const_zero = false;
19639 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
19640 all_same = false;
19641 }
19642
19643 /* Constants are best loaded from the constant pool. */
19644 if (n_var == 0)
19645 {
19646 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
19647 return;
19648 }
19649
19650 /* If all values are identical, broadcast the value. */
19651 if (all_same
19652 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
19653 XVECEXP (vals, 0, 0)))
19654 return;
19655
19656 /* Values where only one field is non-constant are best loaded from
19657 the pool and overwritten via a move later. */
19658 if (n_var == 1)
19659 {
19660 if (all_const_zero
19661 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
19662 XVECEXP (vals, 0, one_var),
19663 one_var))
19664 return;
19665
19666 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
19667 return;
19668 }
19669
19670 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
19671 }
19672
19673 void
19674 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
19675 {
19676 enum machine_mode mode = GET_MODE (target);
19677 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19678 bool use_vec_merge = false;
19679 rtx tmp;
19680
19681 switch (mode)
19682 {
19683 case V2SFmode:
19684 case V2SImode:
19685 if (mmx_ok)
19686 {
19687 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
19688 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
19689 if (elt == 0)
19690 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
19691 else
19692 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
19693 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19694 return;
19695 }
19696 break;
19697
19698 case V2DFmode:
19699 case V2DImode:
19700 {
19701 rtx op0, op1;
19702
19703 /* For the two element vectors, we implement a VEC_CONCAT with
19704 the extraction of the other element. */
19705
19706 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
19707 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
19708
19709 if (elt == 0)
19710 op0 = val, op1 = tmp;
19711 else
19712 op0 = tmp, op1 = val;
19713
19714 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
19715 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19716 }
19717 return;
19718
19719 case V4SFmode:
19720 switch (elt)
19721 {
19722 case 0:
19723 use_vec_merge = true;
19724 break;
19725
19726 case 1:
19727 /* tmp = target = A B C D */
19728 tmp = copy_to_reg (target);
19729 /* target = A A B B */
19730 emit_insn (gen_sse_unpcklps (target, target, target));
19731 /* target = X A B B */
19732 ix86_expand_vector_set (false, target, val, 0);
19733 /* target = A X C D */
19734 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19735 GEN_INT (1), GEN_INT (0),
19736 GEN_INT (2+4), GEN_INT (3+4)));
19737 return;
19738
19739 case 2:
19740 /* tmp = target = A B C D */
19741 tmp = copy_to_reg (target);
19742 /* tmp = X B C D */
19743 ix86_expand_vector_set (false, tmp, val, 0);
19744 /* target = A B X D */
19745 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19746 GEN_INT (0), GEN_INT (1),
19747 GEN_INT (0+4), GEN_INT (3+4)));
19748 return;
19749
19750 case 3:
19751 /* tmp = target = A B C D */
19752 tmp = copy_to_reg (target);
19753 /* tmp = X B C D */
19754 ix86_expand_vector_set (false, tmp, val, 0);
19755 /* target = A B C X */
19756 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19757 GEN_INT (0), GEN_INT (1),
19758 GEN_INT (2+4), GEN_INT (0+4)));
19759 return;
19760
19761 default:
19762 gcc_unreachable ();
19763 }
19764 break;
19765
19766 case V4SImode:
19767 /* Element 0 handled by vec_merge below. */
19768 if (elt == 0)
19769 {
19770 use_vec_merge = true;
19771 break;
19772 }
19773
19774 if (TARGET_SSE2)
19775 {
19776 /* With SSE2, use integer shuffles to swap element 0 and ELT,
19777 store into element 0, then shuffle them back. */
19778
19779 rtx order[4];
19780
19781 order[0] = GEN_INT (elt);
19782 order[1] = const1_rtx;
19783 order[2] = const2_rtx;
19784 order[3] = GEN_INT (3);
19785 order[elt] = const0_rtx;
19786
19787 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
19788 order[1], order[2], order[3]));
19789
19790 ix86_expand_vector_set (false, target, val, 0);
19791
19792 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
19793 order[1], order[2], order[3]));
19794 }
19795 else
19796 {
19797 /* For SSE1, we have to reuse the V4SF code. */
19798 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
19799 gen_lowpart (SFmode, val), elt);
19800 }
19801 return;
19802
19803 case V8HImode:
19804 use_vec_merge = TARGET_SSE2;
19805 break;
19806 case V4HImode:
19807 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
19808 break;
19809
19810 case V16QImode:
19811 case V8QImode:
19812 default:
19813 break;
19814 }
19815
19816 if (use_vec_merge)
19817 {
19818 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
19819 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
19820 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19821 }
19822 else
19823 {
19824 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
19825
19826 emit_move_insn (mem, target);
19827
19828 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
19829 emit_move_insn (tmp, val);
19830
19831 emit_move_insn (target, mem);
19832 }
19833 }
19834
19835 void
19836 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
19837 {
19838 enum machine_mode mode = GET_MODE (vec);
19839 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19840 bool use_vec_extr = false;
19841 rtx tmp;
19842
19843 switch (mode)
19844 {
19845 case V2SImode:
19846 case V2SFmode:
19847 if (!mmx_ok)
19848 break;
19849 /* FALLTHRU */
19850
19851 case V2DFmode:
19852 case V2DImode:
19853 use_vec_extr = true;
19854 break;
19855
19856 case V4SFmode:
19857 switch (elt)
19858 {
19859 case 0:
19860 tmp = vec;
19861 break;
19862
19863 case 1:
19864 case 3:
19865 tmp = gen_reg_rtx (mode);
19866 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
19867 GEN_INT (elt), GEN_INT (elt),
19868 GEN_INT (elt+4), GEN_INT (elt+4)));
19869 break;
19870
19871 case 2:
19872 tmp = gen_reg_rtx (mode);
19873 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
19874 break;
19875
19876 default:
19877 gcc_unreachable ();
19878 }
19879 vec = tmp;
19880 use_vec_extr = true;
19881 elt = 0;
19882 break;
19883
19884 case V4SImode:
19885 if (TARGET_SSE2)
19886 {
19887 switch (elt)
19888 {
19889 case 0:
19890 tmp = vec;
19891 break;
19892
19893 case 1:
19894 case 3:
19895 tmp = gen_reg_rtx (mode);
19896 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
19897 GEN_INT (elt), GEN_INT (elt),
19898 GEN_INT (elt), GEN_INT (elt)));
19899 break;
19900
19901 case 2:
19902 tmp = gen_reg_rtx (mode);
19903 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
19904 break;
19905
19906 default:
19907 gcc_unreachable ();
19908 }
19909 vec = tmp;
19910 use_vec_extr = true;
19911 elt = 0;
19912 }
19913 else
19914 {
19915 /* For SSE1, we have to reuse the V4SF code. */
19916 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
19917 gen_lowpart (V4SFmode, vec), elt);
19918 return;
19919 }
19920 break;
19921
19922 case V8HImode:
19923 use_vec_extr = TARGET_SSE2;
19924 break;
19925 case V4HImode:
19926 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
19927 break;
19928
19929 case V16QImode:
19930 case V8QImode:
19931 /* ??? Could extract the appropriate HImode element and shift. */
19932 default:
19933 break;
19934 }
19935
19936 if (use_vec_extr)
19937 {
19938 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
19939 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
19940
19941 /* Let the rtl optimizers know about the zero extension performed. */
19942 if (inner_mode == HImode)
19943 {
19944 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
19945 target = gen_lowpart (SImode, target);
19946 }
19947
19948 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19949 }
19950 else
19951 {
19952 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
19953
19954 emit_move_insn (mem, vec);
19955
19956 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
19957 emit_move_insn (target, tmp);
19958 }
19959 }
19960
19961 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
19962 pattern to reduce; DEST is the destination; IN is the input vector. */
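 /* Illustrative note (assuming FN is commutative and associative, as the
    min/max/plus reductions this is used for are): element 0 of DEST ends up
    holding FN (FN (in[0], in[2]), FN (in[1], in[3])).  */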
19963
19964 void
19965 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
19966 {
19967 rtx tmp1, tmp2, tmp3;
19968
19969 tmp1 = gen_reg_rtx (V4SFmode);
19970 tmp2 = gen_reg_rtx (V4SFmode);
19971 tmp3 = gen_reg_rtx (V4SFmode);
19972
19973 emit_insn (gen_sse_movhlps (tmp1, in, in));
19974 emit_insn (fn (tmp2, tmp1, in));
19975
19976 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
19977 GEN_INT (1), GEN_INT (1),
19978 GEN_INT (1+4), GEN_INT (1+4)));
19979 emit_insn (fn (dest, tmp2, tmp3));
19980 }
19981 \f
19982 /* Target hook for scalar_mode_supported_p. */
19983 static bool
19984 ix86_scalar_mode_supported_p (enum machine_mode mode)
19985 {
19986 if (DECIMAL_FLOAT_MODE_P (mode))
19987 return true;
19988 else
19989 return default_scalar_mode_supported_p (mode);
19990 }
19991
19992 /* Implements target hook vector_mode_supported_p. */
19993 static bool
19994 ix86_vector_mode_supported_p (enum machine_mode mode)
19995 {
19996 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
19997 return true;
19998 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
19999 return true;
20000 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
20001 return true;
20002 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
20003 return true;
20004 return false;
20005 }
20006
20007 /* Worker function for TARGET_MD_ASM_CLOBBERS.
20008
20009 We do this in the new i386 backend to maintain source compatibility
20010 with the old cc0-based compiler. */
20011
20012 static tree
20013 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
20014 tree inputs ATTRIBUTE_UNUSED,
20015 tree clobbers)
20016 {
20017 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
20018 clobbers);
20019 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
20020 clobbers);
20021 return clobbers;
20022 }
20023
20024 /* Return true if this goes in large data/bss. */
20025
20026 static bool
20027 ix86_in_large_data_p (tree exp)
20028 {
20029 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
20030 return false;
20031
20032 /* Functions are never large data. */
20033 if (TREE_CODE (exp) == FUNCTION_DECL)
20034 return false;
20035
20036 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
20037 {
20038 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
20039 if (strcmp (section, ".ldata") == 0
20040 || strcmp (section, ".lbss") == 0)
20041 return true;
20042 return false;
20043 }
20044 else
20045 {
20046 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
20047
20048 /* If this is an incomplete type with size 0, then we can't put it
20049 in data because it might be too big when completed. */
20050 if (!size || size > ix86_section_threshold)
20051 return true;
20052 }
20053
20054 return false;
20055 }
20056 static void
20057 ix86_encode_section_info (tree decl, rtx rtl, int first)
20058 {
20059 default_encode_section_info (decl, rtl, first);
20060
20061 if (TREE_CODE (decl) == VAR_DECL
20062 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
20063 && ix86_in_large_data_p (decl))
20064 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
20065 }
20066
20067 /* Worker function for REVERSE_CONDITION. */
20068
20069 enum rtx_code
20070 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
20071 {
20072 return (mode != CCFPmode && mode != CCFPUmode
20073 ? reverse_condition (code)
20074 : reverse_condition_maybe_unordered (code));
20075 }
20076
20077 /* Output code to perform an x87 FP register move, from OPERANDS[1]
20078 to OPERANDS[0]. */
20079
20080 const char *
20081 output_387_reg_move (rtx insn, rtx *operands)
20082 {
20083 if (REG_P (operands[1])
20084 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
20085 {
20086 if (REGNO (operands[0]) == FIRST_STACK_REG)
20087 return output_387_ffreep (operands, 0);
20088 return "fstp\t%y0";
20089 }
20090 if (STACK_TOP_P (operands[0]))
20091 return "fld%z1\t%y1";
20092 return "fst\t%y0";
20093 }
20094
20095 /* Output code to perform a conditional jump to LABEL, if the C2 flag in
20096 the FP status register is set. */
20097
20098 void
20099 ix86_emit_fp_unordered_jump (rtx label)
20100 {
20101 rtx reg = gen_reg_rtx (HImode);
20102 rtx temp;
20103
20104 emit_insn (gen_x86_fnstsw_1 (reg));
20105
20106 if (TARGET_USE_SAHF)
20107 {
20108 emit_insn (gen_x86_sahf_1 (reg));
20109
20110 temp = gen_rtx_REG (CCmode, FLAGS_REG);
20111 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
20112 }
20113 else
20114 {
20115 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
20116
20117 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
20118 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
20119 }
20120
20121 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
20122 gen_rtx_LABEL_REF (VOIDmode, label),
20123 pc_rtx);
20124 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
20125 emit_jump_insn (temp);
20126 }
20127
20128 /* Output code to perform a log1p XFmode calculation. */
20129
20130 void ix86_emit_i387_log1p (rtx op0, rtx op1)
20131 {
20132 rtx label1 = gen_label_rtx ();
20133 rtx label2 = gen_label_rtx ();
20134
20135 rtx tmp = gen_reg_rtx (XFmode);
20136 rtx tmp2 = gen_reg_rtx (XFmode);
20137
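  /* |op1| is compared against 1 - sqrt(2)/2 (~0.2929) below: the x87
     fyl2xp1 instruction is only specified for arguments in roughly that
     range, so larger arguments fall back to fyl2x applied to 1.0 + op1.  */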
20138 emit_insn (gen_absxf2 (tmp, op1));
20139 emit_insn (gen_cmpxf (tmp,
20140 CONST_DOUBLE_FROM_REAL_VALUE (
20141 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
20142 XFmode)));
20143 emit_jump_insn (gen_bge (label1));
20144
20145 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
20146 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
20147 emit_jump (label2);
20148
20149 emit_label (label1);
20150 emit_move_insn (tmp, CONST1_RTX (XFmode));
20151 emit_insn (gen_addxf3 (tmp, op1, tmp));
20152 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
20153 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
20154
20155 emit_label (label2);
20156 }
20157
20158 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
20159
20160 static void
20161 i386_solaris_elf_named_section (const char *name, unsigned int flags,
20162 tree decl)
20163 {
20164 /* With Binutils 2.15, the "@unwind" marker must be specified on
20165 every occurrence of the ".eh_frame" section, not just the first
20166 one. */
20167 if (TARGET_64BIT
20168 && strcmp (name, ".eh_frame") == 0)
20169 {
20170 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
20171 flags & SECTION_WRITE ? "aw" : "a");
20172 return;
20173 }
20174 default_elf_asm_named_section (name, flags, decl);
20175 }
20176
20177 /* Return the mangling of TYPE if it is an extended fundamental type. */
20178
20179 static const char *
20180 ix86_mangle_fundamental_type (tree type)
20181 {
20182 switch (TYPE_MODE (type))
20183 {
20184 case TFmode:
20185 /* __float128 is "g". */
20186 return "g";
20187 case XFmode:
20188 /* "long double" or __float80 is "e". */
20189 return "e";
20190 default:
20191 return NULL;
20192 }
20193 }
20194
20195 /* For 32-bit code we can avoid the PIC register setup by calling the
20196 hidden function __stack_chk_fail_local instead of calling
20197 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
20198 register, so it is better to call __stack_chk_fail directly. */
20199
20200 static tree
20201 ix86_stack_protect_fail (void)
20202 {
20203 return TARGET_64BIT
20204 ? default_external_stack_protect_fail ()
20205 : default_hidden_stack_protect_fail ();
20206 }
20207
20208 /* Select a format to encode pointers in exception handling data. CODE
20209 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
20210 true if the symbol may be affected by dynamic relocations.
20211
20212 ??? All x86 object file formats are capable of representing this.
20213 After all, the relocation needed is the same as for the call insn.
20214 Whether or not a particular assembler allows us to enter such, I
20215 guess we'll have to see. */
20216 int
20217 asm_preferred_eh_data_format (int code, int global)
20218 {
20219 if (flag_pic)
20220 {
20221 int type = DW_EH_PE_sdata8;
20222 if (!TARGET_64BIT
20223 || ix86_cmodel == CM_SMALL_PIC
20224 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
20225 type = DW_EH_PE_sdata4;
20226 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
20227 }
20228 if (ix86_cmodel == CM_SMALL
20229 || (ix86_cmodel == CM_MEDIUM && code))
20230 return DW_EH_PE_udata4;
20231 return DW_EH_PE_absptr;
20232 }
20233 \f
20234 /* Expand copysign from SIGN to the positive value ABS_VALUE,
20235 storing the result in RESULT. If MASK is non-null, it shall be a mask
20236 to mask out the sign bit. */
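/* Illustrative paraphrase (not from the original sources): in effect this
   computes RESULT = ABS_VALUE | (SIGN & sign-bit-mask), i.e. the sign bit
   of SIGN is copied onto the already-positive ABS_VALUE.  */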
20237 static void
20238 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
20239 {
20240 enum machine_mode mode = GET_MODE (sign);
20241 rtx sgn = gen_reg_rtx (mode);
20242 if (mask == NULL_RTX)
20243 {
20244 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
20245 if (!VECTOR_MODE_P (mode))
20246 {
20247 /* We need to generate a scalar mode mask in this case. */
20248 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
20249 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
20250 mask = gen_reg_rtx (mode);
20251 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
20252 }
20253 }
20254 else
20255 mask = gen_rtx_NOT (mode, mask);
20256 emit_insn (gen_rtx_SET (VOIDmode, sgn,
20257 gen_rtx_AND (mode, mask, sign)));
20258 emit_insn (gen_rtx_SET (VOIDmode, result,
20259 gen_rtx_IOR (mode, abs_value, sgn)));
20260 }
20261
20262 /* Expand fabs (OP0) and return a new rtx that holds the result. The
20263 mask for masking out the sign-bit is stored in *SMASK, if that is
20264 non-null. */
20265 static rtx
20266 ix86_expand_sse_fabs (rtx op0, rtx *smask)
20267 {
20268 enum machine_mode mode = GET_MODE (op0);
20269 rtx xa, mask;
20270
20271 xa = gen_reg_rtx (mode);
20272 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
20273 if (!VECTOR_MODE_P (mode))
20274 {
20275 /* We need to generate a scalar mode mask in this case. */
20276 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
20277 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
20278 mask = gen_reg_rtx (mode);
20279 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
20280 }
20281 emit_insn (gen_rtx_SET (VOIDmode, xa,
20282 gen_rtx_AND (mode, op0, mask)));
20283
20284 if (smask)
20285 *smask = mask;
20286
20287 return xa;
20288 }
20289
20290 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
20291 swapping the operands if SWAP_OPERANDS is true. The expanded
20292 code is a forward jump to a newly created label in case the
20293 comparison is true. The generated label rtx is returned. */
20294 static rtx
20295 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
20296 bool swap_operands)
20297 {
20298 rtx label, tmp;
20299
20300 if (swap_operands)
20301 {
20302 tmp = op0;
20303 op0 = op1;
20304 op1 = tmp;
20305 }
20306
20307 label = gen_label_rtx ();
20308 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
20309 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20310 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
20311 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
20312 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
20313 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
20314 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
20315 JUMP_LABEL (tmp) = label;
20316
20317 return label;
20318 }
20319
20320 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
20321 using comparison code CODE. Operands are swapped for the comparison if
20322 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
20323 static rtx
20324 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
20325 bool swap_operands)
20326 {
20327 enum machine_mode mode = GET_MODE (op0);
20328 rtx mask = gen_reg_rtx (mode);
20329
20330 if (swap_operands)
20331 {
20332 rtx tmp = op0;
20333 op0 = op1;
20334 op1 = tmp;
20335 }
20336
20337 if (mode == DFmode)
20338 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
20339 gen_rtx_fmt_ee (code, mode, op0, op1)));
20340 else
20341 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
20342 gen_rtx_fmt_ee (code, mode, op0, op1)));
20343
20344 return mask;
20345 }
20346
20347 /* Generate and return a rtx of mode MODE for 2**n where n is the number
20348 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
20349 static rtx
20350 ix86_gen_TWO52 (enum machine_mode mode)
20351 {
20352 REAL_VALUE_TYPE TWO52r;
20353 rtx TWO52;
20354
20355 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
20356 TWO52 = const_double_from_real_value (TWO52r, mode);
20357 TWO52 = force_reg (mode, TWO52);
20358
20359 return TWO52;
20360 }
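/* Illustrative note (not from the original sources): for |x| < 2**52 in
   DFmode, computing (x + TWO52) - TWO52 in round-to-nearest mode forces the
   fractional bits out of the significand; e.g. 3.7 + 2**52 rounds to
   2**52 + 4.0, and subtracting 2**52 again yields 4.0.  */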
20361
20362 /* Expand SSE sequence for computing lround from OP1 storing
20363 into OP0. */
20364 void
20365 ix86_expand_lround (rtx op0, rtx op1)
20366 {
20367 /* C code for the stuff we're doing below:
20368 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
20369 return (long)tmp;
20370 */
20371 enum machine_mode mode = GET_MODE (op1);
20372 const struct real_format *fmt;
20373 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
20374 rtx adj;
20375
20376 /* load nextafter (0.5, 0.0) */
20377 fmt = REAL_MODE_FORMAT (mode);
20378 real_2expN (&half_minus_pred_half, -(fmt->p) - 1);
20379 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
20380
20381 /* adj = copysign (0.5, op1) */
20382 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
20383 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
20384
20385 /* adj = op1 + adj */
20386 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
20387
20388 /* op0 = (imode)adj */
20389 expand_fix (op0, adj, 0);
20390 }
20391
20392 /* Expand SSE2 sequence for computing lfloor or lceil from OP1 storing
20393 into OP0. */
20394 void
20395 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
20396 {
20397 /* C code for the stuff we're doing below (for do_floor):
20398 xi = (long)op1;
20399 xi -= (double)xi > op1 ? 1 : 0;
20400 return xi;
20401 */
20402 enum machine_mode fmode = GET_MODE (op1);
20403 enum machine_mode imode = GET_MODE (op0);
20404 rtx ireg, freg, label, tmp;
20405
20406 /* reg = (long)op1 */
20407 ireg = gen_reg_rtx (imode);
20408 expand_fix (ireg, op1, 0);
20409
20410 /* freg = (double)reg */
20411 freg = gen_reg_rtx (fmode);
20412 expand_float (freg, ireg, 0);
20413
20414 /* ireg = (freg > op1) ? ireg - 1 : ireg */
20415 label = ix86_expand_sse_compare_and_jump (UNLE,
20416 freg, op1, !do_floor);
20417 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
20418 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
20419 emit_move_insn (ireg, tmp);
20420
20421 emit_label (label);
20422 LABEL_NUSES (label) = 1;
20423
20424 emit_move_insn (op0, ireg);
20425 }
20426
20427 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
20428 result in OPERAND0. */
20429 void
20430 ix86_expand_rint (rtx operand0, rtx operand1)
20431 {
20432 /* C code for the stuff we're doing below:
20433 xa = fabs (operand1);
20434 if (!isless (xa, 2**52))
20435 return operand1;
20436 xa = xa + 2**52 - 2**52;
20437 return copysign (xa, operand1);
20438 */
20439 enum machine_mode mode = GET_MODE (operand0);
20440 rtx res, xa, label, TWO52, mask;
20441
20442 res = gen_reg_rtx (mode);
20443 emit_move_insn (res, operand1);
20444
20445 /* xa = abs (operand1) */
20446 xa = ix86_expand_sse_fabs (res, &mask);
20447
20448 /* if (!isless (xa, TWO52)) goto label; */
20449 TWO52 = ix86_gen_TWO52 (mode);
20450 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20451
20452 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20453 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
20454
20455 ix86_sse_copysign_to_positive (res, xa, res, mask);
20456
20457 emit_label (label);
20458 LABEL_NUSES (label) = 1;
20459
20460 emit_move_insn (operand0, res);
20461 }
20462
20463 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
20464 into OPERAND0. */
20465 void
20466 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
20467 {
20468 /* C code for the stuff we expand below.
20469 double xa = fabs (x), x2;
20470 if (!isless (xa, TWO52))
20471 return x;
20472 xa = xa + TWO52 - TWO52;
20473 x2 = copysign (xa, x);
20474 Compensate. Floor:
20475 if (x2 > x)
20476 x2 -= 1;
20477 Compensate. Ceil:
20478 if (x2 < x)
20479 x2 -= -1;
20480 return x2;
20481 */
20482 enum machine_mode mode = GET_MODE (operand0);
20483 rtx xa, TWO52, tmp, label, one, res, mask;
20484
20485 TWO52 = ix86_gen_TWO52 (mode);
20486
20487 /* Temporary for holding the result, initialized to the input
20488 operand to ease control flow. */
20489 res = gen_reg_rtx (mode);
20490 emit_move_insn (res, operand1);
20491
20492 /* xa = abs (operand1) */
20493 xa = ix86_expand_sse_fabs (res, &mask);
20494
20495 /* if (!isless (xa, TWO52)) goto label; */
20496 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20497
20498 /* xa = xa + TWO52 - TWO52; */
20499 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20500 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
20501
20502 /* xa = copysign (xa, operand1) */
20503 ix86_sse_copysign_to_positive (xa, xa, res, mask);
20504
20505 /* generate 1.0 or -1.0 */
20506 one = force_reg (mode,
20507 const_double_from_real_value (do_floor
20508 ? dconst1 : dconstm1, mode));
20509
20510 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
20511 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
20512 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20513 gen_rtx_AND (mode, one, tmp)));
20514 /* We always need to subtract here to preserve signed zero. */
20515 tmp = expand_simple_binop (mode, MINUS,
20516 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20517 emit_move_insn (res, tmp);
20518
20519 emit_label (label);
20520 LABEL_NUSES (label) = 1;
20521
20522 emit_move_insn (operand0, res);
20523 }
20524
20525 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
20526 into OPERAND0. */
20527 void
20528 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
20529 {
20530 /* C code for the stuff we expand below.
20531 double xa = fabs (x), x2;
20532 if (!isless (xa, TWO52))
20533 return x;
20534 x2 = (double)(long)x;
20535 Compensate. Floor:
20536 if (x2 > x)
20537 x2 -= 1;
20538 Compensate. Ceil:
20539 if (x2 < x)
20540 x2 += 1;
20541 if (HONOR_SIGNED_ZEROS (mode))
20542 return copysign (x2, x);
20543 return x2;
20544 */
20545 enum machine_mode mode = GET_MODE (operand0);
20546 rtx xa, xi, TWO52, tmp, label, one, res, mask;
20547
20548 TWO52 = ix86_gen_TWO52 (mode);
20549
20550 /* Temporary for holding the result, initialized to the input
20551 operand to ease control flow. */
20552 res = gen_reg_rtx (mode);
20553 emit_move_insn (res, operand1);
20554
20555 /* xa = abs (operand1) */
20556 xa = ix86_expand_sse_fabs (res, &mask);
20557
20558 /* if (!isless (xa, TWO52)) goto label; */
20559 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20560
20561 /* xa = (double)(long)x */
20562 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20563 expand_fix (xi, res, 0);
20564 expand_float (xa, xi, 0);
20565
20566 /* generate 1.0 */
20567 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
20568
20569 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
20570 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
20571 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20572 gen_rtx_AND (mode, one, tmp)));
20573 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
20574 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20575 emit_move_insn (res, tmp);
20576
20577 if (HONOR_SIGNED_ZEROS (mode))
20578 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
20579
20580 emit_label (label);
20581 LABEL_NUSES (label) = 1;
20582
20583 emit_move_insn (operand0, res);
20584 }
20585
20586 /* Expand SSE sequence for computing round from OPERAND1 storing
20587 into OPERAND0. Sequence that works without relying on DImode truncation
20588 via cvttsd2siq, which is only available on 64-bit targets. */
20589 void
20590 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
20591 {
20592 /* C code for the stuff we expand below.
20593 double xa = fabs (x), xa2, x2;
20594 if (!isless (xa, TWO52))
20595 return x;
20596 Using the absolute value and copying back sign makes
20597 -0.0 -> -0.0 correct.
20598 xa2 = xa + TWO52 - TWO52;
20599 Compensate.
20600 dxa = xa2 - xa;
20601 if (dxa <= -0.5)
20602 xa2 += 1;
20603 else if (dxa > 0.5)
20604 xa2 -= 1;
20605 x2 = copysign (xa2, x);
20606 return x2;
20607 */
20608 enum machine_mode mode = GET_MODE (operand0);
20609 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
20610
20611 TWO52 = ix86_gen_TWO52 (mode);
20612
20613 /* Temporary for holding the result, initialized to the input
20614 operand to ease control flow. */
20615 res = gen_reg_rtx (mode);
20616 emit_move_insn (res, operand1);
20617
20618 /* xa = abs (operand1) */
20619 xa = ix86_expand_sse_fabs (res, &mask);
20620
20621 /* if (!isless (xa, TWO52)) goto label; */
20622 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20623
20624 /* xa2 = xa + TWO52 - TWO52; */
20625 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20626 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
20627
20628 /* dxa = xa2 - xa; */
20629 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
20630
20631 /* generate 0.5, 1.0 and -0.5 */
20632 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
20633 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
20634 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
20635 0, OPTAB_DIRECT);
20636
20637 /* Compensate. */
20638 tmp = gen_reg_rtx (mode);
20639 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
20640 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
20641 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20642 gen_rtx_AND (mode, one, tmp)));
20643 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20644 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
20645 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
20646 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20647 gen_rtx_AND (mode, one, tmp)));
20648 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20649
20650 /* res = copysign (xa2, operand1) */
20651 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
20652
20653 emit_label (label);
20654 LABEL_NUSES (label) = 1;
20655
20656 emit_move_insn (operand0, res);
20657 }
20658
20659 /* Expand SSE sequence for computing trunc from OPERAND1 storing
20660 into OPERAND0. */
20661 void
20662 ix86_expand_trunc (rtx operand0, rtx operand1)
20663 {
20664 /* C code for SSE variant we expand below.
20665 double xa = fabs (x), x2;
20666 if (!isless (xa, TWO52))
20667 return x;
20668 x2 = (double)(long)x;
20669 if (HONOR_SIGNED_ZEROS (mode))
20670 return copysign (x2, x);
20671 return x2;
20672 */
20673 enum machine_mode mode = GET_MODE (operand0);
20674 rtx xa, xi, TWO52, label, res, mask;
20675
20676 TWO52 = ix86_gen_TWO52 (mode);
20677
20678 /* Temporary for holding the result, initialized to the input
20679 operand to ease control flow. */
20680 res = gen_reg_rtx (mode);
20681 emit_move_insn (res, operand1);
20682
20683 /* xa = abs (operand1) */
20684 xa = ix86_expand_sse_fabs (res, &mask);
20685
20686 /* if (!isless (xa, TWO52)) goto label; */
20687 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20688
20689 /* x = (double)(long)x */
20690 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20691 expand_fix (xi, res, 0);
20692 expand_float (res, xi, 0);
20693
20694 if (HONOR_SIGNED_ZEROS (mode))
20695 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
20696
20697 emit_label (label);
20698 LABEL_NUSES (label) = 1;
20699
20700 emit_move_insn (operand0, res);
20701 }
20702
20703 /* Expand SSE sequence for computing trunc from OPERAND1 storing
20704 into OPERAND0. Variant that does not rely on DImode truncation. */
20705 void
20706 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
20707 {
20708 enum machine_mode mode = GET_MODE (operand0);
20709 rtx xa, mask, TWO52, label, one, res, smask, tmp;
20710
20711 /* C code for SSE variant we expand below.
20712 double xa = fabs (x), x2;
20713 if (!isless (xa, TWO52))
20714 return x;
20715 xa2 = xa + TWO52 - TWO52;
20716 Compensate:
20717 if (xa2 > xa)
20718 xa2 -= 1.0;
20719 x2 = copysign (xa2, x);
20720 return x2;
20721 */
20722
20723 TWO52 = ix86_gen_TWO52 (mode);
20724
20725 /* Temporary for holding the result, initialized to the input
20726 operand to ease control flow. */
20727 res = gen_reg_rtx (mode);
20728 emit_move_insn (res, operand1);
20729
20730 /* xa = abs (operand1) */
20731 xa = ix86_expand_sse_fabs (res, &smask);
20732
20733 /* if (!isless (xa, TWO52)) goto label; */
20734 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20735
20736 /* res = xa + TWO52 - TWO52; */
20737 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20738 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
20739 emit_move_insn (res, tmp);
20740
20741 /* generate 1.0 */
20742 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
20743
20744 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
20745 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
20746 emit_insn (gen_rtx_SET (VOIDmode, mask,
20747 gen_rtx_AND (mode, mask, one)));
20748 tmp = expand_simple_binop (mode, MINUS,
20749 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
20750 emit_move_insn (res, tmp);
20751
20752 /* res = copysign (res, operand1) */
20753 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
20754
20755 emit_label (label);
20756 LABEL_NUSES (label) = 1;
20757
20758 emit_move_insn (operand0, res);
20759 }
20760
20761 /* Expand SSE sequence for computing round from OPERAND1 storing
20762 into OPERAND0. */
20763 void
20764 ix86_expand_round (rtx operand0, rtx operand1)
20765 {
20766 /* C code for the stuff we're doing below:
20767 double xa = fabs (x);
20768 if (!isless (xa, TWO52))
20769 return x;
20770 xa = (double)(long)(xa + nextafter (0.5, 0.0));
20771 return copysign (xa, x);
20772 */
20773 enum machine_mode mode = GET_MODE (operand0);
20774 rtx res, TWO52, xa, label, xi, half, mask;
20775 const struct real_format *fmt;
20776 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
20777
20778 /* Temporary for holding the result, initialized to the input
20779 operand to ease control flow. */
20780 res = gen_reg_rtx (mode);
20781 emit_move_insn (res, operand1);
20782
20783 TWO52 = ix86_gen_TWO52 (mode);
20784 xa = ix86_expand_sse_fabs (res, &mask);
20785 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20786
20787 /* load nextafter (0.5, 0.0) */
20788 fmt = REAL_MODE_FORMAT (mode);
20789 real_2expN (&half_minus_pred_half, -(fmt->p) - 1);
20790 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
20791
20792 /* xa = xa + 0.5 */
20793 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
20794 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
20795
20796 /* xa = (double)(int64_t)xa */
20797 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20798 expand_fix (xi, xa, 0);
20799 expand_float (xa, xi, 0);
20800
20801 /* res = copysign (xa, operand1) */
20802 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
20803
20804 emit_label (label);
20805 LABEL_NUSES (label) = 1;
20806
20807 emit_move_insn (operand0, res);
20808 }
20809
20810 #include "gt-i386.h"