1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "tree-gimple.h"
51 #include "dwarf2.h"
52 #include "tm-constrs.h"
53 #include "params.h"
54
55 #ifndef CHECK_STACK_LIMIT
56 #define CHECK_STACK_LIMIT (-1)
57 #endif
58
59 /* Return index of given mode in mult and division cost tables. */
60 #define MODE_INDEX(mode) \
61 ((mode) == QImode ? 0 \
62 : (mode) == HImode ? 1 \
63 : (mode) == SImode ? 2 \
64 : (mode) == DImode ? 3 \
65 : 4)
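/* For example, MODE_INDEX (SImode) evaluates to 2, selecting the SImode entry
   of the five-element multiply and divide cost arrays in the tables below;
   any mode other than QI/HI/SI/DImode falls through to the catch-all index 4.  */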
66
67 /* Processor costs (relative to an add) */
68 /* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
69 #define COSTS_N_BYTES(N) ((N) * 2)
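/* Under that assumption a 2-byte addition costs COSTS_N_BYTES (2) == 4
   == COSTS_N_INSNS (1), so the byte-based size costs below stay on a scale
   comparable to the cycle-based costs used by the other tables.  */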
70
71 #define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
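/* Placeholder strategy: request a libcall for unknown sizes and for every
   known size range ({-1, ...} terminates the size table).  Each cost table
   below carries a pair of stringop strategies per operation; judging from
   the tables, the first entry of a pair is used for 32-bit and the second
   for 64-bit code, and this dummy fills whichever member does not apply.  */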
72
73 static const
74 struct processor_costs size_cost = { /* costs for tuning for size */
75 COSTS_N_BYTES (2), /* cost of an add instruction */
76 COSTS_N_BYTES (3), /* cost of a lea instruction */
77 COSTS_N_BYTES (2), /* variable shift costs */
78 COSTS_N_BYTES (3), /* constant shift costs */
79 {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
80 COSTS_N_BYTES (3), /* HI */
81 COSTS_N_BYTES (3), /* SI */
82 COSTS_N_BYTES (3), /* DI */
83 COSTS_N_BYTES (5)}, /* other */
84 0, /* cost of multiply per each bit set */
85 {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
86 COSTS_N_BYTES (3), /* HI */
87 COSTS_N_BYTES (3), /* SI */
88 COSTS_N_BYTES (3), /* DI */
89 COSTS_N_BYTES (5)}, /* other */
90 COSTS_N_BYTES (3), /* cost of movsx */
91 COSTS_N_BYTES (3), /* cost of movzx */
92 0, /* "large" insn */
93 2, /* MOVE_RATIO */
94 2, /* cost for loading QImode using movzbl */
95 {2, 2, 2}, /* cost of loading integer registers
96 in QImode, HImode and SImode.
97 Relative to reg-reg move (2). */
98 {2, 2, 2}, /* cost of storing integer registers */
99 2, /* cost of reg,reg fld/fst */
100 {2, 2, 2}, /* cost of loading fp registers
101 in SFmode, DFmode and XFmode */
102 {2, 2, 2}, /* cost of storing fp registers
103 in SFmode, DFmode and XFmode */
104 3, /* cost of moving MMX register */
105 {3, 3}, /* cost of loading MMX registers
106 in SImode and DImode */
107 {3, 3}, /* cost of storing MMX registers
108 in SImode and DImode */
109 3, /* cost of moving SSE register */
110 {3, 3, 3}, /* cost of loading SSE registers
111 in SImode, DImode and TImode */
112 {3, 3, 3}, /* cost of storing SSE registers
113 in SImode, DImode and TImode */
114 3, /* MMX or SSE register to integer */
115 0, /* size of prefetch block */
116 0, /* number of parallel prefetches */
117 2, /* Branch cost */
118 COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
119 COSTS_N_BYTES (2), /* cost of FMUL instruction. */
120 COSTS_N_BYTES (2), /* cost of FDIV instruction. */
121 COSTS_N_BYTES (2), /* cost of FABS instruction. */
122 COSTS_N_BYTES (2), /* cost of FCHS instruction. */
123 COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
124 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
125 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
126 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
127 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}}
128 };
129
130 /* Processor costs (relative to an add) */
131 static const
132 struct processor_costs i386_cost = { /* 386 specific costs */
133 COSTS_N_INSNS (1), /* cost of an add instruction */
134 COSTS_N_INSNS (1), /* cost of a lea instruction */
135 COSTS_N_INSNS (3), /* variable shift costs */
136 COSTS_N_INSNS (2), /* constant shift costs */
137 {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
138 COSTS_N_INSNS (6), /* HI */
139 COSTS_N_INSNS (6), /* SI */
140 COSTS_N_INSNS (6), /* DI */
141 COSTS_N_INSNS (6)}, /* other */
142 COSTS_N_INSNS (1), /* cost of multiply per each bit set */
143 {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
144 COSTS_N_INSNS (23), /* HI */
145 COSTS_N_INSNS (23), /* SI */
146 COSTS_N_INSNS (23), /* DI */
147 COSTS_N_INSNS (23)}, /* other */
148 COSTS_N_INSNS (3), /* cost of movsx */
149 COSTS_N_INSNS (2), /* cost of movzx */
150 15, /* "large" insn */
151 3, /* MOVE_RATIO */
152 4, /* cost for loading QImode using movzbl */
153 {2, 4, 2}, /* cost of loading integer registers
154 in QImode, HImode and SImode.
155 Relative to reg-reg move (2). */
156 {2, 4, 2}, /* cost of storing integer registers */
157 2, /* cost of reg,reg fld/fst */
158 {8, 8, 8}, /* cost of loading fp registers
159 in SFmode, DFmode and XFmode */
160 {8, 8, 8}, /* cost of storing fp registers
161 in SFmode, DFmode and XFmode */
162 2, /* cost of moving MMX register */
163 {4, 8}, /* cost of loading MMX registers
164 in SImode and DImode */
165 {4, 8}, /* cost of storing MMX registers
166 in SImode and DImode */
167 2, /* cost of moving SSE register */
168 {4, 8, 16}, /* cost of loading SSE registers
169 in SImode, DImode and TImode */
170 {4, 8, 16}, /* cost of storing SSE registers
171 in SImode, DImode and TImode */
172 3, /* MMX or SSE register to integer */
173 0, /* size of prefetch block */
174 0, /* number of parallel prefetches */
175 1, /* Branch cost */
176 COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
177 COSTS_N_INSNS (27), /* cost of FMUL instruction. */
178 COSTS_N_INSNS (88), /* cost of FDIV instruction. */
179 COSTS_N_INSNS (22), /* cost of FABS instruction. */
180 COSTS_N_INSNS (24), /* cost of FCHS instruction. */
181 COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
182 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
183 DUMMY_STRINGOP_ALGS},
184 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
185 DUMMY_STRINGOP_ALGS},
186 };
187
188 static const
189 struct processor_costs i486_cost = { /* 486 specific costs */
190 COSTS_N_INSNS (1), /* cost of an add instruction */
191 COSTS_N_INSNS (1), /* cost of a lea instruction */
192 COSTS_N_INSNS (3), /* variable shift costs */
193 COSTS_N_INSNS (2), /* constant shift costs */
194 {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
195 COSTS_N_INSNS (12), /* HI */
196 COSTS_N_INSNS (12), /* SI */
197 COSTS_N_INSNS (12), /* DI */
198 COSTS_N_INSNS (12)}, /* other */
199 1, /* cost of multiply per each bit set */
200 {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
201 COSTS_N_INSNS (40), /* HI */
202 COSTS_N_INSNS (40), /* SI */
203 COSTS_N_INSNS (40), /* DI */
204 COSTS_N_INSNS (40)}, /* other */
205 COSTS_N_INSNS (3), /* cost of movsx */
206 COSTS_N_INSNS (2), /* cost of movzx */
207 15, /* "large" insn */
208 3, /* MOVE_RATIO */
209 4, /* cost for loading QImode using movzbl */
210 {2, 4, 2}, /* cost of loading integer registers
211 in QImode, HImode and SImode.
212 Relative to reg-reg move (2). */
213 {2, 4, 2}, /* cost of storing integer registers */
214 2, /* cost of reg,reg fld/fst */
215 {8, 8, 8}, /* cost of loading fp registers
216 in SFmode, DFmode and XFmode */
217 {8, 8, 8}, /* cost of storing fp registers
218 in SFmode, DFmode and XFmode */
219 2, /* cost of moving MMX register */
220 {4, 8}, /* cost of loading MMX registers
221 in SImode and DImode */
222 {4, 8}, /* cost of storing MMX registers
223 in SImode and DImode */
224 2, /* cost of moving SSE register */
225 {4, 8, 16}, /* cost of loading SSE registers
226 in SImode, DImode and TImode */
227 {4, 8, 16}, /* cost of storing SSE registers
228 in SImode, DImode and TImode */
229 3, /* MMX or SSE register to integer */
230 0, /* size of prefetch block */
231 0, /* number of parallel prefetches */
232 1, /* Branch cost */
233 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
234 COSTS_N_INSNS (16), /* cost of FMUL instruction. */
235 COSTS_N_INSNS (73), /* cost of FDIV instruction. */
236 COSTS_N_INSNS (3), /* cost of FABS instruction. */
237 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
238 COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
239 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
240 DUMMY_STRINGOP_ALGS},
241 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
242 DUMMY_STRINGOP_ALGS}
243 };
244
245 static const
246 struct processor_costs pentium_cost = {
247 COSTS_N_INSNS (1), /* cost of an add instruction */
248 COSTS_N_INSNS (1), /* cost of a lea instruction */
249 COSTS_N_INSNS (4), /* variable shift costs */
250 COSTS_N_INSNS (1), /* constant shift costs */
251 {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
252 COSTS_N_INSNS (11), /* HI */
253 COSTS_N_INSNS (11), /* SI */
254 COSTS_N_INSNS (11), /* DI */
255 COSTS_N_INSNS (11)}, /* other */
256 0, /* cost of multiply per each bit set */
257 {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
258 COSTS_N_INSNS (25), /* HI */
259 COSTS_N_INSNS (25), /* SI */
260 COSTS_N_INSNS (25), /* DI */
261 COSTS_N_INSNS (25)}, /* other */
262 COSTS_N_INSNS (3), /* cost of movsx */
263 COSTS_N_INSNS (2), /* cost of movzx */
264 8, /* "large" insn */
265 6, /* MOVE_RATIO */
266 6, /* cost for loading QImode using movzbl */
267 {2, 4, 2}, /* cost of loading integer registers
268 in QImode, HImode and SImode.
269 Relative to reg-reg move (2). */
270 {2, 4, 2}, /* cost of storing integer registers */
271 2, /* cost of reg,reg fld/fst */
272 {2, 2, 6}, /* cost of loading fp registers
273 in SFmode, DFmode and XFmode */
274 {4, 4, 6}, /* cost of storing fp registers
275 in SFmode, DFmode and XFmode */
276 8, /* cost of moving MMX register */
277 {8, 8}, /* cost of loading MMX registers
278 in SImode and DImode */
279 {8, 8}, /* cost of storing MMX registers
280 in SImode and DImode */
281 2, /* cost of moving SSE register */
282 {4, 8, 16}, /* cost of loading SSE registers
283 in SImode, DImode and TImode */
284 {4, 8, 16}, /* cost of storing SSE registers
285 in SImode, DImode and TImode */
286 3, /* MMX or SSE register to integer */
287 0, /* size of prefetch block */
288 0, /* number of parallel prefetches */
289 2, /* Branch cost */
290 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
291 COSTS_N_INSNS (3), /* cost of FMUL instruction. */
292 COSTS_N_INSNS (39), /* cost of FDIV instruction. */
293 COSTS_N_INSNS (1), /* cost of FABS instruction. */
294 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
295 COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
296 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
297 DUMMY_STRINGOP_ALGS},
298 {{libcall, {{-1, rep_prefix_4_byte}}},
299 DUMMY_STRINGOP_ALGS}
300 };
301
302 static const
303 struct processor_costs pentiumpro_cost = {
304 COSTS_N_INSNS (1), /* cost of an add instruction */
305 COSTS_N_INSNS (1), /* cost of a lea instruction */
306 COSTS_N_INSNS (1), /* variable shift costs */
307 COSTS_N_INSNS (1), /* constant shift costs */
308 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
309 COSTS_N_INSNS (4), /* HI */
310 COSTS_N_INSNS (4), /* SI */
311 COSTS_N_INSNS (4), /* DI */
312 COSTS_N_INSNS (4)}, /* other */
313 0, /* cost of multiply per each bit set */
314 {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
315 COSTS_N_INSNS (17), /* HI */
316 COSTS_N_INSNS (17), /* SI */
317 COSTS_N_INSNS (17), /* DI */
318 COSTS_N_INSNS (17)}, /* other */
319 COSTS_N_INSNS (1), /* cost of movsx */
320 COSTS_N_INSNS (1), /* cost of movzx */
321 8, /* "large" insn */
322 6, /* MOVE_RATIO */
323 2, /* cost for loading QImode using movzbl */
324 {4, 4, 4}, /* cost of loading integer registers
325 in QImode, HImode and SImode.
326 Relative to reg-reg move (2). */
327 {2, 2, 2}, /* cost of storing integer registers */
328 2, /* cost of reg,reg fld/fst */
329 {2, 2, 6}, /* cost of loading fp registers
330 in SFmode, DFmode and XFmode */
331 {4, 4, 6}, /* cost of storing fp registers
332 in SFmode, DFmode and XFmode */
333 2, /* cost of moving MMX register */
334 {2, 2}, /* cost of loading MMX registers
335 in SImode and DImode */
336 {2, 2}, /* cost of storing MMX registers
337 in SImode and DImode */
338 2, /* cost of moving SSE register */
339 {2, 2, 8}, /* cost of loading SSE registers
340 in SImode, DImode and TImode */
341 {2, 2, 8}, /* cost of storing SSE registers
342 in SImode, DImode and TImode */
343 3, /* MMX or SSE register to integer */
344 32, /* size of prefetch block */
345 6, /* number of parallel prefetches */
346 2, /* Branch cost */
347 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
348 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
349 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
350 COSTS_N_INSNS (2), /* cost of FABS instruction. */
351 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
352 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
353 /* PentiumPro has optimized rep instructions for blocks aligned to 8 bytes (we ensure
354 the alignment). For small blocks an inline loop is still a noticeable win; for bigger
355 blocks either rep movsl or rep movsb is the way to go. Rep movsb apparently has a
356 more expensive startup time in the CPU, but after 4K the difference is down in the noise.
357 */
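/* As an illustrative decoding of the first strategy pair below (assuming each
   {max, alg} entry means "use ALG for block sizes up to MAX", with -1 meaning
   any size, and that the first pair describes memcpy): an unknown size uses
   rep_prefix_4_byte; known sizes use an inline loop up to 128 bytes, an
   unrolled loop up to 1024, rep movsl up to 8192, and rep movsb beyond.  */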
358 {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
359 {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
360 DUMMY_STRINGOP_ALGS},
361 {{rep_prefix_4_byte, {{1024, unrolled_loop},
362 {8192, rep_prefix_4_byte}, {-1, libcall}}},
363 DUMMY_STRINGOP_ALGS}
364 };
365
366 static const
367 struct processor_costs geode_cost = {
368 COSTS_N_INSNS (1), /* cost of an add instruction */
369 COSTS_N_INSNS (1), /* cost of a lea instruction */
370 COSTS_N_INSNS (2), /* variable shift costs */
371 COSTS_N_INSNS (1), /* constant shift costs */
372 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
373 COSTS_N_INSNS (4), /* HI */
374 COSTS_N_INSNS (7), /* SI */
375 COSTS_N_INSNS (7), /* DI */
376 COSTS_N_INSNS (7)}, /* other */
377 0, /* cost of multiply per each bit set */
378 {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
379 COSTS_N_INSNS (23), /* HI */
380 COSTS_N_INSNS (39), /* SI */
381 COSTS_N_INSNS (39), /* DI */
382 COSTS_N_INSNS (39)}, /* other */
383 COSTS_N_INSNS (1), /* cost of movsx */
384 COSTS_N_INSNS (1), /* cost of movzx */
385 8, /* "large" insn */
386 4, /* MOVE_RATIO */
387 1, /* cost for loading QImode using movzbl */
388 {1, 1, 1}, /* cost of loading integer registers
389 in QImode, HImode and SImode.
390 Relative to reg-reg move (2). */
391 {1, 1, 1}, /* cost of storing integer registers */
392 1, /* cost of reg,reg fld/fst */
393 {1, 1, 1}, /* cost of loading fp registers
394 in SFmode, DFmode and XFmode */
395 {4, 6, 6}, /* cost of storing fp registers
396 in SFmode, DFmode and XFmode */
397
398 1, /* cost of moving MMX register */
399 {1, 1}, /* cost of loading MMX registers
400 in SImode and DImode */
401 {1, 1}, /* cost of storing MMX registers
402 in SImode and DImode */
403 1, /* cost of moving SSE register */
404 {1, 1, 1}, /* cost of loading SSE registers
405 in SImode, DImode and TImode */
406 {1, 1, 1}, /* cost of storing SSE registers
407 in SImode, DImode and TImode */
408 1, /* MMX or SSE register to integer */
409 32, /* size of prefetch block */
410 1, /* number of parallel prefetches */
411 1, /* Branch cost */
412 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
413 COSTS_N_INSNS (11), /* cost of FMUL instruction. */
414 COSTS_N_INSNS (47), /* cost of FDIV instruction. */
415 COSTS_N_INSNS (1), /* cost of FABS instruction. */
416 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
417 COSTS_N_INSNS (54), /* cost of FSQRT instruction. */
418 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
419 DUMMY_STRINGOP_ALGS},
420 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
421 DUMMY_STRINGOP_ALGS}
422 };
423
424 static const
425 struct processor_costs k6_cost = {
426 COSTS_N_INSNS (1), /* cost of an add instruction */
427 COSTS_N_INSNS (2), /* cost of a lea instruction */
428 COSTS_N_INSNS (1), /* variable shift costs */
429 COSTS_N_INSNS (1), /* constant shift costs */
430 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
431 COSTS_N_INSNS (3), /* HI */
432 COSTS_N_INSNS (3), /* SI */
433 COSTS_N_INSNS (3), /* DI */
434 COSTS_N_INSNS (3)}, /* other */
435 0, /* cost of multiply per each bit set */
436 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
437 COSTS_N_INSNS (18), /* HI */
438 COSTS_N_INSNS (18), /* SI */
439 COSTS_N_INSNS (18), /* DI */
440 COSTS_N_INSNS (18)}, /* other */
441 COSTS_N_INSNS (2), /* cost of movsx */
442 COSTS_N_INSNS (2), /* cost of movzx */
443 8, /* "large" insn */
444 4, /* MOVE_RATIO */
445 3, /* cost for loading QImode using movzbl */
446 {4, 5, 4}, /* cost of loading integer registers
447 in QImode, HImode and SImode.
448 Relative to reg-reg move (2). */
449 {2, 3, 2}, /* cost of storing integer registers */
450 4, /* cost of reg,reg fld/fst */
451 {6, 6, 6}, /* cost of loading fp registers
452 in SFmode, DFmode and XFmode */
453 {4, 4, 4}, /* cost of storing fp registers
454 in SFmode, DFmode and XFmode */
455 2, /* cost of moving MMX register */
456 {2, 2}, /* cost of loading MMX registers
457 in SImode and DImode */
458 {2, 2}, /* cost of storing MMX registers
459 in SImode and DImode */
460 2, /* cost of moving SSE register */
461 {2, 2, 8}, /* cost of loading SSE registers
462 in SImode, DImode and TImode */
463 {2, 2, 8}, /* cost of storing SSE registers
464 in SImode, DImode and TImode */
465 6, /* MMX or SSE register to integer */
466 32, /* size of prefetch block */
467 1, /* number of parallel prefetches */
468 1, /* Branch cost */
469 COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
470 COSTS_N_INSNS (2), /* cost of FMUL instruction. */
471 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
472 COSTS_N_INSNS (2), /* cost of FABS instruction. */
473 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
474 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
475 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
476 DUMMY_STRINGOP_ALGS},
477 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
478 DUMMY_STRINGOP_ALGS}
479 };
480
481 static const
482 struct processor_costs athlon_cost = {
483 COSTS_N_INSNS (1), /* cost of an add instruction */
484 COSTS_N_INSNS (2), /* cost of a lea instruction */
485 COSTS_N_INSNS (1), /* variable shift costs */
486 COSTS_N_INSNS (1), /* constant shift costs */
487 {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
488 COSTS_N_INSNS (5), /* HI */
489 COSTS_N_INSNS (5), /* SI */
490 COSTS_N_INSNS (5), /* DI */
491 COSTS_N_INSNS (5)}, /* other */
492 0, /* cost of multiply per each bit set */
493 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
494 COSTS_N_INSNS (26), /* HI */
495 COSTS_N_INSNS (42), /* SI */
496 COSTS_N_INSNS (74), /* DI */
497 COSTS_N_INSNS (74)}, /* other */
498 COSTS_N_INSNS (1), /* cost of movsx */
499 COSTS_N_INSNS (1), /* cost of movzx */
500 8, /* "large" insn */
501 9, /* MOVE_RATIO */
502 4, /* cost for loading QImode using movzbl */
503 {3, 4, 3}, /* cost of loading integer registers
504 in QImode, HImode and SImode.
505 Relative to reg-reg move (2). */
506 {3, 4, 3}, /* cost of storing integer registers */
507 4, /* cost of reg,reg fld/fst */
508 {4, 4, 12}, /* cost of loading fp registers
509 in SFmode, DFmode and XFmode */
510 {6, 6, 8}, /* cost of storing fp registers
511 in SFmode, DFmode and XFmode */
512 2, /* cost of moving MMX register */
513 {4, 4}, /* cost of loading MMX registers
514 in SImode and DImode */
515 {4, 4}, /* cost of storing MMX registers
516 in SImode and DImode */
517 2, /* cost of moving SSE register */
518 {4, 4, 6}, /* cost of loading SSE registers
519 in SImode, DImode and TImode */
520 {4, 4, 5}, /* cost of storing SSE registers
521 in SImode, DImode and TImode */
522 5, /* MMX or SSE register to integer */
523 64, /* size of prefetch block */
524 6, /* number of parallel prefetches */
525 5, /* Branch cost */
526 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
527 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
528 COSTS_N_INSNS (24), /* cost of FDIV instruction. */
529 COSTS_N_INSNS (2), /* cost of FABS instruction. */
530 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
531 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
532 /* For some reason, Athlon deals better with the REP prefix (relative to loops)
533 than K8 does. Alignment becomes important after 8 bytes for memcpy and
534 128 bytes for memset. */
535 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
536 DUMMY_STRINGOP_ALGS},
537 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
538 DUMMY_STRINGOP_ALGS}
539 };
540
541 static const
542 struct processor_costs k8_cost = {
543 COSTS_N_INSNS (1), /* cost of an add instruction */
544 COSTS_N_INSNS (2), /* cost of a lea instruction */
545 COSTS_N_INSNS (1), /* variable shift costs */
546 COSTS_N_INSNS (1), /* constant shift costs */
547 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
548 COSTS_N_INSNS (4), /* HI */
549 COSTS_N_INSNS (3), /* SI */
550 COSTS_N_INSNS (4), /* DI */
551 COSTS_N_INSNS (5)}, /* other */
552 0, /* cost of multiply per each bit set */
553 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
554 COSTS_N_INSNS (26), /* HI */
555 COSTS_N_INSNS (42), /* SI */
556 COSTS_N_INSNS (74), /* DI */
557 COSTS_N_INSNS (74)}, /* other */
558 COSTS_N_INSNS (1), /* cost of movsx */
559 COSTS_N_INSNS (1), /* cost of movzx */
560 8, /* "large" insn */
561 9, /* MOVE_RATIO */
562 4, /* cost for loading QImode using movzbl */
563 {3, 4, 3}, /* cost of loading integer registers
564 in QImode, HImode and SImode.
565 Relative to reg-reg move (2). */
566 {3, 4, 3}, /* cost of storing integer registers */
567 4, /* cost of reg,reg fld/fst */
568 {4, 4, 12}, /* cost of loading fp registers
569 in SFmode, DFmode and XFmode */
570 {6, 6, 8}, /* cost of storing fp registers
571 in SFmode, DFmode and XFmode */
572 2, /* cost of moving MMX register */
573 {3, 3}, /* cost of loading MMX registers
574 in SImode and DImode */
575 {4, 4}, /* cost of storing MMX registers
576 in SImode and DImode */
577 2, /* cost of moving SSE register */
578 {4, 3, 6}, /* cost of loading SSE registers
579 in SImode, DImode and TImode */
580 {4, 4, 5}, /* cost of storing SSE registers
581 in SImode, DImode and TImode */
582 5, /* MMX or SSE register to integer */
583 64, /* size of prefetch block */
584 /* New AMD processors never drop prefetches; if they cannot be performed
585 immediately, they are queued. We set the number of simultaneous prefetches
586 to a large constant to reflect this (it is probably not a good idea to leave
587 the number of prefetches completely unlimited, as their execution also takes
588 some time). */
589 100, /* number of parallel prefetches */
590 5, /* Branch cost */
591 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
592 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
593 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
594 COSTS_N_INSNS (2), /* cost of FABS instruction. */
595 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
596 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
597 /* K8 has an optimized REP instruction for medium-sized blocks, but for very small
598 blocks it is better to use a loop. For large blocks, a libcall can do
599 non-temporal accesses and beat the inline code considerably. */
600 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
601 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
602 {{libcall, {{8, loop}, {24, unrolled_loop},
603 {2048, rep_prefix_4_byte}, {-1, libcall}}},
604 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}
605 };
606
607 static const
608 struct processor_costs pentium4_cost = {
609 COSTS_N_INSNS (1), /* cost of an add instruction */
610 COSTS_N_INSNS (3), /* cost of a lea instruction */
611 COSTS_N_INSNS (4), /* variable shift costs */
612 COSTS_N_INSNS (4), /* constant shift costs */
613 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
614 COSTS_N_INSNS (15), /* HI */
615 COSTS_N_INSNS (15), /* SI */
616 COSTS_N_INSNS (15), /* DI */
617 COSTS_N_INSNS (15)}, /* other */
618 0, /* cost of multiply per each bit set */
619 {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
620 COSTS_N_INSNS (56), /* HI */
621 COSTS_N_INSNS (56), /* SI */
622 COSTS_N_INSNS (56), /* DI */
623 COSTS_N_INSNS (56)}, /* other */
624 COSTS_N_INSNS (1), /* cost of movsx */
625 COSTS_N_INSNS (1), /* cost of movzx */
626 16, /* "large" insn */
627 6, /* MOVE_RATIO */
628 2, /* cost for loading QImode using movzbl */
629 {4, 5, 4}, /* cost of loading integer registers
630 in QImode, HImode and SImode.
631 Relative to reg-reg move (2). */
632 {2, 3, 2}, /* cost of storing integer registers */
633 2, /* cost of reg,reg fld/fst */
634 {2, 2, 6}, /* cost of loading fp registers
635 in SFmode, DFmode and XFmode */
636 {4, 4, 6}, /* cost of storing fp registers
637 in SFmode, DFmode and XFmode */
638 2, /* cost of moving MMX register */
639 {2, 2}, /* cost of loading MMX registers
640 in SImode and DImode */
641 {2, 2}, /* cost of storing MMX registers
642 in SImode and DImode */
643 12, /* cost of moving SSE register */
644 {12, 12, 12}, /* cost of loading SSE registers
645 in SImode, DImode and TImode */
646 {2, 2, 8}, /* cost of storing SSE registers
647 in SImode, DImode and TImode */
648 10, /* MMX or SSE register to integer */
649 64, /* size of prefetch block */
650 6, /* number of parallel prefetches */
651 2, /* Branch cost */
652 COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
653 COSTS_N_INSNS (7), /* cost of FMUL instruction. */
654 COSTS_N_INSNS (43), /* cost of FDIV instruction. */
655 COSTS_N_INSNS (2), /* cost of FABS instruction. */
656 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
657 COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
658 {{libcall, {{12, loop_1_byte}, {64, loop}, {-1, rep_prefix_4_byte}}},
659 DUMMY_STRINGOP_ALGS},
660 {{libcall, {{6, loop_1_byte}, {64, loop}, {20480, rep_prefix_4_byte},
661 {-1, libcall}}},
662 DUMMY_STRINGOP_ALGS},
663 };
664
665 static const
666 struct processor_costs nocona_cost = {
667 COSTS_N_INSNS (1), /* cost of an add instruction */
668 COSTS_N_INSNS (1), /* cost of a lea instruction */
669 COSTS_N_INSNS (1), /* variable shift costs */
670 COSTS_N_INSNS (1), /* constant shift costs */
671 {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
672 COSTS_N_INSNS (10), /* HI */
673 COSTS_N_INSNS (10), /* SI */
674 COSTS_N_INSNS (10), /* DI */
675 COSTS_N_INSNS (10)}, /* other */
676 0, /* cost of multiply per each bit set */
677 {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
678 COSTS_N_INSNS (66), /* HI */
679 COSTS_N_INSNS (66), /* SI */
680 COSTS_N_INSNS (66), /* DI */
681 COSTS_N_INSNS (66)}, /* other */
682 COSTS_N_INSNS (1), /* cost of movsx */
683 COSTS_N_INSNS (1), /* cost of movzx */
684 16, /* "large" insn */
685 17, /* MOVE_RATIO */
686 4, /* cost for loading QImode using movzbl */
687 {4, 4, 4}, /* cost of loading integer registers
688 in QImode, HImode and SImode.
689 Relative to reg-reg move (2). */
690 {4, 4, 4}, /* cost of storing integer registers */
691 3, /* cost of reg,reg fld/fst */
692 {12, 12, 12}, /* cost of loading fp registers
693 in SFmode, DFmode and XFmode */
694 {4, 4, 4}, /* cost of storing fp registers
695 in SFmode, DFmode and XFmode */
696 6, /* cost of moving MMX register */
697 {12, 12}, /* cost of loading MMX registers
698 in SImode and DImode */
699 {12, 12}, /* cost of storing MMX registers
700 in SImode and DImode */
701 6, /* cost of moving SSE register */
702 {12, 12, 12}, /* cost of loading SSE registers
703 in SImode, DImode and TImode */
704 {12, 12, 12}, /* cost of storing SSE registers
705 in SImode, DImode and TImode */
706 8, /* MMX or SSE register to integer */
707 128, /* size of prefetch block */
708 8, /* number of parallel prefetches */
709 1, /* Branch cost */
710 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
711 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
712 COSTS_N_INSNS (40), /* cost of FDIV instruction. */
713 COSTS_N_INSNS (3), /* cost of FABS instruction. */
714 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
715 COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
716 {{libcall, {{12, loop_1_byte}, {64, loop}, {-1, rep_prefix_4_byte}}},
717 {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
718 {100000, unrolled_loop}, {-1, libcall}}}},
719 {{libcall, {{6, loop_1_byte}, {64, loop}, {20480, rep_prefix_4_byte},
720 {-1, libcall}}},
721 {libcall, {{24, loop}, {64, unrolled_loop},
722 {8192, rep_prefix_8_byte}, {-1, libcall}}}}
723 };
724
725 static const
726 struct processor_costs core2_cost = {
727 COSTS_N_INSNS (1), /* cost of an add instruction */
728 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
729 COSTS_N_INSNS (1), /* variable shift costs */
730 COSTS_N_INSNS (1), /* constant shift costs */
731 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
732 COSTS_N_INSNS (3), /* HI */
733 COSTS_N_INSNS (3), /* SI */
734 COSTS_N_INSNS (3), /* DI */
735 COSTS_N_INSNS (3)}, /* other */
736 0, /* cost of multiply per each bit set */
737 {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
738 COSTS_N_INSNS (22), /* HI */
739 COSTS_N_INSNS (22), /* SI */
740 COSTS_N_INSNS (22), /* DI */
741 COSTS_N_INSNS (22)}, /* other */
742 COSTS_N_INSNS (1), /* cost of movsx */
743 COSTS_N_INSNS (1), /* cost of movzx */
744 8, /* "large" insn */
745 16, /* MOVE_RATIO */
746 2, /* cost for loading QImode using movzbl */
747 {6, 6, 6}, /* cost of loading integer registers
748 in QImode, HImode and SImode.
749 Relative to reg-reg move (2). */
750 {4, 4, 4}, /* cost of storing integer registers */
751 2, /* cost of reg,reg fld/fst */
752 {6, 6, 6}, /* cost of loading fp registers
753 in SFmode, DFmode and XFmode */
754 {4, 4, 4}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
755 2, /* cost of moving MMX register */
756 {6, 6}, /* cost of loading MMX registers
757 in SImode and DImode */
758 {4, 4}, /* cost of storing MMX registers
759 in SImode and DImode */
760 2, /* cost of moving SSE register */
761 {6, 6, 6}, /* cost of loading SSE registers
762 in SImode, DImode and TImode */
763 {4, 4, 4}, /* cost of storing SSE registers
764 in SImode, DImode and TImode */
765 2, /* MMX or SSE register to integer */
766 128, /* size of prefetch block */
767 8, /* number of parallel prefetches */
768 3, /* Branch cost */
769 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
770 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
771 COSTS_N_INSNS (32), /* cost of FDIV instruction. */
772 COSTS_N_INSNS (1), /* cost of FABS instruction. */
773 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
774 COSTS_N_INSNS (58), /* cost of FSQRT instruction. */
775 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
776 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
777 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
778 {{libcall, {{8, loop}, {15, unrolled_loop},
779 {2048, rep_prefix_4_byte}, {-1, libcall}}},
780 {libcall, {{24, loop}, {32, unrolled_loop},
781 {8192, rep_prefix_8_byte}, {-1, libcall}}}}
782 };
783
784 /* Generic64 should produce code tuned for Nocona and K8. */
785 static const
786 struct processor_costs generic64_cost = {
787 COSTS_N_INSNS (1), /* cost of an add instruction */
788 /* On all chips taken into consideration lea takes 2 cycles or more. With
789 this cost, however, our current implementation of synth_mult results in
790 the use of unnecessary temporary registers, causing regressions on several
791 SPECfp benchmarks. */
792 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
793 COSTS_N_INSNS (1), /* variable shift costs */
794 COSTS_N_INSNS (1), /* constant shift costs */
795 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
796 COSTS_N_INSNS (4), /* HI */
797 COSTS_N_INSNS (3), /* SI */
798 COSTS_N_INSNS (4), /* DI */
799 COSTS_N_INSNS (2)}, /* other */
800 0, /* cost of multiply per each bit set */
801 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
802 COSTS_N_INSNS (26), /* HI */
803 COSTS_N_INSNS (42), /* SI */
804 COSTS_N_INSNS (74), /* DI */
805 COSTS_N_INSNS (74)}, /* other */
806 COSTS_N_INSNS (1), /* cost of movsx */
807 COSTS_N_INSNS (1), /* cost of movzx */
808 8, /* "large" insn */
809 17, /* MOVE_RATIO */
810 4, /* cost for loading QImode using movzbl */
811 {4, 4, 4}, /* cost of loading integer registers
812 in QImode, HImode and SImode.
813 Relative to reg-reg move (2). */
814 {4, 4, 4}, /* cost of storing integer registers */
815 4, /* cost of reg,reg fld/fst */
816 {12, 12, 12}, /* cost of loading fp registers
817 in SFmode, DFmode and XFmode */
818 {6, 6, 8}, /* cost of storing fp registers
819 in SFmode, DFmode and XFmode */
820 2, /* cost of moving MMX register */
821 {8, 8}, /* cost of loading MMX registers
822 in SImode and DImode */
823 {8, 8}, /* cost of storing MMX registers
824 in SImode and DImode */
825 2, /* cost of moving SSE register */
826 {8, 8, 8}, /* cost of loading SSE registers
827 in SImode, DImode and TImode */
828 {8, 8, 8}, /* cost of storing SSE registers
829 in SImode, DImode and TImode */
830 5, /* MMX or SSE register to integer */
831 64, /* size of prefetch block */
832 6, /* number of parallel prefetches */
833 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this value
834 is increased to the perhaps more appropriate value of 5. */
835 3, /* Branch cost */
836 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
837 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
838 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
839 COSTS_N_INSNS (8), /* cost of FABS instruction. */
840 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
841 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
842 {DUMMY_STRINGOP_ALGS,
843 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
844 {DUMMY_STRINGOP_ALGS,
845 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}
846 };
847
848 /* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona and K8. */
849 static const
850 struct processor_costs generic32_cost = {
851 COSTS_N_INSNS (1), /* cost of an add instruction */
852 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
853 COSTS_N_INSNS (1), /* variable shift costs */
854 COSTS_N_INSNS (1), /* constant shift costs */
855 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
856 COSTS_N_INSNS (4), /* HI */
857 COSTS_N_INSNS (3), /* SI */
858 COSTS_N_INSNS (4), /* DI */
859 COSTS_N_INSNS (2)}, /* other */
860 0, /* cost of multiply per each bit set */
861 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
862 COSTS_N_INSNS (26), /* HI */
863 COSTS_N_INSNS (42), /* SI */
864 COSTS_N_INSNS (74), /* DI */
865 COSTS_N_INSNS (74)}, /* other */
866 COSTS_N_INSNS (1), /* cost of movsx */
867 COSTS_N_INSNS (1), /* cost of movzx */
868 8, /* "large" insn */
869 17, /* MOVE_RATIO */
870 4, /* cost for loading QImode using movzbl */
871 {4, 4, 4}, /* cost of loading integer registers
872 in QImode, HImode and SImode.
873 Relative to reg-reg move (2). */
874 {4, 4, 4}, /* cost of storing integer registers */
875 4, /* cost of reg,reg fld/fst */
876 {12, 12, 12}, /* cost of loading fp registers
877 in SFmode, DFmode and XFmode */
878 {6, 6, 8}, /* cost of storing fp registers
879 in SFmode, DFmode and XFmode */
880 2, /* cost of moving MMX register */
881 {8, 8}, /* cost of loading MMX registers
882 in SImode and DImode */
883 {8, 8}, /* cost of storing MMX registers
884 in SImode and DImode */
885 2, /* cost of moving SSE register */
886 {8, 8, 8}, /* cost of loading SSE registers
887 in SImode, DImode and TImode */
888 {8, 8, 8}, /* cost of storing SSE registers
889 in SImode, DImode and TImode */
890 5, /* MMX or SSE register to integer */
891 64, /* size of prefetch block */
892 6, /* number of parallel prefetches */
893 3, /* Branch cost */
894 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
895 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
896 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
897 COSTS_N_INSNS (8), /* cost of FABS instruction. */
898 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
899 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
900 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
901 DUMMY_STRINGOP_ALGS},
902 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
903 DUMMY_STRINGOP_ALGS},
904 };
905
906 const struct processor_costs *ix86_cost = &pentium_cost;
907
908 /* Processor feature/optimization bitmasks. */
909 #define m_386 (1<<PROCESSOR_I386)
910 #define m_486 (1<<PROCESSOR_I486)
911 #define m_PENT (1<<PROCESSOR_PENTIUM)
912 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
913 #define m_GEODE (1<<PROCESSOR_GEODE)
914 #define m_K6_GEODE (m_K6 | m_GEODE)
915 #define m_K6 (1<<PROCESSOR_K6)
916 #define m_ATHLON (1<<PROCESSOR_ATHLON)
917 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
918 #define m_K8 (1<<PROCESSOR_K8)
919 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
920 #define m_NOCONA (1<<PROCESSOR_NOCONA)
921 #define m_CORE2 (1<<PROCESSOR_CORE2)
922 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
923 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
924 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
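/* Each mask above has one bit per processor_type value, so a tuning flag such
   as x86_use_leave below is effectively tested as
   (x86_use_leave & (1 << ix86_tune)), normally through TARGET_* wrapper macros
   defined in i386.h (wrapper names assumed here, not shown in this file).  */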
925
926 /* Generic instruction choice should be a common subset of the supported CPUs
927 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
928 
929 /* Leave does not affect Nocona SPEC2000 results negatively, so enabling it for
930 Generic64 seems like a good code size tradeoff. We can't enable it for 32bit
931 generic because it does not work well with PPro-based chips. */
932 const int x86_use_leave = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_CORE2 | m_GENERIC64;
933 const int x86_push_memory = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
934 const int x86_zero_extend_with_and = m_486 | m_PENT;
935 const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */;
936 const int x86_double_with_add = ~m_386;
937 const int x86_use_bit_test = m_386;
938 const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6 | m_CORE2 | m_GENERIC;
939 const int x86_cmove = m_PPRO | m_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
940 const int x86_3dnow_a = m_ATHLON_K8;
941 const int x86_deep_branch = m_PPRO | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
942 /* Branch hints were put in P4 based on simulation results. But
943 after P4 was made, no performance benefit was observed with
944 branch hints. They also increase the code size. As a result,
945 icc never generates branch hints. */
946 const int x86_branch_hints = 0;
947 const int x86_use_sahf = m_PPRO | m_K6_GEODE | m_PENT4 | m_NOCONA | m_GENERIC32; /*m_GENERIC | m_ATHLON_K8 ? */
948 /* We probably ought to watch for partial register stalls on the Generic32
949 compilation setting as well. However, in the current implementation
950 partial register stalls are not eliminated very well - they can
951 be introduced via subregs synthesized by combine and can happen
952 in caller/callee saving sequences.
953 Because this option pays back little on PPro-based chips and is in conflict
954 with the partial reg. dependencies used by Athlon/P4-based chips, it is better
955 to leave it off for generic32 for now. */
956 const int x86_partial_reg_stall = m_PPRO;
957 const int x86_partial_flag_reg_stall = m_CORE2 | m_GENERIC;
958 const int x86_use_himode_fiop = m_386 | m_486 | m_K6_GEODE;
959 const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT | m_CORE2 | m_GENERIC);
960 const int x86_use_mov0 = m_K6;
961 const int x86_use_cltd = ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC);
962 const int x86_read_modify_write = ~m_PENT;
963 const int x86_read_modify = ~(m_PENT | m_PPRO);
964 const int x86_split_long_moves = m_PPRO;
965 const int x86_promote_QImode = m_K6_GEODE | m_PENT | m_386 | m_486 | m_ATHLON_K8 | m_CORE2 | m_GENERIC; /* m_PENT4 ? */
966 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
967 const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
968 const int x86_qimode_math = ~(0);
969 const int x86_promote_qi_regs = 0;
970 /* On PPro this flag is meant to avoid partial register stalls. Just like
971 x86_partial_reg_stall, this option might be considered for Generic32
972 if our scheme for avoiding partial stalls were more effective. */
973 const int x86_himode_math = ~(m_PPRO);
974 const int x86_promote_hi_regs = m_PPRO;
975 const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
976 const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
977 const int x86_add_esp_4 = m_ATHLON_K8 | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
978 const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6_GEODE | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
979 const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_GEODE);
980 const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
981 const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
982 const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
983 const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
984 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
985 const int x86_shift1 = ~m_486;
986 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
987 /* In the Generic model we have a conflict here between PPro/Pentium4-based chips
988 that treat 128-bit SSE registers as single units and K8-based chips that
989 divide SSE registers into two 64-bit halves.
990 x86_sse_partial_reg_dependency promotes all store destinations to be 128-bit
991 to allow register renaming on 128-bit SSE units, but usually results in one
992 extra microop on 64-bit SSE units. Experimental results show that disabling
993 this option on P4 brings an over 20% SPECfp regression, while enabling it on
994 K8 brings roughly a 2.4% regression that can be partly masked by careful scheduling
995 of moves. */
996 const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
997 /* Set for machines where the type and dependencies are resolved on SSE
998 register parts instead of whole registers, so we may maintain just the
999 lower part of scalar values in the proper format, leaving the upper part
1000 undefined. */
1001 const int x86_sse_split_regs = m_ATHLON_K8;
1002 const int x86_sse_typeless_stores = m_ATHLON_K8;
1003 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
1004 const int x86_use_ffreep = m_ATHLON_K8;
1005 const int x86_use_incdec = ~(m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC);
1006
1007 /* ??? Allowing interunit moves makes it all too easy for the compiler to put
1008 integer data in xmm registers, which results in pretty abysmal code. */
1009 const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;
1010
1011 const int x86_ext_80387_constants = m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
1012 /* Some CPU cores are not able to predict more than 4 branch instructions in
1013 a 16-byte window. */
1014 const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
1015 const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC;
1016 const int x86_use_bt = m_ATHLON_K8;
1017 /* Compare and exchange was added for 80486. */
1018 const int x86_cmpxchg = ~m_386;
1019 /* Compare and exchange 8 bytes was added for the Pentium. */
1020 const int x86_cmpxchg8b = ~(m_386 | m_486);
1021 /* Compare and exchange 16 bytes was added for Nocona. */
1022 const int x86_cmpxchg16b = m_NOCONA;
1023 /* Exchange and add was added for 80486. */
1024 const int x86_xadd = ~m_386;
1025 /* Byteswap was added for 80486. */
1026 const int x86_bswap = ~m_386;
1027 const int x86_pad_returns = m_ATHLON_K8 | m_CORE2 | m_GENERIC;
1028
1029 static enum stringop_alg stringop_alg = no_stringop;
1030
1031 /* In case the average insn count for a single function invocation is
1032 lower than this constant, emit fast (but longer) prologue and
1033 epilogue code. */
1034 #define FAST_PROLOGUE_INSN_COUNT 20
1035
1036 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
1037 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1038 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1039 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1040
1041 /* Array of the smallest class containing reg number REGNO, indexed by
1042 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1043
1044 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1045 {
1046 /* ax, dx, cx, bx */
1047 AREG, DREG, CREG, BREG,
1048 /* si, di, bp, sp */
1049 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1050 /* FP registers */
1051 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1052 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1053 /* arg pointer */
1054 NON_Q_REGS,
1055 /* flags, fpsr, fpcr, dirflag, frame */
1056 NO_REGS, NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1057 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1058 SSE_REGS, SSE_REGS,
1059 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1060 MMX_REGS, MMX_REGS,
1061 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1062 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1063 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1064 SSE_REGS, SSE_REGS,
1065 };
1066
1067 /* The "default" register map used in 32bit mode. */
1068
1069 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1070 {
1071 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1072 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1073 -1, -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, dir, frame */
1074 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1075 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1076 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1077 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1078 };
1079
1080 static int const x86_64_int_parameter_registers[6] =
1081 {
1082 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
1083 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
1084 };
1085
1086 static int const x86_64_int_return_registers[4] =
1087 {
1088 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
1089 };
1090
1091 /* The "default" register map used in 64bit mode. */
1092 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1093 {
1094 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1095 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1096 -1, -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, dir, frame */
1097 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1098 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1099 8,9,10,11,12,13,14,15, /* extended integer registers */
1100 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1101 };
1102
1103 /* Define the register numbers to be used in Dwarf debugging information.
1104 The SVR4 reference port C compiler uses the following register numbers
1105 in its Dwarf output code:
1106 0 for %eax (gcc regno = 0)
1107 1 for %ecx (gcc regno = 2)
1108 2 for %edx (gcc regno = 1)
1109 3 for %ebx (gcc regno = 3)
1110 4 for %esp (gcc regno = 7)
1111 5 for %ebp (gcc regno = 6)
1112 6 for %esi (gcc regno = 4)
1113 7 for %edi (gcc regno = 5)
1114 The following three DWARF register numbers are never generated by
1115 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1116 believes these numbers have these meanings.
1117 8 for %eip (no gcc equivalent)
1118 9 for %eflags (gcc regno = 17)
1119 10 for %trapno (no gcc equivalent)
1120 It is not at all clear how we should number the FP stack registers
1121 for the x86 architecture. If the version of SDB on x86/svr4 were
1122 a bit less brain dead with respect to floating-point then we would
1123 have a precedent to follow with respect to DWARF register numbers
1124 for x86 FP registers, but the SDB on x86/svr4 is so completely
1125 broken with respect to FP registers that it is hardly worth thinking
1126 of it as something to strive for compatibility with.
1127 The version of x86/svr4 SDB I have at the moment does (partially)
1128 seem to believe that DWARF register number 11 is associated with
1129 the x86 register %st(0), but that's about all. Higher DWARF
1130 register numbers don't seem to be associated with anything in
1131 particular, and even for DWARF regno 11, SDB only seems to under-
1132 stand that it should say that a variable lives in %st(0) (when
1133 asked via an `=' command) if we said it was in DWARF regno 11,
1134 but SDB still prints garbage when asked for the value of the
1135 variable in question (via a `/' command).
1136 (Also note that the labels SDB prints for various FP stack regs
1137 when doing an `x' command are all wrong.)
1138 Note that these problems generally don't affect the native SVR4
1139 C compiler because it doesn't allow the use of -O with -g and
1140 because when it is *not* optimizing, it allocates a memory
1141 location for each floating-point variable, and the memory
1142 location is what gets described in the DWARF AT_location
1143 attribute for the variable in question.
1144 Regardless of the severe mental illness of the x86/svr4 SDB, we
1145 do something sensible here and we use the following DWARF
1146 register numbers. Note that these are all stack-top-relative
1147 numbers.
1148 11 for %st(0) (gcc regno = 8)
1149 12 for %st(1) (gcc regno = 9)
1150 13 for %st(2) (gcc regno = 10)
1151 14 for %st(3) (gcc regno = 11)
1152 15 for %st(4) (gcc regno = 12)
1153 16 for %st(5) (gcc regno = 13)
1154 17 for %st(6) (gcc regno = 14)
1155 18 for %st(7) (gcc regno = 15)
1156 */
1157 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1158 {
1159 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1160 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1161 -1, 9, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, dir, frame */
1162 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1163 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1164 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1165 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1166 };
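/* For example, gcc regno 1 (%edx) maps to DWARF register 2, so slot 1 of the
   array above holds 2, and the FP stack registers (gcc regnos 8-15) get the
   stack-top-relative DWARF numbers 11-18 described in the comment.  */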
1167
1168 /* Test and compare insns in i386.md store the information needed to
1169 generate branch and scc insns here. */
1170
1171 rtx ix86_compare_op0 = NULL_RTX;
1172 rtx ix86_compare_op1 = NULL_RTX;
1173 rtx ix86_compare_emitted = NULL_RTX;
1174
1175 /* Size of the register save area. */
1176 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
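/* As a rough sanity check, with the usual 64-bit values REGPARM_MAX == 6,
   SSE_REGPARM_MAX == 8 and UNITS_PER_WORD == 8 this evaluates to
   6*8 + 8*16 = 176 bytes, the psABI register save area size.  */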
1177
1178 /* Define the structure for the machine field in struct function. */
1179
1180 struct stack_local_entry GTY(())
1181 {
1182 unsigned short mode;
1183 unsigned short n;
1184 rtx rtl;
1185 struct stack_local_entry *next;
1186 };
1187
1188 /* Structure describing stack frame layout.
1189 Stack grows downward:
1190
1191 [arguments]
1192 <- ARG_POINTER
1193 saved pc
1194
1195 saved frame pointer if frame_pointer_needed
1196 <- HARD_FRAME_POINTER
1197 [saved regs]
1198
1199 [padding1] \
1200 )
1201 [va_arg registers] (
1202 > to_allocate <- FRAME_POINTER
1203 [frame] (
1204 )
1205 [padding2] /
1206 */
1207 struct ix86_frame
1208 {
1209 int nregs;
1210 int padding1;
1211 int va_arg_size;
1212 HOST_WIDE_INT frame;
1213 int padding2;
1214 int outgoing_arguments_size;
1215 int red_zone_size;
1216
1217 HOST_WIDE_INT to_allocate;
1218 /* The offsets relative to ARG_POINTER. */
1219 HOST_WIDE_INT frame_pointer_offset;
1220 HOST_WIDE_INT hard_frame_pointer_offset;
1221 HOST_WIDE_INT stack_pointer_offset;
1222
1223 /* When save_regs_using_mov is set, emit prologue using
1224 move instead of push instructions. */
1225 bool save_regs_using_mov;
1226 };
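/* In terms of the picture above, to_allocate is roughly the amount the
   prologue subtracts from the stack pointer after the saved registers have
   been pushed: padding1 + the va_arg register area + frame + padding2.  */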
1227
1228 /* Code model option. */
1229 enum cmodel ix86_cmodel;
1230 /* Asm dialect. */
1231 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1232 /* TLS dialects. */
1233 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1234
1235 /* Which unit we are generating floating point math for. */
1236 enum fpmath_unit ix86_fpmath;
1237
1238 /* Which CPU we are scheduling for. */
1239 enum processor_type ix86_tune;
1240 /* Which instruction set architecture to use. */
1241 enum processor_type ix86_arch;
1242
1243 /* True if the SSE prefetch instruction is not a NOOP. */
1244 int x86_prefetch_sse;
1245
1246 /* ix86_regparm_string as a number */
1247 static int ix86_regparm;
1248
1249 /* -mstackrealign option */
1250 extern int ix86_force_align_arg_pointer;
1251 static const char ix86_force_align_arg_pointer_string[] = "force_align_arg_pointer";
1252
1253 /* Preferred alignment for stack boundary in bits. */
1254 unsigned int ix86_preferred_stack_boundary;
1255
1256 /* Values 1-5: see jump.c */
1257 int ix86_branch_cost;
1258
1259 /* Variables which are this size or smaller are put in the data/bss
1260 or ldata/lbss sections. */
1261
1262 int ix86_section_threshold = 65536;
1263
1264 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1265 char internal_label_prefix[16];
1266 int internal_label_prefix_len;
1267 \f
1268 static bool ix86_handle_option (size_t, const char *, int);
1269 static void output_pic_addr_const (FILE *, rtx, int);
1270 static void put_condition_code (enum rtx_code, enum machine_mode,
1271 int, int, FILE *);
1272 static const char *get_some_local_dynamic_name (void);
1273 static int get_some_local_dynamic_name_1 (rtx *, void *);
1274 static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
1275 static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
1276 rtx *);
1277 static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
1278 static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
1279 enum machine_mode);
1280 static rtx get_thread_pointer (int);
1281 static rtx legitimize_tls_address (rtx, enum tls_model, int);
1282 static void get_pc_thunk_name (char [32], unsigned int);
1283 static rtx gen_push (rtx);
1284 static int ix86_flags_dependent (rtx, rtx, enum attr_type);
1285 static int ix86_agi_dependent (rtx, rtx, enum attr_type);
1286 static struct machine_function * ix86_init_machine_status (void);
1287 static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
1288 static int ix86_nsaved_regs (void);
1289 static void ix86_emit_save_regs (void);
1290 static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
1291 static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
1292 static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
1293 static HOST_WIDE_INT ix86_GOT_alias_set (void);
1294 static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
1295 static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
1296 static int ix86_issue_rate (void);
1297 static int ix86_adjust_cost (rtx, rtx, rtx, int);
1298 static int ia32_multipass_dfa_lookahead (void);
1299 static void ix86_init_mmx_sse_builtins (void);
1300 static rtx x86_this_parameter (tree);
1301 static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
1302 HOST_WIDE_INT, tree);
1303 static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
1304 static void x86_file_start (void);
1305 static void ix86_reorg (void);
1306 static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
1307 static tree ix86_build_builtin_va_list (void);
1308 static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
1309 tree, int *, int);
1310 static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
1311 static bool ix86_scalar_mode_supported_p (enum machine_mode);
1312 static bool ix86_vector_mode_supported_p (enum machine_mode);
1313
1314 static int ix86_address_cost (rtx);
1315 static bool ix86_cannot_force_const_mem (rtx);
1316 static rtx ix86_delegitimize_address (rtx);
1317
1318 static void i386_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
1319
1320 struct builtin_description;
1321 static rtx ix86_expand_sse_comi (const struct builtin_description *,
1322 tree, rtx);
1323 static rtx ix86_expand_sse_compare (const struct builtin_description *,
1324 tree, rtx);
1325 static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
1326 static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
1327 static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
1328 static rtx ix86_expand_store_builtin (enum insn_code, tree);
1329 static rtx safe_vector_operand (rtx, enum machine_mode);
1330 static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
1331 static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
1332 static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
1333 static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
1334 static int ix86_fp_comparison_cost (enum rtx_code code);
1335 static unsigned int ix86_select_alt_pic_regnum (void);
1336 static int ix86_save_reg (unsigned int, int);
1337 static void ix86_compute_frame_layout (struct ix86_frame *);
1338 static int ix86_comp_type_attributes (tree, tree);
1339 static int ix86_function_regparm (tree, tree);
1340 const struct attribute_spec ix86_attribute_table[];
1341 static bool ix86_function_ok_for_sibcall (tree, tree);
1342 static tree ix86_handle_cconv_attribute (tree *, tree, tree, int, bool *);
1343 static int ix86_value_regno (enum machine_mode, tree, tree);
1344 static bool contains_128bit_aligned_vector_p (tree);
1345 static rtx ix86_struct_value_rtx (tree, int);
1346 static bool ix86_ms_bitfield_layout_p (tree);
1347 static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
1348 static int extended_reg_mentioned_1 (rtx *, void *);
1349 static bool ix86_rtx_costs (rtx, int, int, int *);
1350 static int min_insn_size (rtx);
1351 static tree ix86_md_asm_clobbers (tree outputs, tree inputs, tree clobbers);
1352 static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
1353 static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
1354 tree, bool);
1355 static void ix86_init_builtins (void);
1356 static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
1357 static tree ix86_builtin_vectorized_function (enum built_in_function, tree);
1358 static const char *ix86_mangle_fundamental_type (tree);
1359 static tree ix86_stack_protect_fail (void);
1360 static rtx ix86_internal_arg_pointer (void);
1361 static void ix86_dwarf_handle_frame_unspec (const char *, rtx, int);
1362
1363 /* This function is only used on Solaris. */
1364 static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
1365 ATTRIBUTE_UNUSED;
1366
1367 /* Register class used for passing a given 64-bit part of the argument.
1368 These represent classes as documented by the psABI, with the exception
1369 of the SSESF and SSEDF classes, which are basically the SSE class; GCC
1370 just uses an SFmode or DFmode move instead of a DImode move to avoid
1371 reformatting penalties.
1372 Similarly, we play games with INTEGERSI_CLASS to use cheaper SImode moves
1373 whenever possible (the upper half does contain padding).
1374 */
1375 enum x86_64_reg_class
1376 {
1377 X86_64_NO_CLASS,
1378 X86_64_INTEGER_CLASS,
1379 X86_64_INTEGERSI_CLASS,
1380 X86_64_SSE_CLASS,
1381 X86_64_SSESF_CLASS,
1382 X86_64_SSEDF_CLASS,
1383 X86_64_SSEUP_CLASS,
1384 X86_64_X87_CLASS,
1385 X86_64_X87UP_CLASS,
1386 X86_64_COMPLEX_X87_CLASS,
1387 X86_64_MEMORY_CLASS
1388 };
1389 static const char * const x86_64_reg_class_name[] = {
1390 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
1391 "sseup", "x87", "x87up", "cplx87", "no"
1392 };
1393
1394 #define MAX_CLASSES 4
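/* Editorial note (not part of the original source): a hedged sketch of
   how these classes are used, following the x86-64 psABI rules that the
   comment above refers to.  Each eightbyte of an argument is assigned
   one class; for a hypothetical

       struct s { double d; int i; };

   the first eightbyte classifies as SSEDF (the double, moved in DFmode
   into an SSE register) and the second as INTEGERSI (the int, moved in
   SImode into a general register, its upper half being padding).
   MAX_CLASSES bounds how many eightbytes the classifier will hand back
   for a single argument.  */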
1395
1396 /* Table of constants used by fldpi, fldln2, etc. */
1397 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1398 static bool ext_80387_constants_init = 0;
1399 static void init_ext_80387_constants (void);
1400 static bool ix86_in_large_data_p (tree) ATTRIBUTE_UNUSED;
1401 static void ix86_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
1402 static void x86_64_elf_unique_section (tree decl, int reloc) ATTRIBUTE_UNUSED;
1403 static section *x86_64_elf_select_section (tree decl, int reloc,
1404 unsigned HOST_WIDE_INT align)
1405 ATTRIBUTE_UNUSED;
1406 \f
1407 /* Initialize the GCC target structure. */
1408 #undef TARGET_ATTRIBUTE_TABLE
1409 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
1410 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1411 # undef TARGET_MERGE_DECL_ATTRIBUTES
1412 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
1413 #endif
1414
1415 #undef TARGET_COMP_TYPE_ATTRIBUTES
1416 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
1417
1418 #undef TARGET_INIT_BUILTINS
1419 #define TARGET_INIT_BUILTINS ix86_init_builtins
1420 #undef TARGET_EXPAND_BUILTIN
1421 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
1422 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1423 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION ix86_builtin_vectorized_function
1424
1425 #undef TARGET_ASM_FUNCTION_EPILOGUE
1426 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
1427
1428 #undef TARGET_ENCODE_SECTION_INFO
1429 #ifndef SUBTARGET_ENCODE_SECTION_INFO
1430 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
1431 #else
1432 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
1433 #endif
1434
1435 #undef TARGET_ASM_OPEN_PAREN
1436 #define TARGET_ASM_OPEN_PAREN ""
1437 #undef TARGET_ASM_CLOSE_PAREN
1438 #define TARGET_ASM_CLOSE_PAREN ""
1439
1440 #undef TARGET_ASM_ALIGNED_HI_OP
1441 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
1442 #undef TARGET_ASM_ALIGNED_SI_OP
1443 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
1444 #ifdef ASM_QUAD
1445 #undef TARGET_ASM_ALIGNED_DI_OP
1446 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
1447 #endif
1448
1449 #undef TARGET_ASM_UNALIGNED_HI_OP
1450 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
1451 #undef TARGET_ASM_UNALIGNED_SI_OP
1452 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
1453 #undef TARGET_ASM_UNALIGNED_DI_OP
1454 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
1455
1456 #undef TARGET_SCHED_ADJUST_COST
1457 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
1458 #undef TARGET_SCHED_ISSUE_RATE
1459 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
1460 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1461 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1462 ia32_multipass_dfa_lookahead
1463
1464 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1465 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
1466
1467 #ifdef HAVE_AS_TLS
1468 #undef TARGET_HAVE_TLS
1469 #define TARGET_HAVE_TLS true
1470 #endif
1471 #undef TARGET_CANNOT_FORCE_CONST_MEM
1472 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1473 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1474 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_rtx_true
1475
1476 #undef TARGET_DELEGITIMIZE_ADDRESS
1477 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1478
1479 #undef TARGET_MS_BITFIELD_LAYOUT_P
1480 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1481
1482 #if TARGET_MACHO
1483 #undef TARGET_BINDS_LOCAL_P
1484 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1485 #endif
1486
1487 #undef TARGET_ASM_OUTPUT_MI_THUNK
1488 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1489 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1490 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1491
1492 #undef TARGET_ASM_FILE_START
1493 #define TARGET_ASM_FILE_START x86_file_start
1494
1495 #undef TARGET_DEFAULT_TARGET_FLAGS
1496 #define TARGET_DEFAULT_TARGET_FLAGS \
1497 (TARGET_DEFAULT \
1498 | TARGET_64BIT_DEFAULT \
1499 | TARGET_SUBTARGET_DEFAULT \
1500 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
1501
1502 #undef TARGET_HANDLE_OPTION
1503 #define TARGET_HANDLE_OPTION ix86_handle_option
1504
1505 #undef TARGET_RTX_COSTS
1506 #define TARGET_RTX_COSTS ix86_rtx_costs
1507 #undef TARGET_ADDRESS_COST
1508 #define TARGET_ADDRESS_COST ix86_address_cost
1509
1510 #undef TARGET_FIXED_CONDITION_CODE_REGS
1511 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1512 #undef TARGET_CC_MODES_COMPATIBLE
1513 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1514
1515 #undef TARGET_MACHINE_DEPENDENT_REORG
1516 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1517
1518 #undef TARGET_BUILD_BUILTIN_VA_LIST
1519 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1520
1521 #undef TARGET_MD_ASM_CLOBBERS
1522 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1523
1524 #undef TARGET_PROMOTE_PROTOTYPES
1525 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1526 #undef TARGET_STRUCT_VALUE_RTX
1527 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1528 #undef TARGET_SETUP_INCOMING_VARARGS
1529 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1530 #undef TARGET_MUST_PASS_IN_STACK
1531 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1532 #undef TARGET_PASS_BY_REFERENCE
1533 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1534 #undef TARGET_INTERNAL_ARG_POINTER
1535 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
1536 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
1537 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
1538
1539 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1540 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1541
1542 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1543 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
1544
1545 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1546 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1547
1548 #ifdef HAVE_AS_TLS
1549 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1550 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
1551 #endif
1552
1553 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1554 #undef TARGET_INSERT_ATTRIBUTES
1555 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1556 #endif
1557
1558 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
1559 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ix86_mangle_fundamental_type
1560
1561 #undef TARGET_STACK_PROTECT_FAIL
1562 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
1563
1564 #undef TARGET_FUNCTION_VALUE
1565 #define TARGET_FUNCTION_VALUE ix86_function_value
1566
1567 struct gcc_target targetm = TARGET_INITIALIZER;
1568
1569 \f
1570 /* The svr4 ABI for the i386 says that records and unions are returned
1571 in memory. */
1572 #ifndef DEFAULT_PCC_STRUCT_RETURN
1573 #define DEFAULT_PCC_STRUCT_RETURN 1
1574 #endif
1575
1576 /* Implement TARGET_HANDLE_OPTION. */
1577
1578 static bool
1579 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1580 {
1581 switch (code)
1582 {
1583 case OPT_m3dnow:
1584 if (!value)
1585 {
1586 target_flags &= ~MASK_3DNOW_A;
1587 target_flags_explicit |= MASK_3DNOW_A;
1588 }
1589 return true;
1590
1591 case OPT_mmmx:
1592 if (!value)
1593 {
1594 target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
1595 target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
1596 }
1597 return true;
1598
1599 case OPT_msse:
1600 if (!value)
1601 {
1602 target_flags &= ~(MASK_SSE2 | MASK_SSE3);
1603 target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
1604 }
1605 return true;
1606
1607 case OPT_msse2:
1608 if (!value)
1609 {
1610 target_flags &= ~MASK_SSE3;
1611 target_flags_explicit |= MASK_SSE3;
1612 }
1613 return true;
1614
1615 default:
1616 return true;
1617 }
1618 }
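/* Editorial note: the switch above only cascades the "negative" forms of
   the ISA options; the positive forms simply return true and rely on the
   implications applied later in override_options.  As an illustration
   (assuming left-to-right option processing), a command line such as

       gcc -msse2 -mno-sse file.c

   first sets MASK_SSE2, then -mno-sse clears MASK_SSE and, via the
   OPT_msse case, also clears MASK_SSE2 and MASK_SSE3, so the compilation
   ends up with none of them enabled.  */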
1619
1620 /* Sometimes certain combinations of command options do not make
1621 sense on a particular target machine. You can define a macro
1622 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1623 defined, is executed once just after all the command options have
1624 been parsed.
1625
1626 Don't use this macro to turn on various extra optimizations for
1627 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1628
1629 void
1630 override_options (void)
1631 {
1632 int i;
1633 int ix86_tune_defaulted = 0;
1634
1635 /* Comes from final.c -- no real reason to change it. */
1636 #define MAX_CODE_ALIGN 16
1637
1638 static struct ptt
1639 {
1640 const struct processor_costs *cost; /* Processor costs */
1641 const int target_enable; /* Target flags to enable. */
1642 const int target_disable; /* Target flags to disable. */
1643 const int align_loop; /* Default alignments. */
1644 const int align_loop_max_skip;
1645 const int align_jump;
1646 const int align_jump_max_skip;
1647 const int align_func;
1648 }
1649 const processor_target_table[PROCESSOR_max] =
1650 {
1651 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1652 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1653 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1654 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1655 {&geode_cost, 0, 0, 0, 0, 0, 0, 0},
1656 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1657 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1658 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1659 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1660 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0},
1661 {&core2_cost, 0, 0, 16, 7, 16, 7, 16},
1662 {&generic32_cost, 0, 0, 16, 7, 16, 7, 16},
1663 {&generic64_cost, 0, 0, 16, 7, 16, 7, 16}
1664 };
1665
1666 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1667 static struct pta
1668 {
1669 const char *const name; /* processor name or nickname. */
1670 const enum processor_type processor;
1671 const enum pta_flags
1672 {
1673 PTA_SSE = 1,
1674 PTA_SSE2 = 2,
1675 PTA_SSE3 = 4,
1676 PTA_MMX = 8,
1677 PTA_PREFETCH_SSE = 16,
1678 PTA_3DNOW = 32,
1679 PTA_3DNOW_A = 64,
1680 PTA_64BIT = 128,
1681 PTA_SSSE3 = 256
1682 } flags;
1683 }
1684 const processor_alias_table[] =
1685 {
1686 {"i386", PROCESSOR_I386, 0},
1687 {"i486", PROCESSOR_I486, 0},
1688 {"i586", PROCESSOR_PENTIUM, 0},
1689 {"pentium", PROCESSOR_PENTIUM, 0},
1690 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1691 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1692 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1693 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1694 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1695 {"i686", PROCESSOR_PENTIUMPRO, 0},
1696 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1697 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1698 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1699 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1700 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1701 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1702 | PTA_MMX | PTA_PREFETCH_SSE},
1703 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1704 | PTA_MMX | PTA_PREFETCH_SSE},
1705 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1706 | PTA_MMX | PTA_PREFETCH_SSE},
1707 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1708 | PTA_MMX | PTA_PREFETCH_SSE},
1709 {"core2", PROCESSOR_CORE2, PTA_SSE | PTA_SSE2 | PTA_SSE3
1710 | PTA_64BIT | PTA_MMX
1711 | PTA_PREFETCH_SSE},
1712 {"geode", PROCESSOR_GEODE, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1713 | PTA_3DNOW_A},
1714 {"k6", PROCESSOR_K6, PTA_MMX},
1715 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1716 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1717 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1718 | PTA_3DNOW_A},
1719 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1720 | PTA_3DNOW | PTA_3DNOW_A},
1721 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1722 | PTA_3DNOW_A | PTA_SSE},
1723 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1724 | PTA_3DNOW_A | PTA_SSE},
1725 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1726 | PTA_3DNOW_A | PTA_SSE},
1727 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1728 | PTA_SSE | PTA_SSE2 },
1729 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1730 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1731 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1732 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1733 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1734 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1735 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1736 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1737 {"generic32", PROCESSOR_GENERIC32, 0 /* flags are only used for -march switch. */ },
1738 {"generic64", PROCESSOR_GENERIC64, PTA_64BIT /* flags are only used for -march switch. */ },
1739 };
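/* Editorial note: a hedged reading of the table above.  -march=NAME is
   looked up here to choose both ix86_arch and the instruction-set masks
   to enable, while -mtune=NAME mostly just picks the processor to tune
   for.  For example, "-march=pentium3" selects PROCESSOR_PENTIUMPRO and
   turns on MMX and SSE (plus non-NOP SSE prefetch); entries lacking
   PTA_64BIT, such as "pentium3", are rejected under -m64 with the
   "does not support x86-64 instruction set" error emitted below.  */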
1740
1741 int const pta_size = ARRAY_SIZE (processor_alias_table);
1742
1743 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1744 SUBTARGET_OVERRIDE_OPTIONS;
1745 #endif
1746
1747 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
1748 SUBSUBTARGET_OVERRIDE_OPTIONS;
1749 #endif
1750
1751 /* -fPIC is the default for x86-64 Darwin (Mach-O). */
1752 if (TARGET_MACHO && TARGET_64BIT)
1753 flag_pic = 2;
1754
1755 /* Set the default values for switches whose default depends on TARGET_64BIT
1756 in case they weren't overwritten by command line options. */
1757 if (TARGET_64BIT)
1758 {
1759 /* Mach-O doesn't support omitting the frame pointer for now. */
1760 if (flag_omit_frame_pointer == 2)
1761 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
1762 if (flag_asynchronous_unwind_tables == 2)
1763 flag_asynchronous_unwind_tables = 1;
1764 if (flag_pcc_struct_return == 2)
1765 flag_pcc_struct_return = 0;
1766 }
1767 else
1768 {
1769 if (flag_omit_frame_pointer == 2)
1770 flag_omit_frame_pointer = 0;
1771 if (flag_asynchronous_unwind_tables == 2)
1772 flag_asynchronous_unwind_tables = 0;
1773 if (flag_pcc_struct_return == 2)
1774 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1775 }
1776
1777 /* Need to check -mtune=generic first. */
1778 if (ix86_tune_string)
1779 {
1780 if (!strcmp (ix86_tune_string, "generic")
1781 || !strcmp (ix86_tune_string, "i686")
1782 /* As special support for cross compilers we read -mtune=native
1783 as -mtune=generic. With native compilers we won't see the
1784 -mtune=native, as it was changed by the driver. */
1785 || !strcmp (ix86_tune_string, "native"))
1786 {
1787 if (TARGET_64BIT)
1788 ix86_tune_string = "generic64";
1789 else
1790 ix86_tune_string = "generic32";
1791 }
1792 else if (!strncmp (ix86_tune_string, "generic", 7))
1793 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1794 }
1795 else
1796 {
1797 if (ix86_arch_string)
1798 ix86_tune_string = ix86_arch_string;
1799 if (!ix86_tune_string)
1800 {
1801 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1802 ix86_tune_defaulted = 1;
1803 }
1804
1805 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
1806 need to use a sensible tune option. */
1807 if (!strcmp (ix86_tune_string, "generic")
1808 || !strcmp (ix86_tune_string, "x86-64")
1809 || !strcmp (ix86_tune_string, "i686"))
1810 {
1811 if (TARGET_64BIT)
1812 ix86_tune_string = "generic64";
1813 else
1814 ix86_tune_string = "generic32";
1815 }
1816 }
1817 if (ix86_stringop_string)
1818 {
1819 if (!strcmp (ix86_stringop_string, "rep_byte"))
1820 stringop_alg = rep_prefix_1_byte;
1821 else if (!strcmp (ix86_stringop_string, "libcall"))
1822 stringop_alg = libcall;
1823 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
1824 stringop_alg = rep_prefix_4_byte;
1825 else if (!strcmp (ix86_stringop_string, "rep_8byte"))
1826 stringop_alg = rep_prefix_8_byte;
1827 else if (!strcmp (ix86_stringop_string, "byte_loop"))
1828 stringop_alg = loop_1_byte;
1829 else if (!strcmp (ix86_stringop_string, "loop"))
1830 stringop_alg = loop;
1831 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
1832 stringop_alg = unrolled_loop;
1833 else
1834 error ("bad value (%s) for -mstringop-strategy= switch", ix86_stringop_string);
1835 }
1836 if (!strcmp (ix86_tune_string, "x86-64"))
1837 warning (OPT_Wdeprecated, "-mtune=x86-64 is deprecated. Use -mtune=k8 or "
1838 "-mtune=generic instead as appropriate.");
1839
1840 if (!ix86_arch_string)
1841 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1842 if (!strcmp (ix86_arch_string, "generic"))
1843 error ("generic CPU can be used only for -mtune= switch");
1844 if (!strncmp (ix86_arch_string, "generic", 7))
1845 error ("bad value (%s) for -march= switch", ix86_arch_string);
1846
1847 if (ix86_cmodel_string != 0)
1848 {
1849 if (!strcmp (ix86_cmodel_string, "small"))
1850 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1851 else if (!strcmp (ix86_cmodel_string, "medium"))
1852 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
1853 else if (flag_pic)
1854 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1855 else if (!strcmp (ix86_cmodel_string, "32"))
1856 ix86_cmodel = CM_32;
1857 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1858 ix86_cmodel = CM_KERNEL;
1859 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1860 ix86_cmodel = CM_LARGE;
1861 else
1862 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1863 }
1864 else
1865 {
1866 ix86_cmodel = CM_32;
1867 if (TARGET_64BIT)
1868 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1869 }
1870 if (ix86_asm_string != 0)
1871 {
1872 if (! TARGET_MACHO
1873 && !strcmp (ix86_asm_string, "intel"))
1874 ix86_asm_dialect = ASM_INTEL;
1875 else if (!strcmp (ix86_asm_string, "att"))
1876 ix86_asm_dialect = ASM_ATT;
1877 else
1878 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1879 }
1880 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1881 error ("code model %qs not supported in the %s bit mode",
1882 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1883 if (ix86_cmodel == CM_LARGE)
1884 sorry ("code model %<large%> not supported yet");
1885 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1886 sorry ("%i-bit mode not compiled in",
1887 (target_flags & MASK_64BIT) ? 64 : 32);
1888
1889 for (i = 0; i < pta_size; i++)
1890 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1891 {
1892 ix86_arch = processor_alias_table[i].processor;
1893 /* Default cpu tuning to the architecture. */
1894 ix86_tune = ix86_arch;
1895 if (processor_alias_table[i].flags & PTA_MMX
1896 && !(target_flags_explicit & MASK_MMX))
1897 target_flags |= MASK_MMX;
1898 if (processor_alias_table[i].flags & PTA_3DNOW
1899 && !(target_flags_explicit & MASK_3DNOW))
1900 target_flags |= MASK_3DNOW;
1901 if (processor_alias_table[i].flags & PTA_3DNOW_A
1902 && !(target_flags_explicit & MASK_3DNOW_A))
1903 target_flags |= MASK_3DNOW_A;
1904 if (processor_alias_table[i].flags & PTA_SSE
1905 && !(target_flags_explicit & MASK_SSE))
1906 target_flags |= MASK_SSE;
1907 if (processor_alias_table[i].flags & PTA_SSE2
1908 && !(target_flags_explicit & MASK_SSE2))
1909 target_flags |= MASK_SSE2;
1910 if (processor_alias_table[i].flags & PTA_SSE3
1911 && !(target_flags_explicit & MASK_SSE3))
1912 target_flags |= MASK_SSE3;
1913 if (processor_alias_table[i].flags & PTA_SSSE3
1914 && !(target_flags_explicit & MASK_SSSE3))
1915 target_flags |= MASK_SSSE3;
1916 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1917 x86_prefetch_sse = true;
1918 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1919 error ("CPU you selected does not support x86-64 "
1920 "instruction set");
1921 break;
1922 }
1923
1924 if (i == pta_size)
1925 error ("bad value (%s) for -march= switch", ix86_arch_string);
1926
1927 for (i = 0; i < pta_size; i++)
1928 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1929 {
1930 ix86_tune = processor_alias_table[i].processor;
1931 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1932 {
1933 if (ix86_tune_defaulted)
1934 {
1935 ix86_tune_string = "x86-64";
1936 for (i = 0; i < pta_size; i++)
1937 if (! strcmp (ix86_tune_string,
1938 processor_alias_table[i].name))
1939 break;
1940 ix86_tune = processor_alias_table[i].processor;
1941 }
1942 else
1943 error ("CPU you selected does not support x86-64 "
1944 "instruction set");
1945 }
1946 /* Intel CPUs have always interpreted SSE prefetch instructions as
1947 NOPs; so, we can enable SSE prefetch instructions even when
1948 -mtune (rather than -march) points us to a processor that has them.
1949 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1950 higher processors. */
1951 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1952 x86_prefetch_sse = true;
1953 break;
1954 }
1955 if (i == pta_size)
1956 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1957
1958 if (optimize_size)
1959 ix86_cost = &size_cost;
1960 else
1961 ix86_cost = processor_target_table[ix86_tune].cost;
1962 target_flags |= processor_target_table[ix86_tune].target_enable;
1963 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1964
1965 /* Arrange to set up i386_stack_locals for all functions. */
1966 init_machine_status = ix86_init_machine_status;
1967
1968 /* Validate -mregparm= value. */
1969 if (ix86_regparm_string)
1970 {
1971 i = atoi (ix86_regparm_string);
1972 if (i < 0 || i > REGPARM_MAX)
1973 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1974 else
1975 ix86_regparm = i;
1976 }
1977 else
1978 if (TARGET_64BIT)
1979 ix86_regparm = REGPARM_MAX;
1980
1981 /* If the user has provided any of the -malign-* options,
1982 warn and use that value only if -falign-* is not set.
1983 Remove this code in GCC 3.2 or later. */
1984 if (ix86_align_loops_string)
1985 {
1986 warning (0, "-malign-loops is obsolete, use -falign-loops");
1987 if (align_loops == 0)
1988 {
1989 i = atoi (ix86_align_loops_string);
1990 if (i < 0 || i > MAX_CODE_ALIGN)
1991 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1992 else
1993 align_loops = 1 << i;
1994 }
1995 }
1996
1997 if (ix86_align_jumps_string)
1998 {
1999 warning (0, "-malign-jumps is obsolete, use -falign-jumps");
2000 if (align_jumps == 0)
2001 {
2002 i = atoi (ix86_align_jumps_string);
2003 if (i < 0 || i > MAX_CODE_ALIGN)
2004 error ("-malign-jumps=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2005 else
2006 align_jumps = 1 << i;
2007 }
2008 }
2009
2010 if (ix86_align_funcs_string)
2011 {
2012 warning (0, "-malign-functions is obsolete, use -falign-functions");
2013 if (align_functions == 0)
2014 {
2015 i = atoi (ix86_align_funcs_string);
2016 if (i < 0 || i > MAX_CODE_ALIGN)
2017 error ("-malign-functions=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2018 else
2019 align_functions = 1 << i;
2020 }
2021 }
2022
2023 /* Default align_* from the processor table. */
2024 if (align_loops == 0)
2025 {
2026 align_loops = processor_target_table[ix86_tune].align_loop;
2027 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
2028 }
2029 if (align_jumps == 0)
2030 {
2031 align_jumps = processor_target_table[ix86_tune].align_jump;
2032 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
2033 }
2034 if (align_functions == 0)
2035 {
2036 align_functions = processor_target_table[ix86_tune].align_func;
2037 }
2038
2039 /* Validate -mbranch-cost= value, or provide default. */
2040 ix86_branch_cost = ix86_cost->branch_cost;
2041 if (ix86_branch_cost_string)
2042 {
2043 i = atoi (ix86_branch_cost_string);
2044 if (i < 0 || i > 5)
2045 error ("-mbranch-cost=%d is not between 0 and 5", i);
2046 else
2047 ix86_branch_cost = i;
2048 }
2049 if (ix86_section_threshold_string)
2050 {
2051 i = atoi (ix86_section_threshold_string);
2052 if (i < 0)
2053 error ("-mlarge-data-threshold=%d is negative", i);
2054 else
2055 ix86_section_threshold = i;
2056 }
2057
2058 if (ix86_tls_dialect_string)
2059 {
2060 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
2061 ix86_tls_dialect = TLS_DIALECT_GNU;
2062 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
2063 ix86_tls_dialect = TLS_DIALECT_GNU2;
2064 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
2065 ix86_tls_dialect = TLS_DIALECT_SUN;
2066 else
2067 error ("bad value (%s) for -mtls-dialect= switch",
2068 ix86_tls_dialect_string);
2069 }
2070
2071 /* Keep nonleaf frame pointers. */
2072 if (flag_omit_frame_pointer)
2073 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
2074 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
2075 flag_omit_frame_pointer = 1;
2076
2077 /* If we're doing fast math, we don't care about comparison order
2078 wrt NaNs. This lets us use a shorter comparison sequence. */
2079 if (flag_finite_math_only)
2080 target_flags &= ~MASK_IEEE_FP;
2081
2082 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
2083 since the insns won't need emulation. */
2084 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
2085 target_flags &= ~MASK_NO_FANCY_MATH_387;
2086
2087 /* Likewise, if the target doesn't have a 387, or we've specified
2088 software floating point, don't use 387 inline intrinsics. */
2089 if (!TARGET_80387)
2090 target_flags |= MASK_NO_FANCY_MATH_387;
2091
2092 /* Turn on SSE3 builtins for -mssse3. */
2093 if (TARGET_SSSE3)
2094 target_flags |= MASK_SSE3;
2095
2096 /* Turn on SSE2 builtins for -msse3. */
2097 if (TARGET_SSE3)
2098 target_flags |= MASK_SSE2;
2099
2100 /* Turn on SSE builtins for -msse2. */
2101 if (TARGET_SSE2)
2102 target_flags |= MASK_SSE;
2103
2104 /* Turn on MMX builtins for -msse. */
2105 if (TARGET_SSE)
2106 {
2107 target_flags |= MASK_MMX & ~target_flags_explicit;
2108 x86_prefetch_sse = true;
2109 }
2110
2111 /* Turn on MMX builtins for 3Dnow. */
2112 if (TARGET_3DNOW)
2113 target_flags |= MASK_MMX;
2114
2115 if (TARGET_64BIT)
2116 {
2117 if (TARGET_ALIGN_DOUBLE)
2118 error ("-malign-double makes no sense in the 64bit mode");
2119 if (TARGET_RTD)
2120 error ("-mrtd calling convention not supported in the 64bit mode");
2121
2122 /* Enable by default the SSE and MMX builtins. Do allow the user to
2123 explicitly disable any of these. In particular, disabling SSE and
2124 MMX for kernel code is extremely useful. */
2125 target_flags
2126 |= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
2127 & ~target_flags_explicit);
2128 }
2129 else
2130 {
2131 /* The i386 ABI does not specify a red zone. It still makes sense to use
2132 one when the programmer takes care to keep the stack from being clobbered. */
2133 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
2134 target_flags |= MASK_NO_RED_ZONE;
2135 }
2136
2137 /* Validate -mpreferred-stack-boundary= value, or provide default.
2138 The default of 128 bits is for Pentium III's SSE __m128. We can't
2139 change it because of optimize_size. Otherwise, we can't mix object
2140 files compiled with -Os and -On. */
2141 ix86_preferred_stack_boundary = 128;
2142 if (ix86_preferred_stack_boundary_string)
2143 {
2144 i = atoi (ix86_preferred_stack_boundary_string);
2145 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
2146 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
2147 TARGET_64BIT ? 4 : 2);
2148 else
2149 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
2150 }
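/* Editorial note: the -mpreferred-stack-boundary= argument is an
   exponent, not a byte count.  For example, -mpreferred-stack-boundary=4
   gives (1 << 4) * BITS_PER_UNIT = 128 bits, i.e. 16-byte stack
   alignment, matching the 128-bit default chosen above.  */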
2151
2152 /* Accept -mx87regparm only if 80387 support is enabled. */
2153 if (TARGET_X87REGPARM
2154 && ! TARGET_80387)
2155 error ("-mx87regparm used without 80387 enabled");
2156
2157 /* Accept -msseregparm only if at least SSE support is enabled. */
2158 if (TARGET_SSEREGPARM
2159 && ! TARGET_SSE)
2160 error ("-msseregparm used without SSE enabled");
2161
2162 ix86_fpmath = TARGET_FPMATH_DEFAULT;
2163
2164 if (ix86_fpmath_string != 0)
2165 {
2166 if (! strcmp (ix86_fpmath_string, "387"))
2167 ix86_fpmath = FPMATH_387;
2168 else if (! strcmp (ix86_fpmath_string, "sse"))
2169 {
2170 if (!TARGET_SSE)
2171 {
2172 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2173 ix86_fpmath = FPMATH_387;
2174 }
2175 else
2176 ix86_fpmath = FPMATH_SSE;
2177 }
2178 else if (! strcmp (ix86_fpmath_string, "387,sse")
2179 || ! strcmp (ix86_fpmath_string, "sse,387"))
2180 {
2181 if (!TARGET_SSE)
2182 {
2183 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2184 ix86_fpmath = FPMATH_387;
2185 }
2186 else if (!TARGET_80387)
2187 {
2188 warning (0, "387 instruction set disabled, using SSE arithmetics");
2189 ix86_fpmath = FPMATH_SSE;
2190 }
2191 else
2192 ix86_fpmath = FPMATH_SSE | FPMATH_387;
2193 }
2194 else
2195 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
2196 }
2197
2198 /* If the i387 is disabled, then do not return values in it. */
2199 if (!TARGET_80387)
2200 target_flags &= ~MASK_FLOAT_RETURNS;
2201
2202 if ((x86_accumulate_outgoing_args & TUNEMASK)
2203 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2204 && !optimize_size)
2205 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2206
2207 /* ??? Unwind info is not correct around the CFG unless either a frame
2208 pointer is present or M_A_O_A is set. Fixing this requires rewriting
2209 unwind info generation to be aware of the CFG and propagating states
2210 around edges. */
2211 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
2212 || flag_exceptions || flag_non_call_exceptions)
2213 && flag_omit_frame_pointer
2214 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
2215 {
2216 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2217 warning (0, "unwind tables currently require either a frame pointer "
2218 "or -maccumulate-outgoing-args for correctness");
2219 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2220 }
2221
2222 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
2223 {
2224 char *p;
2225 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
2226 p = strchr (internal_label_prefix, 'X');
2227 internal_label_prefix_len = p - internal_label_prefix;
2228 *p = '\0';
2229 }
2230
2231 /* When a scheduling description is not available, disable the scheduler
2232 pass so it won't slow down compilation or make x87 code slower. */
2233 if (!TARGET_SCHEDULE)
2234 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
2235
2236 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
2237 set_param_value ("simultaneous-prefetches",
2238 ix86_cost->simultaneous_prefetches);
2239 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
2240 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
2241 }
2242 \f
2243 /* Switch to the appropriate section for output of DECL.
2244 DECL is either a `VAR_DECL' node or a constant of some sort.
2245 RELOC indicates whether forming the initial value of DECL requires
2246 link-time relocations. */
2247
2248 static section *
2249 x86_64_elf_select_section (tree decl, int reloc,
2250 unsigned HOST_WIDE_INT align)
2251 {
2252 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2253 && ix86_in_large_data_p (decl))
2254 {
2255 const char *sname = NULL;
2256 unsigned int flags = SECTION_WRITE;
2257 switch (categorize_decl_for_section (decl, reloc, flag_pic))
2258 {
2259 case SECCAT_DATA:
2260 sname = ".ldata";
2261 break;
2262 case SECCAT_DATA_REL:
2263 sname = ".ldata.rel";
2264 break;
2265 case SECCAT_DATA_REL_LOCAL:
2266 sname = ".ldata.rel.local";
2267 break;
2268 case SECCAT_DATA_REL_RO:
2269 sname = ".ldata.rel.ro";
2270 break;
2271 case SECCAT_DATA_REL_RO_LOCAL:
2272 sname = ".ldata.rel.ro.local";
2273 break;
2274 case SECCAT_BSS:
2275 sname = ".lbss";
2276 flags |= SECTION_BSS;
2277 break;
2278 case SECCAT_RODATA:
2279 case SECCAT_RODATA_MERGE_STR:
2280 case SECCAT_RODATA_MERGE_STR_INIT:
2281 case SECCAT_RODATA_MERGE_CONST:
2282 sname = ".lrodata";
2283 flags = 0;
2284 break;
2285 case SECCAT_SRODATA:
2286 case SECCAT_SDATA:
2287 case SECCAT_SBSS:
2288 gcc_unreachable ();
2289 case SECCAT_TEXT:
2290 case SECCAT_TDATA:
2291 case SECCAT_TBSS:
2292 /* We don't split these for the medium model. Place them into
2293 default sections and hope for the best. */
2294 break;
2295 }
2296 if (sname)
2297 {
2298 /* We might get called with string constants, but get_named_section
2299 doesn't like them as they are not DECLs. Also, we need to set
2300 flags in that case. */
2301 if (!DECL_P (decl))
2302 return get_section (sname, flags, NULL);
2303 return get_named_section (decl, sname, reloc);
2304 }
2305 }
2306 return default_elf_select_section (decl, reloc, align);
2307 }
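/* Editorial note: in short, under -mcmodel=medium data that
   ix86_in_large_data_p judges "large" is steered into the .l* section
   family chosen above (.ldata, .lbss, .lrodata and friends) so the
   regular small-model sections stay within 32-bit reach; everything else
   falls through to default_elf_select_section.  */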
2308
2309 /* Build up a unique section name, expressed as a
2310 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
2311 RELOC indicates whether the initial value of EXP requires
2312 link-time relocations. */
2313
2314 static void
2315 x86_64_elf_unique_section (tree decl, int reloc)
2316 {
2317 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2318 && ix86_in_large_data_p (decl))
2319 {
2320 const char *prefix = NULL;
2321 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
2322 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
2323
2324 switch (categorize_decl_for_section (decl, reloc, flag_pic))
2325 {
2326 case SECCAT_DATA:
2327 case SECCAT_DATA_REL:
2328 case SECCAT_DATA_REL_LOCAL:
2329 case SECCAT_DATA_REL_RO:
2330 case SECCAT_DATA_REL_RO_LOCAL:
2331 prefix = one_only ? ".gnu.linkonce.ld." : ".ldata.";
2332 break;
2333 case SECCAT_BSS:
2334 prefix = one_only ? ".gnu.linkonce.lb." : ".lbss.";
2335 break;
2336 case SECCAT_RODATA:
2337 case SECCAT_RODATA_MERGE_STR:
2338 case SECCAT_RODATA_MERGE_STR_INIT:
2339 case SECCAT_RODATA_MERGE_CONST:
2340 prefix = one_only ? ".gnu.linkonce.lr." : ".lrodata.";
2341 break;
2342 case SECCAT_SRODATA:
2343 case SECCAT_SDATA:
2344 case SECCAT_SBSS:
2345 gcc_unreachable ();
2346 case SECCAT_TEXT:
2347 case SECCAT_TDATA:
2348 case SECCAT_TBSS:
2349 /* We don't split these for the medium model. Place them into
2350 default sections and hope for the best. */
2351 break;
2352 }
2353 if (prefix)
2354 {
2355 const char *name;
2356 size_t nlen, plen;
2357 char *string;
2358 plen = strlen (prefix);
2359
2360 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
2361 name = targetm.strip_name_encoding (name);
2362 nlen = strlen (name);
2363
2364 string = alloca (nlen + plen + 1);
2365 memcpy (string, prefix, plen);
2366 memcpy (string + plen, name, nlen + 1);
2367
2368 DECL_SECTION_NAME (decl) = build_string (nlen + plen, string);
2369 return;
2370 }
2371 }
2372 default_unique_section (decl, reloc);
2373 }
2374
2375 #ifdef COMMON_ASM_OP
2376 /* This says how to output assembler code to declare an
2377 uninitialized external linkage data object.
2378
2379 For medium-model x86-64 we need to use the .largecomm pseudo-op for
2380 large objects. */
2381 void
2382 x86_elf_aligned_common (FILE *file,
2383 const char *name, unsigned HOST_WIDE_INT size,
2384 int align)
2385 {
2386 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2387 && size > (unsigned int)ix86_section_threshold)
2388 fprintf (file, ".largecomm\t");
2389 else
2390 fprintf (file, "%s", COMMON_ASM_OP);
2391 assemble_name (file, name);
2392 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
2393 size, align / BITS_PER_UNIT);
2394 }
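/* Editorial note: with the medium code model and an object larger than
   ix86_section_threshold, the function above emits something along the
   lines of

       .largecomm  big_array,1048576,32

   (name, size in bytes, alignment in bytes), while smaller objects still
   go through COMMON_ASM_OP; the operand spelling shown here is only an
   illustration derived from the fprintf format above.  */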
2395
2396 /* Utility function for targets to use in implementing
2397 ASM_OUTPUT_ALIGNED_BSS. */
2398
2399 void
2400 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
2401 const char *name, unsigned HOST_WIDE_INT size,
2402 int align)
2403 {
2404 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2405 && size > (unsigned int)ix86_section_threshold)
2406 switch_to_section (get_named_section (decl, ".lbss", 0));
2407 else
2408 switch_to_section (bss_section);
2409 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
2410 #ifdef ASM_DECLARE_OBJECT_NAME
2411 last_assemble_variable_decl = decl;
2412 ASM_DECLARE_OBJECT_NAME (file, name, decl);
2413 #else
2414 /* The standard thing is just to output a label for the object. */
2415 ASM_OUTPUT_LABEL (file, name);
2416 #endif /* ASM_DECLARE_OBJECT_NAME */
2417 ASM_OUTPUT_SKIP (file, size ? size : 1);
2418 }
2419 #endif
2420 \f
2421 void
2422 optimization_options (int level, int size ATTRIBUTE_UNUSED)
2423 {
2424 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
2425 make the problem with not enough registers even worse. */
2426 #ifdef INSN_SCHEDULING
2427 if (level > 1)
2428 flag_schedule_insns = 0;
2429 #endif
2430
2431 if (TARGET_MACHO)
2432 /* The Darwin libraries never set errno, so we might as well
2433 avoid calling them when that's the only reason we would. */
2434 flag_errno_math = 0;
2435
2436 /* The default values of these switches depend on TARGET_64BIT,
2437 which is not known at this moment. Mark these values with 2 and
2438 let the user override them. If there is no command line option
2439 specifying them, we will set the defaults in override_options. */
2440 if (optimize >= 1)
2441 flag_omit_frame_pointer = 2;
2442 flag_pcc_struct_return = 2;
2443 flag_asynchronous_unwind_tables = 2;
2444 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
2445 SUBTARGET_OPTIMIZATION_OPTIONS;
2446 #endif
2447 }
2448 \f
2449 /* Table of valid machine attributes. */
2450 const struct attribute_spec ix86_attribute_table[] =
2451 {
2452 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2453 /* Stdcall attribute says callee is responsible for popping arguments
2454 if they are not variable. */
2455 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2456 /* Fastcall attribute says callee is responsible for popping arguments
2457 if they are not variable. */
2458 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2459 /* Cdecl attribute says the callee is a normal C declaration */
2460 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2461 /* Regparm attribute specifies how many integer arguments are to be
2462 passed in registers. */
2463 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
2464 /* X87regparm attribute says we are passing floating point arguments
2465 in 80387 registers. */
2466 { "x87regparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2467 /* Sseregparm attribute says we are using x86_64 calling conventions
2468 for FP arguments. */
2469 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2470 /* force_align_arg_pointer says this function realigns the stack at entry. */
2471 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
2472 false, true, true, ix86_handle_cconv_attribute },
2473 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2474 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2475 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2476 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
2477 #endif
2478 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
2479 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
2480 #ifdef SUBTARGET_ATTRIBUTE_TABLE
2481 SUBTARGET_ATTRIBUTE_TABLE,
2482 #endif
2483 { NULL, 0, 0, false, false, false, NULL }
2484 };
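/* Editorial note: hedged, illustrative examples of how the attributes in
   the table above appear in user code (the declarations themselves are
   hypothetical):

       int __attribute__((regparm(3))) f (int a, int b, int c);
       int __attribute__((fastcall)) g (int a, int b);
       struct __attribute__((ms_struct)) s { char c; int i; };

   regparm takes exactly one integer argument (min_len == max_len == 1
   above); the calling-convention attributes are vetted by
   ix86_handle_cconv_attribute below, ms_struct and gcc_struct by
   ix86_handle_struct_attribute.  */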
2485
2486 /* Decide whether we can make a sibling call to a function. DECL is the
2487 declaration of the function being targeted by the call and EXP is the
2488 CALL_EXPR representing the call. */
2489
2490 static bool
2491 ix86_function_ok_for_sibcall (tree decl, tree exp)
2492 {
2493 tree func;
2494 rtx a, b;
2495
2496 /* If we are generating position-independent code, we cannot sibcall
2497 optimize any indirect call, or a direct call to a global function,
2498 as the PLT requires %ebx be live. */
2499 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
2500 return false;
2501
2502 if (decl)
2503 func = decl;
2504 else
2505 {
2506 func = TREE_TYPE (TREE_OPERAND (exp, 0));
2507 if (POINTER_TYPE_P (func))
2508 func = TREE_TYPE (func);
2509 }
2510
2511 /* Check that the return value locations are the same. For example,
2512 if we are returning floats on the 80387 register stack, we cannot
2513 make a sibcall from a function that doesn't return a float to a
2514 function that does or, conversely, from a function that does return
2515 a float to a function that doesn't; the necessary stack adjustment
2516 would not be executed. This is also the place we notice
2517 differences in the return value ABI. Note that it is ok for one
2518 of the functions to have void return type as long as the return
2519 value of the other is passed in a register. */
2520 a = ix86_function_value (TREE_TYPE (exp), func, false);
2521 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
2522 cfun->decl, false);
2523 if (STACK_REG_P (a) || STACK_REG_P (b))
2524 {
2525 if (!rtx_equal_p (a, b))
2526 return false;
2527 }
2528 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
2529 ;
2530 else if (!rtx_equal_p (a, b))
2531 return false;
2532
2533 /* If this call is indirect, we'll need to be able to use a call-clobbered
2534 register for the address of the target function. Make sure that all
2535 such registers are not used for passing parameters. */
2536 if (!decl && !TARGET_64BIT)
2537 {
2538 tree type;
2539
2540 /* We're looking at the CALL_EXPR, we need the type of the function. */
2541 type = TREE_OPERAND (exp, 0); /* pointer expression */
2542 type = TREE_TYPE (type); /* pointer type */
2543 type = TREE_TYPE (type); /* function type */
2544
2545 if (ix86_function_regparm (type, NULL) >= 3)
2546 {
2547 /* ??? Need to count the actual number of registers to be used,
2548 not the possible number of registers. Fix later. */
2549 return false;
2550 }
2551 }
2552
2553 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2554 /* Dllimport'd functions are also called indirectly. */
2555 if (decl && DECL_DLLIMPORT_P (decl)
2556 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
2557 return false;
2558 #endif
2559
2560 /* If we force-aligned the stack, then sibcalling would unalign the
2561 stack, which may break the called function. */
2562 if (cfun->machine->force_align_arg_pointer)
2563 return false;
2564
2565 /* Otherwise okay. That also includes certain types of indirect calls. */
2566 return true;
2567 }
2568
2569 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "x87regparm"
2570 and "sseregparm" calling convention attributes;
2571 arguments as in struct attribute_spec.handler. */
2572
2573 static tree
2574 ix86_handle_cconv_attribute (tree *node, tree name,
2575 tree args,
2576 int flags ATTRIBUTE_UNUSED,
2577 bool *no_add_attrs)
2578 {
2579 if (TREE_CODE (*node) != FUNCTION_TYPE
2580 && TREE_CODE (*node) != METHOD_TYPE
2581 && TREE_CODE (*node) != FIELD_DECL
2582 && TREE_CODE (*node) != TYPE_DECL)
2583 {
2584 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2585 IDENTIFIER_POINTER (name));
2586 *no_add_attrs = true;
2587 return NULL_TREE;
2588 }
2589
2590 /* Can combine regparm with all attributes but fastcall. */
2591 if (is_attribute_p ("regparm", name))
2592 {
2593 tree cst;
2594
2595 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2596 {
2597 error ("fastcall and regparm attributes are not compatible");
2598 }
2599
2600 cst = TREE_VALUE (args);
2601 if (TREE_CODE (cst) != INTEGER_CST)
2602 {
2603 warning (OPT_Wattributes,
2604 "%qs attribute requires an integer constant argument",
2605 IDENTIFIER_POINTER (name));
2606 *no_add_attrs = true;
2607 }
2608 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
2609 {
2610 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
2611 IDENTIFIER_POINTER (name), REGPARM_MAX);
2612 *no_add_attrs = true;
2613 }
2614
2615 if (!TARGET_64BIT
2616 && lookup_attribute (ix86_force_align_arg_pointer_string,
2617 TYPE_ATTRIBUTES (*node))
2618 && compare_tree_int (cst, REGPARM_MAX-1))
2619 {
2620 error ("%s functions limited to %d register parameters",
2621 ix86_force_align_arg_pointer_string, REGPARM_MAX-1);
2622 }
2623
2624 return NULL_TREE;
2625 }
2626
2627 if (TARGET_64BIT)
2628 {
2629 warning (OPT_Wattributes, "%qs attribute ignored",
2630 IDENTIFIER_POINTER (name));
2631 *no_add_attrs = true;
2632 return NULL_TREE;
2633 }
2634
2635 /* Can combine fastcall with stdcall (redundant), x87regparm
2636 and sseregparm. */
2637 if (is_attribute_p ("fastcall", name))
2638 {
2639 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2640 {
2641 error ("fastcall and cdecl attributes are not compatible");
2642 }
2643 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2644 {
2645 error ("fastcall and stdcall attributes are not compatible");
2646 }
2647 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
2648 {
2649 error ("fastcall and regparm attributes are not compatible");
2650 }
2651 }
2652
2653 /* Can combine stdcall with fastcall (redundant), regparm,
2654 x87regparm and sseregparm. */
2655 else if (is_attribute_p ("stdcall", name))
2656 {
2657 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2658 {
2659 error ("stdcall and cdecl attributes are not compatible");
2660 }
2661 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2662 {
2663 error ("stdcall and fastcall attributes are not compatible");
2664 }
2665 }
2666
2667 /* Can combine cdecl with regparm, x87regparm and sseregparm. */
2668 else if (is_attribute_p ("cdecl", name))
2669 {
2670 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2671 {
2672 error ("stdcall and cdecl attributes are not compatible");
2673 }
2674 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2675 {
2676 error ("fastcall and cdecl attributes are not compatible");
2677 }
2678 }
2679
2680 /* Can combine x87regparm or sseregparm with all attributes. */
2681
2682 return NULL_TREE;
2683 }
2684
2685 /* Return 0 if the attributes for two types are incompatible, 1 if they
2686 are compatible, and 2 if they are nearly compatible (which causes a
2687 warning to be generated). */
2688
2689 static int
2690 ix86_comp_type_attributes (tree type1, tree type2)
2691 {
2692 /* Check for mismatch of non-default calling convention. */
2693 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
2694
2695 if (TREE_CODE (type1) != FUNCTION_TYPE)
2696 return 1;
2697
2698 /* Check for mismatched fastcall/regparm types. */
2699 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
2700 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
2701 || (ix86_function_regparm (type1, NULL)
2702 != ix86_function_regparm (type2, NULL)))
2703 return 0;
2704
2705 /* Check for mismatched x87regparm types. */
2706 if (!lookup_attribute ("x87regparm", TYPE_ATTRIBUTES (type1))
2707 != !lookup_attribute ("x87regparm", TYPE_ATTRIBUTES (type2)))
2708 return 0;
2709
2710 /* Check for mismatched sseregparm types. */
2711 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
2712 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
2713 return 0;
2714
2715 /* Check for mismatched return types (cdecl vs stdcall). */
2716 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
2717 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
2718 return 0;
2719
2720 return 1;
2721 }
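/* Editorial note: one illustrative consequence of the checks above,
   assuming no global -mregparm setting that happens to match.  The
   hypothetical pair

       void f (int);
       void __attribute__((regparm(3))) f (int);

   has mismatched regparm counts, so ix86_comp_type_attributes returns 0
   and the two declarations are treated as having incompatible types.  */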
2722 \f
2723 /* Return the regparm value for a function with the indicated TYPE and DECL.
2724 DECL may be NULL when calling function indirectly
2725 or considering a libcall. */
2726
2727 static int
2728 ix86_function_regparm (tree type, tree decl)
2729 {
2730 tree attr;
2731 int regparm = ix86_regparm;
2732 bool user_convention = false;
2733
2734 if (!TARGET_64BIT)
2735 {
2736 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
2737 if (attr)
2738 {
2739 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
2740 user_convention = true;
2741 }
2742
2743 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
2744 {
2745 regparm = 2;
2746 user_convention = true;
2747 }
2748
2749 /* Use register calling convention for local functions when possible. */
2750 if (!TARGET_64BIT && !user_convention && decl
2751 && flag_unit_at_a_time && !profile_flag)
2752 {
2753 struct cgraph_local_info *i = cgraph_local_info (decl);
2754 if (i && i->local)
2755 {
2756 int local_regparm, globals = 0, regno;
2757
2758 /* Make sure no regparm register is taken by a global register
2759 variable. */
2760 for (local_regparm = 0; local_regparm < 3; local_regparm++)
2761 if (global_regs[local_regparm])
2762 break;
2763 /* We can't use regparm(3) for nested functions, as these pass the
2764 static chain pointer in the third argument. */
2765 if (local_regparm == 3
2766 && decl_function_context (decl)
2767 && !DECL_NO_STATIC_CHAIN (decl))
2768 local_regparm = 2;
2769 /* If the function realigns its stack pointer, the
2770 prologue will clobber %ecx. If we've already
2771 generated code for the callee, the callee
2772 DECL_STRUCT_FUNCTION is gone, so we fall back to
2773 scanning the attributes for the self-realigning
2774 property. */
2775 if ((DECL_STRUCT_FUNCTION (decl)
2776 && DECL_STRUCT_FUNCTION (decl)->machine->force_align_arg_pointer)
2777 || (!DECL_STRUCT_FUNCTION (decl)
2778 && lookup_attribute (ix86_force_align_arg_pointer_string,
2779 TYPE_ATTRIBUTES (TREE_TYPE (decl)))))
2780 local_regparm = 2;
2781 /* Each global register variable increases register pressure,
2782 so the more global register variables there are, the less the
2783 regparm optimization can be used, unless requested by the user explicitly. */
2784 for (regno = 0; regno < 6; regno++)
2785 if (global_regs[regno])
2786 globals++;
2787 local_regparm
2788 = globals < local_regparm ? local_regparm - globals : 0;
2789
2790 if (local_regparm > regparm)
2791 regparm = local_regparm;
2792 }
2793 }
2794 }
2795 return regparm;
2796 }
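/* Editorial note: a sketch of the net effect of the logic above,
   assuming unit-at-a-time compilation and no profiling.  A function that
   cgraph considers local (roughly: static and not otherwise escaping)
   may have up to three integer arguments passed in %eax, %edx and %ecx
   even without an explicit regparm attribute; that count drops to two
   for nested functions (the static chain needs a register) and for
   self-realigning functions (the prologue clobbers %ecx), and it shrinks
   by one for every global register variable occupying a regparm
   register.  */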
2797
2798 /* Return 1 if we can pass up to X87_REGPARM_MAX floating point
2799 arguments in x87 registers for a function with the indicated
2800 TYPE and DECL. DECL may be NULL when calling function indirectly
2801 or considering a libcall. For local functions, return 2.
2802 Otherwise return 0. */
2803
2804 static int
2805 ix86_function_x87regparm (tree type, tree decl)
2806 {
2807 /* Use x87 registers to pass floating point arguments if requested
2808 by the x87regparm attribute. */
2809 if (TARGET_X87REGPARM
2810 || (type
2811 && lookup_attribute ("x87regparm", TYPE_ATTRIBUTES (type))))
2812 {
2813 if (!TARGET_80387)
2814 {
2815 if (decl)
2816 error ("Calling %qD with attribute x87regparm without "
2817 "80387 enabled", decl);
2818 else
2819 error ("Calling %qT with attribute x87regparm without "
2820 "80387 enabled", type);
2821 return 0;
2822 }
2823
2824 return 1;
2825 }
2826
2827 /* For local functions, pass up to X87_REGPARM_MAX floating point
2828 arguments in x87 registers. */
2829 if (!TARGET_64BIT && decl
2830 && flag_unit_at_a_time && !profile_flag)
2831 {
2832 struct cgraph_local_info *i = cgraph_local_info (decl);
2833 if (i && i->local)
2834 return 2;
2835 }
2836
2837 return 0;
2838 }
2839
2840 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
2841 DFmode (2) arguments in SSE registers for a function with the
2842 indicated TYPE and DECL. DECL may be NULL when calling function
2843 indirectly or considering a libcall. Otherwise return 0. */
2844
2845 static int
2846 ix86_function_sseregparm (tree type, tree decl)
2847 {
2848 /* Use SSE registers to pass SFmode and DFmode arguments if requested
2849 by the sseregparm attribute. */
2850 if (TARGET_SSEREGPARM
2851 || (type
2852 && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
2853 {
2854 if (!TARGET_SSE)
2855 {
2856 if (decl)
2857 error ("Calling %qD with attribute sseregparm without "
2858 "SSE/SSE2 enabled", decl);
2859 else
2860 error ("Calling %qT with attribute sseregparm without "
2861 "SSE/SSE2 enabled", type);
2862 return 0;
2863 }
2864
2865 return 2;
2866 }
2867
2868 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
2869 (and DFmode for SSE2) arguments in SSE registers,
2870 even for 32-bit targets. */
2871 if (!TARGET_64BIT && decl
2872 && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
2873 {
2874 struct cgraph_local_info *i = cgraph_local_info (decl);
2875 if (i && i->local)
2876 return TARGET_SSE2 ? 2 : 1;
2877 }
2878
2879 return 0;
2880 }
2881
2882 /* Return true if EAX is live at the start of the function. Used by
2883 ix86_expand_prologue to determine if we need special help before
2884 calling allocate_stack_worker. */
2885
2886 static bool
2887 ix86_eax_live_at_start_p (void)
2888 {
2889 /* Cheat. Don't bother working forward from ix86_function_regparm
2890 to the function type to whether an actual argument is located in
2891 eax. Instead just look at cfg info, which is still close enough
2892 to correct at this point. This gives false positives for broken
2893 functions that might use uninitialized data that happens to be
2894 allocated in eax, but who cares? */
2895 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->il.rtl->global_live_at_end, 0);
2896 }
2897
2898 /* Value is the number of bytes of arguments automatically
2899 popped when returning from a subroutine call.
2900 FUNDECL is the declaration node of the function (as a tree),
2901 FUNTYPE is the data type of the function (as a tree),
2902 or for a library call it is an identifier node for the subroutine name.
2903 SIZE is the number of bytes of arguments passed on the stack.
2904
2905 On the 80386, the RTD insn may be used to pop them if the number
2906 of args is fixed, but if the number is variable then the caller
2907 must pop them all. RTD can't be used for library calls now
2908 because the library is compiled with the Unix compiler.
2909 Use of RTD is a selectable option, since it is incompatible with
2910 standard Unix calling sequences. If the option is not selected,
2911 the caller must always pop the args.
2912
2913 The attribute stdcall is equivalent to RTD on a per module basis. */
2914
2915 int
2916 ix86_return_pops_args (tree fundecl, tree funtype, int size)
2917 {
2918 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
2919
2920 /* Cdecl functions override -mrtd, and never pop the stack. */
2921 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
2922
2923 /* Stdcall and fastcall functions will pop the stack if not
2924 variable args. */
2925 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
2926 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
2927 rtd = 1;
2928
2929 if (rtd
2930 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
2931 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
2932 == void_type_node)))
2933 return size;
2934 }
2935
2936 /* Lose any fake structure return argument if it is passed on the stack. */
2937 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
2938 && !TARGET_64BIT
2939 && !KEEP_AGGREGATE_RETURN_POINTER)
2940 {
2941 int nregs = ix86_function_regparm (funtype, fundecl);
2942
2943 if (!nregs)
2944 return GET_MODE_SIZE (Pmode);
2945 }
2946
2947 return 0;
2948 }
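
/* A worked example of the rules above (the declaration is illustrative):
   for a fixed-argument stdcall function such as

     int __attribute__((stdcall)) f (int a, int b);

   the argument list ends in void_type_node, so the callee itself pops the
   SIZE (here 8) bytes of stack arguments on return, whereas a cdecl or
   varargs declaration leaves the cleanup to the caller.  */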
2949 \f
2950 /* Argument support functions. */
2951
2952 /* Return true when register may be used to pass function parameters. */
2953 bool
2954 ix86_function_arg_regno_p (int regno)
2955 {
2956 int i;
2957 if (!TARGET_64BIT)
2958 return (regno < REGPARM_MAX
2959 || (TARGET_80387 && FP_REGNO_P (regno)
2960 && (regno < FIRST_FLOAT_REG + X87_REGPARM_MAX))
2961 || (TARGET_MMX && MMX_REGNO_P (regno)
2962 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
2963 || (TARGET_SSE && SSE_REGNO_P (regno)
2964 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
2965
2966 if (TARGET_SSE && SSE_REGNO_P (regno)
2967 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
2968 return true;
2969 /* RAX is used as hidden argument to va_arg functions. */
2970 if (!regno)
2971 return true;
2972 for (i = 0; i < REGPARM_MAX; i++)
2973 if (regno == x86_64_int_parameter_registers[i])
2974 return true;
2975 return false;
2976 }
2977
2978 /* Return true if we do not know how to pass TYPE solely in registers. */
2979
2980 static bool
2981 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
2982 {
2983 if (must_pass_in_stack_var_size_or_pad (mode, type))
2984 return true;
2985
2986 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
2987 The layout_type routine is crafty and tries to trick us into passing
2988 currently unsupported vector types on the stack by using TImode. */
2989 return (!TARGET_64BIT && mode == TImode
2990 && type && TREE_CODE (type) != VECTOR_TYPE);
2991 }
2992
2993 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2994 for a call to a function whose data type is FNTYPE.
2995 For a library call, FNTYPE is 0. */
2996
2997 void
2998 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
2999 tree fntype, /* tree ptr for function decl */
3000 rtx libname, /* SYMBOL_REF of library name or 0 */
3001 tree fndecl)
3002 {
3003 static CUMULATIVE_ARGS zero_cum;
3004 tree param, next_param;
3005
3006 if (TARGET_DEBUG_ARG)
3007 {
3008 fprintf (stderr, "\ninit_cumulative_args (");
3009 if (fntype)
3010 fprintf (stderr, "fntype code = %s, ret code = %s",
3011 tree_code_name[(int) TREE_CODE (fntype)],
3012 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
3013 else
3014 fprintf (stderr, "no fntype");
3015
3016 if (libname)
3017 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
3018 }
3019
3020 *cum = zero_cum;
3021
3022 /* Set up the number of registers to use for passing arguments. */
3023 cum->nregs = ix86_regparm;
3024 if (TARGET_80387)
3025 cum->x87_nregs = X87_REGPARM_MAX;
3026 if (TARGET_SSE)
3027 cum->sse_nregs = SSE_REGPARM_MAX;
3028 if (TARGET_MMX)
3029 cum->mmx_nregs = MMX_REGPARM_MAX;
3030 cum->warn_sse = true;
3031 cum->warn_mmx = true;
3032 cum->maybe_vaarg = false;
3033
3034 /* Use ecx and edx registers if function has fastcall attribute,
3035 else look for regparm information. */
3036 if (fntype && !TARGET_64BIT)
3037 {
3038 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
3039 {
3040 cum->nregs = 2;
3041 cum->fastcall = 1;
3042 }
3043 else
3044 cum->nregs = ix86_function_regparm (fntype, fndecl);
3045 }
3046
3047 /* Set up the number of 80387 registers used for passing
3048 floating point arguments. Warn for mismatching ABI. */
3049 cum->float_in_x87 = ix86_function_x87regparm (fntype, fndecl);
3050
3051 /* Set up the number of SSE registers used for passing SFmode
3052 and DFmode arguments. Warn for mismatching ABI. */
3053 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl);
3054
3055 /* Determine if this function has variable arguments.  If there are
3056 no variable arguments, this is indicated by the last argument being
3057 'void_type_node'.  If there are variable arguments, then we won't
3058 pass anything in registers in 32-bit mode. */
3059
3060 if (cum->nregs || cum->mmx_nregs
3061 || cum->x87_nregs || cum->sse_nregs)
3062 {
3063 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
3064 param != 0; param = next_param)
3065 {
3066 next_param = TREE_CHAIN (param);
3067 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3068 {
3069 if (!TARGET_64BIT)
3070 {
3071 cum->nregs = 0;
3072 cum->x87_nregs = 0;
3073 cum->sse_nregs = 0;
3074 cum->mmx_nregs = 0;
3075 cum->warn_sse = 0;
3076 cum->warn_mmx = 0;
3077 cum->fastcall = 0;
3078 cum->float_in_x87 = 0;
3079 cum->float_in_sse = 0;
3080 }
3081 cum->maybe_vaarg = true;
3082 }
3083 }
3084 }
3085 if ((!fntype && !libname)
3086 || (fntype && !TYPE_ARG_TYPES (fntype)))
3087 cum->maybe_vaarg = true;
3088
3089 if (TARGET_DEBUG_ARG)
3090 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
3091
3092 return;
3093 }
3094
3095 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
3096 But in the case of vector types, it is some vector mode.
3097
3098 When we have only some of our vector isa extensions enabled, then there
3099 are some modes for which vector_mode_supported_p is false. For these
3100 modes, the generic vector support in gcc will choose some non-vector mode
3101 in order to implement the type. By computing the natural mode, we'll
3102 select the proper ABI location for the operand and not depend on whatever
3103 the middle-end decides to do with these vector types. */
3104
3105 static enum machine_mode
3106 type_natural_mode (tree type)
3107 {
3108 enum machine_mode mode = TYPE_MODE (type);
3109
3110 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
3111 {
3112 HOST_WIDE_INT size = int_size_in_bytes (type);
3113 if ((size == 8 || size == 16)
3114 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
3115 && TYPE_VECTOR_SUBPARTS (type) > 1)
3116 {
3117 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
3118
3119 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
3120 mode = MIN_MODE_VECTOR_FLOAT;
3121 else
3122 mode = MIN_MODE_VECTOR_INT;
3123
3124 /* Get the mode which has this inner mode and number of units. */
3125 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3126 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
3127 && GET_MODE_INNER (mode) == innermode)
3128 return mode;
3129
3130 gcc_unreachable ();
3131 }
3132 }
3133
3134 return mode;
3135 }
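
/* As an example (the typedef is illustrative), a generic vector type such as

     typedef float v4sf __attribute__((vector_size (16)));

   gets V4SFmode as its natural mode here even when SSE is disabled and the
   middle-end would otherwise fall back to a non-vector mode, so the ABI
   location chosen for such an argument does not depend on the ISA flags.  */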
3136
3137 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
3138 this may not agree with the mode that the type system has chosen for the
3139 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
3140 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
3141
3142 static rtx
3143 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
3144 unsigned int regno)
3145 {
3146 rtx tmp;
3147
3148 if (orig_mode != BLKmode)
3149 tmp = gen_rtx_REG (orig_mode, regno);
3150 else
3151 {
3152 tmp = gen_rtx_REG (mode, regno);
3153 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
3154 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
3155 }
3156
3157 return tmp;
3158 }
3159
3160 /* x86-64 register passing implementation.  See the x86-64 ABI for details.
3161 The goal of this code is to classify each 8-byte chunk of an incoming
3162 argument by register class and assign registers accordingly. */
3163
3164 /* Return the union class of CLASS1 and CLASS2.
3165 See the x86-64 PS ABI for details. */
3166
3167 static enum x86_64_reg_class
3168 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
3169 {
3170 /* Rule #1: If both classes are equal, this is the resulting class. */
3171 if (class1 == class2)
3172 return class1;
3173
3174 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
3175 the other class. */
3176 if (class1 == X86_64_NO_CLASS)
3177 return class2;
3178 if (class2 == X86_64_NO_CLASS)
3179 return class1;
3180
3181 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
3182 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
3183 return X86_64_MEMORY_CLASS;
3184
3185 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
3186 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
3187 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
3188 return X86_64_INTEGERSI_CLASS;
3189 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
3190 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
3191 return X86_64_INTEGER_CLASS;
3192
3193 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
3194 MEMORY is used. */
3195 if (class1 == X86_64_X87_CLASS
3196 || class1 == X86_64_X87UP_CLASS
3197 || class1 == X86_64_COMPLEX_X87_CLASS
3198 || class2 == X86_64_X87_CLASS
3199 || class2 == X86_64_X87UP_CLASS
3200 || class2 == X86_64_COMPLEX_X87_CLASS)
3201 return X86_64_MEMORY_CLASS;
3202
3203 /* Rule #6: Otherwise class SSE is used. */
3204 return X86_64_SSE_CLASS;
3205 }
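
/* A short worked example of the rules above (the union is illustrative):
   for

     union { float f; int i; };

   the single eightbyte sees X86_64_SSESF_CLASS from the float and
   X86_64_INTEGERSI_CLASS from the int; Rule #4 merges the pair to
   X86_64_INTEGERSI_CLASS, so the union ends up in a general purpose
   register.  */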
3206
3207 /* Classify the argument of type TYPE and mode MODE.
3208 CLASSES will be filled by the register class used to pass each word
3209 of the operand. The number of words is returned. In case the parameter
3210 should be passed in memory, 0 is returned. As a special case for zero
3211 sized containers, classes[0] will be NO_CLASS and 1 is returned.
3212
3213 BIT_OFFSET is used internally for handling records; it gives the
3214 offset in bits, taken modulo 256 to avoid overflow cases.
3215
3216 See the x86-64 PS ABI for details.
3217 */
3218
3219 static int
3220 classify_argument (enum machine_mode mode, tree type,
3221 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
3222 {
3223 HOST_WIDE_INT bytes =
3224 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3225 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3226
3227 /* Variable sized entities are always passed/returned in memory. */
3228 if (bytes < 0)
3229 return 0;
3230
3231 if (mode != VOIDmode
3232 && targetm.calls.must_pass_in_stack (mode, type))
3233 return 0;
3234
3235 if (type && AGGREGATE_TYPE_P (type))
3236 {
3237 int i;
3238 tree field;
3239 enum x86_64_reg_class subclasses[MAX_CLASSES];
3240
3241 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
3242 if (bytes > 16)
3243 return 0;
3244
3245 for (i = 0; i < words; i++)
3246 classes[i] = X86_64_NO_CLASS;
3247
3248 /* Zero-sized arrays or structures are NO_CLASS.  We return 0 to
3249 signal the memory class, so handle this as a special case. */
3250 if (!words)
3251 {
3252 classes[0] = X86_64_NO_CLASS;
3253 return 1;
3254 }
3255
3256 /* Classify each field of record and merge classes. */
3257 switch (TREE_CODE (type))
3258 {
3259 case RECORD_TYPE:
3260 /* And now merge the fields of structure. */
3261 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3262 {
3263 if (TREE_CODE (field) == FIELD_DECL)
3264 {
3265 int num;
3266
3267 if (TREE_TYPE (field) == error_mark_node)
3268 continue;
3269
3270 /* Bitfields are always classified as integer. Handle them
3271 early, since later code would consider them to be
3272 misaligned integers. */
3273 if (DECL_BIT_FIELD (field))
3274 {
3275 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3276 i < ((int_bit_position (field) + (bit_offset % 64))
3277 + tree_low_cst (DECL_SIZE (field), 0)
3278 + 63) / 8 / 8; i++)
3279 classes[i] =
3280 merge_classes (X86_64_INTEGER_CLASS,
3281 classes[i]);
3282 }
3283 else
3284 {
3285 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3286 TREE_TYPE (field), subclasses,
3287 (int_bit_position (field)
3288 + bit_offset) % 256);
3289 if (!num)
3290 return 0;
3291 for (i = 0; i < num; i++)
3292 {
3293 int pos =
3294 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3295 classes[i + pos] =
3296 merge_classes (subclasses[i], classes[i + pos]);
3297 }
3298 }
3299 }
3300 }
3301 break;
3302
3303 case ARRAY_TYPE:
3304 /* Arrays are handled as small records. */
3305 {
3306 int num;
3307 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
3308 TREE_TYPE (type), subclasses, bit_offset);
3309 if (!num)
3310 return 0;
3311
3312 /* The partial classes are now full classes. */
3313 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
3314 subclasses[0] = X86_64_SSE_CLASS;
3315 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
3316 subclasses[0] = X86_64_INTEGER_CLASS;
3317
3318 for (i = 0; i < words; i++)
3319 classes[i] = subclasses[i % num];
3320
3321 break;
3322 }
3323 case UNION_TYPE:
3324 case QUAL_UNION_TYPE:
3325 /* Unions are similar to RECORD_TYPE but the offset is always 0. */
3327 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3328 {
3329 if (TREE_CODE (field) == FIELD_DECL)
3330 {
3331 int num;
3332
3333 if (TREE_TYPE (field) == error_mark_node)
3334 continue;
3335
3336 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3337 TREE_TYPE (field), subclasses,
3338 bit_offset);
3339 if (!num)
3340 return 0;
3341 for (i = 0; i < num; i++)
3342 classes[i] = merge_classes (subclasses[i], classes[i]);
3343 }
3344 }
3345 break;
3346
3347 default:
3348 gcc_unreachable ();
3349 }
3350
3351 /* Final merger cleanup. */
3352 for (i = 0; i < words; i++)
3353 {
3354 /* If one class is MEMORY, everything should be passed in
3355 memory. */
3356 if (classes[i] == X86_64_MEMORY_CLASS)
3357 return 0;
3358
3359 /* X86_64_SSEUP_CLASS should always be preceded by
3360 X86_64_SSE_CLASS. */
3361 if (classes[i] == X86_64_SSEUP_CLASS
3362 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
3363 classes[i] = X86_64_SSE_CLASS;
3364
3365 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
3366 if (classes[i] == X86_64_X87UP_CLASS
3367 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
3368 classes[i] = X86_64_SSE_CLASS;
3369 }
3370 return words;
3371 }
3372
3373 /* Compute the alignment needed.  We align all types to their natural
3374 boundaries, with the exception of XFmode, which is aligned to 128 bits. */
3375 if (mode != VOIDmode && mode != BLKmode)
3376 {
3377 int mode_alignment = GET_MODE_BITSIZE (mode);
3378
3379 if (mode == XFmode)
3380 mode_alignment = 128;
3381 else if (mode == XCmode)
3382 mode_alignment = 256;
3383 if (COMPLEX_MODE_P (mode))
3384 mode_alignment /= 2;
3385 /* Misaligned fields are always returned in memory. */
3386 if (bit_offset % mode_alignment)
3387 return 0;
3388 }
3389
3390 /* For V1xx modes, just use the base mode. */
3391 if (VECTOR_MODE_P (mode)
3392 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
3393 mode = GET_MODE_INNER (mode);
3394
3395 /* Classification of atomic types. */
3396 switch (mode)
3397 {
3398 case SDmode:
3399 case DDmode:
3400 classes[0] = X86_64_SSE_CLASS;
3401 return 1;
3402 case TDmode:
3403 classes[0] = X86_64_SSE_CLASS;
3404 classes[1] = X86_64_SSEUP_CLASS;
3405 return 2;
3406 case DImode:
3407 case SImode:
3408 case HImode:
3409 case QImode:
3410 case CSImode:
3411 case CHImode:
3412 case CQImode:
3413 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3414 classes[0] = X86_64_INTEGERSI_CLASS;
3415 else
3416 classes[0] = X86_64_INTEGER_CLASS;
3417 return 1;
3418 case CDImode:
3419 case TImode:
3420 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
3421 return 2;
3422 case CTImode:
3423 return 0;
3424 case SFmode:
3425 if (!(bit_offset % 64))
3426 classes[0] = X86_64_SSESF_CLASS;
3427 else
3428 classes[0] = X86_64_SSE_CLASS;
3429 return 1;
3430 case DFmode:
3431 classes[0] = X86_64_SSEDF_CLASS;
3432 return 1;
3433 case XFmode:
3434 classes[0] = X86_64_X87_CLASS;
3435 classes[1] = X86_64_X87UP_CLASS;
3436 return 2;
3437 case TFmode:
3438 classes[0] = X86_64_SSE_CLASS;
3439 classes[1] = X86_64_SSEUP_CLASS;
3440 return 2;
3441 case SCmode:
3442 classes[0] = X86_64_SSE_CLASS;
3443 return 1;
3444 case DCmode:
3445 classes[0] = X86_64_SSEDF_CLASS;
3446 classes[1] = X86_64_SSEDF_CLASS;
3447 return 2;
3448 case XCmode:
3449 classes[0] = X86_64_COMPLEX_X87_CLASS;
3450 return 1;
3451 case TCmode:
3452 /* This mode is larger than 16 bytes. */
3453 return 0;
3454 case V4SFmode:
3455 case V4SImode:
3456 case V16QImode:
3457 case V8HImode:
3458 case V2DFmode:
3459 case V2DImode:
3460 classes[0] = X86_64_SSE_CLASS;
3461 classes[1] = X86_64_SSEUP_CLASS;
3462 return 2;
3463 case V2SFmode:
3464 case V2SImode:
3465 case V4HImode:
3466 case V8QImode:
3467 classes[0] = X86_64_SSE_CLASS;
3468 return 1;
3469 case BLKmode:
3470 case VOIDmode:
3471 return 0;
3472 default:
3473 gcc_assert (VECTOR_MODE_P (mode));
3474
3475 if (bytes > 16)
3476 return 0;
3477
3478 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
3479
3480 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3481 classes[0] = X86_64_INTEGERSI_CLASS;
3482 else
3483 classes[0] = X86_64_INTEGER_CLASS;
3484 classes[1] = X86_64_INTEGER_CLASS;
3485 return 1 + (bytes > 8);
3486 }
3487 }
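
/* For instance (the structure is an illustrative example), classifying

     struct { double d; long l; };

   yields two eightbytes, { X86_64_SSEDF_CLASS, X86_64_INTEGER_CLASS },
   and the function returns 2; construct_container below then builds a
   PARALLEL placing the double in an SSE register and the long in a
   general purpose register.  */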
3488
3489 /* Examine the argument and set the number of registers required in each
3490 class.  Return 0 iff the parameter should be passed in memory. */
3491 static int
3492 examine_argument (enum machine_mode mode, tree type, int in_return,
3493 int *int_nregs, int *sse_nregs)
3494 {
3495 enum x86_64_reg_class class[MAX_CLASSES];
3496 int n = classify_argument (mode, type, class, 0);
3497
3498 *int_nregs = 0;
3499 *sse_nregs = 0;
3500 if (!n)
3501 return 0;
3502 for (n--; n >= 0; n--)
3503 switch (class[n])
3504 {
3505 case X86_64_INTEGER_CLASS:
3506 case X86_64_INTEGERSI_CLASS:
3507 (*int_nregs)++;
3508 break;
3509 case X86_64_SSE_CLASS:
3510 case X86_64_SSESF_CLASS:
3511 case X86_64_SSEDF_CLASS:
3512 (*sse_nregs)++;
3513 break;
3514 case X86_64_NO_CLASS:
3515 case X86_64_SSEUP_CLASS:
3516 break;
3517 case X86_64_X87_CLASS:
3518 case X86_64_X87UP_CLASS:
3519 if (!in_return)
3520 return 0;
3521 break;
3522 case X86_64_COMPLEX_X87_CLASS:
3523 return in_return ? 2 : 0;
3524 case X86_64_MEMORY_CLASS:
3525 gcc_unreachable ();
3526 }
3527 return 1;
3528 }
3529
3530 /* Construct container for the argument used by GCC interface. See
3531 FUNCTION_ARG for the detailed description. */
3532
3533 static rtx
3534 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
3535 tree type, int in_return, int nintregs, int nsseregs,
3536 const int *intreg, int sse_regno)
3537 {
3538 /* Static flags recording whether each of the errors below has already been issued. */
3539 static bool issued_sse_arg_error;
3540 static bool issued_sse_ret_error;
3541 static bool issued_x87_ret_error;
3542
3543 enum machine_mode tmpmode;
3544 int bytes =
3545 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3546 enum x86_64_reg_class class[MAX_CLASSES];
3547 int n;
3548 int i;
3549 int nexps = 0;
3550 int needed_sseregs, needed_intregs;
3551 rtx exp[MAX_CLASSES];
3552 rtx ret;
3553
3554 n = classify_argument (mode, type, class, 0);
3555 if (TARGET_DEBUG_ARG)
3556 {
3557 if (!n)
3558 fprintf (stderr, "Memory class\n");
3559 else
3560 {
3561 fprintf (stderr, "Classes:");
3562 for (i = 0; i < n; i++)
3563 {
3564 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
3565 }
3566 fprintf (stderr, "\n");
3567 }
3568 }
3569 if (!n)
3570 return NULL;
3571 if (!examine_argument (mode, type, in_return, &needed_intregs,
3572 &needed_sseregs))
3573 return NULL;
3574 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
3575 return NULL;
3576
3577 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
3578 some less clueful developer tries to use floating-point anyway. */
3579 if (needed_sseregs && !TARGET_SSE)
3580 {
3581 if (in_return)
3582 {
3583 if (!issued_sse_ret_error)
3584 {
3585 error ("SSE register return with SSE disabled");
3586 issued_sse_ret_error = true;
3587 }
3588 }
3589 else if (!issued_sse_arg_error)
3590 {
3591 error ("SSE register argument with SSE disabled");
3592 issued_sse_arg_error = true;
3593 }
3594 return NULL;
3595 }
3596
3597 /* Likewise, error if the ABI requires us to return values in the
3598 x87 registers and the user specified -mno-80387. */
3599 if (!TARGET_80387 && in_return)
3600 for (i = 0; i < n; i++)
3601 if (class[i] == X86_64_X87_CLASS
3602 || class[i] == X86_64_X87UP_CLASS
3603 || class[i] == X86_64_COMPLEX_X87_CLASS)
3604 {
3605 if (!issued_x87_ret_error)
3606 {
3607 error ("x87 register return with x87 disabled");
3608 issued_x87_ret_error = true;
3609 }
3610 return NULL;
3611 }
3612
3613 /* First construct the simple cases.  Avoid SCmode, since we want to use
3614 a single register to pass this type. */
3615 if (n == 1 && mode != SCmode)
3616 switch (class[0])
3617 {
3618 case X86_64_INTEGER_CLASS:
3619 case X86_64_INTEGERSI_CLASS:
3620 return gen_rtx_REG (mode, intreg[0]);
3621 case X86_64_SSE_CLASS:
3622 case X86_64_SSESF_CLASS:
3623 case X86_64_SSEDF_CLASS:
3624 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
3625 case X86_64_X87_CLASS:
3626 case X86_64_COMPLEX_X87_CLASS:
3627 return gen_rtx_REG (mode, FIRST_STACK_REG);
3628 case X86_64_NO_CLASS:
3629 /* Zero sized array, struct or class. */
3630 return NULL;
3631 default:
3632 gcc_unreachable ();
3633 }
3634 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
3635 && mode != BLKmode)
3636 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
3637 if (n == 2
3638 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
3639 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
3640 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
3641 && class[1] == X86_64_INTEGER_CLASS
3642 && (mode == CDImode || mode == TImode || mode == TFmode)
3643 && intreg[0] + 1 == intreg[1])
3644 return gen_rtx_REG (mode, intreg[0]);
3645
3646 /* Otherwise figure out the entries of the PARALLEL. */
3647 for (i = 0; i < n; i++)
3648 {
3649 switch (class[i])
3650 {
3651 case X86_64_NO_CLASS:
3652 break;
3653 case X86_64_INTEGER_CLASS:
3654 case X86_64_INTEGERSI_CLASS:
3655 /* Merge TImodes on aligned occasions here too. */
3656 if (i * 8 + 8 > bytes)
3657 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
3658 else if (class[i] == X86_64_INTEGERSI_CLASS)
3659 tmpmode = SImode;
3660 else
3661 tmpmode = DImode;
3662 /* We've requested 24 bytes we don't have a mode for.  Use DImode. */
3663 if (tmpmode == BLKmode)
3664 tmpmode = DImode;
3665 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3666 gen_rtx_REG (tmpmode, *intreg),
3667 GEN_INT (i*8));
3668 intreg++;
3669 break;
3670 case X86_64_SSESF_CLASS:
3671 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3672 gen_rtx_REG (SFmode,
3673 SSE_REGNO (sse_regno)),
3674 GEN_INT (i*8));
3675 sse_regno++;
3676 break;
3677 case X86_64_SSEDF_CLASS:
3678 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3679 gen_rtx_REG (DFmode,
3680 SSE_REGNO (sse_regno)),
3681 GEN_INT (i*8));
3682 sse_regno++;
3683 break;
3684 case X86_64_SSE_CLASS:
3685 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
3686 tmpmode = TImode;
3687 else
3688 tmpmode = DImode;
3689 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3690 gen_rtx_REG (tmpmode,
3691 SSE_REGNO (sse_regno)),
3692 GEN_INT (i*8));
3693 if (tmpmode == TImode)
3694 i++;
3695 sse_regno++;
3696 break;
3697 default:
3698 gcc_unreachable ();
3699 }
3700 }
3701
3702 /* Empty aligned struct, union or class. */
3703 if (nexps == 0)
3704 return NULL;
3705
3706 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
3707 for (i = 0; i < nexps; i++)
3708 XVECEXP (ret, 0, i) = exp [i];
3709 return ret;
3710 }
3711
3712 /* Update the data in CUM to advance over an argument
3713 of mode MODE and data type TYPE.
3714 (TYPE is null for libcalls where that information may not be available.) */
3715
3716 void
3717 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3718 tree type, int named)
3719 {
3720 int bytes =
3721 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3722 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3723
3724 if (type)
3725 mode = type_natural_mode (type);
3726
3727 if (TARGET_DEBUG_ARG)
3728 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
3729 "mode=%s, named=%d)\n\n",
3730 words, cum->words, cum->nregs, cum->sse_nregs,
3731 GET_MODE_NAME (mode), named);
3732
3733 if (TARGET_64BIT)
3734 {
3735 int int_nregs, sse_nregs;
3736 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
3737 cum->words += words;
3738 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
3739 {
3740 cum->nregs -= int_nregs;
3741 cum->sse_nregs -= sse_nregs;
3742 cum->regno += int_nregs;
3743 cum->sse_regno += sse_nregs;
3744 }
3745 else
3746 cum->words += words;
3747 }
3748 else
3749 {
3750 switch (mode)
3751 {
3752 default:
3753 break;
3754
3755 case BLKmode:
3756 if (bytes < 0)
3757 break;
3758 /* FALLTHRU */
3759
3760 case DImode:
3761 case SImode:
3762 case HImode:
3763 case QImode:
3764 cum->words += words;
3765 cum->nregs -= words;
3766 cum->regno += words;
3767
3768 if (cum->nregs <= 0)
3769 {
3770 cum->nregs = 0;
3771 cum->regno = 0;
3772 }
3773 break;
3774
3775 case SFmode:
3776 if (cum->float_in_sse > 0)
3777 goto skip_80387;
3778
3779 case DFmode:
3780 if (cum->float_in_sse > 1)
3781 goto skip_80387;
3782
3783 /* Because no inherent XFmode->DFmode and XFmode->SFmode
3784 rounding takes place when values are passed in x87
3785 registers, pass DFmode and SFmode types to local functions
3786 only when flag_unsafe_math_optimizations is set. */
3787 if (!cum->float_in_x87
3788 || (cum->float_in_x87 == 2
3789 && !flag_unsafe_math_optimizations))
3790 break;
3791
3792 case XFmode:
3793 if (!cum->float_in_x87)
3794 break;
3795
3796 if (!type || !AGGREGATE_TYPE_P (type))
3797 {
3798 cum->x87_nregs -= 1;
3799 cum->x87_regno += 1;
3800 if (cum->x87_nregs <= 0)
3801 {
3802 cum->x87_nregs = 0;
3803 cum->x87_regno = 0;
3804 }
3805 }
3806 break;
3807
3808 skip_80387:
3809
3810 case TImode:
3811 case V16QImode:
3812 case V8HImode:
3813 case V4SImode:
3814 case V2DImode:
3815 case V4SFmode:
3816 case V2DFmode:
3817 if (!type || !AGGREGATE_TYPE_P (type))
3818 {
3819 cum->sse_nregs -= 1;
3820 cum->sse_regno += 1;
3821 if (cum->sse_nregs <= 0)
3822 {
3823 cum->sse_nregs = 0;
3824 cum->sse_regno = 0;
3825 }
3826 }
3827 break;
3828
3829 case V8QImode:
3830 case V4HImode:
3831 case V2SImode:
3832 case V2SFmode:
3833 if (!type || !AGGREGATE_TYPE_P (type))
3834 {
3835 cum->mmx_nregs -= 1;
3836 cum->mmx_regno += 1;
3837 if (cum->mmx_nregs <= 0)
3838 {
3839 cum->mmx_nregs = 0;
3840 cum->mmx_regno = 0;
3841 }
3842 }
3843 break;
3844 }
3845 }
3846 }
3847
3848 /* Define where to put the arguments to a function.
3849 Value is zero to push the argument on the stack,
3850 or a hard register in which to store the argument.
3851
3852 MODE is the argument's machine mode.
3853 TYPE is the data type of the argument (as a tree).
3854 This is null for libcalls where that information may
3855 not be available.
3856 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3857 the preceding args and about the function being called.
3858 NAMED is nonzero if this argument is a named parameter
3859 (otherwise it is an extra parameter matching an ellipsis). */
3860
3861 rtx
3862 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
3863 tree type, int named)
3864 {
3865 enum machine_mode mode = orig_mode;
3866 rtx ret = NULL_RTX;
3867 int bytes =
3868 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3869 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3870 static bool warnedsse, warnedmmx;
3871
3872 /* To simplify the code below, represent vector types with a vector mode
3873 even if MMX/SSE are not active. */
3874 if (type && TREE_CODE (type) == VECTOR_TYPE)
3875 mode = type_natural_mode (type);
3876
3877 /* Handle a hidden AL argument containing the number of registers used by
3878 varargs x86-64 functions.  For the i386 ABI just return constm1_rtx to
3879 avoid any AL settings. */
3880 if (mode == VOIDmode)
3881 {
3882 if (TARGET_64BIT)
3883 return GEN_INT (cum->maybe_vaarg
3884 ? (cum->sse_nregs < 0
3885 ? SSE_REGPARM_MAX
3886 : cum->sse_regno)
3887 : -1);
3888 else
3889 return constm1_rtx;
3890 }
3891 if (TARGET_64BIT)
3892 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
3893 cum->sse_nregs,
3894 &x86_64_int_parameter_registers [cum->regno],
3895 cum->sse_regno);
3896 else
3897 switch (mode)
3898 {
3899 default:
3900 break;
3901
3902 case BLKmode:
3903 if (bytes < 0)
3904 break;
3905 /* FALLTHRU */
3906 case DImode:
3907 case SImode:
3908 case HImode:
3909 case QImode:
3910 if (words <= cum->nregs)
3911 {
3912 int regno = cum->regno;
3913
3914 /* Fastcall allocates the first two DWORD (SImode) or
3915 smaller arguments to ECX and EDX. */
3916 if (cum->fastcall)
3917 {
3918 if (mode == BLKmode || mode == DImode)
3919 break;
3920
3921 /* ECX not EAX is the first allocated register. */
3922 if (regno == 0)
3923 regno = 2;
3924 }
3925 ret = gen_rtx_REG (mode, regno);
3926 }
3927 break;
3928
3929 case SFmode:
3930 if (cum->float_in_sse > 0)
3931 goto skip_80387;
3932
3933 case DFmode:
3934 if (cum->float_in_sse > 1)
3935 goto skip_80387;
3936
3937 /* Because no inherent XFmode->DFmode and XFmode->SFmode
3938 rounding takes place when values are passed in x87
3939 registers, pass DFmode and SFmode types to local functions
3940 only when flag_unsafe_math_optimizations is set. */
3941 if (!cum->float_in_x87
3942 || (cum->float_in_x87 == 2
3943 && !flag_unsafe_math_optimizations))
3944 break;
3945
3946 case XFmode:
3947 if (!cum->float_in_x87)
3948 break;
3949
3950 if (!type || !AGGREGATE_TYPE_P (type))
3951 if (cum->x87_nregs)
3952 ret = gen_rtx_REG (mode, cum->x87_regno + FIRST_FLOAT_REG);
3953 break;
3954
3955 skip_80387:
3956
3957 case TImode:
3958 case V16QImode:
3959 case V8HImode:
3960 case V4SImode:
3961 case V2DImode:
3962 case V4SFmode:
3963 case V2DFmode:
3964 if (!type || !AGGREGATE_TYPE_P (type))
3965 {
3966 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
3967 {
3968 warnedsse = true;
3969 warning (0, "SSE vector argument without SSE enabled "
3970 "changes the ABI");
3971 }
3972 if (cum->sse_nregs)
3973 ret = gen_reg_or_parallel (mode, orig_mode,
3974 cum->sse_regno + FIRST_SSE_REG);
3975 }
3976 break;
3977 case V8QImode:
3978 case V4HImode:
3979 case V2SImode:
3980 case V2SFmode:
3981 if (!type || !AGGREGATE_TYPE_P (type))
3982 {
3983 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
3984 {
3985 warnedmmx = true;
3986 warning (0, "MMX vector argument without MMX enabled "
3987 "changes the ABI");
3988 }
3989 if (cum->mmx_nregs)
3990 ret = gen_reg_or_parallel (mode, orig_mode,
3991 cum->mmx_regno + FIRST_MMX_REG);
3992 }
3993 break;
3994 }
3995
3996 if (TARGET_DEBUG_ARG)
3997 {
3998 fprintf (stderr,
3999 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
4000 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
4001
4002 if (ret)
4003 print_simple_rtl (stderr, ret);
4004 else
4005 fprintf (stderr, ", stack");
4006
4007 fprintf (stderr, " )\n");
4008 }
4009
4010 return ret;
4011 }
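
/* Illustrating the 32-bit fastcall case handled above (the prototype is an
   arbitrary example): for

     int __attribute__((fastcall)) f (int a, int b, int c);

   the first DWORD-sized argument lands in %ecx (regno 2 here), the second
   in %edx, and the third already goes to the stack because only two
   registers are reserved for fastcall.  */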
4012
4013 /* A C expression that indicates when an argument must be passed by
4014 reference. If nonzero for an argument, a copy of that argument is
4015 made in memory and a pointer to the argument is passed instead of
4016 the argument itself. The pointer is passed in whatever way is
4017 appropriate for passing a pointer to that type. */
4018
4019 static bool
4020 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4021 enum machine_mode mode ATTRIBUTE_UNUSED,
4022 tree type, bool named ATTRIBUTE_UNUSED)
4023 {
4024 if (!TARGET_64BIT)
4025 return 0;
4026
4027 if (type && int_size_in_bytes (type) == -1)
4028 {
4029 if (TARGET_DEBUG_ARG)
4030 fprintf (stderr, "function_arg_pass_by_reference\n");
4031 return 1;
4032 }
4033
4034 return 0;
4035 }
4036
4037 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
4038 ABI. Only called if TARGET_SSE. */
4039 static bool
4040 contains_128bit_aligned_vector_p (tree type)
4041 {
4042 enum machine_mode mode = TYPE_MODE (type);
4043 if (SSE_REG_MODE_P (mode)
4044 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
4045 return true;
4046 if (TYPE_ALIGN (type) < 128)
4047 return false;
4048
4049 if (AGGREGATE_TYPE_P (type))
4050 {
4051 /* Walk the aggregates recursively. */
4052 switch (TREE_CODE (type))
4053 {
4054 case RECORD_TYPE:
4055 case UNION_TYPE:
4056 case QUAL_UNION_TYPE:
4057 {
4058 tree field;
4059
4060 /* Walk all the structure fields. */
4061 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4062 {
4063 if (TREE_CODE (field) == FIELD_DECL
4064 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
4065 return true;
4066 }
4067 break;
4068 }
4069
4070 case ARRAY_TYPE:
4071 /* Just in case some language passes arrays by value. */
4072 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
4073 return true;
4074 break;
4075
4076 default:
4077 gcc_unreachable ();
4078 }
4079 }
4080 return false;
4081 }
4082
4083 /* Gives the alignment boundary, in bits, of an argument with the
4084 specified mode and type. */
4085
4086 int
4087 ix86_function_arg_boundary (enum machine_mode mode, tree type)
4088 {
4089 int align;
4090 if (type)
4091 align = TYPE_ALIGN (type);
4092 else
4093 align = GET_MODE_ALIGNMENT (mode);
4094 if (align < PARM_BOUNDARY)
4095 align = PARM_BOUNDARY;
4096 if (!TARGET_64BIT)
4097 {
4098 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
4099 make an exception for SSE modes since these require 128bit
4100 alignment.
4101
4102 The handling here differs from field_alignment. ICC aligns MMX
4103 arguments to 4 byte boundaries, while structure fields are aligned
4104 to 8 byte boundaries. */
4105 if (!TARGET_SSE)
4106 align = PARM_BOUNDARY;
4107 else if (!type)
4108 {
4109 if (!SSE_REG_MODE_P (mode))
4110 align = PARM_BOUNDARY;
4111 }
4112 else
4113 {
4114 if (!contains_128bit_aligned_vector_p (type))
4115 align = PARM_BOUNDARY;
4116 }
4117 }
4118 if (align > 128)
4119 align = 128;
4120 return align;
4121 }
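
/* Example: in 32-bit code a plain int argument gets PARM_BOUNDARY (32
   bits), while an __m128 argument, or an aggregate containing one, is
   aligned to 128 bits when SSE is enabled; nothing is aligned beyond
   128 bits here.  */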
4122
4123 /* Return true if N is a possible register number of function value. */
4124 bool
4125 ix86_function_value_regno_p (int regno)
4126 {
4127 if (regno == 0
4128 || (regno == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
4129 || (regno == FIRST_SSE_REG && TARGET_SSE))
4130 return true;
4131
4132 if (!TARGET_64BIT
4133 && (regno == FIRST_MMX_REG && TARGET_MMX))
4134 return true;
4135
4136 return false;
4137 }
4138
4139 /* Define how to find the value returned by a function.
4140 VALTYPE is the data type of the value (as a tree).
4141 If the precise function being called is known, FUNC is its FUNCTION_DECL;
4142 otherwise, FUNC is 0. */
4143 rtx
4144 ix86_function_value (tree valtype, tree fntype_or_decl,
4145 bool outgoing ATTRIBUTE_UNUSED)
4146 {
4147 enum machine_mode natmode = type_natural_mode (valtype);
4148
4149 if (TARGET_64BIT)
4150 {
4151 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
4152 1, REGPARM_MAX, SSE_REGPARM_MAX,
4153 x86_64_int_return_registers, 0);
4154 /* For zero-sized structures, construct_container returns NULL, but we
4155 need to keep the rest of the compiler happy by returning a meaningful value. */
4156 if (!ret)
4157 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
4158 return ret;
4159 }
4160 else
4161 {
4162 tree fn = NULL_TREE, fntype;
4163 if (fntype_or_decl
4164 && DECL_P (fntype_or_decl))
4165 fn = fntype_or_decl;
4166 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
4167 return gen_rtx_REG (TYPE_MODE (valtype),
4168 ix86_value_regno (natmode, fn, fntype));
4169 }
4170 }
4171
4172 /* Return true iff type is returned in memory. */
4173 int
4174 ix86_return_in_memory (tree type)
4175 {
4176 int needed_intregs, needed_sseregs, size;
4177 enum machine_mode mode = type_natural_mode (type);
4178
4179 if (TARGET_64BIT)
4180 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
4181
4182 if (mode == BLKmode)
4183 return 1;
4184
4185 size = int_size_in_bytes (type);
4186
4187 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
4188 return 0;
4189
4190 if (VECTOR_MODE_P (mode) || mode == TImode)
4191 {
4192 /* User-created vectors small enough to fit in EAX. */
4193 if (size < 8)
4194 return 0;
4195
4196 /* MMX/3dNow values are returned in MM0,
4197 except when it doesn't exist. */
4198 if (size == 8)
4199 return (TARGET_MMX ? 0 : 1);
4200
4201 /* SSE values are returned in XMM0, except when it doesn't exist. */
4202 if (size == 16)
4203 return (TARGET_SSE ? 0 : 1);
4204 }
4205
4206 if (mode == XFmode)
4207 return 0;
4208
4209 if (mode == TDmode)
4210 return 1;
4211
4212 if (size > 12)
4213 return 1;
4214 return 0;
4215 }
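
/* Concretely: in 64-bit mode a 32 byte structure fails examine_argument
   (classify_argument rejects aggregates larger than 16 bytes), so it is
   returned in memory, while struct { double a; double b; } comes back in
   two SSE registers.  In 32-bit mode BLKmode aggregates and anything
   larger than 12 bytes go to memory as well.  */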
4216
4217 /* When returning SSE vector types, we have a choice of either
4218 (1) being ABI incompatible with a -march switch, or
4219 (2) generating an error.
4220 Given no good solution, I think the safest thing is one warning.
4221 The user won't be able to use -Werror, but....
4222
4223 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
4224 called in response to actually generating a caller or callee that
4225 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
4226 via aggregate_value_p for general type probing from tree-ssa. */
4227
4228 static rtx
4229 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
4230 {
4231 static bool warnedsse, warnedmmx;
4232
4233 if (type)
4234 {
4235 /* Look at the return type of the function, not the function type. */
4236 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
4237
4238 if (!TARGET_SSE && !warnedsse)
4239 {
4240 if (mode == TImode
4241 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4242 {
4243 warnedsse = true;
4244 warning (0, "SSE vector return without SSE enabled "
4245 "changes the ABI");
4246 }
4247 }
4248
4249 if (!TARGET_MMX && !warnedmmx)
4250 {
4251 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4252 {
4253 warnedmmx = true;
4254 warning (0, "MMX vector return without MMX enabled "
4255 "changes the ABI");
4256 }
4257 }
4258 }
4259
4260 return NULL;
4261 }
4262
4263 /* Define how to find the value returned by a library function
4264 assuming the value has mode MODE. */
4265 rtx
4266 ix86_libcall_value (enum machine_mode mode)
4267 {
4268 if (TARGET_64BIT)
4269 {
4270 switch (mode)
4271 {
4272 case SFmode:
4273 case SCmode:
4274 case DFmode:
4275 case DCmode:
4276 case TFmode:
4277 case SDmode:
4278 case DDmode:
4279 case TDmode:
4280 return gen_rtx_REG (mode, FIRST_SSE_REG);
4281 case XFmode:
4282 case XCmode:
4283 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
4284 case TCmode:
4285 return NULL;
4286 default:
4287 return gen_rtx_REG (mode, 0);
4288 }
4289 }
4290 else
4291 return gen_rtx_REG (mode, ix86_value_regno (mode, NULL, NULL));
4292 }
4293
4294 /* Given a mode, return the register to use for a return value. */
4295
4296 static int
4297 ix86_value_regno (enum machine_mode mode, tree func, tree fntype)
4298 {
4299 gcc_assert (!TARGET_64BIT);
4300
4301 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
4302 we normally prevent this case when mmx is not available. However
4303 some ABIs may require the result to be returned like DImode. */
4304 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4305 return TARGET_MMX ? FIRST_MMX_REG : 0;
4306
4307 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
4308 we prevent this case when sse is not available. However some ABIs
4309 may require the result to be returned like integer TImode. */
4310 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4311 return TARGET_SSE ? FIRST_SSE_REG : 0;
4312
4313 /* Decimal floating point values can go in %eax, unlike other float modes. */
4314 if (DECIMAL_FLOAT_MODE_P (mode))
4315 return 0;
4316
4317 /* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
4318 if (!SCALAR_FLOAT_MODE_P (mode) || !TARGET_FLOAT_RETURNS_IN_80387)
4319 return 0;
4320
4321 /* Floating point return values in %st(0), except for local functions when
4322 SSE math is enabled or for functions with sseregparm attribute. */
4323 if ((func || fntype)
4324 && (mode == SFmode || mode == DFmode))
4325 {
4326 int sse_level = ix86_function_sseregparm (fntype, func);
4327 if ((sse_level >= 1 && mode == SFmode)
4328 || (sse_level == 2 && mode == DFmode))
4329 return FIRST_SSE_REG;
4330 }
4331
4332 return FIRST_FLOAT_REG;
4333 }
4334 \f
4335 /* Create the va_list data type. */
4336
4337 static tree
4338 ix86_build_builtin_va_list (void)
4339 {
4340 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
4341
4342 /* For i386 we use a plain pointer to the argument area. */
4343 if (!TARGET_64BIT)
4344 return build_pointer_type (char_type_node);
4345
4346 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4347 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
4348
4349 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
4350 unsigned_type_node);
4351 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
4352 unsigned_type_node);
4353 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
4354 ptr_type_node);
4355 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
4356 ptr_type_node);
4357
4358 va_list_gpr_counter_field = f_gpr;
4359 va_list_fpr_counter_field = f_fpr;
4360
4361 DECL_FIELD_CONTEXT (f_gpr) = record;
4362 DECL_FIELD_CONTEXT (f_fpr) = record;
4363 DECL_FIELD_CONTEXT (f_ovf) = record;
4364 DECL_FIELD_CONTEXT (f_sav) = record;
4365
4366 TREE_CHAIN (record) = type_decl;
4367 TYPE_NAME (record) = type_decl;
4368 TYPE_FIELDS (record) = f_gpr;
4369 TREE_CHAIN (f_gpr) = f_fpr;
4370 TREE_CHAIN (f_fpr) = f_ovf;
4371 TREE_CHAIN (f_ovf) = f_sav;
4372
4373 layout_type (record);
4374
4375 /* The correct type is an array type of one element. */
4376 return build_array_type (record, build_index_type (size_zero_node));
4377 }
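
/* The record constructed above corresponds to the va_list layout of the
   x86-64 psABI, roughly:

     typedef struct {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __va_list_tag[1];

   while 32-bit targets keep using a plain character pointer.  */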
4378
4379 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
4380
4381 static void
4382 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4383 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4384 int no_rtl)
4385 {
4386 CUMULATIVE_ARGS next_cum;
4387 rtx save_area = NULL_RTX, mem;
4388 rtx label;
4389 rtx label_ref;
4390 rtx tmp_reg;
4391 rtx nsse_reg;
4392 int set;
4393 tree fntype;
4394 int stdarg_p;
4395 int i;
4396
4397 if (!TARGET_64BIT)
4398 return;
4399
4400 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
4401 return;
4402
4403 /* Indicate that we need to allocate space on the stack for the varargs save area. */
4404 ix86_save_varrargs_registers = 1;
4405
4406 cfun->stack_alignment_needed = 128;
4407
4408 fntype = TREE_TYPE (current_function_decl);
4409 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
4410 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
4411 != void_type_node));
4412
4413 /* For varargs, we do not want to skip the dummy va_dcl argument.
4414 For stdargs, we do want to skip the last named argument. */
4415 next_cum = *cum;
4416 if (stdarg_p)
4417 function_arg_advance (&next_cum, mode, type, 1);
4418
4419 if (!no_rtl)
4420 save_area = frame_pointer_rtx;
4421
4422 set = get_varargs_alias_set ();
4423
4424 for (i = next_cum.regno;
4425 i < ix86_regparm
4426 && i < next_cum.regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
4427 i++)
4428 {
4429 mem = gen_rtx_MEM (Pmode,
4430 plus_constant (save_area, i * UNITS_PER_WORD));
4431 MEM_NOTRAP_P (mem) = 1;
4432 set_mem_alias_set (mem, set);
4433 emit_move_insn (mem, gen_rtx_REG (Pmode,
4434 x86_64_int_parameter_registers[i]));
4435 }
4436
4437 if (next_cum.sse_nregs && cfun->va_list_fpr_size)
4438 {
4439 /* Now emit code to save SSE registers.  The AX parameter contains the
4440 number of SSE parameter registers used to call this function.  We use
4441 the sse_prologue_save insn template, which produces a computed jump
4442 across the SSE saves.  We need some preparation work to get this working. */
4443
4444 label = gen_label_rtx ();
4445 label_ref = gen_rtx_LABEL_REF (Pmode, label);
4446
4447 /* Compute the address to jump to:
4448 label - eax*4 + nnamed_sse_arguments*4 */
4449 tmp_reg = gen_reg_rtx (Pmode);
4450 nsse_reg = gen_reg_rtx (Pmode);
4451 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
4452 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
4453 gen_rtx_MULT (Pmode, nsse_reg,
4454 GEN_INT (4))));
4455 if (next_cum.sse_regno)
4456 emit_move_insn
4457 (nsse_reg,
4458 gen_rtx_CONST (DImode,
4459 gen_rtx_PLUS (DImode,
4460 label_ref,
4461 GEN_INT (next_cum.sse_regno * 4))));
4462 else
4463 emit_move_insn (nsse_reg, label_ref);
4464 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
4465
4466 /* Compute the address of the memory block we save into.  We always use
4467 a pointer pointing 127 bytes after the first byte to store - this is
4468 needed to keep the instruction size limited to 4 bytes. */
4469 tmp_reg = gen_reg_rtx (Pmode);
4470 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
4471 plus_constant (save_area,
4472 8 * REGPARM_MAX + 127)));
4473 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
4474 MEM_NOTRAP_P (mem) = 1;
4475 set_mem_alias_set (mem, set);
4476 set_mem_align (mem, BITS_PER_WORD);
4477
4478 /* And finally do the dirty job! */
4479 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
4480 GEN_INT (next_cum.sse_regno), label));
4481 }
4482
4483 }
4484
4485 /* Implement va_start. */
4486
4487 void
4488 ix86_va_start (tree valist, rtx nextarg)
4489 {
4490 HOST_WIDE_INT words, n_gpr, n_fpr;
4491 tree f_gpr, f_fpr, f_ovf, f_sav;
4492 tree gpr, fpr, ovf, sav, t;
4493 tree type;
4494
4495 /* Only the 64-bit target needs something special. */
4496 if (!TARGET_64BIT)
4497 {
4498 std_expand_builtin_va_start (valist, nextarg);
4499 return;
4500 }
4501
4502 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
4503 f_fpr = TREE_CHAIN (f_gpr);
4504 f_ovf = TREE_CHAIN (f_fpr);
4505 f_sav = TREE_CHAIN (f_ovf);
4506
4507 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
4508 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
4509 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
4510 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
4511 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4512
4513 /* Count number of gp and fp argument registers used. */
4514 words = current_function_args_info.words;
4515 n_gpr = current_function_args_info.regno;
4516 n_fpr = current_function_args_info.sse_regno;
4517
4518 if (TARGET_DEBUG_ARG)
4519 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
4520 (int) words, (int) n_gpr, (int) n_fpr);
4521
4522 if (cfun->va_list_gpr_size)
4523 {
4524 type = TREE_TYPE (gpr);
4525 t = build2 (GIMPLE_MODIFY_STMT, type, gpr,
4526 build_int_cst (type, n_gpr * 8));
4527 TREE_SIDE_EFFECTS (t) = 1;
4528 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4529 }
4530
4531 if (cfun->va_list_fpr_size)
4532 {
4533 type = TREE_TYPE (fpr);
4534 t = build2 (GIMPLE_MODIFY_STMT, type, fpr,
4535 build_int_cst (type, n_fpr * 16 + 8*REGPARM_MAX));
4536 TREE_SIDE_EFFECTS (t) = 1;
4537 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4538 }
4539
4540 /* Find the overflow area. */
4541 type = TREE_TYPE (ovf);
4542 t = make_tree (type, virtual_incoming_args_rtx);
4543 if (words != 0)
4544 t = build2 (PLUS_EXPR, type, t,
4545 build_int_cst (type, words * UNITS_PER_WORD));
4546 t = build2 (GIMPLE_MODIFY_STMT, type, ovf, t);
4547 TREE_SIDE_EFFECTS (t) = 1;
4548 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4549
4550 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
4551 {
4552 /* Find the register save area.
4553 The prologue of the function saves it right above the stack frame. */
4554 type = TREE_TYPE (sav);
4555 t = make_tree (type, frame_pointer_rtx);
4556 t = build2 (GIMPLE_MODIFY_STMT, type, sav, t);
4557 TREE_SIDE_EFFECTS (t) = 1;
4558 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4559 }
4560 }
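
/* To make the bookkeeping above concrete (the prototype is an illustrative
   example): for

     void f (int a, double b, ...);

   one integer and one SSE register are consumed by the named arguments, so
   va_start stores gp_offset = 1 * 8 = 8 and fp_offset = REGPARM_MAX * 8 +
   1 * 16 = 64, points overflow_arg_area at the first stack-passed argument,
   and points reg_save_area at the block the prologue saved.  */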
4561
4562 /* Implement va_arg. */
4563
4564 tree
4565 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
4566 {
4567 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
4568 tree f_gpr, f_fpr, f_ovf, f_sav;
4569 tree gpr, fpr, ovf, sav, t;
4570 int size, rsize;
4571 tree lab_false, lab_over = NULL_TREE;
4572 tree addr, t2;
4573 rtx container;
4574 int indirect_p = 0;
4575 tree ptrtype;
4576 enum machine_mode nat_mode;
4577
4578 /* Only the 64-bit target needs something special. */
4579 if (!TARGET_64BIT)
4580 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4581
4582 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
4583 f_fpr = TREE_CHAIN (f_gpr);
4584 f_ovf = TREE_CHAIN (f_fpr);
4585 f_sav = TREE_CHAIN (f_ovf);
4586
4587 valist = build_va_arg_indirect_ref (valist);
4588 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
4589 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
4590 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
4591 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4592
4593 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
4594 if (indirect_p)
4595 type = build_pointer_type (type);
4596 size = int_size_in_bytes (type);
4597 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4598
4599 nat_mode = type_natural_mode (type);
4600 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
4601 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
4602
4603 /* Pull the value out of the saved registers. */
4604
4605 addr = create_tmp_var (ptr_type_node, "addr");
4606 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
4607
4608 if (container)
4609 {
4610 int needed_intregs, needed_sseregs;
4611 bool need_temp;
4612 tree int_addr, sse_addr;
4613
4614 lab_false = create_artificial_label ();
4615 lab_over = create_artificial_label ();
4616
4617 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
4618
4619 need_temp = (!REG_P (container)
4620 && ((needed_intregs && TYPE_ALIGN (type) > 64)
4621 || TYPE_ALIGN (type) > 128));
4622
4623 /* When passing a structure, verify that it occupies a consecutive block
4624 in the register save area.  If not, we need to do moves. */
4625 if (!need_temp && !REG_P (container))
4626 {
4627 /* Verify that all registers are strictly consecutive. */
4628 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
4629 {
4630 int i;
4631
4632 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4633 {
4634 rtx slot = XVECEXP (container, 0, i);
4635 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
4636 || INTVAL (XEXP (slot, 1)) != i * 16)
4637 need_temp = 1;
4638 }
4639 }
4640 else
4641 {
4642 int i;
4643
4644 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4645 {
4646 rtx slot = XVECEXP (container, 0, i);
4647 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
4648 || INTVAL (XEXP (slot, 1)) != i * 8)
4649 need_temp = 1;
4650 }
4651 }
4652 }
4653 if (!need_temp)
4654 {
4655 int_addr = addr;
4656 sse_addr = addr;
4657 }
4658 else
4659 {
4660 int_addr = create_tmp_var (ptr_type_node, "int_addr");
4661 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
4662 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
4663 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
4664 }
4665
4666 /* First ensure that we fit completely in registers. */
4667 if (needed_intregs)
4668 {
4669 t = build_int_cst (TREE_TYPE (gpr),
4670 (REGPARM_MAX - needed_intregs + 1) * 8);
4671 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
4672 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4673 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4674 gimplify_and_add (t, pre_p);
4675 }
4676 if (needed_sseregs)
4677 {
4678 t = build_int_cst (TREE_TYPE (fpr),
4679 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
4680 + REGPARM_MAX * 8);
4681 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
4682 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4683 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4684 gimplify_and_add (t, pre_p);
4685 }
4686
4687 /* Compute index to start of area used for integer regs. */
4688 if (needed_intregs)
4689 {
4690 /* int_addr = gpr + sav; */
4691 t = fold_convert (ptr_type_node, gpr);
4692 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4693 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, int_addr, t);
4694 gimplify_and_add (t, pre_p);
4695 }
4696 if (needed_sseregs)
4697 {
4698 /* sse_addr = fpr + sav; */
4699 t = fold_convert (ptr_type_node, fpr);
4700 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4701 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, sse_addr, t);
4702 gimplify_and_add (t, pre_p);
4703 }
4704 if (need_temp)
4705 {
4706 int i;
4707 tree temp = create_tmp_var (type, "va_arg_tmp");
4708
4709 /* addr = &temp; */
4710 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
4711 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
4712 gimplify_and_add (t, pre_p);
4713
4714 for (i = 0; i < XVECLEN (container, 0); i++)
4715 {
4716 rtx slot = XVECEXP (container, 0, i);
4717 rtx reg = XEXP (slot, 0);
4718 enum machine_mode mode = GET_MODE (reg);
4719 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
4720 tree addr_type = build_pointer_type (piece_type);
4721 tree src_addr, src;
4722 int src_offset;
4723 tree dest_addr, dest;
4724
4725 if (SSE_REGNO_P (REGNO (reg)))
4726 {
4727 src_addr = sse_addr;
4728 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
4729 }
4730 else
4731 {
4732 src_addr = int_addr;
4733 src_offset = REGNO (reg) * 8;
4734 }
4735 src_addr = fold_convert (addr_type, src_addr);
4736 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
4737 size_int (src_offset)));
4738 src = build_va_arg_indirect_ref (src_addr);
4739
4740 dest_addr = fold_convert (addr_type, addr);
4741 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
4742 size_int (INTVAL (XEXP (slot, 1)))));
4743 dest = build_va_arg_indirect_ref (dest_addr);
4744
4745 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, dest, src);
4746 gimplify_and_add (t, pre_p);
4747 }
4748 }
4749
4750 if (needed_intregs)
4751 {
4752 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
4753 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
4754 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gpr), gpr, t);
4755 gimplify_and_add (t, pre_p);
4756 }
4757 if (needed_sseregs)
4758 {
4759 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
4760 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
4761 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (fpr), fpr, t);
4762 gimplify_and_add (t, pre_p);
4763 }
4764
4765 t = build1 (GOTO_EXPR, void_type_node, lab_over);
4766 gimplify_and_add (t, pre_p);
4767
4768 t = build1 (LABEL_EXPR, void_type_node, lab_false);
4769 append_to_statement_list (t, pre_p);
4770 }
4771
4772 /* ... otherwise out of the overflow area. */
4773
4774 /* Care for on-stack alignment if needed. */
4775 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64
4776 || integer_zerop (TYPE_SIZE (type)))
4777 t = ovf;
4778 else
4779 {
4780 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
4781 t = build2 (PLUS_EXPR, TREE_TYPE (ovf), ovf,
4782 build_int_cst (TREE_TYPE (ovf), align - 1));
4783 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4784 build_int_cst (TREE_TYPE (t), -align));
4785 }
4786 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
4787
4788 t2 = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
4789 gimplify_and_add (t2, pre_p);
4790
4791 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
4792 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
4793 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
4794 gimplify_and_add (t, pre_p);
4795
4796 if (container)
4797 {
4798 t = build1 (LABEL_EXPR, void_type_node, lab_over);
4799 append_to_statement_list (t, pre_p);
4800 }
4801
4802 ptrtype = build_pointer_type (type);
4803 addr = fold_convert (ptrtype, addr);
4804
4805 if (indirect_p)
4806 addr = build_va_arg_indirect_ref (addr);
4807 return build_va_arg_indirect_ref (addr);
4808 }
4809 \f
4810 /* Return nonzero if OPNUM's MEM should be matched
4811 in movabs* patterns. */
4812
4813 int
4814 ix86_check_movabs (rtx insn, int opnum)
4815 {
4816 rtx set, mem;
4817
4818 set = PATTERN (insn);
4819 if (GET_CODE (set) == PARALLEL)
4820 set = XVECEXP (set, 0, 0);
4821 gcc_assert (GET_CODE (set) == SET);
4822 mem = XEXP (set, opnum);
4823 while (GET_CODE (mem) == SUBREG)
4824 mem = SUBREG_REG (mem);
4825 gcc_assert (GET_CODE (mem) == MEM);
4826 return (volatile_ok || !MEM_VOLATILE_P (mem));
4827 }
4828 \f
4829 /* Initialize the table of extra 80387 mathematical constants. */
4830
4831 static void
4832 init_ext_80387_constants (void)
4833 {
4834 static const char * cst[5] =
4835 {
4836 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
4837 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
4838 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
4839 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
4840 "3.1415926535897932385128089594061862044", /* 4: fldpi */
4841 };
4842 int i;
4843
4844 for (i = 0; i < 5; i++)
4845 {
4846 real_from_string (&ext_80387_constants_table[i], cst[i]);
4847 /* Ensure each constant is rounded to XFmode precision. */
4848 real_convert (&ext_80387_constants_table[i],
4849 XFmode, &ext_80387_constants_table[i]);
4850 }
4851
4852 ext_80387_constants_init = 1;
4853 }
4854
4855 /* Return true if the constant is something that can be loaded with
4856 a special instruction. */
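/* The return value encodes the instruction to use (see
   standard_80387_constant_opcode below): 1 is fldz, 2 is fld1, 3 through 7
   index the table initialized above (fldlg2, fldln2, fldl2e, fldl2t,
   fldpi), 8 and 9 stand for the split fldz;fchs and fld1;fchs sequences,
   0 means no special instruction applies, and -1 means X is not a
   floating-point CONST_DOUBLE at all.  */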
4857
4858 int
4859 standard_80387_constant_p (rtx x)
4860 {
4861 REAL_VALUE_TYPE r;
4862
4863 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
4864 return -1;
4865
4866 if (x == CONST0_RTX (GET_MODE (x)))
4867 return 1;
4868 if (x == CONST1_RTX (GET_MODE (x)))
4869 return 2;
4870
4871 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4872
4873 /* For XFmode constants, try to find a special 80387 instruction when
4874 optimizing for size or on those CPUs that benefit from them. */
4875 if (GET_MODE (x) == XFmode
4876 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
4877 {
4878 int i;
4879
4880 if (! ext_80387_constants_init)
4881 init_ext_80387_constants ();
4882
4883 for (i = 0; i < 5; i++)
4884 if (real_identical (&r, &ext_80387_constants_table[i]))
4885 return i + 3;
4886 }
4887
4888 /* A load of the constant -0.0 or -1.0 will be split into an
4889 fldz;fchs or fld1;fchs sequence. */
4890 if (real_isnegzero (&r))
4891 return 8;
4892 if (real_identical (&r, &dconstm1))
4893 return 9;
4894
4895 return 0;
4896 }
4897
4898 /* Return the opcode of the special instruction to be used to load
4899 the constant X. */
4900
4901 const char *
4902 standard_80387_constant_opcode (rtx x)
4903 {
4904 switch (standard_80387_constant_p (x))
4905 {
4906 case 1:
4907 return "fldz";
4908 case 2:
4909 return "fld1";
4910 case 3:
4911 return "fldlg2";
4912 case 4:
4913 return "fldln2";
4914 case 5:
4915 return "fldl2e";
4916 case 6:
4917 return "fldl2t";
4918 case 7:
4919 return "fldpi";
4920 case 8:
4921 case 9:
4922 return "#";
4923 default:
4924 gcc_unreachable ();
4925 }
4926 }
4927
4928 /* Return the CONST_DOUBLE representing the 80387 constant that is
4929 loaded by the specified special instruction. The argument IDX
4930 matches the return value from standard_80387_constant_p. */
4931
4932 rtx
4933 standard_80387_constant_rtx (int idx)
4934 {
4935 int i;
4936
4937 if (! ext_80387_constants_init)
4938 init_ext_80387_constants ();
4939
4940 switch (idx)
4941 {
4942 case 3:
4943 case 4:
4944 case 5:
4945 case 6:
4946 case 7:
4947 i = idx - 3;
4948 break;
4949
4950 default:
4951 gcc_unreachable ();
4952 }
4953
4954 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
4955 XFmode);
4956 }
4957
4958 /* Return 1 if mode is a valid mode for sse. */
4959 static int
4960 standard_sse_mode_p (enum machine_mode mode)
4961 {
4962 switch (mode)
4963 {
4964 case V16QImode:
4965 case V8HImode:
4966 case V4SImode:
4967 case V2DImode:
4968 case V4SFmode:
4969 case V2DFmode:
4970 return 1;
4971
4972 default:
4973 return 0;
4974 }
4975 }
4976
4977 /* Return 1 if X is an FP constant that we can load into an SSE register
4978 without using memory. */
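/* The return value is 1 for an all-zeros constant (cleared with
   xorps/xorpd/pxor), 2 for an all-ones vector when SSE2 is available
   (loaded with pcmpeqd), -1 for an all-ones vector without SSE2, and
   0 otherwise.  */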
4979 int
4980 standard_sse_constant_p (rtx x)
4981 {
4982 enum machine_mode mode = GET_MODE (x);
4983
4984 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
4985 return 1;
4986 if (vector_all_ones_operand (x, mode)
4987 && standard_sse_mode_p (mode))
4988 return TARGET_SSE2 ? 2 : -1;
4989
4990 return 0;
4991 }
4992
4993 /* Return the opcode of the special instruction to be used to load
4994 the constant X. */
4995
4996 const char *
4997 standard_sse_constant_opcode (rtx insn, rtx x)
4998 {
4999 switch (standard_sse_constant_p (x))
5000 {
5001 case 1:
5002 if (get_attr_mode (insn) == MODE_V4SF)
5003 return "xorps\t%0, %0";
5004 else if (get_attr_mode (insn) == MODE_V2DF)
5005 return "xorpd\t%0, %0";
5006 else
5007 return "pxor\t%0, %0";
5008 case 2:
5009 return "pcmpeqd\t%0, %0";
5010 }
5011 gcc_unreachable ();
5012 }
5013
5014 /* Returns 1 if OP contains a symbol reference. */
5015
5016 int
5017 symbolic_reference_mentioned_p (rtx op)
5018 {
5019 const char *fmt;
5020 int i;
5021
5022 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
5023 return 1;
5024
5025 fmt = GET_RTX_FORMAT (GET_CODE (op));
5026 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
5027 {
5028 if (fmt[i] == 'E')
5029 {
5030 int j;
5031
5032 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
5033 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
5034 return 1;
5035 }
5036
5037 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
5038 return 1;
5039 }
5040
5041 return 0;
5042 }
5043
5044 /* Return 1 if it is appropriate to emit `ret' instructions in the
5045 body of a function. Do this only if the epilogue is simple, needing a
5046 couple of insns. Prior to reloading, we can't tell how many registers
5047 must be saved, so return 0 then. Return 0 if there is no frame
5048 marker to de-allocate. */
5049
5050 int
5051 ix86_can_use_return_insn_p (void)
5052 {
5053 struct ix86_frame frame;
5054
5055 if (! reload_completed || frame_pointer_needed)
5056 return 0;
5057
5058 /* Don't allow more than 32k pop, since that's all we can do
5059 with one instruction. */
5060 if (current_function_pops_args
5061 && current_function_args_size >= 32768)
5062 return 0;
5063
5064 ix86_compute_frame_layout (&frame);
5065 return frame.to_allocate == 0 && frame.nregs == 0;
5066 }
5067 \f
5068 /* Value should be nonzero if functions must have frame pointers.
5069 Zero means the frame pointer need not be set up (and parms may
5070 be accessed via the stack pointer) in functions that seem suitable. */
5071
5072 int
5073 ix86_frame_pointer_required (void)
5074 {
5075 /* If we accessed previous frames, then the generated code expects
5076 to be able to access the saved ebp value in our frame. */
5077 if (cfun->machine->accesses_prev_frame)
5078 return 1;
5079
5080 /* Several x86 OSes need a frame pointer for other reasons,
5081 usually pertaining to setjmp. */
5082 if (SUBTARGET_FRAME_POINTER_REQUIRED)
5083 return 1;
5084
5085 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
5086 the frame pointer by default. Turn it back on now if we've not
5087 got a leaf function. */
5088 if (TARGET_OMIT_LEAF_FRAME_POINTER
5089 && (!current_function_is_leaf
5090 || ix86_current_function_calls_tls_descriptor))
5091 return 1;
5092
5093 if (current_function_profile)
5094 return 1;
5095
5096 return 0;
5097 }
5098
5099 /* Record that the current function accesses previous call frames. */
5100
5101 void
5102 ix86_setup_frame_addresses (void)
5103 {
5104 cfun->machine->accesses_prev_frame = 1;
5105 }
5106 \f
5107 #if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
5108 # define USE_HIDDEN_LINKONCE 1
5109 #else
5110 # define USE_HIDDEN_LINKONCE 0
5111 #endif
5112
5113 static int pic_labels_used;
5114
5115 /* Fills in the label name that should be used for a pc thunk for
5116 the given register. */
5117
5118 static void
5119 get_pc_thunk_name (char name[32], unsigned int regno)
5120 {
5121 gcc_assert (!TARGET_64BIT);
5122
5123 if (USE_HIDDEN_LINKONCE)
5124 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
5125 else
5126 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
5127 }
5128
5129
5130 /* This function generates the pc thunks used by -fpic; each thunk loads
5131 its register with the return address of the caller and then returns. */
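/* As an illustrative sketch (not taken from the original sources), the
   thunk emitted below for %ebx with USE_HIDDEN_LINKONCE looks like:

   __i686.get_pc_thunk.bx:
           movl    (%esp), %ebx
           ret
*/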
5132
5133 void
5134 ix86_file_end (void)
5135 {
5136 rtx xops[2];
5137 int regno;
5138
5139 for (regno = 0; regno < 8; ++regno)
5140 {
5141 char name[32];
5142
5143 if (! ((pic_labels_used >> regno) & 1))
5144 continue;
5145
5146 get_pc_thunk_name (name, regno);
5147
5148 #if TARGET_MACHO
5149 if (TARGET_MACHO)
5150 {
5151 switch_to_section (darwin_sections[text_coal_section]);
5152 fputs ("\t.weak_definition\t", asm_out_file);
5153 assemble_name (asm_out_file, name);
5154 fputs ("\n\t.private_extern\t", asm_out_file);
5155 assemble_name (asm_out_file, name);
5156 fputs ("\n", asm_out_file);
5157 ASM_OUTPUT_LABEL (asm_out_file, name);
5158 }
5159 else
5160 #endif
5161 if (USE_HIDDEN_LINKONCE)
5162 {
5163 tree decl;
5164
5165 decl = build_decl (FUNCTION_DECL, get_identifier (name),
5166 error_mark_node);
5167 TREE_PUBLIC (decl) = 1;
5168 TREE_STATIC (decl) = 1;
5169 DECL_ONE_ONLY (decl) = 1;
5170
5171 (*targetm.asm_out.unique_section) (decl, 0);
5172 switch_to_section (get_named_section (decl, NULL, 0));
5173
5174 (*targetm.asm_out.globalize_label) (asm_out_file, name);
5175 fputs ("\t.hidden\t", asm_out_file);
5176 assemble_name (asm_out_file, name);
5177 fputc ('\n', asm_out_file);
5178 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
5179 }
5180 else
5181 {
5182 switch_to_section (text_section);
5183 ASM_OUTPUT_LABEL (asm_out_file, name);
5184 }
5185
5186 xops[0] = gen_rtx_REG (SImode, regno);
5187 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
5188 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
5189 output_asm_insn ("ret", xops);
5190 }
5191
5192 if (NEED_INDICATE_EXEC_STACK)
5193 file_end_indicate_exec_stack ();
5194 }
5195
5196 /* Emit code for the SET_GOT patterns. */
5197
5198 const char *
5199 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
5200 {
5201 rtx xops[3];
5202
5203 xops[0] = dest;
5204 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
5205
5206 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
5207 {
5208 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
5209
5210 if (!flag_pic)
5211 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
5212 else
5213 output_asm_insn ("call\t%a2", xops);
5214
5215 #if TARGET_MACHO
5216 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5217 is what will be referenced by the Mach-O PIC subsystem. */
5218 if (!label)
5219 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5220 #endif
5221
5222 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5223 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
5224
5225 if (flag_pic)
5226 output_asm_insn ("pop{l}\t%0", xops);
5227 }
5228 else
5229 {
5230 char name[32];
5231 get_pc_thunk_name (name, REGNO (dest));
5232 pic_labels_used |= 1 << REGNO (dest);
5233
5234 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
5235 xops[2] = gen_rtx_MEM (QImode, xops[2]);
5236 output_asm_insn ("call\t%X2", xops);
5237 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5238 is what will be referenced by the Mach-O PIC subsystem. */
5239 #if TARGET_MACHO
5240 if (!label)
5241 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5242 else
5243 targetm.asm_out.internal_label (asm_out_file, "L",
5244 CODE_LABEL_NUMBER (label));
5245 #endif
5246 }
5247
5248 if (TARGET_MACHO)
5249 return "";
5250
5251 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
5252 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
5253 else
5254 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
5255
5256 return "";
5257 }
5258
5259 /* Generate a "push" pattern for input ARG. */
5260
5261 static rtx
5262 gen_push (rtx arg)
5263 {
5264 return gen_rtx_SET (VOIDmode,
5265 gen_rtx_MEM (Pmode,
5266 gen_rtx_PRE_DEC (Pmode,
5267 stack_pointer_rtx)),
5268 arg);
5269 }
5270
5271 /* Return >= 0 if there is an unused call-clobbered register available
5272 for the entire function. */
5273
5274 static unsigned int
5275 ix86_select_alt_pic_regnum (void)
5276 {
5277 if (current_function_is_leaf && !current_function_profile
5278 && !ix86_current_function_calls_tls_descriptor)
5279 {
5280 int i;
5281 for (i = 2; i >= 0; --i)
5282 if (!regs_ever_live[i])
5283 return i;
5284 }
5285
5286 return INVALID_REGNUM;
5287 }
5288
5289 /* Return 1 if we need to save REGNO. */
5290 static int
5291 ix86_save_reg (unsigned int regno, int maybe_eh_return)
5292 {
5293 if (pic_offset_table_rtx
5294 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
5295 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5296 || current_function_profile
5297 || current_function_calls_eh_return
5298 || current_function_uses_const_pool))
5299 {
5300 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
5301 return 0;
5302 return 1;
5303 }
5304
5305 if (current_function_calls_eh_return && maybe_eh_return)
5306 {
5307 unsigned i;
5308 for (i = 0; ; i++)
5309 {
5310 unsigned test = EH_RETURN_DATA_REGNO (i);
5311 if (test == INVALID_REGNUM)
5312 break;
5313 if (test == regno)
5314 return 1;
5315 }
5316 }
5317
5318 if (cfun->machine->force_align_arg_pointer
5319 && regno == REGNO (cfun->machine->force_align_arg_pointer))
5320 return 1;
5321
5322 return (regs_ever_live[regno]
5323 && !call_used_regs[regno]
5324 && !fixed_regs[regno]
5325 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
5326 }
5327
5328 /* Return number of registers to be saved on the stack. */
5329
5330 static int
5331 ix86_nsaved_regs (void)
5332 {
5333 int nregs = 0;
5334 int regno;
5335
5336 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
5337 if (ix86_save_reg (regno, true))
5338 nregs++;
5339 return nregs;
5340 }
5341
5342 /* Return the offset between two registers, one to be eliminated, and the other
5343 its replacement, at the start of a routine. */
5344
5345 HOST_WIDE_INT
5346 ix86_initial_elimination_offset (int from, int to)
5347 {
5348 struct ix86_frame frame;
5349 ix86_compute_frame_layout (&frame);
5350
5351 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
5352 return frame.hard_frame_pointer_offset;
5353 else if (from == FRAME_POINTER_REGNUM
5354 && to == HARD_FRAME_POINTER_REGNUM)
5355 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
5356 else
5357 {
5358 gcc_assert (to == STACK_POINTER_REGNUM);
5359
5360 if (from == ARG_POINTER_REGNUM)
5361 return frame.stack_pointer_offset;
5362
5363 gcc_assert (from == FRAME_POINTER_REGNUM);
5364 return frame.stack_pointer_offset - frame.frame_pointer_offset;
5365 }
5366 }
5367
5368 /* Fill the ix86_frame structure describing the frame of the currently compiled function. */
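/* An illustrative sketch of the areas laid out below, from higher to lower
   addresses (derived from the offset computations that follow, not part of
   the original comments):

        return address
        saved frame pointer (when frame_pointer_needed)
        saved registers
        va_arg register save area
        padding1 (aligns the local frame)
        local variables
        outgoing arguments
        padding2 (aligns to the preferred stack boundary)

   When the red zone is usable, its size is subtracted from to_allocate at
   the end.  */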
5369
5370 static void
5371 ix86_compute_frame_layout (struct ix86_frame *frame)
5372 {
5373 HOST_WIDE_INT total_size;
5374 unsigned int stack_alignment_needed;
5375 HOST_WIDE_INT offset;
5376 unsigned int preferred_alignment;
5377 HOST_WIDE_INT size = get_frame_size ();
5378
5379 frame->nregs = ix86_nsaved_regs ();
5380 total_size = size;
5381
5382 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
5383 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
5384
5385 /* During reload iteration the number of registers saved can change.
5386 Recompute the value as needed. Do not recompute when the number of registers
5387 didn't change, as reload makes multiple calls to this function and does not
5388 expect the decision to change within a single iteration. */
5389 if (!optimize_size
5390 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
5391 {
5392 int count = frame->nregs;
5393
5394 cfun->machine->use_fast_prologue_epilogue_nregs = count;
5395 /* The fast prologue uses move instead of push to save registers. This
5396 is significantly longer, but also executes faster as modern hardware
5397 can execute the moves in parallel, but can't do that for push/pop.
5398
5399 Be careful about choosing which prologue to emit: when the function takes
5400 many instructions to execute we may use the slow version, as well as when
5401 the function is known to be outside any hot spot (this is known only with
5402 profile feedback). Weight the size of the function by the number of registers
5403 to save, as it is cheap to use one or two push instructions but very
5404 slow to use many of them. */
5405 if (count)
5406 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
5407 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
5408 || (flag_branch_probabilities
5409 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
5410 cfun->machine->use_fast_prologue_epilogue = false;
5411 else
5412 cfun->machine->use_fast_prologue_epilogue
5413 = !expensive_function_p (count);
5414 }
5415 if (TARGET_PROLOGUE_USING_MOVE
5416 && cfun->machine->use_fast_prologue_epilogue)
5417 frame->save_regs_using_mov = true;
5418 else
5419 frame->save_regs_using_mov = false;
5420
5421
5422 /* Skip return address and saved base pointer. */
5423 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
5424
5425 frame->hard_frame_pointer_offset = offset;
5426
5427 /* Do some sanity checking of stack_alignment_needed and
5428 preferred_alignment, since the i386 port is the only one using these
5429 features, which may break easily. */
5430
5431 gcc_assert (!size || stack_alignment_needed);
5432 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
5433 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
5434 gcc_assert (stack_alignment_needed
5435 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
5436
5437 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
5438 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
5439
5440 /* Register save area */
5441 offset += frame->nregs * UNITS_PER_WORD;
5442
5443 /* Va-arg area */
5444 if (ix86_save_varrargs_registers)
5445 {
5446 offset += X86_64_VARARGS_SIZE;
5447 frame->va_arg_size = X86_64_VARARGS_SIZE;
5448 }
5449 else
5450 frame->va_arg_size = 0;
5451
5452 /* Align start of frame for local function. */
5453 frame->padding1 = ((offset + stack_alignment_needed - 1)
5454 & -stack_alignment_needed) - offset;
5455
5456 offset += frame->padding1;
5457
5458 /* Frame pointer points here. */
5459 frame->frame_pointer_offset = offset;
5460
5461 offset += size;
5462
5463 /* Add the outgoing arguments area. It can be skipped if we eliminated
5464 all the function calls as dead code.
5465 Skipping is however impossible when the function calls alloca, since the
5466 alloca expander assumes that the last current_function_outgoing_args_size
5467 bytes of the stack frame are unused. */
5468 if (ACCUMULATE_OUTGOING_ARGS
5469 && (!current_function_is_leaf || current_function_calls_alloca
5470 || ix86_current_function_calls_tls_descriptor))
5471 {
5472 offset += current_function_outgoing_args_size;
5473 frame->outgoing_arguments_size = current_function_outgoing_args_size;
5474 }
5475 else
5476 frame->outgoing_arguments_size = 0;
5477
5478 /* Align stack boundary. Only needed if we're calling another function
5479 or using alloca. */
5480 if (!current_function_is_leaf || current_function_calls_alloca
5481 || ix86_current_function_calls_tls_descriptor)
5482 frame->padding2 = ((offset + preferred_alignment - 1)
5483 & -preferred_alignment) - offset;
5484 else
5485 frame->padding2 = 0;
5486
5487 offset += frame->padding2;
5488
5489 /* We've reached end of stack frame. */
5490 frame->stack_pointer_offset = offset;
5491
5492 /* Size prologue needs to allocate. */
5493 frame->to_allocate =
5494 (size + frame->padding1 + frame->padding2
5495 + frame->outgoing_arguments_size + frame->va_arg_size);
5496
5497 if ((!frame->to_allocate && frame->nregs <= 1)
5498 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
5499 frame->save_regs_using_mov = false;
5500
5501 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
5502 && current_function_is_leaf
5503 && !ix86_current_function_calls_tls_descriptor)
5504 {
5505 frame->red_zone_size = frame->to_allocate;
5506 if (frame->save_regs_using_mov)
5507 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
5508 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
5509 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
5510 }
5511 else
5512 frame->red_zone_size = 0;
5513 frame->to_allocate -= frame->red_zone_size;
5514 frame->stack_pointer_offset -= frame->red_zone_size;
5515 #if 0
5516 fprintf (stderr, "nregs: %i\n", frame->nregs);
5517 fprintf (stderr, "size: %i\n", size);
5518 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
5519 fprintf (stderr, "padding1: %i\n", frame->padding1);
5520 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
5521 fprintf (stderr, "padding2: %i\n", frame->padding2);
5522 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
5523 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
5524 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
5525 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
5526 frame->hard_frame_pointer_offset);
5527 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
5528 #endif
5529 }
5530
5531 /* Emit code to save registers in the prologue. */
5532
5533 static void
5534 ix86_emit_save_regs (void)
5535 {
5536 unsigned int regno;
5537 rtx insn;
5538
5539 for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
5540 if (ix86_save_reg (regno, true))
5541 {
5542 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
5543 RTX_FRAME_RELATED_P (insn) = 1;
5544 }
5545 }
5546
5547 /* Emit code to save registers using MOV insns. The first register
5548 is saved at POINTER + OFFSET. */
5549 static void
5550 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
5551 {
5552 unsigned int regno;
5553 rtx insn;
5554
5555 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5556 if (ix86_save_reg (regno, true))
5557 {
5558 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
5559 Pmode, offset),
5560 gen_rtx_REG (Pmode, regno));
5561 RTX_FRAME_RELATED_P (insn) = 1;
5562 offset += UNITS_PER_WORD;
5563 }
5564 }
5565
5566 /* Expand a prologue or epilogue stack adjustment.
5567 The pattern exists to put a dependency on all ebp-based memory accesses.
5568 STYLE should be negative if the instructions should be marked as frame related,
5569 zero if the %r11 register is live and cannot be freely used, and positive
5570 otherwise. */
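/* As an illustrative note (not part of the original comments), a 64-bit
   adjustment whose offset does not fit in a signed 32-bit immediate is
   emitted as a move of the offset into %r11 followed by an add of %r11,
   rather than a single add with an immediate operand.  */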
5571
5572 static void
5573 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
5574 {
5575 rtx insn;
5576
5577 if (! TARGET_64BIT)
5578 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
5579 else if (x86_64_immediate_operand (offset, DImode))
5580 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
5581 else
5582 {
5583 rtx r11;
5584 /* r11 is used by indirect sibcall return as well, set before the
5585 epilogue and used after the epilogue. ATM indirect sibcall
5586 shouldn't be used together with huge frame sizes in one
5587 function because of the frame_size check in sibcall.c. */
5588 gcc_assert (style);
5589 r11 = gen_rtx_REG (DImode, R11_REG);
5590 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
5591 if (style < 0)
5592 RTX_FRAME_RELATED_P (insn) = 1;
5593 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
5594 offset));
5595 }
5596 if (style < 0)
5597 RTX_FRAME_RELATED_P (insn) = 1;
5598 }
5599
5600 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
5601
5602 static rtx
5603 ix86_internal_arg_pointer (void)
5604 {
5605 bool has_force_align_arg_pointer =
5606 (0 != lookup_attribute (ix86_force_align_arg_pointer_string,
5607 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))));
5608 if ((FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
5609 && DECL_NAME (current_function_decl)
5610 && MAIN_NAME_P (DECL_NAME (current_function_decl))
5611 && DECL_FILE_SCOPE_P (current_function_decl))
5612 || ix86_force_align_arg_pointer
5613 || has_force_align_arg_pointer)
5614 {
5615 /* Nested functions can't realign the stack due to a register
5616 conflict. */
5617 if (DECL_CONTEXT (current_function_decl)
5618 && TREE_CODE (DECL_CONTEXT (current_function_decl)) == FUNCTION_DECL)
5619 {
5620 if (ix86_force_align_arg_pointer)
5621 warning (0, "-mstackrealign ignored for nested functions");
5622 if (has_force_align_arg_pointer)
5623 error ("%s not supported for nested functions",
5624 ix86_force_align_arg_pointer_string);
5625 return virtual_incoming_args_rtx;
5626 }
5627 cfun->machine->force_align_arg_pointer = gen_rtx_REG (Pmode, 2);
5628 return copy_to_reg (cfun->machine->force_align_arg_pointer);
5629 }
5630 else
5631 return virtual_incoming_args_rtx;
5632 }
5633
5634 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
5635 This is called from dwarf2out.c to emit call frame instructions
5636 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
5637 static void
5638 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
5639 {
5640 rtx unspec = SET_SRC (pattern);
5641 gcc_assert (GET_CODE (unspec) == UNSPEC);
5642
5643 switch (index)
5644 {
5645 case UNSPEC_REG_SAVE:
5646 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
5647 SET_DEST (pattern));
5648 break;
5649 case UNSPEC_DEF_CFA:
5650 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
5651 INTVAL (XVECEXP (unspec, 0, 0)));
5652 break;
5653 default:
5654 gcc_unreachable ();
5655 }
5656 }
5657
5658 /* Expand the prologue into a bunch of separate insns. */
5659
5660 void
5661 ix86_expand_prologue (void)
5662 {
5663 rtx insn;
5664 bool pic_reg_used;
5665 struct ix86_frame frame;
5666 HOST_WIDE_INT allocate;
5667
5668 ix86_compute_frame_layout (&frame);
5669
5670 if (cfun->machine->force_align_arg_pointer)
5671 {
5672 rtx x, y;
5673
5674 /* Grab the argument pointer. */
5675 x = plus_constant (stack_pointer_rtx, 4);
5676 y = cfun->machine->force_align_arg_pointer;
5677 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
5678 RTX_FRAME_RELATED_P (insn) = 1;
5679
5680 /* The unwind info consists of two parts: install the fafp as the cfa,
5681 and record the fafp as the "save register" of the stack pointer.
5682 The latter is there so that the unwinder can see where it
5683 should restore the stack pointer across the and insn. */
5684 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_DEF_CFA);
5685 x = gen_rtx_SET (VOIDmode, y, x);
5686 RTX_FRAME_RELATED_P (x) = 1;
5687 y = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, stack_pointer_rtx),
5688 UNSPEC_REG_SAVE);
5689 y = gen_rtx_SET (VOIDmode, cfun->machine->force_align_arg_pointer, y);
5690 RTX_FRAME_RELATED_P (y) = 1;
5691 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y));
5692 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5693 REG_NOTES (insn) = x;
5694
5695 /* Align the stack. */
5696 emit_insn (gen_andsi3 (stack_pointer_rtx, stack_pointer_rtx,
5697 GEN_INT (-16)));
5698
5699 /* And here we cheat like madmen with the unwind info. We force the
5700 cfa register back to sp+4, which is exactly what it was at the
5701 start of the function. Re-pushing the return address results in
5702 the return at the same spot relative to the cfa, and thus is
5703 correct wrt the unwind info. */
5704 x = cfun->machine->force_align_arg_pointer;
5705 x = gen_frame_mem (Pmode, plus_constant (x, -4));
5706 insn = emit_insn (gen_push (x));
5707 RTX_FRAME_RELATED_P (insn) = 1;
5708
5709 x = GEN_INT (4);
5710 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, x), UNSPEC_DEF_CFA);
5711 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
5712 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5713 REG_NOTES (insn) = x;
5714 }
5715
5716 /* Note: AT&T enter does NOT have reversed args. Enter is probably
5717 slower on all targets. Also sdb doesn't like it. */
5718
5719 if (frame_pointer_needed)
5720 {
5721 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
5722 RTX_FRAME_RELATED_P (insn) = 1;
5723
5724 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
5725 RTX_FRAME_RELATED_P (insn) = 1;
5726 }
5727
5728 allocate = frame.to_allocate;
5729
5730 if (!frame.save_regs_using_mov)
5731 ix86_emit_save_regs ();
5732 else
5733 allocate += frame.nregs * UNITS_PER_WORD;
5734
5735 /* When using the red zone we may start saving registers before allocating
5736 the stack frame, saving one cycle of the prologue. */
5737 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
5738 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
5739 : stack_pointer_rtx,
5740 -frame.nregs * UNITS_PER_WORD);
5741
5742 if (allocate == 0)
5743 ;
5744 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
5745 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5746 GEN_INT (-allocate), -1);
5747 else
5748 {
5749 /* Only valid for Win32. */
5750 rtx eax = gen_rtx_REG (SImode, 0);
5751 bool eax_live = ix86_eax_live_at_start_p ();
5752 rtx t;
5753
5754 gcc_assert (!TARGET_64BIT);
5755
5756 if (eax_live)
5757 {
5758 emit_insn (gen_push (eax));
5759 allocate -= 4;
5760 }
5761
5762 emit_move_insn (eax, GEN_INT (allocate));
5763
5764 insn = emit_insn (gen_allocate_stack_worker (eax));
5765 RTX_FRAME_RELATED_P (insn) = 1;
5766 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
5767 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
5768 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
5769 t, REG_NOTES (insn));
5770
5771 if (eax_live)
5772 {
5773 if (frame_pointer_needed)
5774 t = plus_constant (hard_frame_pointer_rtx,
5775 allocate
5776 - frame.to_allocate
5777 - frame.nregs * UNITS_PER_WORD);
5778 else
5779 t = plus_constant (stack_pointer_rtx, allocate);
5780 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
5781 }
5782 }
5783
5784 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
5785 {
5786 if (!frame_pointer_needed || !frame.to_allocate)
5787 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
5788 else
5789 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
5790 -frame.nregs * UNITS_PER_WORD);
5791 }
5792
5793 pic_reg_used = false;
5794 if (pic_offset_table_rtx
5795 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5796 || current_function_profile))
5797 {
5798 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
5799
5800 if (alt_pic_reg_used != INVALID_REGNUM)
5801 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
5802
5803 pic_reg_used = true;
5804 }
5805
5806 if (pic_reg_used)
5807 {
5808 if (TARGET_64BIT)
5809 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
5810 else
5811 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
5812
5813 /* Even with accurate pre-reload life analysis, we can wind up
5814 deleting all references to the pic register after reload.
5815 Consider if cross-jumping unifies two sides of a branch
5816 controlled by a comparison vs the only read from a global.
5817 In which case, allow the set_got to be deleted, though we're
5818 too late to do anything about the ebx save in the prologue. */
5819 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
5820 }
5821
5822 /* Prevent function calls from being scheduled before the call to mcount.
5823 In the pic_reg_used case, make sure that the got load isn't deleted. */
5824 if (current_function_profile)
5825 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
5826 }
5827
5828 /* Emit code to restore saved registers using MOV insns. First register
5829 is restored from POINTER + OFFSET. */
5830 static void
5831 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
5832 int maybe_eh_return)
5833 {
5834 int regno;
5835 rtx base_address = gen_rtx_MEM (Pmode, pointer);
5836
5837 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5838 if (ix86_save_reg (regno, maybe_eh_return))
5839 {
5840 /* Ensure that adjust_address won't be forced to produce a pointer
5841 outside the range allowed by the x86-64 instruction set. */
5842 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
5843 {
5844 rtx r11;
5845
5846 r11 = gen_rtx_REG (DImode, R11_REG);
5847 emit_move_insn (r11, GEN_INT (offset));
5848 emit_insn (gen_adddi3 (r11, r11, pointer));
5849 base_address = gen_rtx_MEM (Pmode, r11);
5850 offset = 0;
5851 }
5852 emit_move_insn (gen_rtx_REG (Pmode, regno),
5853 adjust_address (base_address, Pmode, offset));
5854 offset += UNITS_PER_WORD;
5855 }
5856 }
5857
5858 /* Restore function stack, frame, and registers. */
5859
5860 void
5861 ix86_expand_epilogue (int style)
5862 {
5863 int regno;
5864 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
5865 struct ix86_frame frame;
5866 HOST_WIDE_INT offset;
5867
5868 ix86_compute_frame_layout (&frame);
5869
5870 /* Calculate start of saved registers relative to ebp. Special care
5871 must be taken for the normal return case of a function using
5872 eh_return: the eax and edx registers are marked as saved, but not
5873 restored along this path. */
5874 offset = frame.nregs;
5875 if (current_function_calls_eh_return && style != 2)
5876 offset -= 2;
5877 offset *= -UNITS_PER_WORD;
5878
5879 /* If we're only restoring one register and sp is not valid, then
5880 use a move instruction to restore the register, since it's
5881 less work than reloading sp and popping the register.
5882
5883 The default code results in a stack adjustment using an add/lea instruction,
5884 while this code results in a LEAVE instruction (or its discrete equivalent),
5885 so it is profitable in some other cases as well, especially when there
5886 are no registers to restore. We also use this code when TARGET_USE_LEAVE
5887 and there is exactly one register to pop. This heuristic may need some
5888 tuning in the future. */
5889 if ((!sp_valid && frame.nregs <= 1)
5890 || (TARGET_EPILOGUE_USING_MOVE
5891 && cfun->machine->use_fast_prologue_epilogue
5892 && (frame.nregs > 1 || frame.to_allocate))
5893 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
5894 || (frame_pointer_needed && TARGET_USE_LEAVE
5895 && cfun->machine->use_fast_prologue_epilogue
5896 && frame.nregs == 1)
5897 || current_function_calls_eh_return)
5898 {
5899 /* Restore registers. We can use ebp or esp to address the memory
5900 locations. If both are available, default to ebp, since offsets
5901 are known to be small. The only exception is esp pointing directly to the
5902 end of the block of saved registers, where we may simplify the addressing
5903 mode. */
5904
5905 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
5906 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
5907 frame.to_allocate, style == 2);
5908 else
5909 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
5910 offset, style == 2);
5911
5912 /* eh_return epilogues need %ecx added to the stack pointer. */
5913 if (style == 2)
5914 {
5915 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
5916
5917 if (frame_pointer_needed)
5918 {
5919 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
5920 tmp = plus_constant (tmp, UNITS_PER_WORD);
5921 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
5922
5923 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
5924 emit_move_insn (hard_frame_pointer_rtx, tmp);
5925
5926 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
5927 const0_rtx, style);
5928 }
5929 else
5930 {
5931 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
5932 tmp = plus_constant (tmp, (frame.to_allocate
5933 + frame.nregs * UNITS_PER_WORD));
5934 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
5935 }
5936 }
5937 else if (!frame_pointer_needed)
5938 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5939 GEN_INT (frame.to_allocate
5940 + frame.nregs * UNITS_PER_WORD),
5941 style);
5942 /* If not an i386, mov & pop is faster than "leave". */
5943 else if (TARGET_USE_LEAVE || optimize_size
5944 || !cfun->machine->use_fast_prologue_epilogue)
5945 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5946 else
5947 {
5948 pro_epilogue_adjust_stack (stack_pointer_rtx,
5949 hard_frame_pointer_rtx,
5950 const0_rtx, style);
5951 if (TARGET_64BIT)
5952 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5953 else
5954 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5955 }
5956 }
5957 else
5958 {
5959 /* First step is to deallocate the stack frame so that we can
5960 pop the registers. */
5961 if (!sp_valid)
5962 {
5963 gcc_assert (frame_pointer_needed);
5964 pro_epilogue_adjust_stack (stack_pointer_rtx,
5965 hard_frame_pointer_rtx,
5966 GEN_INT (offset), style);
5967 }
5968 else if (frame.to_allocate)
5969 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5970 GEN_INT (frame.to_allocate), style);
5971
5972 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5973 if (ix86_save_reg (regno, false))
5974 {
5975 if (TARGET_64BIT)
5976 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
5977 else
5978 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
5979 }
5980 if (frame_pointer_needed)
5981 {
5982 /* Leave results in shorter dependency chains on CPUs that are
5983 able to grok it fast. */
5984 if (TARGET_USE_LEAVE)
5985 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5986 else if (TARGET_64BIT)
5987 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5988 else
5989 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5990 }
5991 }
5992
5993 if (cfun->machine->force_align_arg_pointer)
5994 {
5995 emit_insn (gen_addsi3 (stack_pointer_rtx,
5996 cfun->machine->force_align_arg_pointer,
5997 GEN_INT (-4)));
5998 }
5999
6000 /* Sibcall epilogues don't want a return instruction. */
6001 if (style == 0)
6002 return;
6003
6004 if (current_function_pops_args && current_function_args_size)
6005 {
6006 rtx popc = GEN_INT (current_function_pops_args);
6007
6008 /* i386 can only pop 64K bytes. If asked to pop more, pop
6009 return address, do explicit add, and jump indirectly to the
6010 caller. */
6011
6012 if (current_function_pops_args >= 65536)
6013 {
6014 rtx ecx = gen_rtx_REG (SImode, 2);
6015
6016 /* There is no "pascal" calling convention in 64bit ABI. */
6017 gcc_assert (!TARGET_64BIT);
6018
6019 emit_insn (gen_popsi1 (ecx));
6020 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
6021 emit_jump_insn (gen_return_indirect_internal (ecx));
6022 }
6023 else
6024 emit_jump_insn (gen_return_pop_internal (popc));
6025 }
6026 else
6027 emit_jump_insn (gen_return_internal ());
6028 }
6029
6030 /* Reset any global state that code generation for the function may have modified. */
6031
6032 static void
6033 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6034 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6035 {
6036 if (pic_offset_table_rtx)
6037 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
6038 #if TARGET_MACHO
6039 /* Mach-O doesn't support labels at the end of objects, so if
6040 it looks like we might want one, insert a NOP. */
6041 {
6042 rtx insn = get_last_insn ();
6043 while (insn
6044 && NOTE_P (insn)
6045 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED_LABEL)
6046 insn = PREV_INSN (insn);
6047 if (insn
6048 && (LABEL_P (insn)
6049 || (NOTE_P (insn)
6050 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL)))
6051 fputs ("\tnop\n", file);
6052 }
6053 #endif
6054
6055 }
6056 \f
6057 /* Extract the parts of an RTL expression that is a valid memory address
6058 for an instruction. Return 0 if the structure of the address is
6059 grossly off. Return -1 if the address contains ASHIFT, so it is not
6060 strictly valid, but is still used for computing the length of a lea instruction. */
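/* For example (an illustrative sketch, not from the original comments),
   the address %ebx + %ecx*4 + 8 decomposes into base = %ebx,
   index = %ecx, scale = 4, disp = 8, i.e. the operand 8(%ebx,%ecx,4).  */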
6061
6062 int
6063 ix86_decompose_address (rtx addr, struct ix86_address *out)
6064 {
6065 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
6066 rtx base_reg, index_reg;
6067 HOST_WIDE_INT scale = 1;
6068 rtx scale_rtx = NULL_RTX;
6069 int retval = 1;
6070 enum ix86_address_seg seg = SEG_DEFAULT;
6071
6072 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
6073 base = addr;
6074 else if (GET_CODE (addr) == PLUS)
6075 {
6076 rtx addends[4], op;
6077 int n = 0, i;
6078
6079 op = addr;
6080 do
6081 {
6082 if (n >= 4)
6083 return 0;
6084 addends[n++] = XEXP (op, 1);
6085 op = XEXP (op, 0);
6086 }
6087 while (GET_CODE (op) == PLUS);
6088 if (n >= 4)
6089 return 0;
6090 addends[n] = op;
6091
6092 for (i = n; i >= 0; --i)
6093 {
6094 op = addends[i];
6095 switch (GET_CODE (op))
6096 {
6097 case MULT:
6098 if (index)
6099 return 0;
6100 index = XEXP (op, 0);
6101 scale_rtx = XEXP (op, 1);
6102 break;
6103
6104 case UNSPEC:
6105 if (XINT (op, 1) == UNSPEC_TP
6106 && TARGET_TLS_DIRECT_SEG_REFS
6107 && seg == SEG_DEFAULT)
6108 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
6109 else
6110 return 0;
6111 break;
6112
6113 case REG:
6114 case SUBREG:
6115 if (!base)
6116 base = op;
6117 else if (!index)
6118 index = op;
6119 else
6120 return 0;
6121 break;
6122
6123 case CONST:
6124 case CONST_INT:
6125 case SYMBOL_REF:
6126 case LABEL_REF:
6127 if (disp)
6128 return 0;
6129 disp = op;
6130 break;
6131
6132 default:
6133 return 0;
6134 }
6135 }
6136 }
6137 else if (GET_CODE (addr) == MULT)
6138 {
6139 index = XEXP (addr, 0); /* index*scale */
6140 scale_rtx = XEXP (addr, 1);
6141 }
6142 else if (GET_CODE (addr) == ASHIFT)
6143 {
6144 rtx tmp;
6145
6146 /* We're called for lea too, which implements ashift on occasion. */
6147 index = XEXP (addr, 0);
6148 tmp = XEXP (addr, 1);
6149 if (GET_CODE (tmp) != CONST_INT)
6150 return 0;
6151 scale = INTVAL (tmp);
6152 if ((unsigned HOST_WIDE_INT) scale > 3)
6153 return 0;
6154 scale = 1 << scale;
6155 retval = -1;
6156 }
6157 else
6158 disp = addr; /* displacement */
6159
6160 /* Extract the integral value of scale. */
6161 if (scale_rtx)
6162 {
6163 if (GET_CODE (scale_rtx) != CONST_INT)
6164 return 0;
6165 scale = INTVAL (scale_rtx);
6166 }
6167
6168 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
6169 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
6170
6171 /* Allow the arg pointer and stack pointer as an index if there is no scaling. */
6172 if (base_reg && index_reg && scale == 1
6173 && (index_reg == arg_pointer_rtx
6174 || index_reg == frame_pointer_rtx
6175 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
6176 {
6177 rtx tmp;
6178 tmp = base, base = index, index = tmp;
6179 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
6180 }
6181
6182 /* Special case: %ebp cannot be encoded as a base without a displacement. */
6183 if ((base_reg == hard_frame_pointer_rtx
6184 || base_reg == frame_pointer_rtx
6185 || base_reg == arg_pointer_rtx) && !disp)
6186 disp = const0_rtx;
6187
6188 /* Special case: on K6, [%esi] makes the instruction vector decoded.
6189 Avoid this by transforming to [%esi+0]. */
6190 if (ix86_tune == PROCESSOR_K6 && !optimize_size
6191 && base_reg && !index_reg && !disp
6192 && REG_P (base_reg)
6193 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
6194 disp = const0_rtx;
6195
6196 /* Special case: encode reg+reg instead of reg*2. */
6197 if (!base && index && scale && scale == 2)
6198 base = index, base_reg = index_reg, scale = 1;
6199
6200 /* Special case: scaling cannot be encoded without base or displacement. */
6201 if (!base && !disp && index && scale != 1)
6202 disp = const0_rtx;
6203
6204 out->base = base;
6205 out->index = index;
6206 out->disp = disp;
6207 out->scale = scale;
6208 out->seg = seg;
6209
6210 return retval;
6211 }
6212 \f
6213 /* Return cost of the memory address x.
6214 For i386, it is better to use a complex address than let gcc copy
6215 the address into a reg and make a new pseudo. But not if the address
6216 requires two regs - that would mean more pseudos with longer
6217 lifetimes. */
6218 static int
6219 ix86_address_cost (rtx x)
6220 {
6221 struct ix86_address parts;
6222 int cost = 1;
6223 int ok = ix86_decompose_address (x, &parts);
6224
6225 gcc_assert (ok);
6226
6227 if (parts.base && GET_CODE (parts.base) == SUBREG)
6228 parts.base = SUBREG_REG (parts.base);
6229 if (parts.index && GET_CODE (parts.index) == SUBREG)
6230 parts.index = SUBREG_REG (parts.index);
6231
6232 /* More complex memory references are better. */
6233 if (parts.disp && parts.disp != const0_rtx)
6234 cost--;
6235 if (parts.seg != SEG_DEFAULT)
6236 cost--;
6237
6238 /* Attempt to minimize number of registers in the address. */
6239 if ((parts.base
6240 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
6241 || (parts.index
6242 && (!REG_P (parts.index)
6243 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
6244 cost++;
6245
6246 if (parts.base
6247 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
6248 && parts.index
6249 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
6250 && parts.base != parts.index)
6251 cost++;
6252
6253 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
6254 since its predecode logic can't detect the length of such instructions
6255 and they degenerate to vector decoded. Increase the cost of such
6256 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
6257 to split such addresses or even refuse them altogether.
6258
6259 The following addressing modes are affected:
6260 [base+scale*index]
6261 [scale*index+disp]
6262 [base+index]
6263
6264 The first and last case may be avoidable by explicitly coding the zero
6265 displacement into the memory address, but I don't have an AMD-K6 machine
6266 handy to check this theory. */
6267
6268 if (TARGET_K6
6269 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
6270 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
6271 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
6272 cost += 10;
6273
6274 return cost;
6275 }
6276 \f
6277 /* If X is a machine specific address (i.e. a symbol or label being
6278 referenced as a displacement from the GOT implemented using an
6279 UNSPEC), then return the base term. Otherwise return X. */
6280
6281 rtx
6282 ix86_find_base_term (rtx x)
6283 {
6284 rtx term;
6285
6286 if (TARGET_64BIT)
6287 {
6288 if (GET_CODE (x) != CONST)
6289 return x;
6290 term = XEXP (x, 0);
6291 if (GET_CODE (term) == PLUS
6292 && (GET_CODE (XEXP (term, 1)) == CONST_INT
6293 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
6294 term = XEXP (term, 0);
6295 if (GET_CODE (term) != UNSPEC
6296 || XINT (term, 1) != UNSPEC_GOTPCREL)
6297 return x;
6298
6299 term = XVECEXP (term, 0, 0);
6300
6301 if (GET_CODE (term) != SYMBOL_REF
6302 && GET_CODE (term) != LABEL_REF)
6303 return x;
6304
6305 return term;
6306 }
6307
6308 term = ix86_delegitimize_address (x);
6309
6310 if (GET_CODE (term) != SYMBOL_REF
6311 && GET_CODE (term) != LABEL_REF)
6312 return x;
6313
6314 return term;
6315 }
6316
6317 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
6318 this is used to form addresses of local data when -fPIC is in
6319 use. */
6320
6321 static bool
6322 darwin_local_data_pic (rtx disp)
6323 {
6324 if (GET_CODE (disp) == MINUS)
6325 {
6326 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
6327 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
6328 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
6329 {
6330 const char *sym_name = XSTR (XEXP (disp, 1), 0);
6331 if (! strcmp (sym_name, "<pic base>"))
6332 return true;
6333 }
6334 }
6335
6336 return false;
6337 }
6338 \f
6339 /* Determine if a given RTX is a valid constant. We already know this
6340 satisfies CONSTANT_P. */
6341
6342 bool
6343 legitimate_constant_p (rtx x)
6344 {
6345 switch (GET_CODE (x))
6346 {
6347 case CONST:
6348 x = XEXP (x, 0);
6349
6350 if (GET_CODE (x) == PLUS)
6351 {
6352 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6353 return false;
6354 x = XEXP (x, 0);
6355 }
6356
6357 if (TARGET_MACHO && darwin_local_data_pic (x))
6358 return true;
6359
6360 /* Only some unspecs are valid as "constants". */
6361 if (GET_CODE (x) == UNSPEC)
6362 switch (XINT (x, 1))
6363 {
6364 case UNSPEC_GOTOFF:
6365 return TARGET_64BIT;
6366 case UNSPEC_TPOFF:
6367 case UNSPEC_NTPOFF:
6368 x = XVECEXP (x, 0, 0);
6369 return (GET_CODE (x) == SYMBOL_REF
6370 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
6371 case UNSPEC_DTPOFF:
6372 x = XVECEXP (x, 0, 0);
6373 return (GET_CODE (x) == SYMBOL_REF
6374 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
6375 default:
6376 return false;
6377 }
6378
6379 /* We must have drilled down to a symbol. */
6380 if (GET_CODE (x) == LABEL_REF)
6381 return true;
6382 if (GET_CODE (x) != SYMBOL_REF)
6383 return false;
6384 /* FALLTHRU */
6385
6386 case SYMBOL_REF:
6387 /* TLS symbols are never valid. */
6388 if (SYMBOL_REF_TLS_MODEL (x))
6389 return false;
6390 break;
6391
6392 case CONST_DOUBLE:
6393 if (GET_MODE (x) == TImode
6394 && x != CONST0_RTX (TImode)
6395 && !TARGET_64BIT)
6396 return false;
6397 break;
6398
6399 case CONST_VECTOR:
6400 if (x == CONST0_RTX (GET_MODE (x)))
6401 return true;
6402 return false;
6403
6404 default:
6405 break;
6406 }
6407
6408 /* Otherwise we handle everything else in the move patterns. */
6409 return true;
6410 }
6411
6412 /* Determine if it's legal to put X into the constant pool. This
6413 is not possible for the address of thread-local symbols, which
6414 is checked above. */
6415
6416 static bool
6417 ix86_cannot_force_const_mem (rtx x)
6418 {
6419 /* We can always put integral constants and vectors in memory. */
6420 switch (GET_CODE (x))
6421 {
6422 case CONST_INT:
6423 case CONST_DOUBLE:
6424 case CONST_VECTOR:
6425 return false;
6426
6427 default:
6428 break;
6429 }
6430 return !legitimate_constant_p (x);
6431 }
6432
6433 /* Determine if a given RTX is a valid constant address. */
6434
6435 bool
6436 constant_address_p (rtx x)
6437 {
6438 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
6439 }
6440
6441 /* Nonzero if the constant value X is a legitimate general operand
6442 when generating PIC code. It is given that flag_pic is on and
6443 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
6444
6445 bool
6446 legitimate_pic_operand_p (rtx x)
6447 {
6448 rtx inner;
6449
6450 switch (GET_CODE (x))
6451 {
6452 case CONST:
6453 inner = XEXP (x, 0);
6454 if (GET_CODE (inner) == PLUS
6455 && GET_CODE (XEXP (inner, 1)) == CONST_INT)
6456 inner = XEXP (inner, 0);
6457
6458 /* Only some unspecs are valid as "constants". */
6459 if (GET_CODE (inner) == UNSPEC)
6460 switch (XINT (inner, 1))
6461 {
6462 case UNSPEC_GOTOFF:
6463 return TARGET_64BIT;
6464 case UNSPEC_TPOFF:
6465 x = XVECEXP (inner, 0, 0);
6466 return (GET_CODE (x) == SYMBOL_REF
6467 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
6468 default:
6469 return false;
6470 }
6471 /* FALLTHRU */
6472
6473 case SYMBOL_REF:
6474 case LABEL_REF:
6475 return legitimate_pic_address_disp_p (x);
6476
6477 default:
6478 return true;
6479 }
6480 }
6481
6482 /* Determine if a given CONST RTX is a valid memory displacement
6483 in PIC mode. */
6484
6485 int
6486 legitimate_pic_address_disp_p (rtx disp)
6487 {
6488 bool saw_plus;
6489
6490 /* In 64bit mode we can allow direct addresses of symbols and labels
6491 when they are not dynamic symbols. */
6492 if (TARGET_64BIT)
6493 {
6494 rtx op0 = disp, op1;
6495
6496 switch (GET_CODE (disp))
6497 {
6498 case LABEL_REF:
6499 return true;
6500
6501 case CONST:
6502 if (GET_CODE (XEXP (disp, 0)) != PLUS)
6503 break;
6504 op0 = XEXP (XEXP (disp, 0), 0);
6505 op1 = XEXP (XEXP (disp, 0), 1);
6506 if (GET_CODE (op1) != CONST_INT
6507 || INTVAL (op1) >= 16*1024*1024
6508 || INTVAL (op1) < -16*1024*1024)
6509 break;
6510 if (GET_CODE (op0) == LABEL_REF)
6511 return true;
6512 if (GET_CODE (op0) != SYMBOL_REF)
6513 break;
6514 /* FALLTHRU */
6515
6516 case SYMBOL_REF:
6517 /* TLS references should always be enclosed in UNSPEC. */
6518 if (SYMBOL_REF_TLS_MODEL (op0))
6519 return false;
6520 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0))
6521 return true;
6522 break;
6523
6524 default:
6525 break;
6526 }
6527 }
6528 if (GET_CODE (disp) != CONST)
6529 return 0;
6530 disp = XEXP (disp, 0);
6531
6532 if (TARGET_64BIT)
6533 {
6534 /* It is unsafe to allow PLUS expressions here; the allowed distance of
6535 GOT references is limited. We should not need these anyway. */
6536 if (GET_CODE (disp) != UNSPEC
6537 || (XINT (disp, 1) != UNSPEC_GOTPCREL
6538 && XINT (disp, 1) != UNSPEC_GOTOFF))
6539 return 0;
6540
6541 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
6542 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
6543 return 0;
6544 return 1;
6545 }
6546
6547 saw_plus = false;
6548 if (GET_CODE (disp) == PLUS)
6549 {
6550 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
6551 return 0;
6552 disp = XEXP (disp, 0);
6553 saw_plus = true;
6554 }
6555
6556 if (TARGET_MACHO && darwin_local_data_pic (disp))
6557 return 1;
6558
6559 if (GET_CODE (disp) != UNSPEC)
6560 return 0;
6561
6562 switch (XINT (disp, 1))
6563 {
6564 case UNSPEC_GOT:
6565 if (saw_plus)
6566 return false;
6567 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
6568 case UNSPEC_GOTOFF:
6569 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
6570 While the ABI also specifies a 32bit relocation, we don't produce it in
6571 the small PIC model at all. */
6572 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
6573 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
6574 && !TARGET_64BIT)
6575 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
6576 return false;
6577 case UNSPEC_GOTTPOFF:
6578 case UNSPEC_GOTNTPOFF:
6579 case UNSPEC_INDNTPOFF:
6580 if (saw_plus)
6581 return false;
6582 disp = XVECEXP (disp, 0, 0);
6583 return (GET_CODE (disp) == SYMBOL_REF
6584 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
6585 case UNSPEC_NTPOFF:
6586 disp = XVECEXP (disp, 0, 0);
6587 return (GET_CODE (disp) == SYMBOL_REF
6588 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
6589 case UNSPEC_DTPOFF:
6590 disp = XVECEXP (disp, 0, 0);
6591 return (GET_CODE (disp) == SYMBOL_REF
6592 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
6593 }
6594
6595 return 0;
6596 }
6597
6598 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
6599 memory address for an instruction. The MODE argument is the machine mode
6600 for the MEM expression that wants to use this address.
6601
6602 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
6603 convert common non-canonical forms to canonical form so that they will
6604 be recognized. */
6605
6606 int
6607 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
6608 {
6609 struct ix86_address parts;
6610 rtx base, index, disp;
6611 HOST_WIDE_INT scale;
6612 const char *reason = NULL;
6613 rtx reason_rtx = NULL_RTX;
6614
6615 if (TARGET_DEBUG_ADDR)
6616 {
6617 fprintf (stderr,
6618 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
6619 GET_MODE_NAME (mode), strict);
6620 debug_rtx (addr);
6621 }
6622
6623 if (ix86_decompose_address (addr, &parts) <= 0)
6624 {
6625 reason = "decomposition failed";
6626 goto report_error;
6627 }
6628
6629 base = parts.base;
6630 index = parts.index;
6631 disp = parts.disp;
6632 scale = parts.scale;
6633
6634 /* Validate base register.
6635
6636 Don't allow SUBREG's that span more than a word here. It can lead to spill
6637 failures when the base is one word out of a two word structure, which is
6638 represented internally as a DImode int. */
6639
6640 if (base)
6641 {
6642 rtx reg;
6643 reason_rtx = base;
6644
6645 if (REG_P (base))
6646 reg = base;
6647 else if (GET_CODE (base) == SUBREG
6648 && REG_P (SUBREG_REG (base))
6649 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
6650 <= UNITS_PER_WORD)
6651 reg = SUBREG_REG (base);
6652 else
6653 {
6654 reason = "base is not a register";
6655 goto report_error;
6656 }
6657
6658 if (GET_MODE (base) != Pmode)
6659 {
6660 reason = "base is not in Pmode";
6661 goto report_error;
6662 }
6663
6664 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
6665 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
6666 {
6667 reason = "base is not valid";
6668 goto report_error;
6669 }
6670 }
6671
6672 /* Validate index register.
6673
6674 Don't allow SUBREG's that span more than a word here -- same as above. */
6675
6676 if (index)
6677 {
6678 rtx reg;
6679 reason_rtx = index;
6680
6681 if (REG_P (index))
6682 reg = index;
6683 else if (GET_CODE (index) == SUBREG
6684 && REG_P (SUBREG_REG (index))
6685 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
6686 <= UNITS_PER_WORD)
6687 reg = SUBREG_REG (index);
6688 else
6689 {
6690 reason = "index is not a register";
6691 goto report_error;
6692 }
6693
6694 if (GET_MODE (index) != Pmode)
6695 {
6696 reason = "index is not in Pmode";
6697 goto report_error;
6698 }
6699
6700 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
6701 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
6702 {
6703 reason = "index is not valid";
6704 goto report_error;
6705 }
6706 }
6707
6708 /* Validate scale factor. */
6709 if (scale != 1)
6710 {
6711 reason_rtx = GEN_INT (scale);
6712 if (!index)
6713 {
6714 reason = "scale without index";
6715 goto report_error;
6716 }
6717
6718 if (scale != 2 && scale != 4 && scale != 8)
6719 {
6720 reason = "scale is not a valid multiplier";
6721 goto report_error;
6722 }
6723 }
6724
6725 /* Validate displacement. */
6726 if (disp)
6727 {
6728 reason_rtx = disp;
6729
6730 if (GET_CODE (disp) == CONST
6731 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
6732 switch (XINT (XEXP (disp, 0), 1))
6733 {
6734 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
6735 used. While the ABI also specifies 32bit relocations, we don't
6736 produce them at all and use IP-relative addressing instead. */
6737 case UNSPEC_GOT:
6738 case UNSPEC_GOTOFF:
6739 gcc_assert (flag_pic);
6740 if (!TARGET_64BIT)
6741 goto is_legitimate_pic;
6742 reason = "64bit address unspec";
6743 goto report_error;
6744
6745 case UNSPEC_GOTPCREL:
6746 gcc_assert (flag_pic);
6747 goto is_legitimate_pic;
6748
6749 case UNSPEC_GOTTPOFF:
6750 case UNSPEC_GOTNTPOFF:
6751 case UNSPEC_INDNTPOFF:
6752 case UNSPEC_NTPOFF:
6753 case UNSPEC_DTPOFF:
6754 break;
6755
6756 default:
6757 reason = "invalid address unspec";
6758 goto report_error;
6759 }
6760
6761 else if (SYMBOLIC_CONST (disp)
6762 && (flag_pic
6763 || (TARGET_MACHO
6764 #if TARGET_MACHO
6765 && MACHOPIC_INDIRECT
6766 && !machopic_operand_p (disp)
6767 #endif
6768 )))
6769 {
6770
6771 is_legitimate_pic:
6772 if (TARGET_64BIT && (index || base))
6773 {
6774 /* foo@dtpoff(%rX) is ok. */
6775 if (GET_CODE (disp) != CONST
6776 || GET_CODE (XEXP (disp, 0)) != PLUS
6777 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
6778 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
6779 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
6780 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
6781 {
6782 reason = "non-constant pic memory reference";
6783 goto report_error;
6784 }
6785 }
6786 else if (! legitimate_pic_address_disp_p (disp))
6787 {
6788 reason = "displacement is an invalid pic construct";
6789 goto report_error;
6790 }
6791
6792 /* This code used to verify that a symbolic pic displacement
6793 includes the pic_offset_table_rtx register.
6794 
6795 While this is a good idea, these constructs may unfortunately
6796 be created by the "adds using lea" optimization for incorrect
6797 code like:
6798 
6799 int a;
6800 int foo(int i)
6801 {
6802 return *(&a+i);
6803 }
6804 
6805 This code is nonsensical, but results in addressing the
6806 GOT table with pic_offset_table_rtx as the base. We can't
6807 just refuse it easily, since it gets matched by the
6808 "addsi3" pattern, which later gets split to lea when the
6809 output register differs from the input. While this
6810 could be handled by a separate addsi pattern for this case
6811 that never results in lea, disabling this test seems to be
6812 the easier and correct fix for the crash. */
6813 }
6814 else if (GET_CODE (disp) != LABEL_REF
6815 && GET_CODE (disp) != CONST_INT
6816 && (GET_CODE (disp) != CONST
6817 || !legitimate_constant_p (disp))
6818 && (GET_CODE (disp) != SYMBOL_REF
6819 || !legitimate_constant_p (disp)))
6820 {
6821 reason = "displacement is not constant";
6822 goto report_error;
6823 }
6824 else if (TARGET_64BIT
6825 && !x86_64_immediate_operand (disp, VOIDmode))
6826 {
6827 reason = "displacement is out of range";
6828 goto report_error;
6829 }
6830 }
6831
6832 /* Everything looks valid. */
6833 if (TARGET_DEBUG_ADDR)
6834 fprintf (stderr, "Success.\n");
6835 return TRUE;
6836
6837 report_error:
6838 if (TARGET_DEBUG_ADDR)
6839 {
6840 fprintf (stderr, "Error: %s\n", reason);
6841 debug_rtx (reason_rtx);
6842 }
6843 return FALSE;
6844 }
6845 \f
6846 /* Return a unique alias set for the GOT. */
6847
6848 static HOST_WIDE_INT
6849 ix86_GOT_alias_set (void)
6850 {
6851 static HOST_WIDE_INT set = -1;
6852 if (set == -1)
6853 set = new_alias_set ();
6854 return set;
6855 }
6856
6857 /* Return a legitimate reference for ORIG (an address) using the
6858 register REG. If REG is 0, a new pseudo is generated.
6859
6860 There are two types of references that must be handled:
6861
6862 1. Global data references must load the address from the GOT, via
6863 the PIC reg. An insn is emitted to do this load, and the reg is
6864 returned.
6865
6866 2. Static data references, constant pool addresses, and code labels
6867 compute the address as an offset from the GOT, whose base is in
6868 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
6869 differentiate them from global data objects. The returned
6870 address is the PIC reg + an unspec constant.
6871
6872 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
6873 reg also appears in the address. */
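/* Editorial sketch (assuming a 32-bit ELF target; not from the original
   sources) of the two transformations performed below:

     global "foo" -> (mem (plus (reg PIC)
                                (const (unspec [foo] UNSPEC_GOT))))
       i.e. foo's address is loaded from its GOT slot;

     local "bar"  -> (plus (reg PIC)
                           (const (unspec [bar] UNSPEC_GOTOFF)))
       i.e. a direct @GOTOFF offset from the PIC register.  */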
6874
6875 static rtx
6876 legitimize_pic_address (rtx orig, rtx reg)
6877 {
6878 rtx addr = orig;
6879 rtx new = orig;
6880 rtx base;
6881
6882 #if TARGET_MACHO
6883 if (TARGET_MACHO && !TARGET_64BIT)
6884 {
6885 if (reg == 0)
6886 reg = gen_reg_rtx (Pmode);
6887 /* Use the generic Mach-O PIC machinery. */
6888 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
6889 }
6890 #endif
6891
6892 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
6893 new = addr;
6894 else if (TARGET_64BIT
6895 && ix86_cmodel != CM_SMALL_PIC
6896 && local_symbolic_operand (addr, Pmode))
6897 {
6898 rtx tmpreg;
6899 /* This symbol may be referenced via a displacement from the PIC
6900 base address (@GOTOFF). */
6901
6902 if (reload_in_progress)
6903 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6904 if (GET_CODE (addr) == CONST)
6905 addr = XEXP (addr, 0);
6906 if (GET_CODE (addr) == PLUS)
6907 {
6908 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6909 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6910 }
6911 else
6912 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6913 new = gen_rtx_CONST (Pmode, new);
6914 if (!reg)
6915 tmpreg = gen_reg_rtx (Pmode);
6916 else
6917 tmpreg = reg;
6918 emit_move_insn (tmpreg, new);
6919
6920 if (reg != 0)
6921 {
6922 new = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
6923 tmpreg, 1, OPTAB_DIRECT);
6924 new = reg;
6925 }
6926 else new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
6927 }
6928 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
6929 {
6930 /* This symbol may be referenced via a displacement from the PIC
6931 base address (@GOTOFF). */
6932
6933 if (reload_in_progress)
6934 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6935 if (GET_CODE (addr) == CONST)
6936 addr = XEXP (addr, 0);
6937 if (GET_CODE (addr) == PLUS)
6938 {
6939 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6940 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6941 }
6942 else
6943 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6944 new = gen_rtx_CONST (Pmode, new);
6945 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6946
6947 if (reg != 0)
6948 {
6949 emit_move_insn (reg, new);
6950 new = reg;
6951 }
6952 }
6953 else if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
6954 {
6955 if (TARGET_64BIT)
6956 {
6957 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
6958 new = gen_rtx_CONST (Pmode, new);
6959 new = gen_const_mem (Pmode, new);
6960 set_mem_alias_set (new, ix86_GOT_alias_set ());
6961
6962 if (reg == 0)
6963 reg = gen_reg_rtx (Pmode);
6964 /* Use gen_movsi directly; otherwise the address is loaded
6965 into a register for CSE. We don't want to CSE these addresses;
6966 instead we CSE the addresses loaded from the GOT table, so skip this. */
6967 emit_insn (gen_movsi (reg, new));
6968 new = reg;
6969 }
6970 else
6971 {
6972 /* This symbol must be referenced via a load from the
6973 Global Offset Table (@GOT). */
6974
6975 if (reload_in_progress)
6976 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6977 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
6978 new = gen_rtx_CONST (Pmode, new);
6979 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6980 new = gen_const_mem (Pmode, new);
6981 set_mem_alias_set (new, ix86_GOT_alias_set ());
6982
6983 if (reg == 0)
6984 reg = gen_reg_rtx (Pmode);
6985 emit_move_insn (reg, new);
6986 new = reg;
6987 }
6988 }
6989 else
6990 {
6991 if (GET_CODE (addr) == CONST_INT
6992 && !x86_64_immediate_operand (addr, VOIDmode))
6993 {
6994 if (reg)
6995 {
6996 emit_move_insn (reg, addr);
6997 new = reg;
6998 }
6999 else
7000 new = force_reg (Pmode, addr);
7001 }
7002 else if (GET_CODE (addr) == CONST)
7003 {
7004 addr = XEXP (addr, 0);
7005
7006 /* We must match what we generated before. Assume the only
7007 unspecs that can get here are ours; not that we could do
7008 anything with them anyway.... */
7009 if (GET_CODE (addr) == UNSPEC
7010 || (GET_CODE (addr) == PLUS
7011 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
7012 return orig;
7013 gcc_assert (GET_CODE (addr) == PLUS);
7014 }
7015 if (GET_CODE (addr) == PLUS)
7016 {
7017 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
7018
7019 /* Check first to see if this is a constant offset from a @GOTOFF
7020 symbol reference. */
7021 if (local_symbolic_operand (op0, Pmode)
7022 && GET_CODE (op1) == CONST_INT)
7023 {
7024 if (!TARGET_64BIT)
7025 {
7026 if (reload_in_progress)
7027 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
7028 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
7029 UNSPEC_GOTOFF);
7030 new = gen_rtx_PLUS (Pmode, new, op1);
7031 new = gen_rtx_CONST (Pmode, new);
7032 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
7033
7034 if (reg != 0)
7035 {
7036 emit_move_insn (reg, new);
7037 new = reg;
7038 }
7039 }
7040 else
7041 {
7042 if (INTVAL (op1) < -16*1024*1024
7043 || INTVAL (op1) >= 16*1024*1024)
7044 {
7045 if (!x86_64_immediate_operand (op1, Pmode))
7046 op1 = force_reg (Pmode, op1);
7047 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
7048 }
7049 }
7050 }
7051 else
7052 {
7053 base = legitimize_pic_address (XEXP (addr, 0), reg);
7054 new = legitimize_pic_address (XEXP (addr, 1),
7055 base == reg ? NULL_RTX : reg);
7056
7057 if (GET_CODE (new) == CONST_INT)
7058 new = plus_constant (base, INTVAL (new));
7059 else
7060 {
7061 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
7062 {
7063 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
7064 new = XEXP (new, 1);
7065 }
7066 new = gen_rtx_PLUS (Pmode, base, new);
7067 }
7068 }
7069 }
7070 }
7071 return new;
7072 }
7073 \f
7074 /* Load the thread pointer. If TO_REG is true, force it into a register. */
7075
7076 static rtx
7077 get_thread_pointer (int to_reg)
7078 {
7079 rtx tp, reg, insn;
7080
7081 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
7082 if (!to_reg)
7083 return tp;
7084
7085 reg = gen_reg_rtx (Pmode);
7086 insn = gen_rtx_SET (VOIDmode, reg, tp);
7087 insn = emit_insn (insn);
7088
7089 return reg;
7090 }
7091
7092 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
7093 false if we expect this to be used for a memory address and true if
7094 we expect to load the address into a register. */
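/* Editorial sketch: for the initial-exec model on x86_64 the result is
   roughly

     (plus (thread pointer) (mem (const (unspec [x] UNSPEC_GOTNTPOFF))))

   i.e. the variable's TP-relative offset is loaded from a GOT slot
   (printed as x@GOTTPOFF(%rip)) and added to the thread pointer.  */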
7095
7096 static rtx
7097 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
7098 {
7099 rtx dest, base, off, pic, tp;
7100 int type;
7101
7102 switch (model)
7103 {
7104 case TLS_MODEL_GLOBAL_DYNAMIC:
7105 dest = gen_reg_rtx (Pmode);
7106 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
7107
7108 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
7109 {
7110 rtx rax = gen_rtx_REG (Pmode, 0), insns;
7111
7112 start_sequence ();
7113 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
7114 insns = get_insns ();
7115 end_sequence ();
7116
7117 emit_libcall_block (insns, dest, rax, x);
7118 }
7119 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7120 emit_insn (gen_tls_global_dynamic_64 (dest, x));
7121 else
7122 emit_insn (gen_tls_global_dynamic_32 (dest, x));
7123
7124 if (TARGET_GNU2_TLS)
7125 {
7126 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
7127
7128 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7129 }
7130 break;
7131
7132 case TLS_MODEL_LOCAL_DYNAMIC:
7133 base = gen_reg_rtx (Pmode);
7134 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
7135
7136 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
7137 {
7138 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
7139
7140 start_sequence ();
7141 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
7142 insns = get_insns ();
7143 end_sequence ();
7144
7145 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
7146 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
7147 emit_libcall_block (insns, base, rax, note);
7148 }
7149 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7150 emit_insn (gen_tls_local_dynamic_base_64 (base));
7151 else
7152 emit_insn (gen_tls_local_dynamic_base_32 (base));
7153
7154 if (TARGET_GNU2_TLS)
7155 {
7156 rtx x = ix86_tls_module_base ();
7157
7158 set_unique_reg_note (get_last_insn (), REG_EQUIV,
7159 gen_rtx_MINUS (Pmode, x, tp));
7160 }
7161
7162 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
7163 off = gen_rtx_CONST (Pmode, off);
7164
7165 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
7166
7167 if (TARGET_GNU2_TLS)
7168 {
7169 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
7170
7171 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7172 }
7173
7174 break;
7175
7176 case TLS_MODEL_INITIAL_EXEC:
7177 if (TARGET_64BIT)
7178 {
7179 pic = NULL;
7180 type = UNSPEC_GOTNTPOFF;
7181 }
7182 else if (flag_pic)
7183 {
7184 if (reload_in_progress)
7185 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
7186 pic = pic_offset_table_rtx;
7187 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
7188 }
7189 else if (!TARGET_ANY_GNU_TLS)
7190 {
7191 pic = gen_reg_rtx (Pmode);
7192 emit_insn (gen_set_got (pic));
7193 type = UNSPEC_GOTTPOFF;
7194 }
7195 else
7196 {
7197 pic = NULL;
7198 type = UNSPEC_INDNTPOFF;
7199 }
7200
7201 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
7202 off = gen_rtx_CONST (Pmode, off);
7203 if (pic)
7204 off = gen_rtx_PLUS (Pmode, pic, off);
7205 off = gen_const_mem (Pmode, off);
7206 set_mem_alias_set (off, ix86_GOT_alias_set ());
7207
7208 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7209 {
7210 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7211 off = force_reg (Pmode, off);
7212 return gen_rtx_PLUS (Pmode, base, off);
7213 }
7214 else
7215 {
7216 base = get_thread_pointer (true);
7217 dest = gen_reg_rtx (Pmode);
7218 emit_insn (gen_subsi3 (dest, base, off));
7219 }
7220 break;
7221
7222 case TLS_MODEL_LOCAL_EXEC:
7223 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
7224 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7225 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
7226 off = gen_rtx_CONST (Pmode, off);
7227
7228 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7229 {
7230 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7231 return gen_rtx_PLUS (Pmode, base, off);
7232 }
7233 else
7234 {
7235 base = get_thread_pointer (true);
7236 dest = gen_reg_rtx (Pmode);
7237 emit_insn (gen_subsi3 (dest, base, off));
7238 }
7239 break;
7240
7241 default:
7242 gcc_unreachable ();
7243 }
7244
7245 return dest;
7246 }
7247
7248 /* Try machine-dependent ways of modifying an illegitimate address
7249 to be legitimate. If we find one, return the new, valid address.
7250 This macro is used in only one place: `memory_address' in explow.c.
7251
7252 OLDX is the address as it was before break_out_memory_refs was called.
7253 In some cases it is useful to look at this to decide what needs to be done.
7254
7255 MODE and WIN are passed so that this macro can use
7256 GO_IF_LEGITIMATE_ADDRESS.
7257
7258 It is always safe for this macro to do nothing. It exists to recognize
7259 opportunities to optimize the output.
7260
7261 For the 80386, we handle X+REG by loading X into a register R and
7262 using R+REG. R will go in a general reg and indexing will be used.
7263 However, if REG is a broken-out memory address or multiplication,
7264 nothing needs to be done because REG can certainly go in a general reg.
7265
7266 When -fpic is used, special handling is needed for symbolic references.
7267 See comments by legitimize_pic_address in i386.c for details. */
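/* Editorial example of one transformation done below: a shift by 0..3
   is rewritten into a multiply so it can become a scaled index, e.g.

     (plus (ashift (reg A) (const_int 2)) (reg B))
       -> (plus (mult (reg A) (const_int 4)) (reg B))

   which legitimate_address_p then accepts as base B, index A, scale 4.  */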
7268
7269 rtx
7270 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
7271 {
7272 int changed = 0;
7273 unsigned log;
7274
7275 if (TARGET_DEBUG_ADDR)
7276 {
7277 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
7278 GET_MODE_NAME (mode));
7279 debug_rtx (x);
7280 }
7281
7282 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
7283 if (log)
7284 return legitimize_tls_address (x, log, false);
7285 if (GET_CODE (x) == CONST
7286 && GET_CODE (XEXP (x, 0)) == PLUS
7287 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
7288 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
7289 {
7290 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
7291 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
7292 }
7293
7294 if (flag_pic && SYMBOLIC_CONST (x))
7295 return legitimize_pic_address (x, 0);
7296
7297 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
7298 if (GET_CODE (x) == ASHIFT
7299 && GET_CODE (XEXP (x, 1)) == CONST_INT
7300 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
7301 {
7302 changed = 1;
7303 log = INTVAL (XEXP (x, 1));
7304 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
7305 GEN_INT (1 << log));
7306 }
7307
7308 if (GET_CODE (x) == PLUS)
7309 {
7310 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
7311
7312 if (GET_CODE (XEXP (x, 0)) == ASHIFT
7313 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7314 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
7315 {
7316 changed = 1;
7317 log = INTVAL (XEXP (XEXP (x, 0), 1));
7318 XEXP (x, 0) = gen_rtx_MULT (Pmode,
7319 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
7320 GEN_INT (1 << log));
7321 }
7322
7323 if (GET_CODE (XEXP (x, 1)) == ASHIFT
7324 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
7325 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
7326 {
7327 changed = 1;
7328 log = INTVAL (XEXP (XEXP (x, 1), 1));
7329 XEXP (x, 1) = gen_rtx_MULT (Pmode,
7330 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
7331 GEN_INT (1 << log));
7332 }
7333
7334 /* Put multiply first if it isn't already. */
7335 if (GET_CODE (XEXP (x, 1)) == MULT)
7336 {
7337 rtx tmp = XEXP (x, 0);
7338 XEXP (x, 0) = XEXP (x, 1);
7339 XEXP (x, 1) = tmp;
7340 changed = 1;
7341 }
7342
7343 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
7344 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
7345 created by virtual register instantiation, register elimination, and
7346 similar optimizations. */
7347 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
7348 {
7349 changed = 1;
7350 x = gen_rtx_PLUS (Pmode,
7351 gen_rtx_PLUS (Pmode, XEXP (x, 0),
7352 XEXP (XEXP (x, 1), 0)),
7353 XEXP (XEXP (x, 1), 1));
7354 }
7355
7356 /* Canonicalize
7357 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
7358 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
7359 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
7360 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
7361 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
7362 && CONSTANT_P (XEXP (x, 1)))
7363 {
7364 rtx constant;
7365 rtx other = NULL_RTX;
7366
7367 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
7368 {
7369 constant = XEXP (x, 1);
7370 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
7371 }
7372 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
7373 {
7374 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
7375 other = XEXP (x, 1);
7376 }
7377 else
7378 constant = 0;
7379
7380 if (constant)
7381 {
7382 changed = 1;
7383 x = gen_rtx_PLUS (Pmode,
7384 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
7385 XEXP (XEXP (XEXP (x, 0), 1), 0)),
7386 plus_constant (other, INTVAL (constant)));
7387 }
7388 }
7389
7390 if (changed && legitimate_address_p (mode, x, FALSE))
7391 return x;
7392
7393 if (GET_CODE (XEXP (x, 0)) == MULT)
7394 {
7395 changed = 1;
7396 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
7397 }
7398
7399 if (GET_CODE (XEXP (x, 1)) == MULT)
7400 {
7401 changed = 1;
7402 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
7403 }
7404
7405 if (changed
7406 && GET_CODE (XEXP (x, 1)) == REG
7407 && GET_CODE (XEXP (x, 0)) == REG)
7408 return x;
7409
7410 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
7411 {
7412 changed = 1;
7413 x = legitimize_pic_address (x, 0);
7414 }
7415
7416 if (changed && legitimate_address_p (mode, x, FALSE))
7417 return x;
7418
7419 if (GET_CODE (XEXP (x, 0)) == REG)
7420 {
7421 rtx temp = gen_reg_rtx (Pmode);
7422 rtx val = force_operand (XEXP (x, 1), temp);
7423 if (val != temp)
7424 emit_move_insn (temp, val);
7425
7426 XEXP (x, 1) = temp;
7427 return x;
7428 }
7429
7430 else if (GET_CODE (XEXP (x, 1)) == REG)
7431 {
7432 rtx temp = gen_reg_rtx (Pmode);
7433 rtx val = force_operand (XEXP (x, 0), temp);
7434 if (val != temp)
7435 emit_move_insn (temp, val);
7436
7437 XEXP (x, 0) = temp;
7438 return x;
7439 }
7440 }
7441
7442 return x;
7443 }
7444 \f
7445 /* Print an integer constant expression in assembler syntax. Addition
7446 and subtraction are the only arithmetic that may appear in these
7447 expressions. FILE is the stdio stream to write to, X is the rtx, and
7448 CODE is the operand print code from the output string. */
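/* Editorial example of the output produced below: a symbol wrapped in
   UNSPEC_GOTOFF prints as "foo@GOTOFF", a non-local symbol printed
   with the 'P' code gets an "@PLT" suffix, and a MINUS is wrapped in
   '[' ']' (AT&T) or '(' ')' (Intel), except on Mach-O.  */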
7449
7450 static void
7451 output_pic_addr_const (FILE *file, rtx x, int code)
7452 {
7453 char buf[256];
7454
7455 switch (GET_CODE (x))
7456 {
7457 case PC:
7458 gcc_assert (flag_pic);
7459 putc ('.', file);
7460 break;
7461
7462 case SYMBOL_REF:
7463 output_addr_const (file, x);
7464 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
7465 fputs ("@PLT", file);
7466 break;
7467
7468 case LABEL_REF:
7469 x = XEXP (x, 0);
7470 /* FALLTHRU */
7471 case CODE_LABEL:
7472 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
7473 assemble_name (asm_out_file, buf);
7474 break;
7475
7476 case CONST_INT:
7477 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
7478 break;
7479
7480 case CONST:
7481 /* This used to output parentheses around the expression,
7482 but that does not work on the 386 (either ATT or BSD assembler). */
7483 output_pic_addr_const (file, XEXP (x, 0), code);
7484 break;
7485
7486 case CONST_DOUBLE:
7487 if (GET_MODE (x) == VOIDmode)
7488 {
7489 /* We can use %d if the number is <32 bits and positive. */
7490 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
7491 fprintf (file, "0x%lx%08lx",
7492 (unsigned long) CONST_DOUBLE_HIGH (x),
7493 (unsigned long) CONST_DOUBLE_LOW (x));
7494 else
7495 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
7496 }
7497 else
7498 /* We can't handle floating point constants;
7499 PRINT_OPERAND must handle them. */
7500 output_operand_lossage ("floating constant misused");
7501 break;
7502
7503 case PLUS:
7504 /* Some assemblers need integer constants to appear first. */
7505 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
7506 {
7507 output_pic_addr_const (file, XEXP (x, 0), code);
7508 putc ('+', file);
7509 output_pic_addr_const (file, XEXP (x, 1), code);
7510 }
7511 else
7512 {
7513 gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
7514 output_pic_addr_const (file, XEXP (x, 1), code);
7515 putc ('+', file);
7516 output_pic_addr_const (file, XEXP (x, 0), code);
7517 }
7518 break;
7519
7520 case MINUS:
7521 if (!TARGET_MACHO)
7522 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
7523 output_pic_addr_const (file, XEXP (x, 0), code);
7524 putc ('-', file);
7525 output_pic_addr_const (file, XEXP (x, 1), code);
7526 if (!TARGET_MACHO)
7527 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
7528 break;
7529
7530 case UNSPEC:
7531 gcc_assert (XVECLEN (x, 0) == 1);
7532 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
7533 switch (XINT (x, 1))
7534 {
7535 case UNSPEC_GOT:
7536 fputs ("@GOT", file);
7537 break;
7538 case UNSPEC_GOTOFF:
7539 fputs ("@GOTOFF", file);
7540 break;
7541 case UNSPEC_GOTPCREL:
7542 fputs ("@GOTPCREL(%rip)", file);
7543 break;
7544 case UNSPEC_GOTTPOFF:
7545 /* FIXME: This might be @TPOFF in Sun ld too. */
7546 fputs ("@GOTTPOFF", file);
7547 break;
7548 case UNSPEC_TPOFF:
7549 fputs ("@TPOFF", file);
7550 break;
7551 case UNSPEC_NTPOFF:
7552 if (TARGET_64BIT)
7553 fputs ("@TPOFF", file);
7554 else
7555 fputs ("@NTPOFF", file);
7556 break;
7557 case UNSPEC_DTPOFF:
7558 fputs ("@DTPOFF", file);
7559 break;
7560 case UNSPEC_GOTNTPOFF:
7561 if (TARGET_64BIT)
7562 fputs ("@GOTTPOFF(%rip)", file);
7563 else
7564 fputs ("@GOTNTPOFF", file);
7565 break;
7566 case UNSPEC_INDNTPOFF:
7567 fputs ("@INDNTPOFF", file);
7568 break;
7569 default:
7570 output_operand_lossage ("invalid UNSPEC as operand");
7571 break;
7572 }
7573 break;
7574
7575 default:
7576 output_operand_lossage ("invalid expression as operand");
7577 }
7578 }
7579
7580 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
7581 We need to emit DTP-relative relocations. */
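/* Editorial example: for a 4-byte entry this emits roughly
   ".long foo@DTPOFF", and for an 8-byte entry ".long foo@DTPOFF, 0",
   so only the low word carries the relocation.  */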
7582
7583 static void
7584 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
7585 {
7586 fputs (ASM_LONG, file);
7587 output_addr_const (file, x);
7588 fputs ("@DTPOFF", file);
7589 switch (size)
7590 {
7591 case 4:
7592 break;
7593 case 8:
7594 fputs (", 0", file);
7595 break;
7596 default:
7597 gcc_unreachable ();
7598 }
7599 }
7600
7601 /* In the name of slightly smaller debug output, and to cater to
7602 general assembler lossage, recognize PIC+GOTOFF and turn it back
7603 into a direct symbol reference.
7604
7605 On Darwin, this is necessary to avoid a crash, because Darwin
7606 has a different PIC label for each routine but the DWARF debugging
7607 information is not associated with any particular routine, so it's
7608 necessary to remove references to the PIC label from RTL stored by
7609 the DWARF output code. */
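/* Editorial sketch of the 32-bit case handled below:

     (plus (reg PIC) (const (unspec [foo] UNSPEC_GOTOFF)))
       -> (symbol_ref "foo")

   i.e. the PIC register and the @GOTOFF wrapper are stripped so the
   debug info can refer to the symbol directly; any register or
   constant addend is added back to the result.  */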
7610
7611 static rtx
7612 ix86_delegitimize_address (rtx orig_x)
7613 {
7614 rtx x = orig_x;
7615 /* reg_addend is NULL or a multiple of some register. */
7616 rtx reg_addend = NULL_RTX;
7617 /* const_addend is NULL or a const_int. */
7618 rtx const_addend = NULL_RTX;
7619 /* This is the result, or NULL. */
7620 rtx result = NULL_RTX;
7621
7622 if (GET_CODE (x) == MEM)
7623 x = XEXP (x, 0);
7624
7625 if (TARGET_64BIT)
7626 {
7627 if (GET_CODE (x) != CONST
7628 || GET_CODE (XEXP (x, 0)) != UNSPEC
7629 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
7630 || GET_CODE (orig_x) != MEM)
7631 return orig_x;
7632 return XVECEXP (XEXP (x, 0), 0, 0);
7633 }
7634
7635 if (GET_CODE (x) != PLUS
7636 || GET_CODE (XEXP (x, 1)) != CONST)
7637 return orig_x;
7638
7639 if (GET_CODE (XEXP (x, 0)) == REG
7640 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
7641 /* %ebx + GOT/GOTOFF */
7642 ;
7643 else if (GET_CODE (XEXP (x, 0)) == PLUS)
7644 {
7645 /* %ebx + %reg * scale + GOT/GOTOFF */
7646 reg_addend = XEXP (x, 0);
7647 if (GET_CODE (XEXP (reg_addend, 0)) == REG
7648 && REGNO (XEXP (reg_addend, 0)) == PIC_OFFSET_TABLE_REGNUM)
7649 reg_addend = XEXP (reg_addend, 1);
7650 else if (GET_CODE (XEXP (reg_addend, 1)) == REG
7651 && REGNO (XEXP (reg_addend, 1)) == PIC_OFFSET_TABLE_REGNUM)
7652 reg_addend = XEXP (reg_addend, 0);
7653 else
7654 return orig_x;
7655 if (GET_CODE (reg_addend) != REG
7656 && GET_CODE (reg_addend) != MULT
7657 && GET_CODE (reg_addend) != ASHIFT)
7658 return orig_x;
7659 }
7660 else
7661 return orig_x;
7662
7663 x = XEXP (XEXP (x, 1), 0);
7664 if (GET_CODE (x) == PLUS
7665 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7666 {
7667 const_addend = XEXP (x, 1);
7668 x = XEXP (x, 0);
7669 }
7670
7671 if (GET_CODE (x) == UNSPEC
7672 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
7673 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
7674 result = XVECEXP (x, 0, 0);
7675
7676 if (TARGET_MACHO && darwin_local_data_pic (x)
7677 && GET_CODE (orig_x) != MEM)
7678 result = XEXP (x, 0);
7679
7680 if (! result)
7681 return orig_x;
7682
7683 if (const_addend)
7684 result = gen_rtx_PLUS (Pmode, result, const_addend);
7685 if (reg_addend)
7686 result = gen_rtx_PLUS (Pmode, reg_addend, result);
7687 return result;
7688 }
7689 \f
7690 static void
7691 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
7692 int fp, FILE *file)
7693 {
7694 const char *suffix;
7695
7696 if (mode == CCFPmode || mode == CCFPUmode)
7697 {
7698 enum rtx_code second_code, bypass_code;
7699 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
7700 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
7701 code = ix86_fp_compare_code_to_integer (code);
7702 mode = CCmode;
7703 }
7704 if (reverse)
7705 code = reverse_condition (code);
7706
7707 switch (code)
7708 {
7709 case EQ:
7710 suffix = "e";
7711 break;
7712 case NE:
7713 suffix = "ne";
7714 break;
7715 case GT:
7716 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
7717 suffix = "g";
7718 break;
7719 case GTU:
7720 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
7721 Those same assemblers have the same but opposite lossage on cmov. */
7722 gcc_assert (mode == CCmode);
7723 suffix = fp ? "nbe" : "a";
7724 break;
7725 case LT:
7726 switch (mode)
7727 {
7728 case CCNOmode:
7729 case CCGOCmode:
7730 suffix = "s";
7731 break;
7732
7733 case CCmode:
7734 case CCGCmode:
7735 suffix = "l";
7736 break;
7737
7738 default:
7739 gcc_unreachable ();
7740 }
7741 break;
7742 case LTU:
7743 gcc_assert (mode == CCmode);
7744 suffix = "b";
7745 break;
7746 case GE:
7747 switch (mode)
7748 {
7749 case CCNOmode:
7750 case CCGOCmode:
7751 suffix = "ns";
7752 break;
7753
7754 case CCmode:
7755 case CCGCmode:
7756 suffix = "ge";
7757 break;
7758
7759 default:
7760 gcc_unreachable ();
7761 }
7762 break;
7763 case GEU:
7764 /* ??? As above. */
7765 gcc_assert (mode == CCmode);
7766 suffix = fp ? "nb" : "ae";
7767 break;
7768 case LE:
7769 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
7770 suffix = "le";
7771 break;
7772 case LEU:
7773 gcc_assert (mode == CCmode);
7774 suffix = "be";
7775 break;
7776 case UNORDERED:
7777 suffix = fp ? "u" : "p";
7778 break;
7779 case ORDERED:
7780 suffix = fp ? "nu" : "np";
7781 break;
7782 default:
7783 gcc_unreachable ();
7784 }
7785 fputs (suffix, file);
7786 }
7787
7788 /* Print the name of register X to FILE based on its machine mode and number.
7789 If CODE is 'w', pretend the mode is HImode.
7790 If CODE is 'b', pretend the mode is QImode.
7791 If CODE is 'k', pretend the mode is SImode.
7792 If CODE is 'q', pretend the mode is DImode.
7793 If CODE is 'h', pretend the reg is the 'high' byte register.
7794 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
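/* Editorial example (AT&T syntax): for the %eax register, code 'b'
   prints "%al", 'w' prints "%ax", 'k' prints "%eax", 'q' prints "%rax"
   when generating 64-bit code, and 'h' prints "%ah"; the AMD extended
   registers instead get the r8b/r8w/r8d/r8 style names handled below.  */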
7795
7796 void
7797 print_reg (rtx x, int code, FILE *file)
7798 {
7799 gcc_assert (REGNO (x) != ARG_POINTER_REGNUM
7800 && REGNO (x) != FRAME_POINTER_REGNUM
7801 && REGNO (x) != FLAGS_REG
7802 && REGNO (x) != FPSR_REG
7803 && REGNO (x) != FPCR_REG);
7804
7805 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
7806 putc ('%', file);
7807
7808 if (code == 'w' || MMX_REG_P (x))
7809 code = 2;
7810 else if (code == 'b')
7811 code = 1;
7812 else if (code == 'k')
7813 code = 4;
7814 else if (code == 'q')
7815 code = 8;
7816 else if (code == 'y')
7817 code = 3;
7818 else if (code == 'h')
7819 code = 0;
7820 else
7821 code = GET_MODE_SIZE (GET_MODE (x));
7822
7823 /* Irritatingly, AMD extended registers use a different naming convention
7824 from the normal registers. */
7825 if (REX_INT_REG_P (x))
7826 {
7827 gcc_assert (TARGET_64BIT);
7828 switch (code)
7829 {
7830 case 0:
7831 error ("extended registers have no high halves");
7832 break;
7833 case 1:
7834 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
7835 break;
7836 case 2:
7837 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
7838 break;
7839 case 4:
7840 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
7841 break;
7842 case 8:
7843 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
7844 break;
7845 default:
7846 error ("unsupported operand size for extended register");
7847 break;
7848 }
7849 return;
7850 }
7851 switch (code)
7852 {
7853 case 3:
7854 if (STACK_TOP_P (x))
7855 {
7856 fputs ("st(0)", file);
7857 break;
7858 }
7859 /* FALLTHRU */
7860 case 8:
7861 case 4:
7862 case 12:
7863 if (! ANY_FP_REG_P (x))
7864 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
7865 /* FALLTHRU */
7866 case 16:
7867 case 2:
7868 normal:
7869 fputs (hi_reg_name[REGNO (x)], file);
7870 break;
7871 case 1:
7872 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
7873 goto normal;
7874 fputs (qi_reg_name[REGNO (x)], file);
7875 break;
7876 case 0:
7877 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
7878 goto normal;
7879 fputs (qi_high_reg_name[REGNO (x)], file);
7880 break;
7881 default:
7882 gcc_unreachable ();
7883 }
7884 }
7885
7886 /* Locate some local-dynamic symbol still in use by this function
7887 so that we can print its name in some tls_local_dynamic_base
7888 pattern. */
7889
7890 static const char *
7891 get_some_local_dynamic_name (void)
7892 {
7893 rtx insn;
7894
7895 if (cfun->machine->some_ld_name)
7896 return cfun->machine->some_ld_name;
7897
7898 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
7899 if (INSN_P (insn)
7900 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
7901 return cfun->machine->some_ld_name;
7902
7903 gcc_unreachable ();
7904 }
7905
7906 static int
7907 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
7908 {
7909 rtx x = *px;
7910
7911 if (GET_CODE (x) == SYMBOL_REF
7912 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
7913 {
7914 cfun->machine->some_ld_name = XSTR (x, 0);
7915 return 1;
7916 }
7917
7918 return 0;
7919 }
7920
7921 /* Meaning of CODE:
7922 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
7923 C -- print opcode suffix for set/cmov insn.
7924 c -- like C, but print reversed condition
7925 F,f -- likewise, but for floating-point.
7926 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
7927 otherwise nothing
7928 R -- print the prefix for register names.
7929 z -- print the opcode suffix for the size of the current operand.
7930 * -- print a star (in certain assembler syntax)
7931 A -- print an absolute memory reference.
7932 w -- print the operand as if it's a "word" (HImode) even if it isn't.
7933 s -- print a shift double count, followed by the assembler's argument
7934 delimiter.
7935 b -- print the QImode name of the register for the indicated operand.
7936 %b0 would print %al if operands[0] is reg 0.
7937 w -- likewise, print the HImode name of the register.
7938 k -- likewise, print the SImode name of the register.
7939 q -- likewise, print the DImode name of the register.
7940 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
7941 y -- print "st(0)" instead of "st" as a register.
7942 D -- print condition for SSE cmp instruction.
7943 P -- if PIC, print an @PLT suffix.
7944 X -- don't print any sort of PIC '@' suffix for a symbol.
7945 & -- print some in-use local-dynamic symbol name.
7946 H -- print a memory address offset by 8; used for sse high-parts
7947 */
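/* Editorial example, using a hypothetical output template:

     "add%z0\t{%2, %0|%0, %2}"

   Here 'z' expands to the size suffix of operand 0 ('l' for an SImode
   operand in AT&T syntax, nothing for Intel syntax), and the
   "{att|intel}" construct selects the operand order for the current
   assembler dialect.  */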
7948
7949 void
7950 print_operand (FILE *file, rtx x, int code)
7951 {
7952 if (code)
7953 {
7954 switch (code)
7955 {
7956 case '*':
7957 if (ASSEMBLER_DIALECT == ASM_ATT)
7958 putc ('*', file);
7959 return;
7960
7961 case '&':
7962 assemble_name (file, get_some_local_dynamic_name ());
7963 return;
7964
7965 case 'A':
7966 switch (ASSEMBLER_DIALECT)
7967 {
7968 case ASM_ATT:
7969 putc ('*', file);
7970 break;
7971
7972 case ASM_INTEL:
7973 /* Intel syntax. For absolute addresses, registers should not
7974 be surrounded by brackets. */
7975 if (GET_CODE (x) != REG)
7976 {
7977 putc ('[', file);
7978 PRINT_OPERAND (file, x, 0);
7979 putc (']', file);
7980 return;
7981 }
7982 break;
7983
7984 default:
7985 gcc_unreachable ();
7986 }
7987
7988 PRINT_OPERAND (file, x, 0);
7989 return;
7990
7991
7992 case 'L':
7993 if (ASSEMBLER_DIALECT == ASM_ATT)
7994 putc ('l', file);
7995 return;
7996
7997 case 'W':
7998 if (ASSEMBLER_DIALECT == ASM_ATT)
7999 putc ('w', file);
8000 return;
8001
8002 case 'B':
8003 if (ASSEMBLER_DIALECT == ASM_ATT)
8004 putc ('b', file);
8005 return;
8006
8007 case 'Q':
8008 if (ASSEMBLER_DIALECT == ASM_ATT)
8009 putc ('l', file);
8010 return;
8011
8012 case 'S':
8013 if (ASSEMBLER_DIALECT == ASM_ATT)
8014 putc ('s', file);
8015 return;
8016
8017 case 'T':
8018 if (ASSEMBLER_DIALECT == ASM_ATT)
8019 putc ('t', file);
8020 return;
8021
8022 case 'z':
8023 /* 387 opcodes don't get size suffixes if the operands are
8024 registers. */
8025 if (STACK_REG_P (x))
8026 return;
8027
8028 /* Likewise if using Intel opcodes. */
8029 if (ASSEMBLER_DIALECT == ASM_INTEL)
8030 return;
8031
8032 /* Derive the opcode suffix from the size of the operand. */
8033 switch (GET_MODE_SIZE (GET_MODE (x)))
8034 {
8035 case 2:
8036 #ifdef HAVE_GAS_FILDS_FISTS
8037 putc ('s', file);
8038 #endif
8039 return;
8040
8041 case 4:
8042 if (GET_MODE (x) == SFmode)
8043 {
8044 putc ('s', file);
8045 return;
8046 }
8047 else
8048 putc ('l', file);
8049 return;
8050
8051 case 12:
8052 case 16:
8053 putc ('t', file);
8054 return;
8055
8056 case 8:
8057 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
8058 {
8059 #ifdef GAS_MNEMONICS
8060 putc ('q', file);
8061 #else
8062 putc ('l', file);
8063 putc ('l', file);
8064 #endif
8065 }
8066 else
8067 putc ('l', file);
8068 return;
8069
8070 default:
8071 gcc_unreachable ();
8072 }
8073
8074 case 'b':
8075 case 'w':
8076 case 'k':
8077 case 'q':
8078 case 'h':
8079 case 'y':
8080 case 'X':
8081 case 'P':
8082 break;
8083
8084 case 's':
8085 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
8086 {
8087 PRINT_OPERAND (file, x, 0);
8088 putc (',', file);
8089 }
8090 return;
8091
8092 case 'D':
8093 /* A little bit of brain damage here. The SSE compare instructions
8094 use completely different names for the comparisons than the
8095 fp conditional moves do. */
8096 switch (GET_CODE (x))
8097 {
8098 case EQ:
8099 case UNEQ:
8100 fputs ("eq", file);
8101 break;
8102 case LT:
8103 case UNLT:
8104 fputs ("lt", file);
8105 break;
8106 case LE:
8107 case UNLE:
8108 fputs ("le", file);
8109 break;
8110 case UNORDERED:
8111 fputs ("unord", file);
8112 break;
8113 case NE:
8114 case LTGT:
8115 fputs ("neq", file);
8116 break;
8117 case UNGE:
8118 case GE:
8119 fputs ("nlt", file);
8120 break;
8121 case UNGT:
8122 case GT:
8123 fputs ("nle", file);
8124 break;
8125 case ORDERED:
8126 fputs ("ord", file);
8127 break;
8128 default:
8129 gcc_unreachable ();
8130 }
8131 return;
8132 case 'O':
8133 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8134 if (ASSEMBLER_DIALECT == ASM_ATT)
8135 {
8136 switch (GET_MODE (x))
8137 {
8138 case HImode: putc ('w', file); break;
8139 case SImode:
8140 case SFmode: putc ('l', file); break;
8141 case DImode:
8142 case DFmode: putc ('q', file); break;
8143 default: gcc_unreachable ();
8144 }
8145 putc ('.', file);
8146 }
8147 #endif
8148 return;
8149 case 'C':
8150 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
8151 return;
8152 case 'F':
8153 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8154 if (ASSEMBLER_DIALECT == ASM_ATT)
8155 putc ('.', file);
8156 #endif
8157 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
8158 return;
8159
8160 /* Like above, but reverse condition */
8161 case 'c':
8162 /* Check to see if argument to %c is really a constant
8163 and not a condition code which needs to be reversed. */
8164 if (!COMPARISON_P (x))
8165 {
8166 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
8167 return;
8168 }
8169 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
8170 return;
8171 case 'f':
8172 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8173 if (ASSEMBLER_DIALECT == ASM_ATT)
8174 putc ('.', file);
8175 #endif
8176 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
8177 return;
8178
8179 case 'H':
8180 /* It doesn't actually matter what mode we use here, as we're
8181 only going to use this for printing. */
8182 x = adjust_address_nv (x, DImode, 8);
8183 break;
8184
8185 case '+':
8186 {
8187 rtx x;
8188
8189 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
8190 return;
8191
8192 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
8193 if (x)
8194 {
8195 int pred_val = INTVAL (XEXP (x, 0));
8196
8197 if (pred_val < REG_BR_PROB_BASE * 45 / 100
8198 || pred_val > REG_BR_PROB_BASE * 55 / 100)
8199 {
8200 int taken = pred_val > REG_BR_PROB_BASE / 2;
8201 int cputaken = final_forward_branch_p (current_output_insn) == 0;
8202
8203 /* Emit hints only in the case where the default branch prediction
8204 heuristics would fail. */
8205 if (taken != cputaken)
8206 {
8207 /* We use 3e (DS) prefix for taken branches and
8208 2e (CS) prefix for not taken branches. */
8209 if (taken)
8210 fputs ("ds ; ", file);
8211 else
8212 fputs ("cs ; ", file);
8213 }
8214 }
8215 }
8216 return;
8217 }
8218 default:
8219 output_operand_lossage ("invalid operand code '%c'", code);
8220 }
8221 }
8222
8223 if (GET_CODE (x) == REG)
8224 print_reg (x, code, file);
8225
8226 else if (GET_CODE (x) == MEM)
8227 {
8228 /* No `byte ptr' prefix for call instructions. */
8229 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
8230 {
8231 const char * size;
8232 switch (GET_MODE_SIZE (GET_MODE (x)))
8233 {
8234 case 1: size = "BYTE"; break;
8235 case 2: size = "WORD"; break;
8236 case 4: size = "DWORD"; break;
8237 case 8: size = "QWORD"; break;
8238 case 12: size = "XWORD"; break;
8239 case 16: size = "XMMWORD"; break;
8240 default:
8241 gcc_unreachable ();
8242 }
8243
8244 /* Check for explicit size override (codes 'b', 'w' and 'k') */
8245 if (code == 'b')
8246 size = "BYTE";
8247 else if (code == 'w')
8248 size = "WORD";
8249 else if (code == 'k')
8250 size = "DWORD";
8251
8252 fputs (size, file);
8253 fputs (" PTR ", file);
8254 }
8255
8256 x = XEXP (x, 0);
8257 /* Avoid (%rip) for call operands. */
8258 if (CONSTANT_ADDRESS_P (x) && code == 'P'
8259 && GET_CODE (x) != CONST_INT)
8260 output_addr_const (file, x);
8261 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
8262 output_operand_lossage ("invalid constraints for operand");
8263 else
8264 output_address (x);
8265 }
8266
8267 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
8268 {
8269 REAL_VALUE_TYPE r;
8270 long l;
8271
8272 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8273 REAL_VALUE_TO_TARGET_SINGLE (r, l);
8274
8275 if (ASSEMBLER_DIALECT == ASM_ATT)
8276 putc ('$', file);
8277 fprintf (file, "0x%08lx", l);
8278 }
8279
8280 /* These float cases don't actually occur as immediate operands. */
8281 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
8282 {
8283 char dstr[30];
8284
8285 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
8286 fprintf (file, "%s", dstr);
8287 }
8288
8289 else if (GET_CODE (x) == CONST_DOUBLE
8290 && GET_MODE (x) == XFmode)
8291 {
8292 char dstr[30];
8293
8294 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
8295 fprintf (file, "%s", dstr);
8296 }
8297
8298 else
8299 {
8300 /* We have patterns that allow zero sets of memory, for instance.
8301 In 64-bit mode, we should probably support all 8-byte vectors,
8302 since we can in fact encode that into an immediate. */
8303 if (GET_CODE (x) == CONST_VECTOR)
8304 {
8305 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
8306 x = const0_rtx;
8307 }
8308
8309 if (code != 'P')
8310 {
8311 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
8312 {
8313 if (ASSEMBLER_DIALECT == ASM_ATT)
8314 putc ('$', file);
8315 }
8316 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
8317 || GET_CODE (x) == LABEL_REF)
8318 {
8319 if (ASSEMBLER_DIALECT == ASM_ATT)
8320 putc ('$', file);
8321 else
8322 fputs ("OFFSET FLAT:", file);
8323 }
8324 }
8325 if (GET_CODE (x) == CONST_INT)
8326 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
8327 else if (flag_pic)
8328 output_pic_addr_const (file, x, code);
8329 else
8330 output_addr_const (file, x);
8331 }
8332 }
8333 \f
8334 /* Print a memory operand whose address is ADDR. */
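/* Editorial example: a base + index*scale + displacement address is
   printed below as "16(%eax,%ebx,4)" in AT&T syntax and as
   "[eax+16+ebx*4]" in Intel syntax (register names hypothetical).  */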
8335
8336 void
8337 print_operand_address (FILE *file, rtx addr)
8338 {
8339 struct ix86_address parts;
8340 rtx base, index, disp;
8341 int scale;
8342 int ok = ix86_decompose_address (addr, &parts);
8343
8344 gcc_assert (ok);
8345
8346 base = parts.base;
8347 index = parts.index;
8348 disp = parts.disp;
8349 scale = parts.scale;
8350
8351 switch (parts.seg)
8352 {
8353 case SEG_DEFAULT:
8354 break;
8355 case SEG_FS:
8356 case SEG_GS:
8357 if (USER_LABEL_PREFIX[0] == 0)
8358 putc ('%', file);
8359 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
8360 break;
8361 default:
8362 gcc_unreachable ();
8363 }
8364
8365 if (!base && !index)
8366 {
8367 /* A displacement-only address requires special attention. */
8368
8369 if (GET_CODE (disp) == CONST_INT)
8370 {
8371 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
8372 {
8373 if (USER_LABEL_PREFIX[0] == 0)
8374 putc ('%', file);
8375 fputs ("ds:", file);
8376 }
8377 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
8378 }
8379 else if (flag_pic)
8380 output_pic_addr_const (file, disp, 0);
8381 else
8382 output_addr_const (file, disp);
8383
8384 /* Use the one byte shorter RIP-relative addressing for 64bit mode. */
8385 if (TARGET_64BIT)
8386 {
8387 if (GET_CODE (disp) == CONST
8388 && GET_CODE (XEXP (disp, 0)) == PLUS
8389 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
8390 disp = XEXP (XEXP (disp, 0), 0);
8391 if (GET_CODE (disp) == LABEL_REF
8392 || (GET_CODE (disp) == SYMBOL_REF
8393 && SYMBOL_REF_TLS_MODEL (disp) == 0))
8394 fputs ("(%rip)", file);
8395 }
8396 }
8397 else
8398 {
8399 if (ASSEMBLER_DIALECT == ASM_ATT)
8400 {
8401 if (disp)
8402 {
8403 if (flag_pic)
8404 output_pic_addr_const (file, disp, 0);
8405 else if (GET_CODE (disp) == LABEL_REF)
8406 output_asm_label (disp);
8407 else
8408 output_addr_const (file, disp);
8409 }
8410
8411 putc ('(', file);
8412 if (base)
8413 print_reg (base, 0, file);
8414 if (index)
8415 {
8416 putc (',', file);
8417 print_reg (index, 0, file);
8418 if (scale != 1)
8419 fprintf (file, ",%d", scale);
8420 }
8421 putc (')', file);
8422 }
8423 else
8424 {
8425 rtx offset = NULL_RTX;
8426
8427 if (disp)
8428 {
8429 /* Pull out the offset of a symbol; print any symbol itself. */
8430 if (GET_CODE (disp) == CONST
8431 && GET_CODE (XEXP (disp, 0)) == PLUS
8432 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
8433 {
8434 offset = XEXP (XEXP (disp, 0), 1);
8435 disp = gen_rtx_CONST (VOIDmode,
8436 XEXP (XEXP (disp, 0), 0));
8437 }
8438
8439 if (flag_pic)
8440 output_pic_addr_const (file, disp, 0);
8441 else if (GET_CODE (disp) == LABEL_REF)
8442 output_asm_label (disp);
8443 else if (GET_CODE (disp) == CONST_INT)
8444 offset = disp;
8445 else
8446 output_addr_const (file, disp);
8447 }
8448
8449 putc ('[', file);
8450 if (base)
8451 {
8452 print_reg (base, 0, file);
8453 if (offset)
8454 {
8455 if (INTVAL (offset) >= 0)
8456 putc ('+', file);
8457 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
8458 }
8459 }
8460 else if (offset)
8461 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
8462 else
8463 putc ('0', file);
8464
8465 if (index)
8466 {
8467 putc ('+', file);
8468 print_reg (index, 0, file);
8469 if (scale != 1)
8470 fprintf (file, "*%d", scale);
8471 }
8472 putc (']', file);
8473 }
8474 }
8475 }
8476
8477 bool
8478 output_addr_const_extra (FILE *file, rtx x)
8479 {
8480 rtx op;
8481
8482 if (GET_CODE (x) != UNSPEC)
8483 return false;
8484
8485 op = XVECEXP (x, 0, 0);
8486 switch (XINT (x, 1))
8487 {
8488 case UNSPEC_GOTTPOFF:
8489 output_addr_const (file, op);
8490 /* FIXME: This might be @TPOFF in Sun ld. */
8491 fputs ("@GOTTPOFF", file);
8492 break;
8493 case UNSPEC_TPOFF:
8494 output_addr_const (file, op);
8495 fputs ("@TPOFF", file);
8496 break;
8497 case UNSPEC_NTPOFF:
8498 output_addr_const (file, op);
8499 if (TARGET_64BIT)
8500 fputs ("@TPOFF", file);
8501 else
8502 fputs ("@NTPOFF", file);
8503 break;
8504 case UNSPEC_DTPOFF:
8505 output_addr_const (file, op);
8506 fputs ("@DTPOFF", file);
8507 break;
8508 case UNSPEC_GOTNTPOFF:
8509 output_addr_const (file, op);
8510 if (TARGET_64BIT)
8511 fputs ("@GOTTPOFF(%rip)", file);
8512 else
8513 fputs ("@GOTNTPOFF", file);
8514 break;
8515 case UNSPEC_INDNTPOFF:
8516 output_addr_const (file, op);
8517 fputs ("@INDNTPOFF", file);
8518 break;
8519
8520 default:
8521 return false;
8522 }
8523
8524 return true;
8525 }
8526 \f
8527 /* Split one or more DImode RTL references into pairs of SImode
8528 references. The RTL can be REG, offsettable MEM, integer constant, or
8529 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
8530 split and "num" is its length. lo_half and hi_half are output arrays
8531 that parallel "operands". */
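/* Editorial example: a DImode MEM is split into the same address at
   offsets 0 and 4, while a constant such as (const_int 0x100000002)
   splits into (const_int 2) (low) and (const_int 1) (high) on this
   little-endian target.  */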
8532
8533 void
8534 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
8535 {
8536 while (num--)
8537 {
8538 rtx op = operands[num];
8539
8540 /* simplify_subreg refuses to split volatile memory references,
8541 but we still have to handle them. */
8542 if (GET_CODE (op) == MEM)
8543 {
8544 lo_half[num] = adjust_address (op, SImode, 0);
8545 hi_half[num] = adjust_address (op, SImode, 4);
8546 }
8547 else
8548 {
8549 lo_half[num] = simplify_gen_subreg (SImode, op,
8550 GET_MODE (op) == VOIDmode
8551 ? DImode : GET_MODE (op), 0);
8552 hi_half[num] = simplify_gen_subreg (SImode, op,
8553 GET_MODE (op) == VOIDmode
8554 ? DImode : GET_MODE (op), 4);
8555 }
8556 }
8557 }
8558 /* Split one or more TImode RTL references into pairs of DImode
8559 references. The RTL can be REG, offsettable MEM, integer constant, or
8560 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
8561 split and "num" is its length. lo_half and hi_half are output arrays
8562 that parallel "operands". */
8563
8564 void
8565 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
8566 {
8567 while (num--)
8568 {
8569 rtx op = operands[num];
8570
8571 /* simplify_subreg refuses to split volatile memory references, but we
8572 still have to handle them. */
8573 if (GET_CODE (op) == MEM)
8574 {
8575 lo_half[num] = adjust_address (op, DImode, 0);
8576 hi_half[num] = adjust_address (op, DImode, 8);
8577 }
8578 else
8579 {
8580 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
8581 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
8582 }
8583 }
8584 }
8585 \f
8586 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
8587 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
8588 is the expression of the binary operation. The output may either be
8589 emitted here, or returned to the caller, like all output_* functions.
8590
8591 There is no guarantee that the operands are the same mode, as they
8592 might be within FLOAT or FLOAT_EXTEND expressions. */
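/* Editorial example: for an SSE SFmode addition the routine below
   returns "addss\t{%2, %0|%0, %2}", while a 387 addition with a memory
   operand yields "fadd%z2\t%2"; the %z and %y codes used in these
   templates are expanded later by print_operand.  */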
8593
8594 #ifndef SYSV386_COMPAT
8595 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
8596 wants to fix the assemblers because that causes incompatibility
8597 with gcc. No-one wants to fix gcc because that causes
8598 incompatibility with assemblers... You can use the option of
8599 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
8600 #define SYSV386_COMPAT 1
8601 #endif
8602
8603 const char *
8604 output_387_binary_op (rtx insn, rtx *operands)
8605 {
8606 static char buf[30];
8607 const char *p;
8608 const char *ssep;
8609 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
8610
8611 #ifdef ENABLE_CHECKING
8612 /* Even if we do not want to check the inputs, this documents the input
8613 constraints, which helps in understanding the following code. */
8614 if (STACK_REG_P (operands[0])
8615 && ((REG_P (operands[1])
8616 && REGNO (operands[0]) == REGNO (operands[1])
8617 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
8618 || (REG_P (operands[2])
8619 && REGNO (operands[0]) == REGNO (operands[2])
8620 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
8621 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
8622 ; /* ok */
8623 else
8624 gcc_assert (is_sse);
8625 #endif
8626
8627 switch (GET_CODE (operands[3]))
8628 {
8629 case PLUS:
8630 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8631 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8632 p = "fiadd";
8633 else
8634 p = "fadd";
8635 ssep = "add";
8636 break;
8637
8638 case MINUS:
8639 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8640 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8641 p = "fisub";
8642 else
8643 p = "fsub";
8644 ssep = "sub";
8645 break;
8646
8647 case MULT:
8648 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8649 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8650 p = "fimul";
8651 else
8652 p = "fmul";
8653 ssep = "mul";
8654 break;
8655
8656 case DIV:
8657 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8658 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8659 p = "fidiv";
8660 else
8661 p = "fdiv";
8662 ssep = "div";
8663 break;
8664
8665 default:
8666 gcc_unreachable ();
8667 }
8668
8669 if (is_sse)
8670 {
8671 strcpy (buf, ssep);
8672 if (GET_MODE (operands[0]) == SFmode)
8673 strcat (buf, "ss\t{%2, %0|%0, %2}");
8674 else
8675 strcat (buf, "sd\t{%2, %0|%0, %2}");
8676 return buf;
8677 }
8678 strcpy (buf, p);
8679
8680 switch (GET_CODE (operands[3]))
8681 {
8682 case MULT:
8683 case PLUS:
8684 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
8685 {
8686 rtx temp = operands[2];
8687 operands[2] = operands[1];
8688 operands[1] = temp;
8689 }
8690
8691 /* We now know operands[0] == operands[1]. */
8692
8693 if (GET_CODE (operands[2]) == MEM)
8694 {
8695 p = "%z2\t%2";
8696 break;
8697 }
8698
8699 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
8700 {
8701 if (STACK_TOP_P (operands[0]))
8702 /* How is it that we are storing to a dead operand[2]?
8703 Well, presumably operands[1] is dead too. We can't
8704 store the result to st(0) as st(0) gets popped on this
8705 instruction. Instead store to operands[2] (which I
8706 think has to be st(1)). st(1) will be popped later.
8707 gcc <= 2.8.1 didn't have this check and generated
8708 assembly code that the Unixware assembler rejected. */
8709 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
8710 else
8711 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
8712 break;
8713 }
8714
8715 if (STACK_TOP_P (operands[0]))
8716 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
8717 else
8718 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
8719 break;
8720
8721 case MINUS:
8722 case DIV:
8723 if (GET_CODE (operands[1]) == MEM)
8724 {
8725 p = "r%z1\t%1";
8726 break;
8727 }
8728
8729 if (GET_CODE (operands[2]) == MEM)
8730 {
8731 p = "%z2\t%2";
8732 break;
8733 }
8734
8735 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
8736 {
8737 #if SYSV386_COMPAT
8738 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
8739 derived assemblers, confusingly reverse the direction of
8740 the operation for fsub{r} and fdiv{r} when the
8741 destination register is not st(0). The Intel assembler
8742 doesn't have this brain damage. Read !SYSV386_COMPAT to
8743 figure out what the hardware really does. */
8744 if (STACK_TOP_P (operands[0]))
8745 p = "{p\t%0, %2|rp\t%2, %0}";
8746 else
8747 p = "{rp\t%2, %0|p\t%0, %2}";
8748 #else
8749 if (STACK_TOP_P (operands[0]))
8750 /* As above for fmul/fadd, we can't store to st(0). */
8751 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
8752 else
8753 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
8754 #endif
8755 break;
8756 }
8757
8758 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
8759 {
8760 #if SYSV386_COMPAT
8761 if (STACK_TOP_P (operands[0]))
8762 p = "{rp\t%0, %1|p\t%1, %0}";
8763 else
8764 p = "{p\t%1, %0|rp\t%0, %1}";
8765 #else
8766 if (STACK_TOP_P (operands[0]))
8767 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
8768 else
8769 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
8770 #endif
8771 break;
8772 }
8773
8774 if (STACK_TOP_P (operands[0]))
8775 {
8776 if (STACK_TOP_P (operands[1]))
8777 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
8778 else
8779 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
8780 break;
8781 }
8782 else if (STACK_TOP_P (operands[1]))
8783 {
8784 #if SYSV386_COMPAT
8785 p = "{\t%1, %0|r\t%0, %1}";
8786 #else
8787 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
8788 #endif
8789 }
8790 else
8791 {
8792 #if SYSV386_COMPAT
8793 p = "{r\t%2, %0|\t%0, %2}";
8794 #else
8795 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
8796 #endif
8797 }
8798 break;
8799
8800 default:
8801 gcc_unreachable ();
8802 }
8803
8804 strcat (buf, p);
8805 return buf;
8806 }
8807
8808 /* Return needed mode for entity in optimize_mode_switching pass. */
8809
8810 int
8811 ix86_mode_needed (int entity, rtx insn)
8812 {
8813 enum attr_i387_cw mode;
8814
8815 /* The mode UNINITIALIZED is used to store the control word after a
8816 function call or ASM pattern. The mode ANY specifies that the function
8817 has no requirements on the control word and makes no changes to the
8818 bits we are interested in. */
8819
8820 if (CALL_P (insn)
8821 || (NONJUMP_INSN_P (insn)
8822 && (asm_noperands (PATTERN (insn)) >= 0
8823 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
8824 return I387_CW_UNINITIALIZED;
8825
8826 if (recog_memoized (insn) < 0)
8827 return I387_CW_ANY;
8828
8829 mode = get_attr_i387_cw (insn);
8830
8831 switch (entity)
8832 {
8833 case I387_TRUNC:
8834 if (mode == I387_CW_TRUNC)
8835 return mode;
8836 break;
8837
8838 case I387_FLOOR:
8839 if (mode == I387_CW_FLOOR)
8840 return mode;
8841 break;
8842
8843 case I387_CEIL:
8844 if (mode == I387_CW_CEIL)
8845 return mode;
8846 break;
8847
8848 case I387_MASK_PM:
8849 if (mode == I387_CW_MASK_PM)
8850 return mode;
8851 break;
8852
8853 default:
8854 gcc_unreachable ();
8855 }
8856
8857 return I387_CW_ANY;
8858 }
8859
8860 /* Output code to initialize control word copies used by trunc?f?i and
8861 rounding patterns. MODE selects which variant of the control word
8862 (truncation, floor, ceiling or precision-mask) to prepare. */
8863
8864 void
8865 emit_i387_cw_initialization (int mode)
8866 {
8867 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
8868 rtx new_mode;
8869
8870 int slot;
8871
8872 rtx reg = gen_reg_rtx (HImode);
8873
8874 emit_insn (gen_x86_fnstcw_1 (stored_mode));
8875 emit_move_insn (reg, copy_rtx (stored_mode));
8876
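/* The x87 control word keeps the rounding mode in bits 10-11 (00 = to
   nearest, 01 = down, 10 = up, 11 = truncate) and the precision-exception
   mask in bit 5, which is what the constants 0x0c00, 0x0400, 0x0800 and
   0x0020 used below select.  */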
8877 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
8878 {
8879 switch (mode)
8880 {
8881 case I387_CW_TRUNC:
8882 /* round toward zero (truncate) */
8883 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
8884 slot = SLOT_CW_TRUNC;
8885 break;
8886
8887 case I387_CW_FLOOR:
8888 /* round down toward -oo */
8889 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
8890 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
8891 slot = SLOT_CW_FLOOR;
8892 break;
8893
8894 case I387_CW_CEIL:
8895 /* round up toward +oo */
8896 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
8897 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
8898 slot = SLOT_CW_CEIL;
8899 break;
8900
8901 case I387_CW_MASK_PM:
8902 /* mask precision exception for nearbyint() */
8903 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8904 slot = SLOT_CW_MASK_PM;
8905 break;
8906
8907 default:
8908 gcc_unreachable ();
8909 }
8910 }
8911 else
8912 {
8913 switch (mode)
8914 {
8915 case I387_CW_TRUNC:
8916 /* round toward zero (truncate) */
8917 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
8918 slot = SLOT_CW_TRUNC;
8919 break;
8920
8921 case I387_CW_FLOOR:
8922 /* round down toward -oo */
8923 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
8924 slot = SLOT_CW_FLOOR;
8925 break;
8926
8927 case I387_CW_CEIL:
8928 /* round up toward +oo */
8929 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
8930 slot = SLOT_CW_CEIL;
8931 break;
8932
8933 case I387_CW_MASK_PM:
8934 /* mask precision exception for nearbyint() */
8935 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8936 slot = SLOT_CW_MASK_PM;
8937 break;
8938
8939 default:
8940 gcc_unreachable ();
8941 }
8942 }
8943
8944 gcc_assert (slot < MAX_386_STACK_LOCALS);
8945
8946 new_mode = assign_386_stack_local (HImode, slot);
8947 emit_move_insn (new_mode, reg);
8948 }
8949
8950 /* Output code for INSN to convert a float to a signed int. OPERANDS
8951 are the insn operands. The output may be [HSD]Imode and the input
8952 operand may be [SDX]Fmode. */
8953
8954 const char *
8955 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
8956 {
8957 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8958 int dimode_p = GET_MODE (operands[0]) == DImode;
8959 int round_mode = get_attr_i387_cw (insn);
8960
8961 /* Jump through a hoop or two for DImode, since the hardware has no
8962 non-popping instruction. We used to do this a different way, but
8963 that was somewhat fragile and broke with post-reload splitters. */
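/* fisttp always pops, and DImode has only a popping fistp form, so if
   the value in st(0) is still live we duplicate it with fld first; the
   pop then consumes the copy rather than the original.  */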
8964 if ((dimode_p || fisttp) && !stack_top_dies)
8965 output_asm_insn ("fld\t%y1", operands);
8966
8967 gcc_assert (STACK_TOP_P (operands[1]));
8968 gcc_assert (GET_CODE (operands[0]) == MEM);
8969
8970 if (fisttp)
8971 output_asm_insn ("fisttp%z0\t%0", operands);
8972 else
8973 {
8974 if (round_mode != I387_CW_ANY)
8975 output_asm_insn ("fldcw\t%3", operands);
8976 if (stack_top_dies || dimode_p)
8977 output_asm_insn ("fistp%z0\t%0", operands);
8978 else
8979 output_asm_insn ("fist%z0\t%0", operands);
8980 if (round_mode != I387_CW_ANY)
8981 output_asm_insn ("fldcw\t%2", operands);
8982 }
8983
8984 return "";
8985 }
8986
8987 /* Output code for x87 ffreep insn. The OPNO argument, which may only
8988 have the values zero or one, indicates the ffreep insn's operand
8989 from the OPERANDS array. */
8990
8991 static const char *
8992 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
8993 {
8994 if (TARGET_USE_FFREEP)
8995 #if HAVE_AS_IX86_FFREEP
8996 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
8997 #else
8998 {
8999 static char retval[] = ".word\t0xc_df";
9000 int regno = REGNO (operands[opno]);
9001
9002 gcc_assert (FP_REGNO_P (regno));
9003
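/* Patch the placeholder digit so the .word directive emits the
   little-endian byte pair 0xdf, 0xc0+N, i.e. the encoding of
   ffreep %st(N), for assemblers that do not know the mnemonic.  */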
9004 retval[9] = '0' + (regno - FIRST_STACK_REG);
9005 return retval;
9006 }
9007 #endif
9008
9009 return opno ? "fstp\t%y1" : "fstp\t%y0";
9010 }
9011
9012
9013 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
9014 should be used. UNORDERED_P is true when fucom should be used. */
9015
9016 const char *
9017 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
9018 {
9019 int stack_top_dies;
9020 rtx cmp_op0, cmp_op1;
9021 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
9022
9023 if (eflags_p)
9024 {
9025 cmp_op0 = operands[0];
9026 cmp_op1 = operands[1];
9027 }
9028 else
9029 {
9030 cmp_op0 = operands[1];
9031 cmp_op1 = operands[2];
9032 }
9033
9034 if (is_sse)
9035 {
9036 if (GET_MODE (operands[0]) == SFmode)
9037 if (unordered_p)
9038 return "ucomiss\t{%1, %0|%0, %1}";
9039 else
9040 return "comiss\t{%1, %0|%0, %1}";
9041 else
9042 if (unordered_p)
9043 return "ucomisd\t{%1, %0|%0, %1}";
9044 else
9045 return "comisd\t{%1, %0|%0, %1}";
9046 }
9047
9048 gcc_assert (STACK_TOP_P (cmp_op0));
9049
9050 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
9051
9052 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
9053 {
9054 if (stack_top_dies)
9055 {
9056 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
9057 return output_387_ffreep (operands, 1);
9058 }
9059 else
9060 return "ftst\n\tfnstsw\t%0";
9061 }
9062
9063 if (STACK_REG_P (cmp_op1)
9064 && stack_top_dies
9065 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
9066 && REGNO (cmp_op1) != FIRST_STACK_REG)
9067 {
9068 /* If the top of the 387 stack dies, and the other operand is also
9069 a stack register that dies, then this must be a `fcompp' float
9070 compare.  */
9071
9072 if (eflags_p)
9073 {
9074 /* There is no double popping fcomi variant. Fortunately,
9075 eflags is immune from the fstp's cc clobbering. */
9076 if (unordered_p)
9077 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
9078 else
9079 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
9080 return output_387_ffreep (operands, 0);
9081 }
9082 else
9083 {
9084 if (unordered_p)
9085 return "fucompp\n\tfnstsw\t%0";
9086 else
9087 return "fcompp\n\tfnstsw\t%0";
9088 }
9089 }
9090 else
9091 {
9092 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
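/* For instance, a non-popping unordered fcomi compare (eflags_p = 1,
   FP operand, unordered_p = 1, stack top survives) yields mask 10
   (binary 1010) and selects "fucomi".  */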
9093
9094 static const char * const alt[16] =
9095 {
9096 "fcom%z2\t%y2\n\tfnstsw\t%0",
9097 "fcomp%z2\t%y2\n\tfnstsw\t%0",
9098 "fucom%z2\t%y2\n\tfnstsw\t%0",
9099 "fucomp%z2\t%y2\n\tfnstsw\t%0",
9100
9101 "ficom%z2\t%y2\n\tfnstsw\t%0",
9102 "ficomp%z2\t%y2\n\tfnstsw\t%0",
9103 NULL,
9104 NULL,
9105
9106 "fcomi\t{%y1, %0|%0, %y1}",
9107 "fcomip\t{%y1, %0|%0, %y1}",
9108 "fucomi\t{%y1, %0|%0, %y1}",
9109 "fucomip\t{%y1, %0|%0, %y1}",
9110
9111 NULL,
9112 NULL,
9113 NULL,
9114 NULL
9115 };
9116
9117 int mask;
9118 const char *ret;
9119
9120 mask = eflags_p << 3;
9121 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
9122 mask |= unordered_p << 1;
9123 mask |= stack_top_dies;
9124
9125 gcc_assert (mask < 16);
9126 ret = alt[mask];
9127 gcc_assert (ret);
9128
9129 return ret;
9130 }
9131 }
9132
9133 void
9134 ix86_output_addr_vec_elt (FILE *file, int value)
9135 {
9136 const char *directive = ASM_LONG;
9137
9138 #ifdef ASM_QUAD
9139 if (TARGET_64BIT)
9140 directive = ASM_QUAD;
9141 #else
9142 gcc_assert (!TARGET_64BIT);
9143 #endif
9144
9145 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
9146 }
9147
9148 void
9149 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
9150 {
9151 if (TARGET_64BIT)
9152 fprintf (file, "%s%s%d-%s%d\n",
9153 ASM_LONG, LPREFIX, value, LPREFIX, rel);
9154 else if (HAVE_AS_GOTOFF_IN_DATA)
9155 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
9156 #if TARGET_MACHO
9157 else if (TARGET_MACHO)
9158 {
9159 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
9160 machopic_output_function_base_name (file);
9161 fprintf(file, "\n");
9162 }
9163 #endif
9164 else
9165 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
9166 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
9167 }
9168 \f
9169 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
9170 for the target. */
9171
9172 void
9173 ix86_expand_clear (rtx dest)
9174 {
9175 rtx tmp;
9176
9177 /* We play register width games, which are only valid after reload. */
9178 gcc_assert (reload_completed);
9179
9180 /* Avoid HImode and its attendant prefix byte. */
9181 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
9182 dest = gen_rtx_REG (SImode, REGNO (dest));
9183
9184 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
9185
9186 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
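/* The xor form clobbers the flags, so the pattern must carry an explicit
   clobber of the flags register (hard register 17, FLAGS_REG).  */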
9187 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
9188 {
9189 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
9190 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
9191 }
9192
9193 emit_insn (tmp);
9194 }
9195
9196 /* X is an unchanging MEM. If it is a constant pool reference, return
9197 the constant pool rtx, else NULL. */
9198
9199 rtx
9200 maybe_get_pool_constant (rtx x)
9201 {
9202 x = ix86_delegitimize_address (XEXP (x, 0));
9203
9204 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
9205 return get_pool_constant (x);
9206
9207 return NULL_RTX;
9208 }
9209
9210 void
9211 ix86_expand_move (enum machine_mode mode, rtx operands[])
9212 {
9213 int strict = (reload_in_progress || reload_completed);
9214 rtx op0, op1;
9215 enum tls_model model;
9216
9217 op0 = operands[0];
9218 op1 = operands[1];
9219
9220 if (GET_CODE (op1) == SYMBOL_REF)
9221 {
9222 model = SYMBOL_REF_TLS_MODEL (op1);
9223 if (model)
9224 {
9225 op1 = legitimize_tls_address (op1, model, true);
9226 op1 = force_operand (op1, op0);
9227 if (op1 == op0)
9228 return;
9229 }
9230 }
9231 else if (GET_CODE (op1) == CONST
9232 && GET_CODE (XEXP (op1, 0)) == PLUS
9233 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
9234 {
9235 model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
9236 if (model)
9237 {
9238 rtx addend = XEXP (XEXP (op1, 0), 1);
9239 op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
9240 op1 = force_operand (op1, NULL);
9241 op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
9242 op0, 1, OPTAB_DIRECT);
9243 if (op1 == op0)
9244 return;
9245 }
9246 }
9247
9248 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
9249 {
9250 if (TARGET_MACHO && !TARGET_64BIT)
9251 {
9252 #if TARGET_MACHO
9253 if (MACHOPIC_PURE)
9254 {
9255 rtx temp = ((reload_in_progress
9256 || ((op0 && GET_CODE (op0) == REG)
9257 && mode == Pmode))
9258 ? op0 : gen_reg_rtx (Pmode));
9259 op1 = machopic_indirect_data_reference (op1, temp);
9260 op1 = machopic_legitimize_pic_address (op1, mode,
9261 temp == op1 ? 0 : temp);
9262 }
9263 else if (MACHOPIC_INDIRECT)
9264 op1 = machopic_indirect_data_reference (op1, 0);
9265 if (op0 == op1)
9266 return;
9267 #endif
9268 }
9269 else
9270 {
9271 if (GET_CODE (op0) == MEM)
9272 op1 = force_reg (Pmode, op1);
9273 else
9274 op1 = legitimize_address (op1, op1, Pmode);
9275 }
9276 }
9277 else
9278 {
9279 if (GET_CODE (op0) == MEM
9280 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
9281 || !push_operand (op0, mode))
9282 && GET_CODE (op1) == MEM)
9283 op1 = force_reg (mode, op1);
9284
9285 if (push_operand (op0, mode)
9286 && ! general_no_elim_operand (op1, mode))
9287 op1 = copy_to_mode_reg (mode, op1);
9288
9289 /* Force large constants in 64bit compilation into register
9290 to get them CSEed. */
9291 if (TARGET_64BIT && mode == DImode
9292 && immediate_operand (op1, mode)
9293 && !x86_64_zext_immediate_operand (op1, VOIDmode)
9294 && !register_operand (op0, mode)
9295 && optimize && !reload_completed && !reload_in_progress)
9296 op1 = copy_to_mode_reg (mode, op1);
9297
9298 if (FLOAT_MODE_P (mode))
9299 {
9300 /* If we are loading a floating point constant to a register,
9301 force the value to memory now, since we'll get better code
9302 out the back end. */
9303
9304 if (strict)
9305 ;
9306 else if (GET_CODE (op1) == CONST_DOUBLE)
9307 {
9308 op1 = validize_mem (force_const_mem (mode, op1));
9309 if (!register_operand (op0, mode))
9310 {
9311 rtx temp = gen_reg_rtx (mode);
9312 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
9313 emit_move_insn (op0, temp);
9314 return;
9315 }
9316 }
9317 }
9318 }
9319
9320 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
9321 }
9322
9323 void
9324 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
9325 {
9326 rtx op0 = operands[0], op1 = operands[1];
9327
9328 /* Force constants other than zero into memory. We do not know how
9329 the instructions used to build constants modify the upper 64 bits
9330 of the register; once we have that information we may be able
9331 to handle some of them more efficiently. */
9332 if ((reload_in_progress | reload_completed) == 0
9333 && register_operand (op0, mode)
9334 && CONSTANT_P (op1)
9335 && standard_sse_constant_p (op1) <= 0)
9336 op1 = validize_mem (force_const_mem (mode, op1));
9337
9338 /* Make operand1 a register if it isn't already. */
9339 if (!no_new_pseudos
9340 && !register_operand (op0, mode)
9341 && !register_operand (op1, mode))
9342 {
9343 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
9344 return;
9345 }
9346
9347 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
9348 }
9349
9350 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
9351 straight to ix86_expand_vector_move. */
9352
9353 void
9354 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
9355 {
9356 rtx op0, op1, m;
9357
9358 op0 = operands[0];
9359 op1 = operands[1];
9360
9361 if (MEM_P (op1))
9362 {
9363 /* If we're optimizing for size, movups is the smallest. */
9364 if (optimize_size)
9365 {
9366 op0 = gen_lowpart (V4SFmode, op0);
9367 op1 = gen_lowpart (V4SFmode, op1);
9368 emit_insn (gen_sse_movups (op0, op1));
9369 return;
9370 }
9371
9372 /* ??? If we have typed data, then it would appear that using
9373 movdqu is the only way to get unaligned data loaded with
9374 integer type. */
9375 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
9376 {
9377 op0 = gen_lowpart (V16QImode, op0);
9378 op1 = gen_lowpart (V16QImode, op1);
9379 emit_insn (gen_sse2_movdqu (op0, op1));
9380 return;
9381 }
9382
9383 if (TARGET_SSE2 && mode == V2DFmode)
9384 {
9385 rtx zero;
9386
9387 /* When SSE registers are split into halves, we can avoid
9388 writing to the top half twice. */
9389 if (TARGET_SSE_SPLIT_REGS)
9390 {
9391 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9392 zero = op0;
9393 }
9394 else
9395 {
9396 /* ??? Not sure about the best option for the Intel chips.
9397 The following would seem to satisfy; the register is
9398 entirely cleared, breaking the dependency chain. We
9399 then store to the upper half, with a dependency depth
9400 of one. A rumor has it that Intel recommends two movsd
9401 followed by an unpacklpd, but this is unconfirmed. And
9402 given that the dependency depth of the unpacklpd would
9403 still be one, I'm not sure why this would be better. */
9404 zero = CONST0_RTX (V2DFmode);
9405 }
9406
9407 m = adjust_address (op1, DFmode, 0);
9408 emit_insn (gen_sse2_loadlpd (op0, zero, m));
9409 m = adjust_address (op1, DFmode, 8);
9410 emit_insn (gen_sse2_loadhpd (op0, op0, m));
9411 }
9412 else
9413 {
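/* Load the two 64-bit halves separately through the loadlps/loadhps
   patterns (movlps/movhps) instead of one unaligned 128-bit load.  */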
9414 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
9415 emit_move_insn (op0, CONST0_RTX (mode));
9416 else
9417 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9418
9419 if (mode != V4SFmode)
9420 op0 = gen_lowpart (V4SFmode, op0);
9421 m = adjust_address (op1, V2SFmode, 0);
9422 emit_insn (gen_sse_loadlps (op0, op0, m));
9423 m = adjust_address (op1, V2SFmode, 8);
9424 emit_insn (gen_sse_loadhps (op0, op0, m));
9425 }
9426 }
9427 else if (MEM_P (op0))
9428 {
9429 /* If we're optimizing for size, movups is the smallest. */
9430 if (optimize_size)
9431 {
9432 op0 = gen_lowpart (V4SFmode, op0);
9433 op1 = gen_lowpart (V4SFmode, op1);
9434 emit_insn (gen_sse_movups (op0, op1));
9435 return;
9436 }
9437
9438 /* ??? Similar to above, only less clear because of "typeless stores". */
9440 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
9441 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
9442 {
9443 op0 = gen_lowpart (V16QImode, op0);
9444 op1 = gen_lowpart (V16QImode, op1);
9445 emit_insn (gen_sse2_movdqu (op0, op1));
9446 return;
9447 }
9448
9449 if (TARGET_SSE2 && mode == V2DFmode)
9450 {
9451 m = adjust_address (op0, DFmode, 0);
9452 emit_insn (gen_sse2_storelpd (m, op1));
9453 m = adjust_address (op0, DFmode, 8);
9454 emit_insn (gen_sse2_storehpd (m, op1));
9455 }
9456 else
9457 {
9458 if (mode != V4SFmode)
9459 op1 = gen_lowpart (V4SFmode, op1);
9460 m = adjust_address (op0, V2SFmode, 0);
9461 emit_insn (gen_sse_storelps (m, op1));
9462 m = adjust_address (op0, V2SFmode, 8);
9463 emit_insn (gen_sse_storehps (m, op1));
9464 }
9465 }
9466 else
9467 gcc_unreachable ();
9468 }
9469
9470 /* Expand a push in MODE. This is some mode for which we do not support
9471 proper push instructions, at least from the registers that we expect
9472 the value to live in. */
9473
9474 void
9475 ix86_expand_push (enum machine_mode mode, rtx x)
9476 {
9477 rtx tmp;
9478
9479 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
9480 GEN_INT (-GET_MODE_SIZE (mode)),
9481 stack_pointer_rtx, 1, OPTAB_DIRECT);
9482 if (tmp != stack_pointer_rtx)
9483 emit_move_insn (stack_pointer_rtx, tmp);
9484
9485 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
9486 emit_move_insn (tmp, x);
9487 }
9488
9489 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
9490 destination to use for the operation. If different from the true
9491 destination in operands[0], a copy operation will be required. */
9492
9493 rtx
9494 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
9495 rtx operands[])
9496 {
9497 int matching_memory;
9498 rtx src1, src2, dst;
9499
9500 dst = operands[0];
9501 src1 = operands[1];
9502 src2 = operands[2];
9503
9504 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
9505 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9506 && (rtx_equal_p (dst, src2)
9507 || immediate_operand (src1, mode)))
9508 {
9509 rtx temp = src1;
9510 src1 = src2;
9511 src2 = temp;
9512 }
9513
9514 /* If the destination is memory, and we do not have matching source
9515 operands, do things in registers. */
9516 matching_memory = 0;
9517 if (GET_CODE (dst) == MEM)
9518 {
9519 if (rtx_equal_p (dst, src1))
9520 matching_memory = 1;
9521 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9522 && rtx_equal_p (dst, src2))
9523 matching_memory = 2;
9524 else
9525 dst = gen_reg_rtx (mode);
9526 }
9527
9528 /* Both source operands cannot be in memory. */
9529 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
9530 {
9531 if (matching_memory != 2)
9532 src2 = force_reg (mode, src2);
9533 else
9534 src1 = force_reg (mode, src1);
9535 }
9536
9537 /* If the operation is not commutative, source 1 cannot be a constant
9538 or non-matching memory. */
9539 if ((CONSTANT_P (src1)
9540 || (!matching_memory && GET_CODE (src1) == MEM))
9541 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
9542 src1 = force_reg (mode, src1);
9543
9544 src1 = operands[1] = src1;
9545 src2 = operands[2] = src2;
9546 return dst;
9547 }
9548
9549 /* Similarly, but assume that the destination has already been
9550 set up properly. */
9551
9552 void
9553 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
9554 enum machine_mode mode, rtx operands[])
9555 {
9556 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
9557 gcc_assert (dst == operands[0]);
9558 }
9559
9560 /* Attempt to expand a binary operator. Make the expansion closer to the
9561 actual machine than just general_operand, which would allow 3 separate
9562 memory references (one output, two input) in a single insn. */
9563
9564 void
9565 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
9566 rtx operands[])
9567 {
9568 rtx src1, src2, dst, op, clob;
9569
9570 dst = ix86_fixup_binary_operands (code, mode, operands);
9571 src1 = operands[1];
9572 src2 = operands[2];
9573
9574 /* Emit the instruction. */
9575
9576 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
9577 if (reload_in_progress)
9578 {
9579 /* Reload doesn't know about the flags register, and doesn't know that
9580 it doesn't want to clobber it. We can only do this with PLUS. */
9581 gcc_assert (code == PLUS);
9582 emit_insn (op);
9583 }
9584 else
9585 {
9586 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9587 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
9588 }
9589
9590 /* Fix up the destination if needed. */
9591 if (dst != operands[0])
9592 emit_move_insn (operands[0], dst);
9593 }
9594
9595 /* Return TRUE or FALSE depending on whether the binary operator meets the
9596 appropriate constraints. */
9597
9598 int
9599 ix86_binary_operator_ok (enum rtx_code code,
9600 enum machine_mode mode ATTRIBUTE_UNUSED,
9601 rtx operands[3])
9602 {
9603 /* Both source operands cannot be in memory. */
9604 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
9605 return 0;
9606 /* If the operation is not commutative, source 1 cannot be a constant. */
9607 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
9608 return 0;
9609 /* If the destination is memory, we must have a matching source operand. */
9610 if (GET_CODE (operands[0]) == MEM
9611 && ! (rtx_equal_p (operands[0], operands[1])
9612 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9613 && rtx_equal_p (operands[0], operands[2]))))
9614 return 0;
9615 /* If the operation is not commutative and source 1 is memory, we must
9616 have a matching destination. */
9617 if (GET_CODE (operands[1]) == MEM
9618 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
9619 && ! rtx_equal_p (operands[0], operands[1]))
9620 return 0;
9621 return 1;
9622 }
9623
9624 /* Attempt to expand a unary operator. Make the expansion closer to the
9625 actual machine than just general_operand, which would allow 2 separate
9626 memory references (one output, one input) in a single insn. */
9627
9628 void
9629 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
9630 rtx operands[])
9631 {
9632 int matching_memory;
9633 rtx src, dst, op, clob;
9634
9635 dst = operands[0];
9636 src = operands[1];
9637
9638 /* If the destination is memory, and we do not have matching source
9639 operands, do things in registers. */
9640 matching_memory = 0;
9641 if (MEM_P (dst))
9642 {
9643 if (rtx_equal_p (dst, src))
9644 matching_memory = 1;
9645 else
9646 dst = gen_reg_rtx (mode);
9647 }
9648
9649 /* When source operand is memory, destination must match. */
9650 if (MEM_P (src) && !matching_memory)
9651 src = force_reg (mode, src);
9652
9653 /* Emit the instruction. */
9654
9655 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
9656 if (reload_in_progress || code == NOT)
9657 {
9658 /* Reload doesn't know about the flags register, and doesn't know that
9659 it doesn't want to clobber it. */
9660 gcc_assert (code == NOT);
9661 emit_insn (op);
9662 }
9663 else
9664 {
9665 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9666 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
9667 }
9668
9669 /* Fix up the destination if needed. */
9670 if (dst != operands[0])
9671 emit_move_insn (operands[0], dst);
9672 }
9673
9674 /* Return TRUE or FALSE depending on whether the unary operator meets the
9675 appropriate constraints. */
9676
9677 int
9678 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
9679 enum machine_mode mode ATTRIBUTE_UNUSED,
9680 rtx operands[2] ATTRIBUTE_UNUSED)
9681 {
9682 /* If one of operands is memory, source and destination must match. */
9683 if ((GET_CODE (operands[0]) == MEM
9684 || GET_CODE (operands[1]) == MEM)
9685 && ! rtx_equal_p (operands[0], operands[1]))
9686 return FALSE;
9687 return TRUE;
9688 }
9689
9690 /* A subroutine of ix86_expand_fp_absneg_operator and copysign expanders.
9691 Create a mask for the sign bit in MODE for an SSE register. If VECT is
9692 true, then replicate the mask for all elements of the vector register.
9693 If INVERT is true, then create a mask excluding the sign bit. */
9694
9695 rtx
9696 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
9697 {
9698 enum machine_mode vec_mode;
9699 HOST_WIDE_INT hi, lo;
9700 int shift = 63;
9701 rtvec v;
9702 rtx mask;
9703
9704 /* Find the sign bit, sign extended to 2*HWI. */
9705 if (mode == SFmode)
9706 lo = 0x80000000, hi = lo < 0;
9707 else if (HOST_BITS_PER_WIDE_INT >= 64)
9708 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
9709 else
9710 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
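/* For SFmode the sign bit is 0x80000000; for DFmode it is bit 63, which
   has to be split across LO and HI when HOST_WIDE_INT is only 32 bits
   wide.  */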
9711
9712 if (invert)
9713 lo = ~lo, hi = ~hi;
9714
9715 /* Force this value into the low part of a fp vector constant. */
9716 mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
9717 mask = gen_lowpart (mode, mask);
9718
9719 if (mode == SFmode)
9720 {
9721 if (vect)
9722 v = gen_rtvec (4, mask, mask, mask, mask);
9723 else
9724 v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
9725 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
9726 vec_mode = V4SFmode;
9727 }
9728 else
9729 {
9730 if (vect)
9731 v = gen_rtvec (2, mask, mask);
9732 else
9733 v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
9734 vec_mode = V2DFmode;
9735 }
9736
9737 return force_reg (vec_mode, gen_rtx_CONST_VECTOR (vec_mode, v));
9738 }
9739
9740 /* Generate code for floating point ABS or NEG. */
9741
9742 void
9743 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
9744 rtx operands[])
9745 {
9746 rtx mask, set, use, clob, dst, src;
9747 bool matching_memory;
9748 bool use_sse = false;
9749 bool vector_mode = VECTOR_MODE_P (mode);
9750 enum machine_mode elt_mode = mode;
9751
9752 if (vector_mode)
9753 {
9754 elt_mode = GET_MODE_INNER (mode);
9755 use_sse = true;
9756 }
9757 else if (TARGET_SSE_MATH)
9758 use_sse = SSE_FLOAT_MODE_P (mode);
9759
9760 /* NEG and ABS performed with SSE use bitwise mask operations.
9761 Create the appropriate mask now. */
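/* NEG is then emitted as an XOR with the sign-bit mask and ABS as an
   AND with the inverted mask, which clears the sign bit.  */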
9762 if (use_sse)
9763 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
9764 else
9765 mask = NULL_RTX;
9766
9767 dst = operands[0];
9768 src = operands[1];
9769
9770 /* If the destination is memory, and we don't have matching source
9771 operands or we're using the x87, do things in registers. */
9772 matching_memory = false;
9773 if (MEM_P (dst))
9774 {
9775 if (use_sse && rtx_equal_p (dst, src))
9776 matching_memory = true;
9777 else
9778 dst = gen_reg_rtx (mode);
9779 }
9780 if (MEM_P (src) && !matching_memory)
9781 src = force_reg (mode, src);
9782
9783 if (vector_mode)
9784 {
9785 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
9786 set = gen_rtx_SET (VOIDmode, dst, set);
9787 emit_insn (set);
9788 }
9789 else
9790 {
9791 set = gen_rtx_fmt_e (code, mode, src);
9792 set = gen_rtx_SET (VOIDmode, dst, set);
9793 if (mask)
9794 {
9795 use = gen_rtx_USE (VOIDmode, mask);
9796 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9797 emit_insn (gen_rtx_PARALLEL (VOIDmode,
9798 gen_rtvec (3, set, use, clob)));
9799 }
9800 else
9801 emit_insn (set);
9802 }
9803
9804 if (dst != operands[0])
9805 emit_move_insn (operands[0], dst);
9806 }
9807
9808 /* Expand a copysign operation. Special case operand 0 being a constant. */
9809
9810 void
9811 ix86_expand_copysign (rtx operands[])
9812 {
9813 enum machine_mode mode, vmode;
9814 rtx dest, op0, op1, mask, nmask;
9815
9816 dest = operands[0];
9817 op0 = operands[1];
9818 op1 = operands[2];
9819
9820 mode = GET_MODE (dest);
9821 vmode = mode == SFmode ? V4SFmode : V2DFmode;
9822
9823 if (GET_CODE (op0) == CONST_DOUBLE)
9824 {
9825 rtvec v;
9826
9827 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
9828 op0 = simplify_unary_operation (ABS, mode, op0, mode);
9829
9830 if (op0 == CONST0_RTX (mode))
9831 op0 = CONST0_RTX (vmode);
9832 else
9833 {
9834 if (mode == SFmode)
9835 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
9836 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
9837 else
9838 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
9839 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
9840 }
9841
9842 mask = ix86_build_signbit_mask (mode, 0, 0);
9843
9844 if (mode == SFmode)
9845 emit_insn (gen_copysignsf3_const (dest, op0, op1, mask));
9846 else
9847 emit_insn (gen_copysigndf3_const (dest, op0, op1, mask));
9848 }
9849 else
9850 {
9851 nmask = ix86_build_signbit_mask (mode, 0, 1);
9852 mask = ix86_build_signbit_mask (mode, 0, 0);
9853
9854 if (mode == SFmode)
9855 emit_insn (gen_copysignsf3_var (dest, NULL, op0, op1, nmask, mask));
9856 else
9857 emit_insn (gen_copysigndf3_var (dest, NULL, op0, op1, nmask, mask));
9858 }
9859 }
9860
9861 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
9862 be a constant, and so has already been expanded into a vector constant. */
9863
9864 void
9865 ix86_split_copysign_const (rtx operands[])
9866 {
9867 enum machine_mode mode, vmode;
9868 rtx dest, op0, op1, mask, x;
9869
9870 dest = operands[0];
9871 op0 = operands[1];
9872 op1 = operands[2];
9873 mask = operands[3];
9874
9875 mode = GET_MODE (dest);
9876 vmode = GET_MODE (mask);
9877
9878 dest = simplify_gen_subreg (vmode, dest, mode, 0);
9879 x = gen_rtx_AND (vmode, dest, mask);
9880 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9881
9882 if (op0 != CONST0_RTX (vmode))
9883 {
9884 x = gen_rtx_IOR (vmode, dest, op0);
9885 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9886 }
9887 }
9888
9889 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
9890 so we have to do two masks. */
9891
9892 void
9893 ix86_split_copysign_var (rtx operands[])
9894 {
9895 enum machine_mode mode, vmode;
9896 rtx dest, scratch, op0, op1, mask, nmask, x;
9897
9898 dest = operands[0];
9899 scratch = operands[1];
9900 op0 = operands[2];
9901 op1 = operands[3];
9902 nmask = operands[4];
9903 mask = operands[5];
9904
9905 mode = GET_MODE (dest);
9906 vmode = GET_MODE (mask);
9907
9908 if (rtx_equal_p (op0, op1))
9909 {
9910 /* Shouldn't happen often (it's useless, obviously), but when it does
9911 we'd generate incorrect code if we continue below. */
9912 emit_move_insn (dest, op0);
9913 return;
9914 }
9915
9916 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
9917 {
9918 gcc_assert (REGNO (op1) == REGNO (scratch));
9919
9920 x = gen_rtx_AND (vmode, scratch, mask);
9921 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9922
9923 dest = mask;
9924 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9925 x = gen_rtx_NOT (vmode, dest);
9926 x = gen_rtx_AND (vmode, x, op0);
9927 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9928 }
9929 else
9930 {
9931 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
9932 {
9933 x = gen_rtx_AND (vmode, scratch, mask);
9934 }
9935 else /* alternative 2,4 */
9936 {
9937 gcc_assert (REGNO (mask) == REGNO (scratch));
9938 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
9939 x = gen_rtx_AND (vmode, scratch, op1);
9940 }
9941 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9942
9943 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
9944 {
9945 dest = simplify_gen_subreg (vmode, op0, mode, 0);
9946 x = gen_rtx_AND (vmode, dest, nmask);
9947 }
9948 else /* alternative 3,4 */
9949 {
9950 gcc_assert (REGNO (nmask) == REGNO (dest));
9951 dest = nmask;
9952 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9953 x = gen_rtx_AND (vmode, dest, op0);
9954 }
9955 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9956 }
9957
9958 x = gen_rtx_IOR (vmode, dest, scratch);
9959 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9960 }
9961
9962 /* Return TRUE or FALSE depending on whether the first SET in INSN
9963 has source and destination with matching CC modes, and whether the
9964 CC mode is at least as constrained as REQ_MODE. */
9965
9966 int
9967 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
9968 {
9969 rtx set;
9970 enum machine_mode set_mode;
9971
9972 set = PATTERN (insn);
9973 if (GET_CODE (set) == PARALLEL)
9974 set = XVECEXP (set, 0, 0);
9975 gcc_assert (GET_CODE (set) == SET);
9976 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
9977
9978 set_mode = GET_MODE (SET_DEST (set));
9979 switch (set_mode)
9980 {
9981 case CCNOmode:
9982 if (req_mode != CCNOmode
9983 && (req_mode != CCmode
9984 || XEXP (SET_SRC (set), 1) != const0_rtx))
9985 return 0;
9986 break;
9987 case CCmode:
9988 if (req_mode == CCGCmode)
9989 return 0;
9990 /* FALLTHRU */
9991 case CCGCmode:
9992 if (req_mode == CCGOCmode || req_mode == CCNOmode)
9993 return 0;
9994 /* FALLTHRU */
9995 case CCGOCmode:
9996 if (req_mode == CCZmode)
9997 return 0;
9998 /* FALLTHRU */
9999 case CCZmode:
10000 break;
10001
10002 default:
10003 gcc_unreachable ();
10004 }
10005
10006 return (GET_MODE (SET_SRC (set)) == set_mode);
10007 }
10008
10009 /* Generate insn patterns to do an integer compare of OPERANDS. */
10010
10011 static rtx
10012 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
10013 {
10014 enum machine_mode cmpmode;
10015 rtx tmp, flags;
10016
10017 cmpmode = SELECT_CC_MODE (code, op0, op1);
10018 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
10019
10020 /* This is very simple, but making the interface the same as in the
10021 FP case makes the rest of the code easier. */
10022 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
10023 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
10024
10025 /* Return the test that should be put into the flags user, i.e.
10026 the bcc, scc, or cmov instruction. */
10027 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
10028 }
10029
10030 /* Figure out whether to use ordered or unordered fp comparisons.
10031 Return the appropriate mode to use. */
10032
10033 enum machine_mode
10034 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
10035 {
10036 /* ??? In order to make all comparisons reversible, we do all comparisons
10037 non-trapping when compiling for IEEE. Once gcc is able to distinguish
10038 all forms of trapping and nontrapping comparisons, we can make inequality
10039 comparisons trapping again, since it results in better code when using
10040 FCOM based compares. */
10041 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
10042 }
10043
10044 enum machine_mode
10045 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
10046 {
10047 if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
10048 return ix86_fp_compare_mode (code);
10049 switch (code)
10050 {
10051 /* Only zero flag is needed. */
10052 case EQ: /* ZF=0 */
10053 case NE: /* ZF!=0 */
10054 return CCZmode;
10055 /* Codes needing carry flag. */
10056 case GEU: /* CF=0 */
10057 case GTU: /* CF=0 & ZF=0 */
10058 case LTU: /* CF=1 */
10059 case LEU: /* CF=1 | ZF=1 */
10060 return CCmode;
10061 /* Codes possibly doable only with sign flag when
10062 comparing against zero. */
10063 case GE: /* SF=OF or SF=0 */
10064 case LT: /* SF<>OF or SF=1 */
10065 if (op1 == const0_rtx)
10066 return CCGOCmode;
10067 else
10068 /* For other cases Carry flag is not required. */
10069 return CCGCmode;
10070 /* Codes doable only with the sign flag when comparing
10071 against zero, but for which we lack a jump instruction,
10072 so we need to use relational tests against the overflow
10073 flag, which thus needs to be zero. */
10074 case GT: /* ZF=0 & SF=OF */
10075 case LE: /* ZF=1 | SF<>OF */
10076 if (op1 == const0_rtx)
10077 return CCNOmode;
10078 else
10079 return CCGCmode;
10080 /* The strcmp pattern does a (use flags), and combine may ask us
10081 for the proper mode. */
10082 case USE:
10083 return CCmode;
10084 default:
10085 gcc_unreachable ();
10086 }
10087 }
10088
10089 /* Return the fixed registers used for condition codes. */
10090
10091 static bool
10092 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
10093 {
10094 *p1 = FLAGS_REG;
10095 *p2 = FPSR_REG;
10096 return true;
10097 }
10098
10099 /* If two condition code modes are compatible, return a condition code
10100 mode which is compatible with both. Otherwise, return
10101 VOIDmode. */
10102
10103 static enum machine_mode
10104 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
10105 {
10106 if (m1 == m2)
10107 return m1;
10108
10109 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
10110 return VOIDmode;
10111
10112 if ((m1 == CCGCmode && m2 == CCGOCmode)
10113 || (m1 == CCGOCmode && m2 == CCGCmode))
10114 return CCGCmode;
10115
10116 switch (m1)
10117 {
10118 default:
10119 gcc_unreachable ();
10120
10121 case CCmode:
10122 case CCGCmode:
10123 case CCGOCmode:
10124 case CCNOmode:
10125 case CCZmode:
10126 switch (m2)
10127 {
10128 default:
10129 return VOIDmode;
10130
10131 case CCmode:
10132 case CCGCmode:
10133 case CCGOCmode:
10134 case CCNOmode:
10135 case CCZmode:
10136 return CCmode;
10137 }
10138
10139 case CCFPmode:
10140 case CCFPUmode:
10141 /* These are only compatible with themselves, which we already
10142 checked above. */
10143 return VOIDmode;
10144 }
10145 }
10146
10147 /* Return true if we should use an FCOMI instruction for this fp comparison. */
10148
10149 int
10150 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
10151 {
10152 enum rtx_code swapped_code = swap_condition (code);
10153 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
10154 || (ix86_fp_comparison_cost (swapped_code)
10155 == ix86_fp_comparison_fcomi_cost (swapped_code)));
10156 }
10157
10158 /* Swap, force into registers, or otherwise massage the two operands
10159 to a fp comparison. The operands are updated in place; the new
10160 comparison code is returned. */
10161
10162 static enum rtx_code
10163 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
10164 {
10165 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
10166 rtx op0 = *pop0, op1 = *pop1;
10167 enum machine_mode op_mode = GET_MODE (op0);
10168 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
10169
10170 /* All of the unordered compare instructions only work on registers.
10171 The same is true of the fcomi compare instructions. The XFmode
10172 compare instructions require registers except when comparing
10173 against zero or when converting operand 1 from fixed point to
10174 floating point. */
10175
10176 if (!is_sse
10177 && (fpcmp_mode == CCFPUmode
10178 || (op_mode == XFmode
10179 && ! (standard_80387_constant_p (op0) == 1
10180 || standard_80387_constant_p (op1) == 1)
10181 && GET_CODE (op1) != FLOAT)
10182 || ix86_use_fcomi_compare (code)))
10183 {
10184 op0 = force_reg (op_mode, op0);
10185 op1 = force_reg (op_mode, op1);
10186 }
10187 else
10188 {
10189 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
10190 things around if they appear profitable, otherwise force op0
10191 into a register. */
10192
10193 if (standard_80387_constant_p (op0) == 0
10194 || (GET_CODE (op0) == MEM
10195 && ! (standard_80387_constant_p (op1) == 0
10196 || GET_CODE (op1) == MEM)))
10197 {
10198 rtx tmp;
10199 tmp = op0, op0 = op1, op1 = tmp;
10200 code = swap_condition (code);
10201 }
10202
10203 if (GET_CODE (op0) != REG)
10204 op0 = force_reg (op_mode, op0);
10205
10206 if (CONSTANT_P (op1))
10207 {
10208 int tmp = standard_80387_constant_p (op1);
10209 if (tmp == 0)
10210 op1 = validize_mem (force_const_mem (op_mode, op1));
10211 else if (tmp == 1)
10212 {
10213 if (TARGET_CMOVE)
10214 op1 = force_reg (op_mode, op1);
10215 }
10216 else
10217 op1 = force_reg (op_mode, op1);
10218 }
10219 }
10220
10221 /* Try to rearrange the comparison to make it cheaper. */
10222 if (ix86_fp_comparison_cost (code)
10223 > ix86_fp_comparison_cost (swap_condition (code))
10224 && (GET_CODE (op1) == REG || !no_new_pseudos))
10225 {
10226 rtx tmp;
10227 tmp = op0, op0 = op1, op1 = tmp;
10228 code = swap_condition (code);
10229 if (GET_CODE (op0) != REG)
10230 op0 = force_reg (op_mode, op0);
10231 }
10232
10233 *pop0 = op0;
10234 *pop1 = op1;
10235 return code;
10236 }
10237
10238 /* Convert the comparison codes we use to represent FP comparisons into the
10239 integer code that will result in a proper branch. Return UNKNOWN if no
10240 such code is available. */
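/* After an fcomi, or an fnstsw/sahf pair, the FP condition lives in
   CF/ZF/PF, so the ordered FP codes map onto the unsigned integer
   condition codes.  */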
10241
10242 enum rtx_code
10243 ix86_fp_compare_code_to_integer (enum rtx_code code)
10244 {
10245 switch (code)
10246 {
10247 case GT:
10248 return GTU;
10249 case GE:
10250 return GEU;
10251 case ORDERED:
10252 case UNORDERED:
10253 return code;
10254 break;
10255 case UNEQ:
10256 return EQ;
10257 break;
10258 case UNLT:
10259 return LTU;
10260 break;
10261 case UNLE:
10262 return LEU;
10263 break;
10264 case LTGT:
10265 return NE;
10266 break;
10267 default:
10268 return UNKNOWN;
10269 }
10270 }
10271
10272 /* Split comparison code CODE into comparisons we can do using branch
10273 instructions. BYPASS_CODE is the comparison code for a branch that will
10274 branch around FIRST_CODE and SECOND_CODE. If one of the branches
10275 is not required, its code is set to UNKNOWN.
10276 We never require more than two branches. */
10277
10278 void
10279 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
10280 enum rtx_code *first_code,
10281 enum rtx_code *second_code)
10282 {
10283 *first_code = code;
10284 *bypass_code = UNKNOWN;
10285 *second_code = UNKNOWN;
10286
10287 /* The fcomi comparison sets flags as follows:
10288
10289 cmp ZF PF CF
10290 > 0 0 0
10291 < 0 0 1
10292 = 1 0 0
10293 un 1 1 1 */
10294
10295 switch (code)
10296 {
10297 case GT: /* GTU - CF=0 & ZF=0 */
10298 case GE: /* GEU - CF=0 */
10299 case ORDERED: /* PF=0 */
10300 case UNORDERED: /* PF=1 */
10301 case UNEQ: /* EQ - ZF=1 */
10302 case UNLT: /* LTU - CF=1 */
10303 case UNLE: /* LEU - CF=1 | ZF=1 */
10304 case LTGT: /* EQ - ZF=0 */
10305 break;
10306 case LT: /* LTU - CF=1 - fails on unordered */
10307 *first_code = UNLT;
10308 *bypass_code = UNORDERED;
10309 break;
10310 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
10311 *first_code = UNLE;
10312 *bypass_code = UNORDERED;
10313 break;
10314 case EQ: /* EQ - ZF=1 - fails on unordered */
10315 *first_code = UNEQ;
10316 *bypass_code = UNORDERED;
10317 break;
10318 case NE: /* NE - ZF=0 - fails on unordered */
10319 *first_code = LTGT;
10320 *second_code = UNORDERED;
10321 break;
10322 case UNGE: /* GEU - CF=0 - fails on unordered */
10323 *first_code = GE;
10324 *second_code = UNORDERED;
10325 break;
10326 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
10327 *first_code = GT;
10328 *second_code = UNORDERED;
10329 break;
10330 default:
10331 gcc_unreachable ();
10332 }
10333 if (!TARGET_IEEE_FP)
10334 {
10335 *second_code = UNKNOWN;
10336 *bypass_code = UNKNOWN;
10337 }
10338 }
10339
10340 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
10341 All of the following functions use the number of instructions as a cost metric.
10342 In the future this should be tweaked to compute bytes for optimize_size and to
10343 take into account the performance of various instructions on various CPUs. */
10344 static int
10345 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
10346 {
10347 if (!TARGET_IEEE_FP)
10348 return 4;
10349 /* The cost of code output by ix86_expand_fp_compare. */
10350 switch (code)
10351 {
10352 case UNLE:
10353 case UNLT:
10354 case LTGT:
10355 case GT:
10356 case GE:
10357 case UNORDERED:
10358 case ORDERED:
10359 case UNEQ:
10360 return 4;
10361 break;
10362 case LT:
10363 case NE:
10364 case EQ:
10365 case UNGE:
10366 return 5;
10367 break;
10368 case LE:
10369 case UNGT:
10370 return 6;
10371 break;
10372 default:
10373 gcc_unreachable ();
10374 }
10375 }
10376
10377 /* Return cost of comparison done using fcomi operation.
10378 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10379 static int
10380 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
10381 {
10382 enum rtx_code bypass_code, first_code, second_code;
10383 /* Return an arbitrarily high cost when the instruction is not supported - this
10384 prevents gcc from using it. */
10385 if (!TARGET_CMOVE)
10386 return 1024;
10387 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10388 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
10389 }
10390
10391 /* Return cost of comparison done using sahf operation.
10392 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10393 static int
10394 ix86_fp_comparison_sahf_cost (enum rtx_code code)
10395 {
10396 enum rtx_code bypass_code, first_code, second_code;
10397 /* Return an arbitrarily high cost when the instruction is not preferred - this
10398 prevents gcc from using it. */
10399 if (!TARGET_USE_SAHF && !optimize_size)
10400 return 1024;
10401 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10402 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
10403 }
10404
10405 /* Compute cost of the comparison done using any method.
10406 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10407 static int
10408 ix86_fp_comparison_cost (enum rtx_code code)
10409 {
10410 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
10411 int min;
10412
10413 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
10414 sahf_cost = ix86_fp_comparison_sahf_cost (code);
10415
10416 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
10417 if (min > sahf_cost)
10418 min = sahf_cost;
10419 if (min > fcomi_cost)
10420 min = fcomi_cost;
10421 return min;
10422 }
10423
10424 /* Generate insn patterns to do a floating point compare of OPERANDS. */
10425
10426 static rtx
10427 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
10428 rtx *second_test, rtx *bypass_test)
10429 {
10430 enum machine_mode fpcmp_mode, intcmp_mode;
10431 rtx tmp, tmp2;
10432 int cost = ix86_fp_comparison_cost (code);
10433 enum rtx_code bypass_code, first_code, second_code;
10434
10435 fpcmp_mode = ix86_fp_compare_mode (code);
10436 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
10437
10438 if (second_test)
10439 *second_test = NULL_RTX;
10440 if (bypass_test)
10441 *bypass_test = NULL_RTX;
10442
10443 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10444
10445 /* Do fcomi/sahf based test when profitable. */
10446 if ((bypass_code == UNKNOWN || bypass_test)
10447 && (second_code == UNKNOWN || second_test)
10448 && ix86_fp_comparison_arithmetics_cost (code) > cost)
10449 {
10450 if (TARGET_CMOVE)
10451 {
10452 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10453 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
10454 tmp);
10455 emit_insn (tmp);
10456 }
10457 else
10458 {
10459 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10460 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
10461 if (!scratch)
10462 scratch = gen_reg_rtx (HImode);
10463 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
10464 emit_insn (gen_x86_sahf_1 (scratch));
10465 }
10466
10467 /* The FP codes work out to act like unsigned. */
10468 intcmp_mode = fpcmp_mode;
10469 code = first_code;
10470 if (bypass_code != UNKNOWN)
10471 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
10472 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10473 const0_rtx);
10474 if (second_code != UNKNOWN)
10475 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
10476 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10477 const0_rtx);
10478 }
10479 else
10480 {
10481 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
10482 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10483 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
10484 if (!scratch)
10485 scratch = gen_reg_rtx (HImode);
10486 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
10487
10488 /* In the unordered case, we have to check C2 for NaN's, which
10489 doesn't happen to work out to anything nice combination-wise.
10490 So do some bit twiddling on the value we've got in AH to come
10491 up with an appropriate set of condition codes. */
10492
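/* After fnstsw, AH holds C0 in bit 0 (0x01), C2 in bit 2 (0x04) and C3
   in bit 6 (0x40); 0x45 therefore masks C0|C2|C3.  */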
10493 intcmp_mode = CCNOmode;
10494 switch (code)
10495 {
10496 case GT:
10497 case UNGT:
10498 if (code == GT || !TARGET_IEEE_FP)
10499 {
10500 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
10501 code = EQ;
10502 }
10503 else
10504 {
10505 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10506 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
10507 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
10508 intcmp_mode = CCmode;
10509 code = GEU;
10510 }
10511 break;
10512 case LT:
10513 case UNLT:
10514 if (code == LT && TARGET_IEEE_FP)
10515 {
10516 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10517 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
10518 intcmp_mode = CCmode;
10519 code = EQ;
10520 }
10521 else
10522 {
10523 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
10524 code = NE;
10525 }
10526 break;
10527 case GE:
10528 case UNGE:
10529 if (code == GE || !TARGET_IEEE_FP)
10530 {
10531 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
10532 code = EQ;
10533 }
10534 else
10535 {
10536 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10537 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
10538 GEN_INT (0x01)));
10539 code = NE;
10540 }
10541 break;
10542 case LE:
10543 case UNLE:
10544 if (code == LE && TARGET_IEEE_FP)
10545 {
10546 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10547 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
10548 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
10549 intcmp_mode = CCmode;
10550 code = LTU;
10551 }
10552 else
10553 {
10554 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
10555 code = NE;
10556 }
10557 break;
10558 case EQ:
10559 case UNEQ:
10560 if (code == EQ && TARGET_IEEE_FP)
10561 {
10562 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10563 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
10564 intcmp_mode = CCmode;
10565 code = EQ;
10566 }
10567 else
10568 {
10569 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
10570 code = NE;
10571 break;
10572 }
10573 break;
10574 case NE:
10575 case LTGT:
10576 if (code == NE && TARGET_IEEE_FP)
10577 {
10578 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10579 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
10580 GEN_INT (0x40)));
10581 code = NE;
10582 }
10583 else
10584 {
10585 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
10586 code = EQ;
10587 }
10588 break;
10589
10590 case UNORDERED:
10591 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
10592 code = NE;
10593 break;
10594 case ORDERED:
10595 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
10596 code = EQ;
10597 break;
10598
10599 default:
10600 gcc_unreachable ();
10601 }
10602 }
10603
10604 /* Return the test that should be put into the flags user, i.e.
10605 the bcc, scc, or cmov instruction. */
10606 return gen_rtx_fmt_ee (code, VOIDmode,
10607 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10608 const0_rtx);
10609 }
10610
10611 rtx
10612 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
10613 {
10614 rtx op0, op1, ret;
10615 op0 = ix86_compare_op0;
10616 op1 = ix86_compare_op1;
10617
10618 if (second_test)
10619 *second_test = NULL_RTX;
10620 if (bypass_test)
10621 *bypass_test = NULL_RTX;
10622
10623 if (ix86_compare_emitted)
10624 {
10625 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
10626 ix86_compare_emitted = NULL_RTX;
10627 }
10628 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
10629 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
10630 second_test, bypass_test);
10631 else
10632 ret = ix86_expand_int_compare (code, op0, op1);
10633
10634 return ret;
10635 }
10636
10637 /* Return true if the CODE will result in nontrivial jump sequence. */
10638 bool
10639 ix86_fp_jump_nontrivial_p (enum rtx_code code)
10640 {
10641 enum rtx_code bypass_code, first_code, second_code;
10642 if (!TARGET_CMOVE)
10643 return true;
10644 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10645 return bypass_code != UNKNOWN || second_code != UNKNOWN;
10646 }
10647
10648 void
10649 ix86_expand_branch (enum rtx_code code, rtx label)
10650 {
10651 rtx tmp;
10652
10653 /* If we have emitted a compare insn, go straight to simple.
10654 ix86_expand_compare won't emit anything if ix86_compare_emitted
10655 is non NULL. */
10656 if (ix86_compare_emitted)
10657 goto simple;
10658
10659 switch (GET_MODE (ix86_compare_op0))
10660 {
10661 case QImode:
10662 case HImode:
10663 case SImode:
10664 simple:
10665 tmp = ix86_expand_compare (code, NULL, NULL);
10666 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
10667 gen_rtx_LABEL_REF (VOIDmode, label),
10668 pc_rtx);
10669 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
10670 return;
10671
10672 case SFmode:
10673 case DFmode:
10674 case XFmode:
10675 {
10676 rtvec vec;
10677 int use_fcomi;
10678 enum rtx_code bypass_code, first_code, second_code;
10679
10680 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
10681 &ix86_compare_op1);
10682
10683 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10684
10685 /* Check whether we will use the natural sequence with one jump. If
10686 so, we can expand the jump early. Otherwise delay expansion by
10687 creating a compound insn so as not to confuse the optimizers. */
10688 if (bypass_code == UNKNOWN && second_code == UNKNOWN
10689 && TARGET_CMOVE)
10690 {
10691 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
10692 gen_rtx_LABEL_REF (VOIDmode, label),
10693 pc_rtx, NULL_RTX, NULL_RTX);
10694 }
10695 else
10696 {
10697 tmp = gen_rtx_fmt_ee (code, VOIDmode,
10698 ix86_compare_op0, ix86_compare_op1);
10699 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
10700 gen_rtx_LABEL_REF (VOIDmode, label),
10701 pc_rtx);
10702 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
10703
10704 use_fcomi = ix86_use_fcomi_compare (code);
10705 vec = rtvec_alloc (3 + !use_fcomi);
10706 RTVEC_ELT (vec, 0) = tmp;
10707 RTVEC_ELT (vec, 1)
10708 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
10709 RTVEC_ELT (vec, 2)
10710 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
10711 if (! use_fcomi)
10712 RTVEC_ELT (vec, 3)
10713 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
10714
10715 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
10716 }
10717 return;
10718 }
10719
10720 case DImode:
10721 if (TARGET_64BIT)
10722 goto simple;
10723 case TImode:
10724 /* Expand DImode branch into multiple compare+branch. */
10725 {
10726 rtx lo[2], hi[2], label2;
10727 enum rtx_code code1, code2, code3;
10728 enum machine_mode submode;
10729
10730 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
10731 {
10732 tmp = ix86_compare_op0;
10733 ix86_compare_op0 = ix86_compare_op1;
10734 ix86_compare_op1 = tmp;
10735 code = swap_condition (code);
10736 }
10737 if (GET_MODE (ix86_compare_op0) == DImode)
10738 {
10739 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
10740 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
10741 submode = SImode;
10742 }
10743 else
10744 {
10745 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
10746 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
10747 submode = DImode;
10748 }
10749
10750 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
10751 avoid two branches. This costs one extra insn, so disable when
10752 optimizing for size. */
10753
10754 if ((code == EQ || code == NE)
10755 && (!optimize_size
10756 || hi[1] == const0_rtx || lo[1] == const0_rtx))
10757 {
10758 rtx xor0, xor1;
10759
10760 xor1 = hi[0];
10761 if (hi[1] != const0_rtx)
10762 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
10763 NULL_RTX, 0, OPTAB_WIDEN);
10764
10765 xor0 = lo[0];
10766 if (lo[1] != const0_rtx)
10767 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
10768 NULL_RTX, 0, OPTAB_WIDEN);
10769
10770 tmp = expand_binop (submode, ior_optab, xor1, xor0,
10771 NULL_RTX, 0, OPTAB_WIDEN);
10772
10773 ix86_compare_op0 = tmp;
10774 ix86_compare_op1 = const0_rtx;
10775 ix86_expand_branch (code, label);
10776 return;
10777 }
10778
10779 /* Otherwise, if we are doing less-than or greater-or-equal-than,
10780 op1 is a constant and the low word is zero, then we can just
10781 examine the high word. */
10782
10783 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
10784 switch (code)
10785 {
10786 case LT: case LTU: case GE: case GEU:
10787 ix86_compare_op0 = hi[0];
10788 ix86_compare_op1 = hi[1];
10789 ix86_expand_branch (code, label);
10790 return;
10791 default:
10792 break;
10793 }
10794
10795 /* Otherwise, we need two or three jumps. */
10796
10797 label2 = gen_label_rtx ();
10798
10799 code1 = code;
10800 code2 = swap_condition (code);
10801 code3 = unsigned_condition (code);
10802
10803 switch (code)
10804 {
10805 case LT: case GT: case LTU: case GTU:
10806 break;
10807
10808 case LE: code1 = LT; code2 = GT; break;
10809 case GE: code1 = GT; code2 = LT; break;
10810 case LEU: code1 = LTU; code2 = GTU; break;
10811 case GEU: code1 = GTU; code2 = LTU; break;
10812
10813 case EQ: code1 = UNKNOWN; code2 = NE; break;
10814 case NE: code2 = UNKNOWN; break;
10815
10816 default:
10817 gcc_unreachable ();
10818 }
10819
10820 /*
10821 * a < b =>
10822 * if (hi(a) < hi(b)) goto true;
10823 * if (hi(a) > hi(b)) goto false;
10824 * if (lo(a) < lo(b)) goto true;
10825 * false:
10826 */
10827
10828 ix86_compare_op0 = hi[0];
10829 ix86_compare_op1 = hi[1];
10830
10831 if (code1 != UNKNOWN)
10832 ix86_expand_branch (code1, label);
10833 if (code2 != UNKNOWN)
10834 ix86_expand_branch (code2, label2);
10835
10836 ix86_compare_op0 = lo[0];
10837 ix86_compare_op1 = lo[1];
10838 ix86_expand_branch (code3, label);
10839
10840 if (code2 != UNKNOWN)
10841 emit_label (label2);
10842 return;
10843 }
10844
10845 default:
10846 gcc_unreachable ();
10847 }
10848 }
10849
10850 /* Split branch based on floating point condition. */
10851 void
10852 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
10853 rtx target1, rtx target2, rtx tmp, rtx pushed)
10854 {
10855 rtx second, bypass;
10856 rtx label = NULL_RTX;
10857 rtx condition;
10858 int bypass_probability = -1, second_probability = -1, probability = -1;
10859 rtx i;
10860
10861 if (target2 != pc_rtx)
10862 {
10863 rtx tmp = target2;
10864 code = reverse_condition_maybe_unordered (code);
10865 target2 = target1;
10866 target1 = tmp;
10867 }
10868
10869 condition = ix86_expand_fp_compare (code, op1, op2,
10870 tmp, &second, &bypass);
10871
10872 /* Remove pushed operand from stack. */
10873 if (pushed)
10874 ix86_free_from_memory (GET_MODE (pushed));
10875
10876 if (split_branch_probability >= 0)
10877 {
10878 /* Distribute the probabilities across the jumps.
10879 Assume that BYPASS and SECOND always test
10880 for UNORDERED. */
10881 probability = split_branch_probability;
10882
10883 /* A value of 1 is low enough that the probability does not need
10884 to be updated. Later we may run some experiments and see
10885 whether unordered values are more frequent in practice. */
10886 if (bypass)
10887 bypass_probability = 1;
10888 if (second)
10889 second_probability = 1;
10890 }
10891 if (bypass != NULL_RTX)
10892 {
10893 label = gen_label_rtx ();
10894 i = emit_jump_insn (gen_rtx_SET
10895 (VOIDmode, pc_rtx,
10896 gen_rtx_IF_THEN_ELSE (VOIDmode,
10897 bypass,
10898 gen_rtx_LABEL_REF (VOIDmode,
10899 label),
10900 pc_rtx)));
10901 if (bypass_probability >= 0)
10902 REG_NOTES (i)
10903 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10904 GEN_INT (bypass_probability),
10905 REG_NOTES (i));
10906 }
10907 i = emit_jump_insn (gen_rtx_SET
10908 (VOIDmode, pc_rtx,
10909 gen_rtx_IF_THEN_ELSE (VOIDmode,
10910 condition, target1, target2)));
10911 if (probability >= 0)
10912 REG_NOTES (i)
10913 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10914 GEN_INT (probability),
10915 REG_NOTES (i));
10916 if (second != NULL_RTX)
10917 {
10918 i = emit_jump_insn (gen_rtx_SET
10919 (VOIDmode, pc_rtx,
10920 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
10921 target2)));
10922 if (second_probability >= 0)
10923 REG_NOTES (i)
10924 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10925 GEN_INT (second_probability),
10926 REG_NOTES (i));
10927 }
10928 if (label != NULL_RTX)
10929 emit_label (label);
10930 }
10931
10932 int
10933 ix86_expand_setcc (enum rtx_code code, rtx dest)
10934 {
10935 rtx ret, tmp, tmpreg, equiv;
10936 rtx second_test, bypass_test;
10937
10938 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
10939 return 0; /* FAIL */
10940
10941 gcc_assert (GET_MODE (dest) == QImode);
10942
10943 ret = ix86_expand_compare (code, &second_test, &bypass_test);
10944 PUT_MODE (ret, QImode);
10945
10946 tmp = dest;
10947 tmpreg = dest;
10948
10949 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
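/* Floating point comparisons may need a second setcc: a BYPASS test is
reversed and ANDed into the result, while a SECOND test is ORed in. */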
10950 if (bypass_test || second_test)
10951 {
10952 rtx test = second_test;
10953 int bypass = 0;
10954 rtx tmp2 = gen_reg_rtx (QImode);
10955 if (bypass_test)
10956 {
10957 gcc_assert (!second_test);
10958 test = bypass_test;
10959 bypass = 1;
10960 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
10961 }
10962 PUT_MODE (test, QImode);
10963 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
10964
10965 if (bypass)
10966 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
10967 else
10968 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
10969 }
10970
10971 /* Attach a REG_EQUAL note describing the comparison result. */
10972 if (ix86_compare_op0 && ix86_compare_op1)
10973 {
10974 equiv = simplify_gen_relational (code, QImode,
10975 GET_MODE (ix86_compare_op0),
10976 ix86_compare_op0, ix86_compare_op1);
10977 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
10978 }
10979
10980 return 1; /* DONE */
10981 }
10982
10983 /* Expand comparison setting or clearing carry flag. Return true when
10984 successful and set pop for the operation. */
10985 static bool
10986 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
10987 {
10988 enum machine_mode mode =
10989 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
10990
10991 /* Do not handle double-word (DImode/TImode) compares, which go through
10992 a special path. */
10993 if (mode == (TARGET_64BIT ? TImode : DImode))
10994 return false;
10995 if (FLOAT_MODE_P (mode))
10996 {
10997 rtx second_test = NULL, bypass_test = NULL;
10998 rtx compare_op, compare_seq;
10999
11000 /* Shortcut: the following common codes never translate into carry flag compares. */
11001 if (code == EQ || code == NE || code == UNEQ || code == LTGT
11002 || code == ORDERED || code == UNORDERED)
11003 return false;
11004
11005 /* These comparisons require the zero flag; swap operands so they won't need it. */
11006 if ((code == GT || code == UNLE || code == LE || code == UNGT)
11007 && !TARGET_IEEE_FP)
11008 {
11009 rtx tmp = op0;
11010 op0 = op1;
11011 op1 = tmp;
11012 code = swap_condition (code);
11013 }
11014
11015 /* Try to expand the comparison and verify that we end up with a carry flag
11016 based comparison. This fails to be true only when we decide to expand the
11017 comparison using arithmetic, which is not a common scenario. */
11018 start_sequence ();
11019 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
11020 &second_test, &bypass_test);
11021 compare_seq = get_insns ();
11022 end_sequence ();
11023
11024 if (second_test || bypass_test)
11025 return false;
11026 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11027 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11028 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
11029 else
11030 code = GET_CODE (compare_op);
11031 if (code != LTU && code != GEU)
11032 return false;
11033 emit_insn (compare_seq);
11034 *pop = compare_op;
11035 return true;
11036 }
11037 if (!INTEGRAL_MODE_P (mode))
11038 return false;
11039 switch (code)
11040 {
11041 case LTU:
11042 case GEU:
11043 break;
11044
11045 /* Convert a==0 into (unsigned)a<1. */
11046 case EQ:
11047 case NE:
11048 if (op1 != const0_rtx)
11049 return false;
11050 op1 = const1_rtx;
11051 code = (code == EQ ? LTU : GEU);
11052 break;
11053
11054 /* Convert a>b into b<a or a>=b-1. */
11055 case GTU:
11056 case LEU:
11057 if (GET_CODE (op1) == CONST_INT)
11058 {
11059 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
11060 /* Bail out on overflow. We could still swap the operands, but
11061 that would force loading the constant into a register. */
11062 if (op1 == const0_rtx
11063 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
11064 return false;
11065 code = (code == GTU ? GEU : LTU);
11066 }
11067 else
11068 {
11069 rtx tmp = op1;
11070 op1 = op0;
11071 op0 = tmp;
11072 code = (code == GTU ? LTU : GEU);
11073 }
11074 break;
11075
11076 /* Convert a>=0 into (unsigned)a<0x80000000. */
11077 case LT:
11078 case GE:
11079 if (mode == DImode || op1 != const0_rtx)
11080 return false;
11081 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
11082 code = (code == LT ? GEU : LTU);
11083 break;
11084 case LE:
11085 case GT:
11086 if (mode == DImode || op1 != constm1_rtx)
11087 return false;
11088 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
11089 code = (code == LE ? GEU : LTU);
11090 break;
11091
11092 default:
11093 return false;
11094 }
11095 /* Swapping operands may cause a constant to appear as the first operand. */
11096 if (!nonimmediate_operand (op0, VOIDmode))
11097 {
11098 if (no_new_pseudos)
11099 return false;
11100 op0 = force_reg (mode, op0);
11101 }
11102 ix86_compare_op0 = op0;
11103 ix86_compare_op1 = op1;
11104 *pop = ix86_expand_compare (code, NULL, NULL);
11105 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
11106 return true;
11107 }
11108
11109 int
11110 ix86_expand_int_movcc (rtx operands[])
11111 {
11112 enum rtx_code code = GET_CODE (operands[1]), compare_code;
11113 rtx compare_seq, compare_op;
11114 rtx second_test, bypass_test;
11115 enum machine_mode mode = GET_MODE (operands[0]);
11116 bool sign_bit_compare_p = false;
11117
11118 start_sequence ();
11119 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11120 compare_seq = get_insns ();
11121 end_sequence ();
11122
11123 compare_code = GET_CODE (compare_op);
11124
11125 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
11126 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
11127 sign_bit_compare_p = true;
11128
11129 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
11130 HImode insns, we'd be swallowed in word prefix ops. */
11131
11132 if ((mode != HImode || TARGET_FAST_PREFIX)
11133 && (mode != (TARGET_64BIT ? TImode : DImode))
11134 && GET_CODE (operands[2]) == CONST_INT
11135 && GET_CODE (operands[3]) == CONST_INT)
11136 {
11137 rtx out = operands[0];
11138 HOST_WIDE_INT ct = INTVAL (operands[2]);
11139 HOST_WIDE_INT cf = INTVAL (operands[3]);
11140 HOST_WIDE_INT diff;
11141
11142 diff = ct - cf;
11143 /* Sign bit compares are better done using shifts than by using
11144 sbb. */
11145 if (sign_bit_compare_p
11146 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11147 ix86_compare_op1, &compare_op))
11148 {
11149 /* Detect overlap between destination and compare sources. */
11150 rtx tmp = out;
11151
11152 if (!sign_bit_compare_p)
11153 {
11154 bool fpcmp = false;
11155
11156 compare_code = GET_CODE (compare_op);
11157
11158 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11159 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11160 {
11161 fpcmp = true;
11162 compare_code = ix86_fp_compare_code_to_integer (compare_code);
11163 }
11164
11165 /* To simplify the rest of the code, restrict to the GEU case. */
11166 if (compare_code == LTU)
11167 {
11168 HOST_WIDE_INT tmp = ct;
11169 ct = cf;
11170 cf = tmp;
11171 compare_code = reverse_condition (compare_code);
11172 code = reverse_condition (code);
11173 }
11174 else
11175 {
11176 if (fpcmp)
11177 PUT_CODE (compare_op,
11178 reverse_condition_maybe_unordered
11179 (GET_CODE (compare_op)));
11180 else
11181 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11182 }
11183 diff = ct - cf;
11184
11185 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
11186 || reg_overlap_mentioned_p (out, ix86_compare_op1))
11187 tmp = gen_reg_rtx (mode);
11188
11189 if (mode == DImode)
11190 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
11191 else
11192 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
11193 }
11194 else
11195 {
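/* Sign bit compare: emit_store_flag with normalizep == -1 produces the
0/-1 mask directly (an arithmetic shift of the sign bit), so no sbb is
needed. */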
11196 if (code == GT || code == GE)
11197 code = reverse_condition (code);
11198 else
11199 {
11200 HOST_WIDE_INT tmp = ct;
11201 ct = cf;
11202 cf = tmp;
11203 diff = ct - cf;
11204 }
11205 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
11206 ix86_compare_op1, VOIDmode, 0, -1);
11207 }
11208
11209 if (diff == 1)
11210 {
11211 /*
11212 * cmpl op0,op1
11213 * sbbl dest,dest
11214 * [addl dest, ct]
11215 *
11216 * Size 5 - 8.
11217 */
11218 if (ct)
11219 tmp = expand_simple_binop (mode, PLUS,
11220 tmp, GEN_INT (ct),
11221 copy_rtx (tmp), 1, OPTAB_DIRECT);
11222 }
11223 else if (cf == -1)
11224 {
11225 /*
11226 * cmpl op0,op1
11227 * sbbl dest,dest
11228 * orl $ct, dest
11229 *
11230 * Size 8.
11231 */
11232 tmp = expand_simple_binop (mode, IOR,
11233 tmp, GEN_INT (ct),
11234 copy_rtx (tmp), 1, OPTAB_DIRECT);
11235 }
11236 else if (diff == -1 && ct)
11237 {
11238 /*
11239 * cmpl op0,op1
11240 * sbbl dest,dest
11241 * notl dest
11242 * [addl dest, cf]
11243 *
11244 * Size 8 - 11.
11245 */
11246 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
11247 if (cf)
11248 tmp = expand_simple_binop (mode, PLUS,
11249 copy_rtx (tmp), GEN_INT (cf),
11250 copy_rtx (tmp), 1, OPTAB_DIRECT);
11251 }
11252 else
11253 {
11254 /*
11255 * cmpl op0,op1
11256 * sbbl dest,dest
11257 * [notl dest]
11258 * andl cf - ct, dest
11259 * [addl dest, ct]
11260 *
11261 * Size 8 - 11.
11262 */
11263
11264 if (cf == 0)
11265 {
11266 cf = ct;
11267 ct = 0;
11268 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
11269 }
11270
11271 tmp = expand_simple_binop (mode, AND,
11272 copy_rtx (tmp),
11273 gen_int_mode (cf - ct, mode),
11274 copy_rtx (tmp), 1, OPTAB_DIRECT);
11275 if (ct)
11276 tmp = expand_simple_binop (mode, PLUS,
11277 copy_rtx (tmp), GEN_INT (ct),
11278 copy_rtx (tmp), 1, OPTAB_DIRECT);
11279 }
11280
11281 if (!rtx_equal_p (tmp, out))
11282 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
11283
11284 return 1; /* DONE */
11285 }
11286
11287 if (diff < 0)
11288 {
11289 HOST_WIDE_INT tmp;
11290 tmp = ct, ct = cf, cf = tmp;
11291 diff = -diff;
11292 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
11293 {
11294 /* We may be reversing an unordered compare to a normal compare, which
11295 is not valid in general (we may convert a non-trapping condition
11296 to a trapping one); however, on i386 we currently emit all
11297 comparisons unordered. */
11298 compare_code = reverse_condition_maybe_unordered (compare_code);
11299 code = reverse_condition_maybe_unordered (code);
11300 }
11301 else
11302 {
11303 compare_code = reverse_condition (compare_code);
11304 code = reverse_condition (code);
11305 }
11306 }
11307
11308 compare_code = UNKNOWN;
11309 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
11310 && GET_CODE (ix86_compare_op1) == CONST_INT)
11311 {
11312 if (ix86_compare_op1 == const0_rtx
11313 && (code == LT || code == GE))
11314 compare_code = code;
11315 else if (ix86_compare_op1 == constm1_rtx)
11316 {
11317 if (code == LE)
11318 compare_code = LT;
11319 else if (code == GT)
11320 compare_code = GE;
11321 }
11322 }
11323
11324 /* Optimize dest = (op0 < 0) ? -1 : cf. */
11325 if (compare_code != UNKNOWN
11326 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
11327 && (cf == -1 || ct == -1))
11328 {
11329 /* If lea code below could be used, only optimize
11330 if it results in a 2 insn sequence. */
11331
11332 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
11333 || diff == 3 || diff == 5 || diff == 9)
11334 || (compare_code == LT && ct == -1)
11335 || (compare_code == GE && cf == -1))
11336 {
11337 /*
11338 * notl op1 (if necessary)
11339 * sarl $31, op1
11340 * orl cf, op1
11341 */
11342 if (ct != -1)
11343 {
11344 cf = ct;
11345 ct = -1;
11346 code = reverse_condition (code);
11347 }
11348
11349 out = emit_store_flag (out, code, ix86_compare_op0,
11350 ix86_compare_op1, VOIDmode, 0, -1);
11351
11352 out = expand_simple_binop (mode, IOR,
11353 out, GEN_INT (cf),
11354 out, 1, OPTAB_DIRECT);
11355 if (out != operands[0])
11356 emit_move_insn (operands[0], out);
11357
11358 return 1; /* DONE */
11359 }
11360 }
11361
11362
11363 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
11364 || diff == 3 || diff == 5 || diff == 9)
11365 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
11366 && (mode != DImode
11367 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
11368 {
11369 /*
11370 * xorl dest,dest
11371 * cmpl op1,op2
11372 * setcc dest
11373 * lea cf(dest*(ct-cf)),dest
11374 *
11375 * Size 14.
11376 *
11377 * This also catches the degenerate setcc-only case.
11378 */
11379
11380 rtx tmp;
11381 int nops;
11382
11383 out = emit_store_flag (out, code, ix86_compare_op0,
11384 ix86_compare_op1, VOIDmode, 0, 1);
11385
11386 nops = 0;
11387 /* On x86_64 the lea instruction operates on Pmode, so we need
11388 to get the arithmetic done in the proper mode to match. */
11389 if (diff == 1)
11390 tmp = copy_rtx (out);
11391 else
11392 {
11393 rtx out1;
11394 out1 = copy_rtx (out);
11395 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
11396 nops++;
11397 if (diff & 1)
11398 {
11399 tmp = gen_rtx_PLUS (mode, tmp, out1);
11400 nops++;
11401 }
11402 }
11403 if (cf != 0)
11404 {
11405 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
11406 nops++;
11407 }
11408 if (!rtx_equal_p (tmp, out))
11409 {
11410 if (nops == 1)
11411 out = force_operand (tmp, copy_rtx (out));
11412 else
11413 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
11414 }
11415 if (!rtx_equal_p (out, operands[0]))
11416 emit_move_insn (operands[0], copy_rtx (out));
11417
11418 return 1; /* DONE */
11419 }
11420
11421 /*
11422 * General case: Jumpful:
11423 * xorl dest,dest cmpl op1, op2
11424 * cmpl op1, op2 movl ct, dest
11425 * setcc dest jcc 1f
11426 * decl dest movl cf, dest
11427 * andl (cf-ct),dest 1:
11428 * addl ct,dest
11429 *
11430 * Size 20. Size 14.
11431 *
11432 * This is reasonably steep, but branch mispredict costs are
11433 * high on modern cpus, so consider failing only if optimizing
11434 * for space.
11435 */
11436
11437 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
11438 && BRANCH_COST >= 2)
11439 {
11440 if (cf == 0)
11441 {
11442 cf = ct;
11443 ct = 0;
11444 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
11445 /* We may be reversing an unordered compare to a normal compare,
11446 which is not valid in general (we may convert a non-trapping
11447 condition to a trapping one); however, on i386 we currently
11448 emit all comparisons unordered. */
11449 code = reverse_condition_maybe_unordered (code);
11450 else
11451 {
11452 code = reverse_condition (code);
11453 if (compare_code != UNKNOWN)
11454 compare_code = reverse_condition (compare_code);
11455 }
11456 }
11457
11458 if (compare_code != UNKNOWN)
11459 {
11460 /* notl op1 (if needed)
11461 sarl $31, op1
11462 andl (cf-ct), op1
11463 addl ct, op1
11464
11465 For x < 0 (resp. x <= -1) there will be no notl,
11466 so if possible swap the constants to get rid of the
11467 complement.
11468 True/false will be -1/0 while code below (store flag
11469 followed by decrement) is 0/-1, so the constants need
11470 to be exchanged once more. */
11471
11472 if (compare_code == GE || !cf)
11473 {
11474 code = reverse_condition (code);
11475 compare_code = LT;
11476 }
11477 else
11478 {
11479 HOST_WIDE_INT tmp = cf;
11480 cf = ct;
11481 ct = tmp;
11482 }
11483
11484 out = emit_store_flag (out, code, ix86_compare_op0,
11485 ix86_compare_op1, VOIDmode, 0, -1);
11486 }
11487 else
11488 {
11489 out = emit_store_flag (out, code, ix86_compare_op0,
11490 ix86_compare_op1, VOIDmode, 0, 1);
11491
11492 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
11493 copy_rtx (out), 1, OPTAB_DIRECT);
11494 }
11495
11496 out = expand_simple_binop (mode, AND, copy_rtx (out),
11497 gen_int_mode (cf - ct, mode),
11498 copy_rtx (out), 1, OPTAB_DIRECT);
11499 if (ct)
11500 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
11501 copy_rtx (out), 1, OPTAB_DIRECT);
11502 if (!rtx_equal_p (out, operands[0]))
11503 emit_move_insn (operands[0], copy_rtx (out));
11504
11505 return 1; /* DONE */
11506 }
11507 }
11508
11509 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
11510 {
11511 /* Try a few things more with specific constants and a variable. */
11512
11513 optab op;
11514 rtx var, orig_out, out, tmp;
11515
11516 if (BRANCH_COST <= 2)
11517 return 0; /* FAIL */
11518
11519 /* If one of the two operands is an interesting constant, load a
11520 constant with the above and mask it in with a logical operation. */
11521
11522 if (GET_CODE (operands[2]) == CONST_INT)
11523 {
11524 var = operands[3];
11525 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
11526 operands[3] = constm1_rtx, op = and_optab;
11527 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
11528 operands[3] = const0_rtx, op = ior_optab;
11529 else
11530 return 0; /* FAIL */
11531 }
11532 else if (GET_CODE (operands[3]) == CONST_INT)
11533 {
11534 var = operands[2];
11535 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
11536 operands[2] = constm1_rtx, op = and_optab;
11537 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
11538 operands[2] = const0_rtx, op = ior_optab;
11539 else
11540 return 0; /* FAIL */
11541 }
11542 else
11543 return 0; /* FAIL */
11544
11545 orig_out = operands[0];
11546 tmp = gen_reg_rtx (mode);
11547 operands[0] = tmp;
11548
11549 /* Recurse to get the constant loaded. */
11550 if (ix86_expand_int_movcc (operands) == 0)
11551 return 0; /* FAIL */
11552
11553 /* Mask in the interesting variable. */
11554 out = expand_binop (mode, op, var, tmp, orig_out, 0,
11555 OPTAB_WIDEN);
11556 if (!rtx_equal_p (out, orig_out))
11557 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
11558
11559 return 1; /* DONE */
11560 }
11561
11562 /*
11563 * For comparison with above,
11564 *
11565 * movl cf,dest
11566 * movl ct,tmp
11567 * cmpl op1,op2
11568 * cmovcc tmp,dest
11569 *
11570 * Size 15.
11571 */
11572
11573 if (! nonimmediate_operand (operands[2], mode))
11574 operands[2] = force_reg (mode, operands[2]);
11575 if (! nonimmediate_operand (operands[3], mode))
11576 operands[3] = force_reg (mode, operands[3]);
11577
11578 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
11579 {
11580 rtx tmp = gen_reg_rtx (mode);
11581 emit_move_insn (tmp, operands[3]);
11582 operands[3] = tmp;
11583 }
11584 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
11585 {
11586 rtx tmp = gen_reg_rtx (mode);
11587 emit_move_insn (tmp, operands[2]);
11588 operands[2] = tmp;
11589 }
11590
11591 if (! register_operand (operands[2], VOIDmode)
11592 && (mode == QImode
11593 || ! register_operand (operands[3], VOIDmode)))
11594 operands[2] = force_reg (mode, operands[2]);
11595
11596 if (mode == QImode
11597 && ! register_operand (operands[3], VOIDmode))
11598 operands[3] = force_reg (mode, operands[3]);
11599
11600 emit_insn (compare_seq);
11601 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11602 gen_rtx_IF_THEN_ELSE (mode,
11603 compare_op, operands[2],
11604 operands[3])));
11605 if (bypass_test)
11606 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
11607 gen_rtx_IF_THEN_ELSE (mode,
11608 bypass_test,
11609 copy_rtx (operands[3]),
11610 copy_rtx (operands[0]))));
11611 if (second_test)
11612 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
11613 gen_rtx_IF_THEN_ELSE (mode,
11614 second_test,
11615 copy_rtx (operands[2]),
11616 copy_rtx (operands[0]))));
11617
11618 return 1; /* DONE */
11619 }
11620
11621 /* Swap, force into registers, or otherwise massage the two operands
11622 to an sse comparison with a mask result. Thus we differ a bit from
11623 ix86_prepare_fp_compare_args which expects to produce a flags result.
11624
11625 The DEST operand exists to help determine whether to commute commutative
11626 operators. The POP0/POP1 operands are updated in place. The new
11627 comparison code is returned, or UNKNOWN if not implementable. */
11628
11629 static enum rtx_code
11630 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
11631 rtx *pop0, rtx *pop1)
11632 {
11633 rtx tmp;
11634
11635 switch (code)
11636 {
11637 case LTGT:
11638 case UNEQ:
11639 /* We have no LTGT as an operator. We could implement it with
11640 NE & ORDERED, but this requires an extra temporary. It's
11641 not clear that it's worth it. */
11642 return UNKNOWN;
11643
11644 case LT:
11645 case LE:
11646 case UNGT:
11647 case UNGE:
11648 /* These are supported directly. */
11649 break;
11650
11651 case EQ:
11652 case NE:
11653 case UNORDERED:
11654 case ORDERED:
11655 /* For commutative operators, try to canonicalize the destination
11656 operand to be first in the comparison - this helps reload to
11657 avoid extra moves. */
11658 if (!dest || !rtx_equal_p (dest, *pop1))
11659 break;
11660 /* FALLTHRU */
11661
11662 case GE:
11663 case GT:
11664 case UNLE:
11665 case UNLT:
11666 /* These are not supported directly. Swap the comparison operands
11667 to transform into something that is supported. */
11668 tmp = *pop0;
11669 *pop0 = *pop1;
11670 *pop1 = tmp;
11671 code = swap_condition (code);
11672 break;
11673
11674 default:
11675 gcc_unreachable ();
11676 }
11677
11678 return code;
11679 }
11680
11681 /* Detect conditional moves that exactly match min/max operational
11682 semantics. Note that this is IEEE safe, as long as we don't
11683 interchange the operands.
11684
11685 Returns FALSE if this conditional move doesn't match a MIN/MAX,
11686 and TRUE if the operation is successful and instructions are emitted. */
11687
11688 static bool
11689 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
11690 rtx cmp_op1, rtx if_true, rtx if_false)
11691 {
11692 enum machine_mode mode;
11693 bool is_min;
11694 rtx tmp;
11695
11696 if (code == LT)
11697 ;
11698 else if (code == UNGE)
11699 {
11700 tmp = if_true;
11701 if_true = if_false;
11702 if_false = tmp;
11703 }
11704 else
11705 return false;
11706
11707 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
11708 is_min = true;
11709 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
11710 is_min = false;
11711 else
11712 return false;
11713
11714 mode = GET_MODE (dest);
11715
11716 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
11717 but MODE may be a vector mode and thus not appropriate. */
11718 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
11719 {
11720 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
11721 rtvec v;
11722
11723 if_true = force_reg (mode, if_true);
11724 v = gen_rtvec (2, if_true, if_false);
11725 tmp = gen_rtx_UNSPEC (mode, v, u);
11726 }
11727 else
11728 {
11729 code = is_min ? SMIN : SMAX;
11730 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
11731 }
11732
11733 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
11734 return true;
11735 }
11736
11737 /* Expand an sse vector comparison. Return the register with the result. */
11738
11739 static rtx
11740 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
11741 rtx op_true, rtx op_false)
11742 {
11743 enum machine_mode mode = GET_MODE (dest);
11744 rtx x;
11745
11746 cmp_op0 = force_reg (mode, cmp_op0);
11747 if (!nonimmediate_operand (cmp_op1, mode))
11748 cmp_op1 = force_reg (mode, cmp_op1);
11749
11750 if (optimize
11751 || reg_overlap_mentioned_p (dest, op_true)
11752 || reg_overlap_mentioned_p (dest, op_false))
11753 dest = gen_reg_rtx (mode);
11754
11755 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
11756 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11757
11758 return dest;
11759 }
11760
11761 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
11762 operations. This is used for both scalar and vector conditional moves. */
11763
11764 static void
11765 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
11766 {
11767 enum machine_mode mode = GET_MODE (dest);
11768 rtx t2, t3, x;
11769
11770 if (op_false == CONST0_RTX (mode))
11771 {
11772 op_true = force_reg (mode, op_true);
11773 x = gen_rtx_AND (mode, cmp, op_true);
11774 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11775 }
11776 else if (op_true == CONST0_RTX (mode))
11777 {
11778 op_false = force_reg (mode, op_false);
11779 x = gen_rtx_NOT (mode, cmp);
11780 x = gen_rtx_AND (mode, x, op_false);
11781 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11782 }
11783 else
11784 {
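/* General case: dest = (cmp & op_true) | (~cmp & op_false). */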
11785 op_true = force_reg (mode, op_true);
11786 op_false = force_reg (mode, op_false);
11787
11788 t2 = gen_reg_rtx (mode);
11789 if (optimize)
11790 t3 = gen_reg_rtx (mode);
11791 else
11792 t3 = dest;
11793
11794 x = gen_rtx_AND (mode, op_true, cmp);
11795 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
11796
11797 x = gen_rtx_NOT (mode, cmp);
11798 x = gen_rtx_AND (mode, x, op_false);
11799 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
11800
11801 x = gen_rtx_IOR (mode, t3, t2);
11802 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11803 }
11804 }
11805
11806 /* Expand a floating-point conditional move. Return true if successful. */
11807
11808 int
11809 ix86_expand_fp_movcc (rtx operands[])
11810 {
11811 enum machine_mode mode = GET_MODE (operands[0]);
11812 enum rtx_code code = GET_CODE (operands[1]);
11813 rtx tmp, compare_op, second_test, bypass_test;
11814
11815 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
11816 {
11817 enum machine_mode cmode;
11818
11819 /* Since we've no cmove for sse registers, don't force bad register
11820 allocation just to gain access to it. Deny movcc when the
11821 comparison mode doesn't match the move mode. */
11822 cmode = GET_MODE (ix86_compare_op0);
11823 if (cmode == VOIDmode)
11824 cmode = GET_MODE (ix86_compare_op1);
11825 if (cmode != mode)
11826 return 0;
11827
11828 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
11829 &ix86_compare_op0,
11830 &ix86_compare_op1);
11831 if (code == UNKNOWN)
11832 return 0;
11833
11834 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
11835 ix86_compare_op1, operands[2],
11836 operands[3]))
11837 return 1;
11838
11839 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
11840 ix86_compare_op1, operands[2], operands[3]);
11841 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
11842 return 1;
11843 }
11844
11848 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11849
11850 /* The floating point conditional move instructions don't directly
11851 support signed integer comparisons. */
11852
11853 if (!fcmov_comparison_operator (compare_op, VOIDmode))
11854 {
11855 gcc_assert (!second_test && !bypass_test);
11856 tmp = gen_reg_rtx (QImode);
11857 ix86_expand_setcc (code, tmp);
11858 code = NE;
11859 ix86_compare_op0 = tmp;
11860 ix86_compare_op1 = const0_rtx;
11861 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11862 }
11863 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
11864 {
11865 tmp = gen_reg_rtx (mode);
11866 emit_move_insn (tmp, operands[3]);
11867 operands[3] = tmp;
11868 }
11869 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
11870 {
11871 tmp = gen_reg_rtx (mode);
11872 emit_move_insn (tmp, operands[2]);
11873 operands[2] = tmp;
11874 }
11875
11876 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11877 gen_rtx_IF_THEN_ELSE (mode, compare_op,
11878 operands[2], operands[3])));
11879 if (bypass_test)
11880 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11881 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
11882 operands[3], operands[0])));
11883 if (second_test)
11884 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11885 gen_rtx_IF_THEN_ELSE (mode, second_test,
11886 operands[2], operands[0])));
11887
11888 return 1;
11889 }
11890
11891 /* Expand a floating-point vector conditional move; a vcond operation
11892 rather than a movcc operation. */
11893
11894 bool
11895 ix86_expand_fp_vcond (rtx operands[])
11896 {
11897 enum rtx_code code = GET_CODE (operands[3]);
11898 rtx cmp;
11899
11900 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
11901 &operands[4], &operands[5]);
11902 if (code == UNKNOWN)
11903 return false;
11904
11905 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
11906 operands[5], operands[1], operands[2]))
11907 return true;
11908
11909 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
11910 operands[1], operands[2]);
11911 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
11912 return true;
11913 }
11914
11915 /* Expand a signed integral vector conditional move. */
11916
11917 bool
11918 ix86_expand_int_vcond (rtx operands[])
11919 {
11920 enum machine_mode mode = GET_MODE (operands[0]);
11921 enum rtx_code code = GET_CODE (operands[3]);
11922 bool negate = false;
11923 rtx x, cop0, cop1;
11924
11925 cop0 = operands[4];
11926 cop1 = operands[5];
11927
11928 /* Canonicalize the comparison to EQ, GT, GTU. */
11929 switch (code)
11930 {
11931 case EQ:
11932 case GT:
11933 case GTU:
11934 break;
11935
11936 case NE:
11937 case LE:
11938 case LEU:
11939 code = reverse_condition (code);
11940 negate = true;
11941 break;
11942
11943 case GE:
11944 case GEU:
11945 code = reverse_condition (code);
11946 negate = true;
11947 /* FALLTHRU */
11948
11949 case LT:
11950 case LTU:
11951 code = swap_condition (code);
11952 x = cop0, cop0 = cop1, cop1 = x;
11953 break;
11954
11955 default:
11956 gcc_unreachable ();
11957 }
11958
11959 /* Unsigned parallel compare is not supported by the hardware. Play some
11960 tricks to turn this into a signed comparison against 0. */
11961 if (code == GTU)
11962 {
11963 cop0 = force_reg (mode, cop0);
11964
11965 switch (mode)
11966 {
11967 case V4SImode:
11968 {
11969 rtx t1, t2, mask;
11970
11971 /* Perform a parallel modulo subtraction. */
11972 t1 = gen_reg_rtx (mode);
11973 emit_insn (gen_subv4si3 (t1, cop0, cop1));
11974
11975 /* Extract the original sign bit of op0. */
11976 mask = GEN_INT (-0x80000000);
11977 mask = gen_rtx_CONST_VECTOR (mode,
11978 gen_rtvec (4, mask, mask, mask, mask));
11979 mask = force_reg (mode, mask);
11980 t2 = gen_reg_rtx (mode);
11981 emit_insn (gen_andv4si3 (t2, cop0, mask));
11982
11983 /* XOR it back into the result of the subtraction. This results
11984 in the sign bit set iff we saw unsigned underflow. */
11985 x = gen_reg_rtx (mode);
11986 emit_insn (gen_xorv4si3 (x, t1, t2));
11987
11988 code = GT;
11989 }
11990 break;
11991
11992 case V16QImode:
11993 case V8HImode:
11994 /* Perform a parallel unsigned saturating subtraction. */
11995 x = gen_reg_rtx (mode);
11996 emit_insn (gen_rtx_SET (VOIDmode, x,
11997 gen_rtx_US_MINUS (mode, cop0, cop1)));
11998
11999 code = EQ;
12000 negate = !negate;
12001 break;
12002
12003 default:
12004 gcc_unreachable ();
12005 }
12006
12007 cop0 = x;
12008 cop1 = CONST0_RTX (mode);
12009 }
12010
12011 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
12012 operands[1+negate], operands[2-negate]);
12013
12014 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
12015 operands[2-negate]);
12016 return true;
12017 }
12018
12019 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
12020 true if we should do zero extension, else sign extension. HIGH_P is
12021 true if we want the N/2 high elements, else the low elements. */
12022
12023 void
12024 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
12025 {
12026 enum machine_mode imode = GET_MODE (operands[1]);
12027 rtx (*unpack)(rtx, rtx, rtx);
12028 rtx se, dest;
12029
12030 switch (imode)
12031 {
12032 case V16QImode:
12033 if (high_p)
12034 unpack = gen_vec_interleave_highv16qi;
12035 else
12036 unpack = gen_vec_interleave_lowv16qi;
12037 break;
12038 case V8HImode:
12039 if (high_p)
12040 unpack = gen_vec_interleave_highv8hi;
12041 else
12042 unpack = gen_vec_interleave_lowv8hi;
12043 break;
12044 case V4SImode:
12045 if (high_p)
12046 unpack = gen_vec_interleave_highv4si;
12047 else
12048 unpack = gen_vec_interleave_lowv4si;
12049 break;
12050 default:
12051 gcc_unreachable ();
12052 }
12053
12054 dest = gen_lowpart (imode, operands[0]);
12055
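/* The second interleave operand supplies the extension bits: zero for
zero extension, or a 0 > operands[1] comparison mask (all ones for
negative elements) for sign extension. */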
12056 if (unsigned_p)
12057 se = force_reg (imode, CONST0_RTX (imode));
12058 else
12059 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
12060 operands[1], pc_rtx, pc_rtx);
12061
12062 emit_insn (unpack (dest, operands[1], se));
12063 }
12064
12065 /* Expand conditional increment or decrement using adc/sbb instructions.
12066 The default case using setcc followed by the conditional move can be
12067 done by generic code. */
12068 int
12069 ix86_expand_int_addcc (rtx operands[])
12070 {
12071 enum rtx_code code = GET_CODE (operands[1]);
12072 rtx compare_op;
12073 rtx val = const0_rtx;
12074 bool fpcmp = false;
12075 enum machine_mode mode = GET_MODE (operands[0]);
12076
12077 if (operands[3] != const1_rtx
12078 && operands[3] != constm1_rtx)
12079 return 0;
12080 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
12081 ix86_compare_op1, &compare_op))
12082 return 0;
12083 code = GET_CODE (compare_op);
12084
12085 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
12086 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
12087 {
12088 fpcmp = true;
12089 code = ix86_fp_compare_code_to_integer (code);
12090 }
12091
12092 if (code != LTU)
12093 {
12094 val = constm1_rtx;
12095 if (fpcmp)
12096 PUT_CODE (compare_op,
12097 reverse_condition_maybe_unordered
12098 (GET_CODE (compare_op)));
12099 else
12100 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
12101 }
12102 PUT_MODE (compare_op, mode);
12103
12104 /* Construct either adc or sbb insn. */
12105 if ((code == LTU) == (operands[3] == constm1_rtx))
12106 {
12107 switch (GET_MODE (operands[0]))
12108 {
12109 case QImode:
12110 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
12111 break;
12112 case HImode:
12113 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
12114 break;
12115 case SImode:
12116 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
12117 break;
12118 case DImode:
12119 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
12120 break;
12121 default:
12122 gcc_unreachable ();
12123 }
12124 }
12125 else
12126 {
12127 switch (GET_MODE (operands[0]))
12128 {
12129 case QImode:
12130 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
12131 break;
12132 case HImode:
12133 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
12134 break;
12135 case SImode:
12136 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
12137 break;
12138 case DImode:
12139 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
12140 break;
12141 default:
12142 gcc_unreachable ();
12143 }
12144 }
12145 return 1; /* DONE */
12146 }
12147
12148
12149 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
12150 works for floating point parameters and non-offsettable memories.
12151 For pushes, it returns just stack offsets; the values will be saved
12152 in the right order. At most three parts are generated. */
12153
12154 static int
12155 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
12156 {
12157 int size;
12158
12159 if (!TARGET_64BIT)
12160 size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
12161 else
12162 size = (GET_MODE_SIZE (mode) + 4) / 8;
12163
12164 gcc_assert (GET_CODE (operand) != REG || !MMX_REGNO_P (REGNO (operand)));
12165 gcc_assert (size >= 2 && size <= 3);
12166
12167 /* Optimize constant pool references to immediates. This is used by fp
12168 moves, which force all constants to memory to allow combining. */
12169 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
12170 {
12171 rtx tmp = maybe_get_pool_constant (operand);
12172 if (tmp)
12173 operand = tmp;
12174 }
12175
12176 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
12177 {
12178 /* The only non-offsettable memories we handle are pushes. */
12179 int ok = push_operand (operand, VOIDmode);
12180
12181 gcc_assert (ok);
12182
12183 operand = copy_rtx (operand);
12184 PUT_MODE (operand, Pmode);
12185 parts[0] = parts[1] = parts[2] = operand;
12186 return size;
12187 }
12188
12189 if (GET_CODE (operand) == CONST_VECTOR)
12190 {
12191 enum machine_mode imode = int_mode_for_mode (mode);
12192 /* Caution: if we looked through a constant pool memory above,
12193 the operand may actually have a different mode now. That's
12194 ok, since we want to pun this all the way back to an integer. */
12195 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
12196 gcc_assert (operand != NULL);
12197 mode = imode;
12198 }
12199
12200 if (!TARGET_64BIT)
12201 {
12202 if (mode == DImode)
12203 split_di (&operand, 1, &parts[0], &parts[1]);
12204 else
12205 {
12206 if (REG_P (operand))
12207 {
12208 gcc_assert (reload_completed);
12209 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
12210 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
12211 if (size == 3)
12212 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
12213 }
12214 else if (offsettable_memref_p (operand))
12215 {
12216 operand = adjust_address (operand, SImode, 0);
12217 parts[0] = operand;
12218 parts[1] = adjust_address (operand, SImode, 4);
12219 if (size == 3)
12220 parts[2] = adjust_address (operand, SImode, 8);
12221 }
12222 else if (GET_CODE (operand) == CONST_DOUBLE)
12223 {
12224 REAL_VALUE_TYPE r;
12225 long l[4];
12226
12227 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
12228 switch (mode)
12229 {
12230 case XFmode:
12231 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
12232 parts[2] = gen_int_mode (l[2], SImode);
12233 break;
12234 case DFmode:
12235 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
12236 break;
12237 default:
12238 gcc_unreachable ();
12239 }
12240 parts[1] = gen_int_mode (l[1], SImode);
12241 parts[0] = gen_int_mode (l[0], SImode);
12242 }
12243 else
12244 gcc_unreachable ();
12245 }
12246 }
12247 else
12248 {
12249 if (mode == TImode)
12250 split_ti (&operand, 1, &parts[0], &parts[1]);
12251 if (mode == XFmode || mode == TFmode)
12252 {
12253 enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
12254 if (REG_P (operand))
12255 {
12256 gcc_assert (reload_completed);
12257 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
12258 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
12259 }
12260 else if (offsettable_memref_p (operand))
12261 {
12262 operand = adjust_address (operand, DImode, 0);
12263 parts[0] = operand;
12264 parts[1] = adjust_address (operand, upper_mode, 8);
12265 }
12266 else if (GET_CODE (operand) == CONST_DOUBLE)
12267 {
12268 REAL_VALUE_TYPE r;
12269 long l[4];
12270
12271 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
12272 real_to_target (l, &r, mode);
12273
12274 /* Do not use shift by 32 to avoid warning on 32bit systems. */
12275 if (HOST_BITS_PER_WIDE_INT >= 64)
12276 parts[0]
12277 = gen_int_mode
12278 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
12279 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
12280 DImode);
12281 else
12282 parts[0] = immed_double_const (l[0], l[1], DImode);
12283
12284 if (upper_mode == SImode)
12285 parts[1] = gen_int_mode (l[2], SImode);
12286 else if (HOST_BITS_PER_WIDE_INT >= 64)
12287 parts[1]
12288 = gen_int_mode
12289 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
12290 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
12291 DImode);
12292 else
12293 parts[1] = immed_double_const (l[2], l[3], DImode);
12294 }
12295 else
12296 gcc_unreachable ();
12297 }
12298 }
12299
12300 return size;
12301 }
12302
12303 /* Emit insns to perform a move or push of DI, DF, and XF values.
12304 Return false when normal moves are needed; true when all required
12305 insns have been emitted. Operands 2-4 contain the input values
12306 in the correct order; operands 5-7 contain the output values. */
12307
12308 void
12309 ix86_split_long_move (rtx operands[])
12310 {
12311 rtx part[2][3];
12312 int nparts;
12313 int push = 0;
12314 int collisions = 0;
12315 enum machine_mode mode = GET_MODE (operands[0]);
12316
12317 /* The DFmode expanders may ask us to move a double.
12318 For a 64-bit target this is a single move. By hiding that fact
12319 here we simplify the i386.md splitters. */
12320 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
12321 {
12322 /* Optimize constant pool references to immediates. This is used by
12323 fp moves, which force all constants to memory to allow combining. */
12324
12325 if (GET_CODE (operands[1]) == MEM
12326 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
12327 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
12328 operands[1] = get_pool_constant (XEXP (operands[1], 0));
12329 if (push_operand (operands[0], VOIDmode))
12330 {
12331 operands[0] = copy_rtx (operands[0]);
12332 PUT_MODE (operands[0], Pmode);
12333 }
12334 else
12335 operands[0] = gen_lowpart (DImode, operands[0]);
12336 operands[1] = gen_lowpart (DImode, operands[1]);
12337 emit_move_insn (operands[0], operands[1]);
12338 return;
12339 }
12340
12341 /* The only non-offsettable memory we handle is push. */
12342 if (push_operand (operands[0], VOIDmode))
12343 push = 1;
12344 else
12345 gcc_assert (GET_CODE (operands[0]) != MEM
12346 || offsettable_memref_p (operands[0]));
12347
12348 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
12349 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
12350
12351 /* When emitting a push, take care of source operands on the stack. */
12352 if (push && GET_CODE (operands[1]) == MEM
12353 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
12354 {
12355 if (nparts == 3)
12356 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
12357 XEXP (part[1][2], 0));
12358 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
12359 XEXP (part[1][1], 0));
12360 }
12361
12362 /* We need to do the copy in the right order in case an address register
12363 of the source overlaps the destination. */
12364 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
12365 {
12366 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
12367 collisions++;
12368 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
12369 collisions++;
12370 if (nparts == 3
12371 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
12372 collisions++;
12373
12374 /* Collision in the middle part can be handled by reordering. */
12375 if (collisions == 1 && nparts == 3
12376 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
12377 {
12378 rtx tmp;
12379 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
12380 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
12381 }
12382
12383 /* If there are more collisions, we can't handle them by reordering.
12384 Do an lea to the last part and use only one colliding move. */
12385 else if (collisions > 1)
12386 {
12387 rtx base;
12388
12389 collisions = 1;
12390
12391 base = part[0][nparts - 1];
12392
12393 /* Handle the case when the last part isn't valid for lea.
12394 Happens in 64-bit mode storing the 12-byte XFmode. */
12395 if (GET_MODE (base) != Pmode)
12396 base = gen_rtx_REG (Pmode, REGNO (base));
12397
12398 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
12399 part[1][0] = replace_equiv_address (part[1][0], base);
12400 part[1][1] = replace_equiv_address (part[1][1],
12401 plus_constant (base, UNITS_PER_WORD));
12402 if (nparts == 3)
12403 part[1][2] = replace_equiv_address (part[1][2],
12404 plus_constant (base, 8));
12405 }
12406 }
12407
12408 if (push)
12409 {
12410 if (!TARGET_64BIT)
12411 {
12412 if (nparts == 3)
12413 {
12414 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
12415 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
12416 emit_move_insn (part[0][2], part[1][2]);
12417 }
12418 }
12419 else
12420 {
12421 /* In 64-bit mode we don't have a 32-bit push available. If this is a
12422 register, that is OK - we will just use the larger counterpart. We also
12423 retype memory - these come from an attempt to avoid a REX prefix when
12424 moving the second half of a TFmode value. */
12425 if (GET_MODE (part[1][1]) == SImode)
12426 {
12427 switch (GET_CODE (part[1][1]))
12428 {
12429 case MEM:
12430 part[1][1] = adjust_address (part[1][1], DImode, 0);
12431 break;
12432
12433 case REG:
12434 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
12435 break;
12436
12437 default:
12438 gcc_unreachable ();
12439 }
12440
12441 if (GET_MODE (part[1][0]) == SImode)
12442 part[1][0] = part[1][1];
12443 }
12444 }
12445 emit_move_insn (part[0][1], part[1][1]);
12446 emit_move_insn (part[0][0], part[1][0]);
12447 return;
12448 }
12449
12450 /* Choose the correct order so as not to overwrite the source before it is copied. */
12451 if ((REG_P (part[0][0])
12452 && REG_P (part[1][1])
12453 && (REGNO (part[0][0]) == REGNO (part[1][1])
12454 || (nparts == 3
12455 && REGNO (part[0][0]) == REGNO (part[1][2]))))
12456 || (collisions > 0
12457 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
12458 {
12459 if (nparts == 3)
12460 {
12461 operands[2] = part[0][2];
12462 operands[3] = part[0][1];
12463 operands[4] = part[0][0];
12464 operands[5] = part[1][2];
12465 operands[6] = part[1][1];
12466 operands[7] = part[1][0];
12467 }
12468 else
12469 {
12470 operands[2] = part[0][1];
12471 operands[3] = part[0][0];
12472 operands[5] = part[1][1];
12473 operands[6] = part[1][0];
12474 }
12475 }
12476 else
12477 {
12478 if (nparts == 3)
12479 {
12480 operands[2] = part[0][0];
12481 operands[3] = part[0][1];
12482 operands[4] = part[0][2];
12483 operands[5] = part[1][0];
12484 operands[6] = part[1][1];
12485 operands[7] = part[1][2];
12486 }
12487 else
12488 {
12489 operands[2] = part[0][0];
12490 operands[3] = part[0][1];
12491 operands[5] = part[1][0];
12492 operands[6] = part[1][1];
12493 }
12494 }
12495
12496 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
12497 if (optimize_size)
12498 {
12499 if (GET_CODE (operands[5]) == CONST_INT
12500 && operands[5] != const0_rtx
12501 && REG_P (operands[2]))
12502 {
12503 if (GET_CODE (operands[6]) == CONST_INT
12504 && INTVAL (operands[6]) == INTVAL (operands[5]))
12505 operands[6] = operands[2];
12506
12507 if (nparts == 3
12508 && GET_CODE (operands[7]) == CONST_INT
12509 && INTVAL (operands[7]) == INTVAL (operands[5]))
12510 operands[7] = operands[2];
12511 }
12512
12513 if (nparts == 3
12514 && GET_CODE (operands[6]) == CONST_INT
12515 && operands[6] != const0_rtx
12516 && REG_P (operands[3])
12517 && GET_CODE (operands[7]) == CONST_INT
12518 && INTVAL (operands[7]) == INTVAL (operands[6]))
12519 operands[7] = operands[3];
12520 }
12521
12522 emit_move_insn (operands[2], operands[5]);
12523 emit_move_insn (operands[3], operands[6]);
12524 if (nparts == 3)
12525 emit_move_insn (operands[4], operands[7]);
12526
12527 return;
12528 }
12529
12530 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
12531 left shift by a constant, either using a single shift or
12532 a sequence of add instructions. */
12533
12534 static void
12535 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
12536 {
12537 if (count == 1)
12538 {
12539 emit_insn ((mode == DImode
12540 ? gen_addsi3
12541 : gen_adddi3) (operand, operand, operand));
12542 }
12543 else if (!optimize_size
12544 && count * ix86_cost->add <= ix86_cost->shift_const)
12545 {
12546 int i;
12547 for (i = 0; i < count; i++)
12548 {
12549 emit_insn ((mode == DImode
12550 ? gen_addsi3
12551 : gen_adddi3) (operand, operand, operand));
12552 }
12553 }
12554 else
12555 emit_insn ((mode == DImode
12556 ? gen_ashlsi3
12557 : gen_ashldi3) (operand, operand, GEN_INT (count)));
12558 }
12559
12560 void
12561 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
12562 {
12563 rtx low[2], high[2];
12564 int count;
12565 const int single_width = mode == DImode ? 32 : 64;
12566
12567 if (GET_CODE (operands[2]) == CONST_INT)
12568 {
12569 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12570 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12571
12572 if (count >= single_width)
12573 {
12574 emit_move_insn (high[0], low[1]);
12575 emit_move_insn (low[0], const0_rtx);
12576
12577 if (count > single_width)
12578 ix86_expand_ashl_const (high[0], count - single_width, mode);
12579 }
12580 else
12581 {
12582 if (!rtx_equal_p (operands[0], operands[1]))
12583 emit_move_insn (operands[0], operands[1]);
12584 emit_insn ((mode == DImode
12585 ? gen_x86_shld_1
12586 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
12587 ix86_expand_ashl_const (low[0], count, mode);
12588 }
12589 return;
12590 }
12591
12592 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12593
12594 if (operands[1] == const1_rtx)
12595 {
12596 /* Assuming we've chosen QImode-capable registers, 1 << N
12597 can be done with two 32/64-bit shifts, no branches, no cmoves. */
12598 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
12599 {
12600 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
12601
12602 ix86_expand_clear (low[0]);
12603 ix86_expand_clear (high[0]);
12604 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
12605
12606 d = gen_lowpart (QImode, low[0]);
12607 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
12608 s = gen_rtx_EQ (QImode, flags, const0_rtx);
12609 emit_insn (gen_rtx_SET (VOIDmode, d, s));
12610
12611 d = gen_lowpart (QImode, high[0]);
12612 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
12613 s = gen_rtx_NE (QImode, flags, const0_rtx);
12614 emit_insn (gen_rtx_SET (VOIDmode, d, s));
12615 }
12616
12617 /* Otherwise, we can get the same results by manually performing
12618 a bit extract operation on bit 5/6, and then performing the two
12619 shifts. The two methods of getting 0/1 into low/high are exactly
12620 the same size. Avoiding the shift in the bit extract case helps
12621 pentium4 a bit; no one else seems to care much either way. */
12622 else
12623 {
12624 rtx x;
12625
12626 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
12627 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
12628 else
12629 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
12630 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
12631
12632 emit_insn ((mode == DImode
12633 ? gen_lshrsi3
12634 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
12635 emit_insn ((mode == DImode
12636 ? gen_andsi3
12637 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
12638 emit_move_insn (low[0], high[0]);
12639 emit_insn ((mode == DImode
12640 ? gen_xorsi3
12641 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
12642 }
12643
12644 emit_insn ((mode == DImode
12645 ? gen_ashlsi3
12646 : gen_ashldi3) (low[0], low[0], operands[2]));
12647 emit_insn ((mode == DImode
12648 ? gen_ashlsi3
12649 : gen_ashldi3) (high[0], high[0], operands[2]));
12650 return;
12651 }
12652
12653 if (operands[1] == constm1_rtx)
12654 {
12655 /* For -1 << N, we can avoid the shld instruction, because we
12656 know that we're shifting 0...31/63 ones into a -1. */
12657 emit_move_insn (low[0], constm1_rtx);
12658 if (optimize_size)
12659 emit_move_insn (high[0], low[0]);
12660 else
12661 emit_move_insn (high[0], constm1_rtx);
12662 }
12663 else
12664 {
12665 if (!rtx_equal_p (operands[0], operands[1]))
12666 emit_move_insn (operands[0], operands[1]);
12667
12668 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12669 emit_insn ((mode == DImode
12670 ? gen_x86_shld_1
12671 : gen_x86_64_shld) (high[0], low[0], operands[2]));
12672 }
12673
12674 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
12675
12676 if (TARGET_CMOVE && scratch)
12677 {
12678 ix86_expand_clear (scratch);
12679 emit_insn ((mode == DImode
12680 ? gen_x86_shift_adj_1
12681 : gen_x86_64_shift_adj) (high[0], low[0], operands[2], scratch));
12682 }
12683 else
12684 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
12685 }
12686
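/* Split a double-word arithmetic right shift (DImode on 32-bit targets,
   TImode on 64-bit targets) into word-sized operations.  OPERANDS holds
   the destination, source and shift count; SCRATCH, when non-NULL and
   cmov is available, is used to avoid a branch for variable counts.  */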
12687 void
12688 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
12689 {
12690 rtx low[2], high[2];
12691 int count;
12692 const int single_width = mode == DImode ? 32 : 64;
12693
12694 if (GET_CODE (operands[2]) == CONST_INT)
12695 {
12696 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12697 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12698
12699 if (count == single_width * 2 - 1)
12700 {
12701 emit_move_insn (high[0], high[1]);
12702 emit_insn ((mode == DImode
12703 ? gen_ashrsi3
12704 : gen_ashrdi3) (high[0], high[0],
12705 GEN_INT (single_width - 1)));
12706 emit_move_insn (low[0], high[0]);
12707
12708 }
12709 else if (count >= single_width)
12710 {
12711 emit_move_insn (low[0], high[1]);
12712 emit_move_insn (high[0], low[0]);
12713 emit_insn ((mode == DImode
12714 ? gen_ashrsi3
12715 : gen_ashrdi3) (high[0], high[0],
12716 GEN_INT (single_width - 1)));
12717 if (count > single_width)
12718 emit_insn ((mode == DImode
12719 ? gen_ashrsi3
12720 : gen_ashrdi3) (low[0], low[0],
12721 GEN_INT (count - single_width)));
12722 }
12723 else
12724 {
12725 if (!rtx_equal_p (operands[0], operands[1]))
12726 emit_move_insn (operands[0], operands[1]);
12727 emit_insn ((mode == DImode
12728 ? gen_x86_shrd_1
12729 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
12730 emit_insn ((mode == DImode
12731 ? gen_ashrsi3
12732 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
12733 }
12734 }
12735 else
12736 {
12737 if (!rtx_equal_p (operands[0], operands[1]))
12738 emit_move_insn (operands[0], operands[1]);
12739
12740 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12741
12742 emit_insn ((mode == DImode
12743 ? gen_x86_shrd_1
12744 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
12745 emit_insn ((mode == DImode
12746 ? gen_ashrsi3
12747 : gen_ashrdi3) (high[0], high[0], operands[2]));
12748
12749 if (TARGET_CMOVE && scratch)
12750 {
12751 emit_move_insn (scratch, high[0]);
12752 emit_insn ((mode == DImode
12753 ? gen_ashrsi3
12754 : gen_ashrdi3) (scratch, scratch,
12755 GEN_INT (single_width - 1)));
12756 emit_insn ((mode == DImode
12757 ? gen_x86_shift_adj_1
12758 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
12759 scratch));
12760 }
12761 else
12762 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
12763 }
12764 }
12765
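/* Split a double-word logical right shift into word-sized operations.
   OPERANDS and SCRATCH are used as in ix86_split_ashr above.  */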
12766 void
12767 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
12768 {
12769 rtx low[2], high[2];
12770 int count;
12771 const int single_width = mode == DImode ? 32 : 64;
12772
12773 if (GET_CODE (operands[2]) == CONST_INT)
12774 {
12775 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12776 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12777
12778 if (count >= single_width)
12779 {
12780 emit_move_insn (low[0], high[1]);
12781 ix86_expand_clear (high[0]);
12782
12783 if (count > single_width)
12784 emit_insn ((mode == DImode
12785 ? gen_lshrsi3
12786 : gen_lshrdi3) (low[0], low[0],
12787 GEN_INT (count - single_width)));
12788 }
12789 else
12790 {
12791 if (!rtx_equal_p (operands[0], operands[1]))
12792 emit_move_insn (operands[0], operands[1]);
12793 emit_insn ((mode == DImode
12794 ? gen_x86_shrd_1
12795 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
12796 emit_insn ((mode == DImode
12797 ? gen_lshrsi3
12798 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
12799 }
12800 }
12801 else
12802 {
12803 if (!rtx_equal_p (operands[0], operands[1]))
12804 emit_move_insn (operands[0], operands[1]);
12805
12806 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12807
12808 emit_insn ((mode == DImode
12809 ? gen_x86_shrd_1
12810 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
12811 emit_insn ((mode == DImode
12812 ? gen_lshrsi3
12813 : gen_lshrdi3) (high[0], high[0], operands[2]));
12814
12815 /* Heh. By reversing the arguments, we can reuse this pattern. */
12816 if (TARGET_CMOVE && scratch)
12817 {
12818 ix86_expand_clear (scratch);
12819 emit_insn ((mode == DImode
12820 ? gen_x86_shift_adj_1
12821 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
12822 scratch));
12823 }
12824 else
12825 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
12826 }
12827 }
12828
12829 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
12830 static void
12831 predict_jump (int prob)
12832 {
12833 rtx insn = get_last_insn ();
12834 gcc_assert (GET_CODE (insn) == JUMP_INSN);
12835 REG_NOTES (insn)
12836 = gen_rtx_EXPR_LIST (REG_BR_PROB,
12837 GEN_INT (prob),
12838 REG_NOTES (insn));
12839 }
12840
12841 /* Helper function for the string operations below. Test whether VARIABLE
12842 is aligned to VALUE bytes. If so, jump to the returned label. */
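/* For example, ix86_expand_aligntest (count, 2, true) emits code equivalent
   to "if ((count & 2) == 0) goto label;" and returns that label, predicting
   the jump with 50% (epilogue) or 90% (prologue) probability.  */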
12843 static rtx
12844 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
12845 {
12846 rtx label = gen_label_rtx ();
12847 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
12848 if (GET_MODE (variable) == DImode)
12849 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
12850 else
12851 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
12852 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
12853 1, label);
12854 if (epilogue)
12855 predict_jump (REG_BR_PROB_BASE * 50 / 100);
12856 else
12857 predict_jump (REG_BR_PROB_BASE * 90 / 100);
12858 return label;
12859 }
12860
12861 /* Decrease COUNTREG by VALUE. */
12862 static void
12863 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
12864 {
12865 if (GET_MODE (countreg) == DImode)
12866 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
12867 else
12868 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
12869 }
12870
12871 /* Zero-extend the possibly SImode EXP to a Pmode register. */
12872 rtx
12873 ix86_zero_extend_to_Pmode (rtx exp)
12874 {
12875 rtx r;
12876 if (GET_MODE (exp) == VOIDmode)
12877 return force_reg (Pmode, exp);
12878 if (GET_MODE (exp) == Pmode)
12879 return copy_to_mode_reg (Pmode, exp);
12880 r = gen_reg_rtx (Pmode);
12881 emit_insn (gen_zero_extendsidi2 (r, exp));
12882 return r;
12883 }
12884
12885 /* Divide COUNTREG by SCALE. */
12886 static rtx
12887 scale_counter (rtx countreg, int scale)
12888 {
12889 rtx sc;
12890 rtx piece_size_mask;
12891
12892 if (scale == 1)
12893 return countreg;
12894 if (GET_CODE (countreg) == CONST_INT)
12895 return GEN_INT (INTVAL (countreg) / scale);
12896 gcc_assert (REG_P (countreg));
12897
12898 piece_size_mask = GEN_INT (scale - 1);
12899 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
12900 GEN_INT (exact_log2 (scale)),
12901 NULL, 1, OPTAB_DIRECT);
12902 return sc;
12903 }
12904
12905 /* When SRCPTR is non-NULL, output a simple loop to move memory pointed to
12906 by SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times; the overall
12907 size is COUNT, specified in bytes. When SRCPTR is NULL, output the
12908 equivalent loop to set memory to VALUE (supposed to be in MODE).
12909 
12910 The size is rounded down to a whole number of chunks moved at once.
12911 SRCMEM and DESTMEM provide MEM rtx'es to feed proper aliasing info. */
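/* A rough sketch of the emitted control flow (illustrative only), with
   chunk = GET_MODE_SIZE (MODE) * UNROLL:

	size = count & -chunk;
	if (size == 0) goto out;	   only when chunk == 1
	iter = 0;
     top:
	copy or set UNROLL pieces at DESTPTR + iter (and SRCPTR + iter);
	iter += chunk;
	if (iter < size) goto top;
	DESTPTR += iter;  SRCPTR += iter;
     out:  */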
12912
12913
12914 static void
12915 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
12916 rtx destptr, rtx srcptr, rtx value,
12917 rtx count, enum machine_mode mode, int unroll,
12918 int expected_size)
12919 {
12920 rtx out_label, top_label, iter, tmp;
12921 enum machine_mode iter_mode;
12922 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
12923 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
12924 rtx size;
12925 rtx x_addr;
12926 rtx y_addr;
12927 int i;
12928
12929 iter_mode = GET_MODE (count);
12930 if (iter_mode == VOIDmode)
12931 iter_mode = word_mode;
12932
12933 top_label = gen_label_rtx ();
12934 out_label = gen_label_rtx ();
12935 iter = gen_reg_rtx (iter_mode);
12936
12937 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
12938 NULL, 1, OPTAB_DIRECT);
12939 /* Those two should combine. */
12940 if (piece_size == const1_rtx)
12941 {
12942 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
12943 true, out_label);
12944 predict_jump (REG_BR_PROB_BASE * 10 / 100);
12945 }
12946 emit_move_insn (iter, const0_rtx);
12947
12948 emit_label (top_label);
12949
12950 tmp = convert_modes (Pmode, iter_mode, iter, true);
12951 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
12952 destmem = change_address (destmem, mode, x_addr);
12953
12954 if (srcmem)
12955 {
12956 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
12957 srcmem = change_address (srcmem, mode, y_addr);
12958
12959 /* When unrolling for chips that reorder memory reads and writes,
12960 we can save registers by using a single temporary.
12961 Also, using 4 temporaries is overkill in 32-bit mode. */
12962 if (!TARGET_64BIT && 0)
12963 {
12964 for (i = 0; i < unroll; i++)
12965 {
12966 if (i)
12967 {
12968 destmem =
12969 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12970 srcmem =
12971 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
12972 }
12973 emit_move_insn (destmem, srcmem);
12974 }
12975 }
12976 else
12977 {
12978 rtx tmpreg[4];
12979 gcc_assert (unroll <= 4);
12980 for (i = 0; i < unroll; i++)
12981 {
12982 tmpreg[i] = gen_reg_rtx (mode);
12983 if (i)
12984 {
12985 srcmem =
12986 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
12987 }
12988 emit_move_insn (tmpreg[i], srcmem);
12989 }
12990 for (i = 0; i < unroll; i++)
12991 {
12992 if (i)
12993 {
12994 destmem =
12995 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12996 }
12997 emit_move_insn (destmem, tmpreg[i]);
12998 }
12999 }
13000 }
13001 else
13002 for (i = 0; i < unroll; i++)
13003 {
13004 if (i)
13005 destmem =
13006 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
13007 emit_move_insn (destmem, value);
13008 }
13009
13010 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
13011 true, OPTAB_LIB_WIDEN);
13012 if (tmp != iter)
13013 emit_move_insn (iter, tmp);
13014
13015 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
13016 true, top_label);
13017 if (expected_size != -1)
13018 {
13019 expected_size /= GET_MODE_SIZE (mode) * unroll;
13020 if (expected_size == 0)
13021 predict_jump (0);
13022 else if (expected_size > REG_BR_PROB_BASE)
13023 predict_jump (REG_BR_PROB_BASE - 1);
13024 else
13025 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
13026 }
13027 else
13028 predict_jump (REG_BR_PROB_BASE * 80 / 100);
13029 iter = ix86_zero_extend_to_Pmode (iter);
13030 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
13031 true, OPTAB_LIB_WIDEN);
13032 if (tmp != destptr)
13033 emit_move_insn (destptr, tmp);
13034 if (srcptr)
13035 {
13036 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
13037 true, OPTAB_LIB_WIDEN);
13038 if (tmp != srcptr)
13039 emit_move_insn (srcptr, tmp);
13040 }
13041 emit_label (out_label);
13042 }
13043
13044 /* Output a "rep; mov" instruction.
13045 Arguments have the same meaning as for the previous function. */
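/* For example (illustration only): a known 64-byte copy with MODE == SImode
   scales the count down to 16 and emits a single rep-prefixed SImode move
   ("rep movsl"); with MODE == QImode the count is used unscaled.  */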
13046 static void
13047 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
13048 rtx destptr, rtx srcptr,
13049 rtx count,
13050 enum machine_mode mode)
13051 {
13052 rtx destexp;
13053 rtx srcexp;
13054 rtx countreg;
13055
13056 /* If the size is known, it is shorter to use rep movs. */
13057 if (mode == QImode && GET_CODE (count) == CONST_INT
13058 && !(INTVAL (count) & 3))
13059 mode = SImode;
13060
13061 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
13062 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
13063 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
13064 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
13065 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
13066 if (mode != QImode)
13067 {
13068 destexp = gen_rtx_ASHIFT (Pmode, countreg,
13069 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
13070 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
13071 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
13072 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
13073 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
13074 }
13075 else
13076 {
13077 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
13078 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
13079 }
13080 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
13081 destexp, srcexp));
13082 }
13083
13084 /* Output a "rep; stos" instruction.
13085 Arguments have the same meaning as for the previous function. */
13086 static void
13087 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
13088 rtx count,
13089 enum machine_mode mode)
13090 {
13091 rtx destexp;
13092 rtx countreg;
13093
13094 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
13095 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
13096 value = force_reg (mode, gen_lowpart (mode, value));
13097 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
13098 if (mode != QImode)
13099 {
13100 destexp = gen_rtx_ASHIFT (Pmode, countreg,
13101 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
13102 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
13103 }
13104 else
13105 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
13106 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
13107 }
13108
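/* Emit one MODE-sized piece of the copy, from SRCMEM at byte OFFSET to
   DESTMEM at byte OFFSET, using the strmov pattern.  */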
13109 static void
13110 emit_strmov (rtx destmem, rtx srcmem,
13111 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
13112 {
13113 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
13114 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
13115 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13116 }
13117
13118 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
13119 static void
13120 expand_movmem_epilogue (rtx destmem, rtx srcmem,
13121 rtx destptr, rtx srcptr, rtx count, int max_size)
13122 {
13123 rtx src, dest;
13124 if (GET_CODE (count) == CONST_INT)
13125 {
13126 HOST_WIDE_INT countval = INTVAL (count);
13127 int offset = 0;
13128
13129 if ((countval & 0x10) && max_size > 16)
13130 {
13131 if (TARGET_64BIT)
13132 {
13133 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13134 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
13135 }
13136 else
13137 gcc_unreachable ();
13138 offset += 16;
13139 }
13140 if ((countval & 0x08) && max_size > 8)
13141 {
13142 if (TARGET_64BIT)
13143 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13144 else
13145 {
13146 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
13147 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
13148 }
13149 offset += 8;
13150 }
13151 if ((countval & 0x04) && max_size > 4)
13152 {
13153 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
13154 offset += 4;
13155 }
13156 if ((countval & 0x02) && max_size > 2)
13157 {
13158 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
13159 offset += 2;
13160 }
13161 if ((countval & 0x01) && max_size > 1)
13162 {
13163 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
13164 offset += 1;
13165 }
13166 return;
13167 }
13168 if (max_size > 8)
13169 {
13170 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
13171 count, 1, OPTAB_DIRECT);
13172 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
13173 count, QImode, 1, 4);
13174 return;
13175 }
13176
13177 /* When single string operations are profitable, they cheaply advance the dest
13178 and src pointers for us. Otherwise we save code size by maintaining an
13179 offset (zero is readily available from the preceding rep operation) and
13180 using x86 addressing modes. */
13181 if (TARGET_SINGLE_STRINGOP)
13182 {
13183 if (max_size > 4)
13184 {
13185 rtx label = ix86_expand_aligntest (count, 4, true);
13186 src = change_address (srcmem, SImode, srcptr);
13187 dest = change_address (destmem, SImode, destptr);
13188 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13189 emit_label (label);
13190 LABEL_NUSES (label) = 1;
13191 }
13192 if (max_size > 2)
13193 {
13194 rtx label = ix86_expand_aligntest (count, 2, true);
13195 src = change_address (srcmem, HImode, srcptr);
13196 dest = change_address (destmem, HImode, destptr);
13197 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13198 emit_label (label);
13199 LABEL_NUSES (label) = 1;
13200 }
13201 if (max_size > 1)
13202 {
13203 rtx label = ix86_expand_aligntest (count, 1, true);
13204 src = change_address (srcmem, QImode, srcptr);
13205 dest = change_address (destmem, QImode, destptr);
13206 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13207 emit_label (label);
13208 LABEL_NUSES (label) = 1;
13209 }
13210 }
13211 else
13212 {
13213 rtx offset = force_reg (Pmode, const0_rtx);
13214 rtx tmp;
13215
13216 if (max_size > 4)
13217 {
13218 rtx label = ix86_expand_aligntest (count, 4, true);
13219 src = change_address (srcmem, SImode, srcptr);
13220 dest = change_address (destmem, SImode, destptr);
13221 emit_move_insn (dest, src);
13222 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
13223 true, OPTAB_LIB_WIDEN);
13224 if (tmp != offset)
13225 emit_move_insn (offset, tmp);
13226 emit_label (label);
13227 LABEL_NUSES (label) = 1;
13228 }
13229 if (max_size > 2)
13230 {
13231 rtx label = ix86_expand_aligntest (count, 2, true);
13232 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
13233 src = change_address (srcmem, HImode, tmp);
13234 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
13235 dest = change_address (destmem, HImode, tmp);
13236 emit_move_insn (dest, src);
13237 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
13238 true, OPTAB_LIB_WIDEN);
13239 if (tmp != offset)
13240 emit_move_insn (offset, tmp);
13241 emit_label (label);
13242 LABEL_NUSES (label) = 1;
13243 }
13244 if (max_size > 1)
13245 {
13246 rtx label = ix86_expand_aligntest (count, 1, true);
13247 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
13248 src = change_address (srcmem, QImode, tmp);
13249 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
13250 dest = change_address (destmem, QImode, tmp);
13251 emit_move_insn (dest, src);
13252 emit_label (label);
13253 LABEL_NUSES (label) = 1;
13254 }
13255 }
13256 }
13257
13258 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
13259 static void
13260 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
13261 rtx count, int max_size)
13262 {
13263 count =
13264 expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
13265 count, 1, OPTAB_DIRECT);
13266 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
13267 gen_lowpart (QImode, value), count, QImode,
13268 1, max_size / 2);
13269 }
13270
13271 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
13272 static void
13273 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
13274 {
13275 rtx dest;
13276 if (GET_CODE (count) == CONST_INT)
13277 {
13278 HOST_WIDE_INT countval = INTVAL (count);
13279 int offset = 0;
13280
13281 if ((countval & 0x10) && max_size > 16)
13282 {
13283 if (TARGET_64BIT)
13284 {
13285 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
13286 emit_insn (gen_strset (destptr, dest, value));
13287 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
13288 emit_insn (gen_strset (destptr, dest, value));
13289 }
13290 else
13291 gcc_unreachable ();
13292 offset += 16;
13293 }
13294 if ((countval & 0x08) && max_size > 8)
13295 {
13296 if (TARGET_64BIT)
13297 {
13298 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
13299 emit_insn (gen_strset (destptr, dest, value));
13300 }
13301 else
13302 {
13303 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
13304 emit_insn (gen_strset (destptr, dest, value));
13305 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
13306 emit_insn (gen_strset (destptr, dest, value));
13307 }
13308 offset += 8;
13309 }
13310 if ((countval & 0x04) && max_size > 4)
13311 {
13312 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
13313 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
13314 offset += 4;
13315 }
13316 if ((countval & 0x02) && max_size > 2)
13317 {
13318 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
13319 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
13320 offset += 2;
13321 }
13322 if ((countval & 0x01) && max_size > 1)
13323 {
13324 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
13325 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
13326 offset += 1;
13327 }
13328 return;
13329 }
13330 if (max_size > 32)
13331 {
13332 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
13333 return;
13334 }
13335 if (max_size > 16)
13336 {
13337 rtx label = ix86_expand_aligntest (count, 16, true);
13338 if (TARGET_64BIT)
13339 {
13340 dest = change_address (destmem, DImode, destptr);
13341 emit_insn (gen_strset (destptr, dest, value));
13342 emit_insn (gen_strset (destptr, dest, value));
13343 }
13344 else
13345 {
13346 dest = change_address (destmem, SImode, destptr);
13347 emit_insn (gen_strset (destptr, dest, value));
13348 emit_insn (gen_strset (destptr, dest, value));
13349 emit_insn (gen_strset (destptr, dest, value));
13350 emit_insn (gen_strset (destptr, dest, value));
13351 }
13352 emit_label (label);
13353 LABEL_NUSES (label) = 1;
13354 }
13355 if (max_size > 8)
13356 {
13357 rtx label = ix86_expand_aligntest (count, 8, true);
13358 if (TARGET_64BIT)
13359 {
13360 dest = change_address (destmem, DImode, destptr);
13361 emit_insn (gen_strset (destptr, dest, value));
13362 }
13363 else
13364 {
13365 dest = change_address (destmem, SImode, destptr);
13366 emit_insn (gen_strset (destptr, dest, value));
13367 emit_insn (gen_strset (destptr, dest, value));
13368 }
13369 emit_label (label);
13370 LABEL_NUSES (label) = 1;
13371 }
13372 if (max_size > 4)
13373 {
13374 rtx label = ix86_expand_aligntest (count, 4, true);
13375 dest = change_address (destmem, SImode, destptr);
13376 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
13377 emit_label (label);
13378 LABEL_NUSES (label) = 1;
13379 }
13380 if (max_size > 2)
13381 {
13382 rtx label = ix86_expand_aligntest (count, 2, true);
13383 dest = change_address (destmem, HImode, destptr);
13384 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
13385 emit_label (label);
13386 LABEL_NUSES (label) = 1;
13387 }
13388 if (max_size > 1)
13389 {
13390 rtx label = ix86_expand_aligntest (count, 1, true);
13391 dest = change_address (destmem, QImode, destptr);
13392 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
13393 emit_label (label);
13394 LABEL_NUSES (label) = 1;
13395 }
13396 }
13397
13398 /* Copy enough bytes from SRC to DEST to align DEST, known to be aligned
13399 by ALIGN, to DESIRED_ALIGNMENT. */
13400 static void
13401 expand_movmem_prologue (rtx destmem, rtx srcmem,
13402 rtx destptr, rtx srcptr, rtx count,
13403 int align, int desired_alignment)
13404 {
13405 if (align <= 1 && desired_alignment > 1)
13406 {
13407 rtx label = ix86_expand_aligntest (destptr, 1, false);
13408 srcmem = change_address (srcmem, QImode, srcptr);
13409 destmem = change_address (destmem, QImode, destptr);
13410 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13411 ix86_adjust_counter (count, 1);
13412 emit_label (label);
13413 LABEL_NUSES (label) = 1;
13414 }
13415 if (align <= 2 && desired_alignment > 2)
13416 {
13417 rtx label = ix86_expand_aligntest (destptr, 2, false);
13418 srcmem = change_address (srcmem, HImode, srcptr);
13419 destmem = change_address (destmem, HImode, destptr);
13420 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13421 ix86_adjust_counter (count, 2);
13422 emit_label (label);
13423 LABEL_NUSES (label) = 1;
13424 }
13425 if (align <= 4 && desired_alignment > 4)
13426 {
13427 rtx label = ix86_expand_aligntest (destptr, 4, false);
13428 srcmem = change_address (srcmem, SImode, srcptr);
13429 destmem = change_address (destmem, SImode, destptr);
13430 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13431 ix86_adjust_counter (count, 4);
13432 emit_label (label);
13433 LABEL_NUSES (label) = 1;
13434 }
13435 gcc_assert (desired_alignment <= 8);
13436 }
13437
13438 /* Store enough bytes at DEST to align DEST, known to be aligned by ALIGN,
13439 to DESIRED_ALIGNMENT. */
13440 static void
13441 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
13442 int align, int desired_alignment)
13443 {
13444 if (align <= 1 && desired_alignment > 1)
13445 {
13446 rtx label = ix86_expand_aligntest (destptr, 1, false);
13447 destmem = change_address (destmem, QImode, destptr);
13448 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
13449 ix86_adjust_counter (count, 1);
13450 emit_label (label);
13451 LABEL_NUSES (label) = 1;
13452 }
13453 if (align <= 2 && desired_alignment > 2)
13454 {
13455 rtx label = ix86_expand_aligntest (destptr, 2, false);
13456 destmem = change_address (destmem, HImode, destptr);
13457 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
13458 ix86_adjust_counter (count, 2);
13459 emit_label (label);
13460 LABEL_NUSES (label) = 1;
13461 }
13462 if (align <= 4 && desired_alignment > 4)
13463 {
13464 rtx label = ix86_expand_aligntest (destptr, 4, false);
13465 destmem = change_address (destmem, SImode, destptr);
13466 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
13467 ix86_adjust_counter (count, 4);
13468 emit_label (label);
13469 LABEL_NUSES (label) = 1;
13470 }
13471 gcc_assert (desired_alignment <= 8);
13472 }
13473
13474 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
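/* For instance (hypothetical cost table): with size entries
   {{256, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}} and
   EXPECTED_SIZE == 1024, the loop below picks rep_prefix_4_byte, the first
   entry whose maximum covers the expected size.  */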
13475 static enum stringop_alg
13476 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
13477 int *dynamic_check)
13478 {
13479 const struct stringop_algs * algs;
13480
13481 *dynamic_check = -1;
13482 if (memset)
13483 algs = &ix86_cost->memset[TARGET_64BIT != 0];
13484 else
13485 algs = &ix86_cost->memcpy[TARGET_64BIT != 0];
13486 if (stringop_alg != no_stringop)
13487 return stringop_alg;
13488 /* rep; movq or rep; movl is the smallest variant. */
13489 else if (optimize_size)
13490 {
13491 if (!count || (count & 3))
13492 return rep_prefix_1_byte;
13493 else
13494 return rep_prefix_4_byte;
13495 }
13496 /* Very tiny blocks are best handled via the loop; REP is expensive to set up.
13497 */
13498 else if (expected_size != -1 && expected_size < 4)
13499 return loop_1_byte;
13500 else if (expected_size != -1)
13501 {
13502 unsigned int i;
13503 enum stringop_alg alg = libcall;
13504 for (i = 0; i < NAX_STRINGOP_ALGS; i++)
13505 {
13506 gcc_assert (algs->size[i].max);
13507 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
13508 {
13509 if (algs->size[i].alg != libcall)
13510 alg = algs->size[i].alg;
13511 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
13512 last non-libcall inline algorithm. */
13513 if (TARGET_INLINE_ALL_STRINGOPS)
13514 {
13515 /* When the current size is best copied by a libcall,
13516 but we are still forced to inline, run the heuristic below
13517 that will pick code for medium-sized blocks. */
13518 if (alg != libcall)
13519 return alg;
13520 break;
13521 }
13522 else
13523 return algs->size[i].alg;
13524 }
13525 }
13526 gcc_assert (TARGET_INLINE_ALL_STRINGOPS);
13527 }
13528 /* When asked to inline the call anyway, try to pick a meaningful choice.
13529 We look for the maximal size of a block that is faster to copy by hand
13530 and handle blocks of at most that size, guessing that the average size
13531 will be roughly half of the block.
13532 
13533 If this turns out to be bad, we might simply specify the preferred
13534 choice in ix86_costs. */
13535 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
13536 && algs->unknown_size == libcall)
13537 {
13538 int max = -1;
13539 enum stringop_alg alg;
13540 int i;
13541
13542 for (i = 0; i < NAX_STRINGOP_ALGS; i++)
13543 if (algs->size[i].alg != libcall && algs->size[i].alg)
13544 max = algs->size[i].max;
13545 if (max == -1)
13546 max = 4096;
13547 alg = decide_alg (count, max / 2, memset, dynamic_check);
13548 gcc_assert (*dynamic_check == -1);
13549 gcc_assert (alg != libcall);
13550 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
13551 *dynamic_check = max;
13552 return alg;
13553 }
13554 return algs->unknown_size;
13555 }
13556
13557 /* Decide on alignment. We know that the operand is already aligned to ALIGN
13558 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
13559 static int
13560 decide_alignment (int align,
13561 enum stringop_alg alg,
13562 int expected_size)
13563 {
13564 int desired_align = 0;
13565 switch (alg)
13566 {
13567 case no_stringop:
13568 gcc_unreachable ();
13569 case loop:
13570 case unrolled_loop:
13571 desired_align = GET_MODE_SIZE (Pmode);
13572 break;
13573 case rep_prefix_8_byte:
13574 desired_align = 8;
13575 break;
13576 case rep_prefix_4_byte:
13577 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
13578 copying a whole cacheline at once. */
13579 if (TARGET_PENTIUMPRO)
13580 desired_align = 8;
13581 else
13582 desired_align = 4;
13583 break;
13584 case rep_prefix_1_byte:
13585 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
13586 copying a whole cacheline at once. */
13587 if (TARGET_PENTIUMPRO)
13588 desired_align = 8;
13589 else
13590 desired_align = 1;
13591 break;
13592 case loop_1_byte:
13593 desired_align = 1;
13594 break;
13595 case libcall:
13596 return 0;
13597 }
13598
13599 if (optimize_size)
13600 desired_align = 1;
13601 if (desired_align < align)
13602 desired_align = align;
13603 if (expected_size != -1 && expected_size < 4)
13604 desired_align = align;
13605 return desired_align;
13606 }
13607
13608 /* Expand string move (memcpy) operation. Use i386 string operations when
13609 profitable. ix86_expand_setmem contains similar code. */
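/* The emitted code has roughly this shape (illustrative only):

	if (count <= small_threshold) goto epilogue;	only when count is unknown
	if (count >= dynamic_check) { call memcpy; goto done; }   optional
	align the destination up to DESIRED_ALIGN	prologue
	bulk copy via a loop or a rep-prefixed move	main body
     epilogue:
	copy the remaining tail bytes
     done:  */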
13610 int
13611 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
13612 rtx expected_align_exp, rtx expected_size_exp)
13613 {
13614 rtx destreg;
13615 rtx srcreg;
13616 rtx label = NULL;
13617 rtx tmp;
13618 rtx jump_around_label = NULL;
13619 HOST_WIDE_INT align = 1;
13620 unsigned HOST_WIDE_INT count = 0;
13621 HOST_WIDE_INT expected_size = -1;
13622 int size_needed = 0;
13623 int desired_align = 0;
13624 enum stringop_alg alg;
13625 int dynamic_check;
13626 /* The precise placement of cld depends on whether stringops will be emitted
13627 in the prologue, the main copying body or the epilogue. This variable
13628 keeps track of whether cld has already been emitted. */
13629 bool cld_done = false;
13630
13631 if (GET_CODE (align_exp) == CONST_INT)
13632 align = INTVAL (align_exp);
13633 /* i386 can do misaligned accesses at a reasonably increased cost. */
13634 if (GET_CODE (expected_align_exp) == CONST_INT
13635 && INTVAL (expected_align_exp) > align)
13636 align = INTVAL (expected_align_exp);
13637 if (GET_CODE (count_exp) == CONST_INT)
13638 count = expected_size = INTVAL (count_exp);
13639 if (GET_CODE (expected_size_exp) == CONST_INT && count == 0)
13640 {
13641 expected_size = INTVAL (expected_size_exp);
13642 }
13643
13644 alg = decide_alg (count, expected_size, false, &dynamic_check);
13645 desired_align = decide_alignment (align, alg, expected_size);
13646
13647 if (!TARGET_ALIGN_STRINGOPS)
13648 align = desired_align;
13649
13650 if (alg == libcall)
13651 return 0;
13652 gcc_assert (alg != no_stringop);
13653 if (!count)
13654 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
13655 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
13656 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
13657 switch (alg)
13658 {
13659 case libcall:
13660 case no_stringop:
13661 gcc_unreachable ();
13662 case loop:
13663 size_needed = GET_MODE_SIZE (Pmode);
13664 break;
13665 case unrolled_loop:
13666 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
13667 break;
13668 case rep_prefix_8_byte:
13669 size_needed = 8;
13670 break;
13671 case rep_prefix_4_byte:
13672 size_needed = 4;
13673 break;
13674 case rep_prefix_1_byte:
13675 case loop_1_byte:
13676 size_needed = 1;
13677 break;
13678 }
13679
13680 /* Alignment code needs count to be in register. */
13681 if (GET_CODE (count_exp) == CONST_INT && desired_align > align)
13682 {
13683 enum machine_mode mode = SImode;
13684 if (TARGET_64BIT && (count & ~0xffffffff))
13685 mode = DImode;
13686 count_exp = force_reg (mode, count_exp);
13687 }
13688 gcc_assert (desired_align >= 1 && align >= 1);
13689 /* Ensure that alignment prologue won't copy past end of block. */
13690 if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
13691 && !count)
13692 {
13693 int size = MAX (size_needed - 1, desired_align - align);
13694 if (TARGET_SINGLE_STRINGOP)
13695 emit_insn (gen_cld ()), cld_done = true;
13696 label = gen_label_rtx ();
13697 emit_cmp_and_jump_insns (count_exp,
13698 GEN_INT (size),
13699 LEU, 0, GET_MODE (count_exp), 1, label);
13700 if (expected_size == -1 || expected_size < size)
13701 predict_jump (REG_BR_PROB_BASE * 60 / 100);
13702 else
13703 predict_jump (REG_BR_PROB_BASE * 20 / 100);
13704 }
13705 /* Emit code to decide at runtime whether a library call or inline code
13706 should be used. */
13707 if (dynamic_check != -1)
13708 {
13709 rtx hot_label = gen_label_rtx ();
13710 jump_around_label = gen_label_rtx ();
13711 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
13712 LEU, 0, GET_MODE (count_exp), 1, hot_label);
13713 predict_jump (REG_BR_PROB_BASE * 90 / 100);
13714 emit_block_move_via_libcall (dst, src, count_exp, false);
13715 emit_jump (jump_around_label);
13716 emit_label (hot_label);
13717 }
13718
13719
13720 /* Alignment prologue. */
13721 if (desired_align > align)
13722 {
13723 /* Except for the first move in the epilogue, we no longer know
13724 the constant offset in the aliasing info. It doesn't seem worth
13725 the pain to maintain it for the first move, so throw away
13726 the info early. */
13727 src = change_address (src, BLKmode, srcreg);
13728 dst = change_address (dst, BLKmode, destreg);
13729 if (TARGET_SINGLE_STRINGOP && !cld_done)
13730 emit_insn (gen_cld ()), cld_done = true;
13731 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
13732 desired_align);
13733 }
13734 if (label && size_needed == 1)
13735 {
13736 emit_label (label);
13737 LABEL_NUSES (label) = 1;
13738 label = NULL;
13739 }
13740
13741 /* Main body. */
13742 switch (alg)
13743 {
13744 case libcall:
13745 case no_stringop:
13746 gcc_unreachable ();
13747 case loop_1_byte:
13748 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13749 count_exp, QImode, 1, expected_size);
13750 break;
13751 case loop:
13752 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13753 count_exp, Pmode, 1, expected_size);
13754 break;
13755 case unrolled_loop:
13756 /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
13757 registers for 4 temporaries anyway. */
13758 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13759 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
13760 expected_size);
13761 break;
13762 case rep_prefix_8_byte:
13763 if (!cld_done)
13764 emit_insn (gen_cld ()), cld_done = true;
13765 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13766 DImode);
13767 break;
13768 case rep_prefix_4_byte:
13769 if (!cld_done)
13770 emit_insn (gen_cld ()), cld_done = true;
13771 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13772 SImode);
13773 break;
13774 case rep_prefix_1_byte:
13775 if (!cld_done)
13776 emit_insn (gen_cld ()), cld_done = true;
13777 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13778 QImode);
13779 break;
13780 }
13781 /* Properly adjust the offset of the src and dest memory for aliasing. */
13782 if (GET_CODE (count_exp) == CONST_INT)
13783 {
13784 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
13785 (count / size_needed) * size_needed);
13786 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
13787 (count / size_needed) * size_needed);
13788 }
13789 else
13790 {
13791 src = change_address (src, BLKmode, srcreg);
13792 dst = change_address (dst, BLKmode, destreg);
13793 }
13794
13795 /* Epilogue to copy the remaining bytes. */
13796 if (label)
13797 {
13798 if (size_needed < desired_align - align)
13799 {
13800 tmp =
13801 expand_simple_binop (GET_MODE (count_exp), AND, count_exp,
13802 GEN_INT (size_needed - 1), count_exp, 1,
13803 OPTAB_DIRECT);
13804 size_needed = desired_align - align + 1;
13805 if (tmp != count_exp)
13806 emit_move_insn (count_exp, tmp);
13807 }
13808 emit_label (label);
13809 LABEL_NUSES (label) = 1;
13810 }
13811 if (count_exp != const0_rtx && size_needed > 1)
13812 {
13813 if (TARGET_SINGLE_STRINGOP && !cld_done)
13814 emit_insn (gen_cld ()), cld_done = true;
13815 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
13816 size_needed);
13817 }
13818 if (jump_around_label)
13819 emit_label (jump_around_label);
13820 return 1;
13821 }
13822
13823 /* Helper function for memset. For the QImode value 0xXY produce
13824 0xXYXYXYXY of the width specified by MODE. This is essentially
13825 a multiplication by 0x01010101, but we can do slightly better than
13826 synth_mult by unwinding the sequence by hand on CPUs with
13827 slow multiply. */
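/* For example, promoting the constant 0x5A to SImode yields 0x5A5A5A5A:
   v = 0x5A; v |= v << 8 (0x5A5A); v |= v << 16 (0x5A5A5A5A).  For a
   non-constant value the same replication is done either by multiplying
   by 0x01010101 (0x0101010101010101 for DImode) or by the insv/shift-or
   sequence below, whichever the cost model prefers.  */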
13828 static rtx
13829 promote_duplicated_reg (enum machine_mode mode, rtx val)
13830 {
13831 enum machine_mode valmode = GET_MODE (val);
13832 rtx tmp;
13833 int nops = mode == DImode ? 3 : 2;
13834
13835 gcc_assert (mode == SImode || mode == DImode);
13836 if (val == const0_rtx)
13837 return copy_to_mode_reg (mode, const0_rtx);
13838 if (GET_CODE (val) == CONST_INT)
13839 {
13840 HOST_WIDE_INT v = INTVAL (val) & 255;
13841
13842 v |= v << 8;
13843 v |= v << 16;
13844 if (mode == DImode)
13845 v |= (v << 16) << 16;
13846 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
13847 }
13848
13849 if (valmode == VOIDmode)
13850 valmode = QImode;
13851 if (valmode != QImode)
13852 val = gen_lowpart (QImode, val);
13853 if (mode == QImode)
13854 return val;
13855 if (!TARGET_PARTIAL_REG_STALL)
13856 nops--;
13857 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
13858 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
13859 <= (ix86_cost->shift_const + ix86_cost->add) * nops
13860 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
13861 {
13862 rtx reg = convert_modes (mode, QImode, val, true);
13863 tmp = promote_duplicated_reg (mode, const1_rtx);
13864 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
13865 OPTAB_DIRECT);
13866 }
13867 else
13868 {
13869 rtx reg = convert_modes (mode, QImode, val, true);
13870
13871 if (!TARGET_PARTIAL_REG_STALL)
13872 if (mode == SImode)
13873 emit_insn (gen_movsi_insv_1 (reg, reg));
13874 else
13875 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
13876 else
13877 {
13878 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
13879 NULL, 1, OPTAB_DIRECT);
13880 reg =
13881 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13882 }
13883 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
13884 NULL, 1, OPTAB_DIRECT);
13885 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13886 if (mode == SImode)
13887 return reg;
13888 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
13889 NULL, 1, OPTAB_DIRECT);
13890 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13891 return reg;
13892 }
13893 }
13894
13895 /* Expand string set (memset) operation. Use i386 string operations when
13896 profitable. ix86_expand_movmem contains similar code. */
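/* The overall structure mirrors ix86_expand_movmem above, with one extra
   step: VAL_EXP is first replicated into every byte of a wide register
   (see promote_duplicated_reg) so that the main body can store word-sized
   chunks at a time.  */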
13897 int
13898 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
13899 rtx expected_align_exp, rtx expected_size_exp)
13900 {
13901 rtx destreg;
13902 rtx label = NULL;
13903 rtx tmp;
13904 rtx jump_around_label = NULL;
13905 HOST_WIDE_INT align = 1;
13906 unsigned HOST_WIDE_INT count = 0;
13907 HOST_WIDE_INT expected_size = -1;
13908 int size_needed = 0;
13909 int desired_align = 0;
13910 enum stringop_alg alg;
13911 /* The precise placement of cld depends on whether stringops will be emitted
13912 in the prologue, the main copying body or the epilogue. This variable
13913 keeps track of whether cld has already been emitted. */
13914 bool cld_done = false;
13915 rtx promoted_val = val_exp;
13916 bool force_loopy_epilogue = false;
13917 int dynamic_check;
13918
13919 if (GET_CODE (align_exp) == CONST_INT)
13920 align = INTVAL (align_exp);
13921 /* i386 can do misaligned accesses at a reasonably increased cost. */
13922 if (GET_CODE (expected_align_exp) == CONST_INT
13923 && INTVAL (expected_align_exp) > align)
13924 align = INTVAL (expected_align_exp);
13925 if (GET_CODE (count_exp) == CONST_INT)
13926 count = expected_size = INTVAL (count_exp);
13927 if (GET_CODE (expected_size_exp) == CONST_INT && count == 0)
13928 expected_size = INTVAL (expected_size_exp);
13929
13930 alg = decide_alg (count, expected_size, true, &dynamic_check);
13931 desired_align = decide_alignment (align, alg, expected_size);
13932
13933 if (!TARGET_ALIGN_STRINGOPS)
13934 align = desired_align;
13935
13936 if (alg == libcall)
13937 return 0;
13938 gcc_assert (alg != no_stringop);
13939 if (!count)
13940 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
13941 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
13942 switch (alg)
13943 {
13944 case libcall:
13945 case no_stringop:
13946 gcc_unreachable ();
13947 case loop:
13948 size_needed = GET_MODE_SIZE (Pmode);
13949 break;
13950 case unrolled_loop:
13951 size_needed = GET_MODE_SIZE (Pmode) * 4;
13952 break;
13953 case rep_prefix_8_byte:
13954 size_needed = 8;
13955 break;
13956 case rep_prefix_4_byte:
13957 size_needed = 4;
13958 break;
13959 case rep_prefix_1_byte:
13960 case loop_1_byte:
13961 size_needed = 1;
13962 break;
13963 }
13964 /* Alignment code needs count to be in register. */
13965 if (GET_CODE (count_exp) == CONST_INT && desired_align > align)
13966 {
13967 enum machine_mode mode = SImode;
13968 if (TARGET_64BIT && (count & ~0xffffffff))
13969 mode = DImode;
13970 count_exp = force_reg (mode, count_exp);
13971 }
13972 /* Ensure that alignment prologue won't copy past end of block. */
13973 if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
13974 && !count)
13975 {
13976 int size = MAX (size_needed - 1, desired_align - align);
13977 /* To improve performance on small blocks, we jump around the promoting
13978 code, so we need to use QImode accesses in the epilogue. */
13979 if (GET_CODE (val_exp) != CONST_INT && size_needed > 1)
13980 force_loopy_epilogue = true;
13981 else if (TARGET_SINGLE_STRINGOP)
13982 emit_insn (gen_cld ()), cld_done = true;
13983 label = gen_label_rtx ();
13984 emit_cmp_and_jump_insns (count_exp,
13985 GEN_INT (size),
13986 LEU, 0, GET_MODE (count_exp), 1, label);
13987 if (expected_size == -1 || expected_size <= size)
13988 predict_jump (REG_BR_PROB_BASE * 60 / 100);
13989 else
13990 predict_jump (REG_BR_PROB_BASE * 20 / 100);
13991 }
13992 if (dynamic_check != -1)
13993 {
13994 rtx hot_label = gen_label_rtx ();
13995 jump_around_label = gen_label_rtx ();
13996 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
13997 LEU, 0, GET_MODE (count_exp), 1, hot_label);
13998 predict_jump (REG_BR_PROB_BASE * 90 / 100);
13999 set_storage_via_libcall (dst, count_exp, val_exp, false);
14000 emit_jump (jump_around_label);
14001 emit_label (hot_label);
14002 }
14003 if (TARGET_64BIT
14004 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
14005 promoted_val = promote_duplicated_reg (DImode, val_exp);
14006 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
14007 promoted_val = promote_duplicated_reg (SImode, val_exp);
14008 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
14009 promoted_val = promote_duplicated_reg (HImode, val_exp);
14010 else
14011 promoted_val = val_exp;
14012 gcc_assert (desired_align >= 1 && align >= 1);
14013 if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
14014 && !count && !label)
14015 {
14016 int size = MAX (size_needed - 1, desired_align - align);
14017 if (TARGET_SINGLE_STRINGOP)
14018 emit_insn (gen_cld ()), cld_done = true;
14019 label = gen_label_rtx ();
14020 emit_cmp_and_jump_insns (count_exp,
14021 GEN_INT (size),
14022 LEU, 0, GET_MODE (count_exp), 1, label);
14023 if (expected_size == -1 || expected_size <= size)
14024 predict_jump (REG_BR_PROB_BASE * 60 / 100);
14025 else
14026 predict_jump (REG_BR_PROB_BASE * 20 / 100);
14027 }
14028 if (desired_align > align)
14029 {
14030 /* Except for the first move in the epilogue, we no longer know
14031 the constant offset in the aliasing info. It doesn't seem worth
14032 the pain to maintain it for the first move, so throw away
14033 the info early. */
14034 dst = change_address (dst, BLKmode, destreg);
14035 if (TARGET_SINGLE_STRINGOP && !cld_done)
14036 emit_insn (gen_cld ()), cld_done = true;
14037 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
14038 desired_align);
14039 }
14040 if (label && size_needed == 1)
14041 {
14042 emit_label (label);
14043 LABEL_NUSES (label) = 1;
14044 label = NULL;
14045 }
14046 switch (alg)
14047 {
14048 case libcall:
14049 case no_stringop:
14050 gcc_unreachable ();
14051 case loop_1_byte:
14052 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
14053 count_exp, QImode, 1, expected_size);
14054 break;
14055 case loop:
14056 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
14057 count_exp, Pmode, 1, expected_size);
14058 break;
14059 case unrolled_loop:
14060 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
14061 count_exp, Pmode, 4, expected_size);
14062 break;
14063 case rep_prefix_8_byte:
14064 if (!cld_done)
14065 emit_insn (gen_cld ()), cld_done = true;
14066 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
14067 DImode);
14068 break;
14069 case rep_prefix_4_byte:
14070 if (!cld_done)
14071 emit_insn (gen_cld ()), cld_done = true;
14072 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
14073 SImode);
14074 break;
14075 case rep_prefix_1_byte:
14076 if (!cld_done)
14077 emit_insn (gen_cld ()), cld_done = true;
14078 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
14079 QImode);
14080 break;
14081 }
14082 /* Properly adjust the offset of the dest memory for aliasing. */
14083 if (GET_CODE (count_exp) == CONST_INT)
14084 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
14085 (count / size_needed) * size_needed);
14086 else
14087 dst = change_address (dst, BLKmode, destreg);
14088
14089 if (label)
14090 {
14091 if (size_needed < desired_align - align)
14092 {
14093 tmp =
14094 expand_simple_binop (GET_MODE (count_exp), AND, count_exp,
14095 GEN_INT (size_needed - 1), count_exp, 1,
14096 OPTAB_DIRECT);
14097 size_needed = desired_align - align + 1;
14098 if (tmp != count_exp)
14099 emit_move_insn (count_exp, tmp);
14100 }
14101 emit_label (label);
14102 LABEL_NUSES (label) = 1;
14103 }
14104 if (count_exp != const0_rtx && size_needed > 1)
14105 {
14106 if (force_loopy_epilogue)
14107 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
14108 size_needed);
14109 else
14110 {
14111 if (TARGET_SINGLE_STRINGOP && !cld_done)
14112 emit_insn (gen_cld ()), cld_done = true;
14113 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
14114 size_needed);
14115 }
14116 }
14117 if (jump_around_label)
14118 emit_label (jump_around_label);
14119 return 1;
14120 }
14121
14122 /* Expand strlen. */
14123 int
14124 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
14125 {
14126 rtx addr, scratch1, scratch2, scratch3, scratch4;
14127
14128 /* The generic case of the strlen expander is long. Avoid its
14129 expansion unless TARGET_INLINE_ALL_STRINGOPS. */
14130
14131 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
14132 && !TARGET_INLINE_ALL_STRINGOPS
14133 && !optimize_size
14134 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
14135 return 0;
14136
14137 addr = force_reg (Pmode, XEXP (src, 0));
14138 scratch1 = gen_reg_rtx (Pmode);
14139
14140 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
14141 && !optimize_size)
14142 {
14143 /* Well it seems that some optimizer does not combine a call like
14144 foo(strlen(bar), strlen(bar));
14145 when the move and the subtraction are done here. It does calculate
14146 the length just once when these instructions are done inside
14147 output_strlen_unroll(). But I think that since &bar[strlen(bar)] is
14148 often used and I use one fewer register for the lifetime of
14149 output_strlen_unroll(), this is better. */
14150
14151 emit_move_insn (out, addr);
14152
14153 ix86_expand_strlensi_unroll_1 (out, src, align);
14154
14155 /* strlensi_unroll_1 returns the address of the zero at the end of
14156 the string, like memchr(), so compute the length by subtracting
14157 the start address. */
14158 if (TARGET_64BIT)
14159 emit_insn (gen_subdi3 (out, out, addr));
14160 else
14161 emit_insn (gen_subsi3 (out, out, addr));
14162 }
14163 else
14164 {
14165 rtx unspec;
14166 scratch2 = gen_reg_rtx (Pmode);
14167 scratch3 = gen_reg_rtx (Pmode);
14168 scratch4 = force_reg (Pmode, constm1_rtx);
14169
14170 emit_move_insn (scratch3, addr);
14171 eoschar = force_reg (QImode, eoschar);
14172
14173 emit_insn (gen_cld ());
14174 src = replace_equiv_address_nv (src, scratch3);
14175
14176 /* If .md starts supporting :P, this can be done in .md. */
14177 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
14178 scratch4), UNSPEC_SCAS);
14179 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
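/* SCRATCH1 now holds the final value of the count register: it started
   at -1 and was decremented once per scanned byte, including the
   terminating zero, so it equals -strlen - 2.  Hence
   strlen = ~SCRATCH1 - 1, which is computed below.  */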
14180 if (TARGET_64BIT)
14181 {
14182 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
14183 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
14184 }
14185 else
14186 {
14187 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
14188 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
14189 }
14190 }
14191 return 1;
14192 }
14193
14194 /* Expand the appropriate insns for doing strlen if not just doing
14195 repnz; scasb
14196
14197 out = result, initialized with the start address
14198 align_rtx = alignment of the address.
14199 scratch = scratch register, initialized with the start address when
14200 not aligned, otherwise undefined
14201
14202 This is just the body. It needs the initializations mentioned above and
14203 some address computing at the end. These things are done in i386.md. */
14204
14205 static void
14206 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
14207 {
14208 int align;
14209 rtx tmp;
14210 rtx align_2_label = NULL_RTX;
14211 rtx align_3_label = NULL_RTX;
14212 rtx align_4_label = gen_label_rtx ();
14213 rtx end_0_label = gen_label_rtx ();
14214 rtx mem;
14215 rtx tmpreg = gen_reg_rtx (SImode);
14216 rtx scratch = gen_reg_rtx (SImode);
14217 rtx cmp;
14218
14219 align = 0;
14220 if (GET_CODE (align_rtx) == CONST_INT)
14221 align = INTVAL (align_rtx);
14222
14223 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
14224
14225 /* Is there a known alignment and is it less than 4? */
14226 if (align < 4)
14227 {
14228 rtx scratch1 = gen_reg_rtx (Pmode);
14229 emit_move_insn (scratch1, out);
14230 /* Is there a known alignment and is it not 2? */
14231 if (align != 2)
14232 {
14233 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
14234 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
14235
14236 /* Leave just the 3 lower bits. */
14237 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
14238 NULL_RTX, 0, OPTAB_WIDEN);
14239
14240 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
14241 Pmode, 1, align_4_label);
14242 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
14243 Pmode, 1, align_2_label);
14244 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
14245 Pmode, 1, align_3_label);
14246 }
14247 else
14248 {
14249 /* Since the alignment is 2, we have to check 2 or 0 bytes;
14250 check whether it is aligned to 4 bytes. */
14251
14252 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
14253 NULL_RTX, 0, OPTAB_WIDEN);
14254
14255 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
14256 Pmode, 1, align_4_label);
14257 }
14258
14259 mem = change_address (src, QImode, out);
14260
14261 /* Now compare the bytes. */
14262
14263 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
14264 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
14265 QImode, 1, end_0_label);
14266
14267 /* Increment the address. */
14268 if (TARGET_64BIT)
14269 emit_insn (gen_adddi3 (out, out, const1_rtx));
14270 else
14271 emit_insn (gen_addsi3 (out, out, const1_rtx));
14272
14273 /* Not needed with an alignment of 2 */
14274 if (align != 2)
14275 {
14276 emit_label (align_2_label);
14277
14278 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
14279 end_0_label);
14280
14281 if (TARGET_64BIT)
14282 emit_insn (gen_adddi3 (out, out, const1_rtx));
14283 else
14284 emit_insn (gen_addsi3 (out, out, const1_rtx));
14285
14286 emit_label (align_3_label);
14287 }
14288
14289 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
14290 end_0_label);
14291
14292 if (TARGET_64BIT)
14293 emit_insn (gen_adddi3 (out, out, const1_rtx));
14294 else
14295 emit_insn (gen_addsi3 (out, out, const1_rtx));
14296 }
14297
14298 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
14299 align this loop; it only makes the program bigger and does not
14300 speed it up. */
14301 emit_label (align_4_label);
14302
14303 mem = change_address (src, SImode, out);
14304 emit_move_insn (scratch, mem);
14305 if (TARGET_64BIT)
14306 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
14307 else
14308 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
14309
14310 /* This formula yields a nonzero result iff one of the bytes is zero.
14311 This saves three branches inside the loop and many cycles. */
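/* Worked example (illustration only): for SCRATCH = 0x12003456,
   SCRATCH - 0x01010101 = 0x10ff3355, ~SCRATCH = 0xedffcba9, and
   0x10ff3355 & 0xedffcba9 & 0x80808080 = 0x00800000, flagging the zero
   byte; a word containing no zero byte yields 0.  */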
14312
14313 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
14314 emit_insn (gen_one_cmplsi2 (scratch, scratch));
14315 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
14316 emit_insn (gen_andsi3 (tmpreg, tmpreg,
14317 gen_int_mode (0x80808080, SImode)));
14318 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
14319 align_4_label);
14320
14321 if (TARGET_CMOVE)
14322 {
14323 rtx reg = gen_reg_rtx (SImode);
14324 rtx reg2 = gen_reg_rtx (Pmode);
14325 emit_move_insn (reg, tmpreg);
14326 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
14327
14328 /* If zero is not in the first two bytes, move two bytes forward. */
14329 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
14330 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14331 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
14332 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
14333 gen_rtx_IF_THEN_ELSE (SImode, tmp,
14334 reg,
14335 tmpreg)));
14336 /* Emit lea manually to avoid clobbering of flags. */
14337 emit_insn (gen_rtx_SET (SImode, reg2,
14338 gen_rtx_PLUS (Pmode, out, const2_rtx)));
14339
14340 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14341 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
14342 emit_insn (gen_rtx_SET (VOIDmode, out,
14343 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
14344 reg2,
14345 out)));
14346
14347 }
14348 else
14349 {
14350 rtx end_2_label = gen_label_rtx ();
14351 /* Is zero in the first two bytes? */
14352
14353 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
14354 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14355 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
14356 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
14357 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
14358 pc_rtx);
14359 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
14360 JUMP_LABEL (tmp) = end_2_label;
14361
14362 /* Not in the first two. Move two bytes forward. */
14363 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
14364 if (TARGET_64BIT)
14365 emit_insn (gen_adddi3 (out, out, const2_rtx));
14366 else
14367 emit_insn (gen_addsi3 (out, out, const2_rtx));
14368
14369 emit_label (end_2_label);
14370
14371 }
14372
14373 /* Avoid branch in fixing the byte. */
14374 tmpreg = gen_lowpart (QImode, tmpreg);
14375 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
14376 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, FLAGS_REG), const0_rtx);
14377 if (TARGET_64BIT)
14378 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
14379 else
14380 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
14381
14382 emit_label (end_0_label);
14383 }
14384
14385 void
14386 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
14387 rtx callarg2 ATTRIBUTE_UNUSED,
14388 rtx pop, int sibcall)
14389 {
14390 rtx use = NULL, call;
14391
14392 if (pop == const0_rtx)
14393 pop = NULL;
14394 gcc_assert (!TARGET_64BIT || !pop);
14395
14396 if (TARGET_MACHO && !TARGET_64BIT)
14397 {
14398 #if TARGET_MACHO
14399 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
14400 fnaddr = machopic_indirect_call_target (fnaddr);
14401 #endif
14402 }
14403 else
14404 {
14405 /* Static functions and indirect calls don't need the pic register. */
14406 if (! TARGET_64BIT && flag_pic
14407 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
14408 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
14409 use_reg (&use, pic_offset_table_rtx);
14410 }
14411
14412 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
14413 {
14414 rtx al = gen_rtx_REG (QImode, 0);
14415 emit_move_insn (al, callarg2);
14416 use_reg (&use, al);
14417 }
14418
14419 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
14420 {
14421 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
14422 fnaddr = gen_rtx_MEM (QImode, fnaddr);
14423 }
14424 if (sibcall && TARGET_64BIT
14425 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
14426 {
14427 rtx addr;
14428 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
14429 fnaddr = gen_rtx_REG (Pmode, R11_REG);
14430 emit_move_insn (fnaddr, addr);
14431 fnaddr = gen_rtx_MEM (QImode, fnaddr);
14432 }
14433
14434 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
14435 if (retval)
14436 call = gen_rtx_SET (VOIDmode, retval, call);
14437 if (pop)
14438 {
14439 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
14440 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
14441 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
14442 }
14443
14444 call = emit_call_insn (call);
14445 if (use)
14446 CALL_INSN_FUNCTION_USAGE (call) = use;
14447 }
14448
14449 \f
14450 /* Clear stack slot assignments remembered from previous functions.
14451 This is called from INIT_EXPANDERS once before RTL is emitted for each
14452 function. */
14453
14454 static struct machine_function *
14455 ix86_init_machine_status (void)
14456 {
14457 struct machine_function *f;
14458
14459 f = ggc_alloc_cleared (sizeof (struct machine_function));
14460 f->use_fast_prologue_epilogue_nregs = -1;
14461 f->tls_descriptor_call_expanded_p = 0;
14462
14463 return f;
14464 }
14465
14466 /* Return a MEM corresponding to a stack slot with mode MODE.
14467 Allocate a new slot if necessary.
14468
14469 The RTL for a function can have several slots available: N is
14470 which slot to use. */
14471
14472 rtx
14473 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
14474 {
14475 struct stack_local_entry *s;
14476
14477 gcc_assert (n < MAX_386_STACK_LOCALS);
14478
14479 for (s = ix86_stack_locals; s; s = s->next)
14480 if (s->mode == mode && s->n == n)
14481 return copy_rtx (s->rtl);
14482
14483 s = (struct stack_local_entry *)
14484 ggc_alloc (sizeof (struct stack_local_entry));
14485 s->n = n;
14486 s->mode = mode;
14487 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
14488
14489 s->next = ix86_stack_locals;
14490 ix86_stack_locals = s;
14491 return s->rtl;
14492 }
14493
14494 /* Construct the SYMBOL_REF for the tls_get_addr function. */
14495
14496 static GTY(()) rtx ix86_tls_symbol;
14497 rtx
14498 ix86_tls_get_addr (void)
14499 {
14500
14501 if (!ix86_tls_symbol)
14502 {
14503 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
14504 (TARGET_ANY_GNU_TLS
14505 && !TARGET_64BIT)
14506 ? "___tls_get_addr"
14507 : "__tls_get_addr");
14508 }
14509
14510 return ix86_tls_symbol;
14511 }
14512
14513 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
14514
14515 static GTY(()) rtx ix86_tls_module_base_symbol;
14516 rtx
14517 ix86_tls_module_base (void)
14518 {
14519
14520 if (!ix86_tls_module_base_symbol)
14521 {
14522 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
14523 "_TLS_MODULE_BASE_");
14524 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
14525 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
14526 }
14527
14528 return ix86_tls_module_base_symbol;
14529 }
14530 \f
14531 /* Calculate the length of the memory address in the instruction
14532 encoding. Does not include the one-byte modrm, opcode, or prefix. */
14533
14534 int
14535 memory_address_length (rtx addr)
14536 {
14537 struct ix86_address parts;
14538 rtx base, index, disp;
14539 int len;
14540 int ok;
14541
14542 if (GET_CODE (addr) == PRE_DEC
14543 || GET_CODE (addr) == POST_INC
14544 || GET_CODE (addr) == PRE_MODIFY
14545 || GET_CODE (addr) == POST_MODIFY)
14546 return 0;
14547
14548 ok = ix86_decompose_address (addr, &parts);
14549 gcc_assert (ok);
14550
14551 if (parts.base && GET_CODE (parts.base) == SUBREG)
14552 parts.base = SUBREG_REG (parts.base);
14553 if (parts.index && GET_CODE (parts.index) == SUBREG)
14554 parts.index = SUBREG_REG (parts.index);
14555
14556 base = parts.base;
14557 index = parts.index;
14558 disp = parts.disp;
14559 len = 0;
14560
14561 /* Rule of thumb:
14562 - esp as the base always wants an index,
14563 - ebp as the base always wants a displacement. */
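/* A few illustrative encodings (added as a sketch, in AT&T syntax; the counts
   follow this function's convention of excluding the modrm, opcode and
   prefix bytes):
       (%eax)          -> 0   plain register indirect
       (%esp)          -> 1   needs a SIB byte
       8(%ebp)         -> 1   disp8
       foo             -> 4   disp32 (direct addressing)
       4(%ebx,%esi,2)  -> 2   disp8 + SIB byte  */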
14564
14565 /* Register Indirect. */
14566 if (base && !index && !disp)
14567 {
14568 /* esp (for its index) and ebp (for its displacement) need
14569 the two-byte modrm form. */
14570 if (addr == stack_pointer_rtx
14571 || addr == arg_pointer_rtx
14572 || addr == frame_pointer_rtx
14573 || addr == hard_frame_pointer_rtx)
14574 len = 1;
14575 }
14576
14577 /* Direct Addressing. */
14578 else if (disp && !base && !index)
14579 len = 4;
14580
14581 else
14582 {
14583 /* Find the length of the displacement constant. */
14584 if (disp)
14585 {
14586 if (base && satisfies_constraint_K (disp))
14587 len = 1;
14588 else
14589 len = 4;
14590 }
14591 /* ebp always wants a displacement. */
14592 else if (base == hard_frame_pointer_rtx)
14593 len = 1;
14594
14595 /* An index requires the two-byte modrm form.... */
14596 if (index
14597 /* ...like esp, which always wants an index. */
14598 || base == stack_pointer_rtx
14599 || base == arg_pointer_rtx
14600 || base == frame_pointer_rtx)
14601 len += 1;
14602 }
14603
14604 return len;
14605 }
14606
14607 /* Compute the default value for the "length_immediate" attribute. When
14608 SHORTFORM is set, expect that the insn has an 8-bit immediate alternative. */
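/* For instance (added illustration): "addl $3, %eax" has a sign-extended
   8-bit immediate alternative, so with SHORTFORM the immediate contributes
   1 byte, while "addl $300, %eax" needs a full 32-bit immediate and
   contributes 4 bytes.  */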
14609 int
14610 ix86_attr_length_immediate_default (rtx insn, int shortform)
14611 {
14612 int len = 0;
14613 int i;
14614 extract_insn_cached (insn);
14615 for (i = recog_data.n_operands - 1; i >= 0; --i)
14616 if (CONSTANT_P (recog_data.operand[i]))
14617 {
14618 gcc_assert (!len);
14619 if (shortform && satisfies_constraint_K (recog_data.operand[i]))
14620 len = 1;
14621 else
14622 {
14623 switch (get_attr_mode (insn))
14624 {
14625 case MODE_QI:
14626 len += 1;
14627 break;
14628 case MODE_HI:
14629 len += 2;
14630 break;
14631 case MODE_SI:
14632 len += 4;
14633 break;
14634 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
14635 case MODE_DI:
14636 len += 4;
14637 break;
14638 default:
14639 fatal_insn ("unknown insn mode", insn);
14640 }
14641 }
14642 }
14643 return len;
14644 }
14645 /* Compute default value for "length_address" attribute. */
14646 int
14647 ix86_attr_length_address_default (rtx insn)
14648 {
14649 int i;
14650
14651 if (get_attr_type (insn) == TYPE_LEA)
14652 {
14653 rtx set = PATTERN (insn);
14654
14655 if (GET_CODE (set) == PARALLEL)
14656 set = XVECEXP (set, 0, 0);
14657
14658 gcc_assert (GET_CODE (set) == SET);
14659
14660 return memory_address_length (SET_SRC (set));
14661 }
14662
14663 extract_insn_cached (insn);
14664 for (i = recog_data.n_operands - 1; i >= 0; --i)
14665 if (GET_CODE (recog_data.operand[i]) == MEM)
14666 {
14667 return memory_address_length (XEXP (recog_data.operand[i], 0));
14668 }
14670 return 0;
14671 }
14672 \f
14673 /* Return the maximum number of instructions a cpu can issue. */
14674
14675 static int
14676 ix86_issue_rate (void)
14677 {
14678 switch (ix86_tune)
14679 {
14680 case PROCESSOR_PENTIUM:
14681 case PROCESSOR_K6:
14682 return 2;
14683
14684 case PROCESSOR_PENTIUMPRO:
14685 case PROCESSOR_PENTIUM4:
14686 case PROCESSOR_ATHLON:
14687 case PROCESSOR_K8:
14688 case PROCESSOR_NOCONA:
14689 case PROCESSOR_GENERIC32:
14690 case PROCESSOR_GENERIC64:
14691 return 3;
14692
14693 case PROCESSOR_CORE2:
14694 return 4;
14695
14696 default:
14697 return 1;
14698 }
14699 }
14700
14701 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads the flags
14702 set by DEP_INSN and nothing else set by DEP_INSN. */
14703
14704 static int
14705 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
14706 {
14707 rtx set, set2;
14708
14709 /* Simplify the test for uninteresting insns. */
14710 if (insn_type != TYPE_SETCC
14711 && insn_type != TYPE_ICMOV
14712 && insn_type != TYPE_FCMOV
14713 && insn_type != TYPE_IBR)
14714 return 0;
14715
14716 if ((set = single_set (dep_insn)) != 0)
14717 {
14718 set = SET_DEST (set);
14719 set2 = NULL_RTX;
14720 }
14721 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
14722 && XVECLEN (PATTERN (dep_insn), 0) == 2
14723 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
14724 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
14725 {
14726 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
14727 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
14728 }
14729 else
14730 return 0;
14731
14732 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
14733 return 0;
14734
14735 /* This test is true if the dependent insn reads the flags but
14736 not any other potentially set register. */
14737 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
14738 return 0;
14739
14740 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
14741 return 0;
14742
14743 return 1;
14744 }
14745
14746 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
14747 address with operands set by DEP_INSN. */
14748
14749 static int
14750 ix86_agi_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
14751 {
14752 rtx addr;
14753
14754 if (insn_type == TYPE_LEA
14755 && TARGET_PENTIUM)
14756 {
14757 addr = PATTERN (insn);
14758
14759 if (GET_CODE (addr) == PARALLEL)
14760 addr = XVECEXP (addr, 0, 0);
14761
14762 gcc_assert (GET_CODE (addr) == SET);
14763
14764 addr = SET_SRC (addr);
14765 }
14766 else
14767 {
14768 int i;
14769 extract_insn_cached (insn);
14770 for (i = recog_data.n_operands - 1; i >= 0; --i)
14771 if (GET_CODE (recog_data.operand[i]) == MEM)
14772 {
14773 addr = XEXP (recog_data.operand[i], 0);
14774 goto found;
14775 }
14776 return 0;
14777 found:;
14778 }
14779
14780 return modified_in_p (addr, dep_insn);
14781 }
14782
14783 static int
14784 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
14785 {
14786 enum attr_type insn_type, dep_insn_type;
14787 enum attr_memory memory;
14788 rtx set, set2;
14789 int dep_insn_code_number;
14790
14791 /* Anti and output dependencies have zero cost on all CPUs. */
14792 if (REG_NOTE_KIND (link) != 0)
14793 return 0;
14794
14795 dep_insn_code_number = recog_memoized (dep_insn);
14796
14797 /* If we can't recognize the insns, we can't really do anything. */
14798 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
14799 return cost;
14800
14801 insn_type = get_attr_type (insn);
14802 dep_insn_type = get_attr_type (dep_insn);
14803
14804 switch (ix86_tune)
14805 {
14806 case PROCESSOR_PENTIUM:
14807 /* Address Generation Interlock adds a cycle of latency. */
14808 if (ix86_agi_dependent (insn, dep_insn, insn_type))
14809 cost += 1;
14810
14811 /* ??? Compares pair with jump/setcc. */
14812 if (ix86_flags_dependent (insn, dep_insn, insn_type))
14813 cost = 0;
14814
14815 /* Floating point stores require the value to be ready one cycle earlier. */
14816 if (insn_type == TYPE_FMOV
14817 && get_attr_memory (insn) == MEMORY_STORE
14818 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14819 cost += 1;
14820 break;
14821
14822 case PROCESSOR_PENTIUMPRO:
14823 memory = get_attr_memory (insn);
14824
14825 /* INT->FP conversion is expensive. */
14826 if (get_attr_fp_int_src (dep_insn))
14827 cost += 5;
14828
14829 /* There is one cycle extra latency between an FP op and a store. */
14830 if (insn_type == TYPE_FMOV
14831 && (set = single_set (dep_insn)) != NULL_RTX
14832 && (set2 = single_set (insn)) != NULL_RTX
14833 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
14834 && GET_CODE (SET_DEST (set2)) == MEM)
14835 cost += 1;
14836
14837 /* Model the ability of the reorder buffer to hide the latency of a load
14838 by executing it in parallel with the previous instruction, provided the
14839 previous instruction is not needed to compute the address. */
14840 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14841 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14842 {
14843 /* Claim moves take one cycle, as the core can issue one load
14844 at a time and the next load can start a cycle later. */
14845 if (dep_insn_type == TYPE_IMOV
14846 || dep_insn_type == TYPE_FMOV)
14847 cost = 1;
14848 else if (cost > 1)
14849 cost--;
14850 }
14851 break;
14852
14853 case PROCESSOR_K6:
14854 memory = get_attr_memory (insn);
14855
14856 /* The esp dependency is resolved before the instruction is really
14857 finished. */
14858 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
14859 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
14860 return 1;
14861
14862 /* INT->FP conversion is expensive. */
14863 if (get_attr_fp_int_src (dep_insn))
14864 cost += 5;
14865
14866 /* Model the ability of the reorder buffer to hide the latency of a load
14867 by executing it in parallel with the previous instruction, provided the
14868 previous instruction is not needed to compute the address. */
14869 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14870 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14871 {
14872 /* Claim moves take one cycle, as the core can issue one load
14873 at a time and the next load can start a cycle later. */
14874 if (dep_insn_type == TYPE_IMOV
14875 || dep_insn_type == TYPE_FMOV)
14876 cost = 1;
14877 else if (cost > 2)
14878 cost -= 2;
14879 else
14880 cost = 1;
14881 }
14882 break;
14883
14884 case PROCESSOR_ATHLON:
14885 case PROCESSOR_K8:
14886 case PROCESSOR_GENERIC32:
14887 case PROCESSOR_GENERIC64:
14888 memory = get_attr_memory (insn);
14889
14890 /* Model the ability of the reorder buffer to hide the latency of a load
14891 by executing it in parallel with the previous instruction, provided the
14892 previous instruction is not needed to compute the address. */
14893 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14894 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14895 {
14896 enum attr_unit unit = get_attr_unit (insn);
14897 int loadcost = 3;
14898
14899 /* Because of the difference between the length of integer and
14900 floating unit pipeline preparation stages, the memory operands
14901 for floating point are cheaper.
14902
14903 ??? For Athlon the difference is most probably 2. */
14904 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
14905 loadcost = 3;
14906 else
14907 loadcost = TARGET_ATHLON ? 2 : 0;
14908
14909 if (cost >= loadcost)
14910 cost -= loadcost;
14911 else
14912 cost = 0;
14913 }
14914
14915 default:
14916 break;
14917 }
14918
14919 return cost;
14920 }
14921
14922 /* How many alternative schedules to try. This should be as wide as the
14923 scheduling freedom in the DFA, but no wider. Making this value too
14924 large results in extra work for the scheduler. */
14925
14926 static int
14927 ia32_multipass_dfa_lookahead (void)
14928 {
14929 if (ix86_tune == PROCESSOR_PENTIUM)
14930 return 2;
14931
14932 if (ix86_tune == PROCESSOR_PENTIUMPRO
14933 || ix86_tune == PROCESSOR_K6)
14934 return 1;
14935
14936 else
14937 return 0;
14938 }
14939
14940 \f
14941 /* Compute the alignment given to a constant that is being placed in memory.
14942 EXP is the constant and ALIGN is the alignment that the object would
14943 ordinarily have.
14944 The value of this function is used instead of that alignment to align
14945 the object. */
14946
14947 int
14948 ix86_constant_alignment (tree exp, int align)
14949 {
14950 if (TREE_CODE (exp) == REAL_CST)
14951 {
14952 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
14953 return 64;
14954 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
14955 return 128;
14956 }
14957 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
14958 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
14959 return BITS_PER_WORD;
14960
14961 return align;
14962 }
14963
14964 /* Compute the alignment for a static variable.
14965 TYPE is the data type, and ALIGN is the alignment that
14966 the object would ordinarily have. The value of this function is used
14967 instead of that alignment to align the object. */
14968
14969 int
14970 ix86_data_alignment (tree type, int align)
14971 {
14972 int max_align = optimize_size ? BITS_PER_WORD : 256;
14973
14974 if (AGGREGATE_TYPE_P (type)
14975 && TYPE_SIZE (type)
14976 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
14977 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
14978 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
14979 && align < max_align)
14980 align = max_align;
14981
14982 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
14983 to a 16-byte boundary. */
14984 if (TARGET_64BIT)
14985 {
14986 if (AGGREGATE_TYPE_P (type)
14987 && TYPE_SIZE (type)
14988 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
14989 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
14990 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
14991 return 128;
14992 }
14993
14994 if (TREE_CODE (type) == ARRAY_TYPE)
14995 {
14996 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
14997 return 64;
14998 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
14999 return 128;
15000 }
15001 else if (TREE_CODE (type) == COMPLEX_TYPE)
15002 {
15003
15004 if (TYPE_MODE (type) == DCmode && align < 64)
15005 return 64;
15006 if (TYPE_MODE (type) == XCmode && align < 128)
15007 return 128;
15008 }
15009 else if ((TREE_CODE (type) == RECORD_TYPE
15010 || TREE_CODE (type) == UNION_TYPE
15011 || TREE_CODE (type) == QUAL_UNION_TYPE)
15012 && TYPE_FIELDS (type))
15013 {
15014 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
15015 return 64;
15016 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
15017 return 128;
15018 }
15019 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
15020 || TREE_CODE (type) == INTEGER_TYPE)
15021 {
15022 if (TYPE_MODE (type) == DFmode && align < 64)
15023 return 64;
15024 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
15025 return 128;
15026 }
15027
15028 return align;
15029 }
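/* For example (added illustration): when not optimizing for size, a static
   "double d[4]" (256 bits) has its alignment raised to 256 bits by the
   max_align test above, while a lone "double" still gets at least 64-bit
   alignment through the REAL_TYPE case.  */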
15030
15031 /* Compute the alignment for a local variable.
15032 TYPE is the data type, and ALIGN is the alignment that
15033 the object would ordinarily have. The value of this macro is used
15034 instead of that alignment to align the object. */
15035
15036 int
15037 ix86_local_alignment (tree type, int align)
15038 {
15039 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
15040 to a 16-byte boundary. */
15041 if (TARGET_64BIT)
15042 {
15043 if (AGGREGATE_TYPE_P (type)
15044 && TYPE_SIZE (type)
15045 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
15046 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
15047 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
15048 return 128;
15049 }
15050 if (TREE_CODE (type) == ARRAY_TYPE)
15051 {
15052 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
15053 return 64;
15054 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
15055 return 128;
15056 }
15057 else if (TREE_CODE (type) == COMPLEX_TYPE)
15058 {
15059 if (TYPE_MODE (type) == DCmode && align < 64)
15060 return 64;
15061 if (TYPE_MODE (type) == XCmode && align < 128)
15062 return 128;
15063 }
15064 else if ((TREE_CODE (type) == RECORD_TYPE
15065 || TREE_CODE (type) == UNION_TYPE
15066 || TREE_CODE (type) == QUAL_UNION_TYPE)
15067 && TYPE_FIELDS (type))
15068 {
15069 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
15070 return 64;
15071 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
15072 return 128;
15073 }
15074 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
15075 || TREE_CODE (type) == INTEGER_TYPE)
15076 {
15077
15078 if (TYPE_MODE (type) == DFmode && align < 64)
15079 return 64;
15080 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
15081 return 128;
15082 }
15083 return align;
15084 }
15085 \f
15086 /* Emit RTL insns to initialize the variable parts of a trampoline.
15087 FNADDR is an RTX for the address of the function's pure code.
15088 CXT is an RTX for the static chain value for the function. */
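/* Sketch of the code emitted below (added for illustration; the byte values
   correspond to the emit_move_insn calls in this function):

     32-bit:  b9 <cxt32>      movl   $cxt, %ecx       static chain
              e9 <disp32>     jmp    fnaddr           disp = fnaddr - (tramp + 10)

     64-bit:  41 bb <imm32>   movl   $fnaddr, %r11d   (or 49 bb <imm64>, movabs)
              49 ba <imm64>   movabs $cxt, %r10       static chain
              49 ff e3        jmp    *%r11  */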
15089 void
15090 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
15091 {
15092 if (!TARGET_64BIT)
15093 {
15094 /* Compute offset from the end of the jmp to the target function. */
15095 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
15096 plus_constant (tramp, 10),
15097 NULL_RTX, 1, OPTAB_DIRECT);
15098 emit_move_insn (gen_rtx_MEM (QImode, tramp),
15099 gen_int_mode (0xb9, QImode));
15100 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
15101 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
15102 gen_int_mode (0xe9, QImode));
15103 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
15104 }
15105 else
15106 {
15107 int offset = 0;
15108 /* Try to load address using shorter movl instead of movabs.
15109 We may want to support movq for kernel mode, but the kernel does not use
15110 trampolines at the moment. */
15111 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
15112 {
15113 fnaddr = copy_to_mode_reg (DImode, fnaddr);
15114 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15115 gen_int_mode (0xbb41, HImode));
15116 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
15117 gen_lowpart (SImode, fnaddr));
15118 offset += 6;
15119 }
15120 else
15121 {
15122 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15123 gen_int_mode (0xbb49, HImode));
15124 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
15125 fnaddr);
15126 offset += 10;
15127 }
15128 /* Load static chain using movabs to r10. */
15129 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15130 gen_int_mode (0xba49, HImode));
15131 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
15132 cxt);
15133 offset += 10;
15134 /* Jump to r11. */
15135 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15136 gen_int_mode (0xff49, HImode));
15137 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
15138 gen_int_mode (0xe3, QImode));
15139 offset += 3;
15140 gcc_assert (offset <= TRAMPOLINE_SIZE);
15141 }
15142
15143 #ifdef ENABLE_EXECUTE_STACK
15144 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
15145 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
15146 #endif
15147 }
15148 \f
15149 /* Codes for all the SSE/MMX builtins. */
15150 enum ix86_builtins
15151 {
15152 IX86_BUILTIN_ADDPS,
15153 IX86_BUILTIN_ADDSS,
15154 IX86_BUILTIN_DIVPS,
15155 IX86_BUILTIN_DIVSS,
15156 IX86_BUILTIN_MULPS,
15157 IX86_BUILTIN_MULSS,
15158 IX86_BUILTIN_SUBPS,
15159 IX86_BUILTIN_SUBSS,
15160
15161 IX86_BUILTIN_CMPEQPS,
15162 IX86_BUILTIN_CMPLTPS,
15163 IX86_BUILTIN_CMPLEPS,
15164 IX86_BUILTIN_CMPGTPS,
15165 IX86_BUILTIN_CMPGEPS,
15166 IX86_BUILTIN_CMPNEQPS,
15167 IX86_BUILTIN_CMPNLTPS,
15168 IX86_BUILTIN_CMPNLEPS,
15169 IX86_BUILTIN_CMPNGTPS,
15170 IX86_BUILTIN_CMPNGEPS,
15171 IX86_BUILTIN_CMPORDPS,
15172 IX86_BUILTIN_CMPUNORDPS,
15173 IX86_BUILTIN_CMPEQSS,
15174 IX86_BUILTIN_CMPLTSS,
15175 IX86_BUILTIN_CMPLESS,
15176 IX86_BUILTIN_CMPNEQSS,
15177 IX86_BUILTIN_CMPNLTSS,
15178 IX86_BUILTIN_CMPNLESS,
15179 IX86_BUILTIN_CMPNGTSS,
15180 IX86_BUILTIN_CMPNGESS,
15181 IX86_BUILTIN_CMPORDSS,
15182 IX86_BUILTIN_CMPUNORDSS,
15183
15184 IX86_BUILTIN_COMIEQSS,
15185 IX86_BUILTIN_COMILTSS,
15186 IX86_BUILTIN_COMILESS,
15187 IX86_BUILTIN_COMIGTSS,
15188 IX86_BUILTIN_COMIGESS,
15189 IX86_BUILTIN_COMINEQSS,
15190 IX86_BUILTIN_UCOMIEQSS,
15191 IX86_BUILTIN_UCOMILTSS,
15192 IX86_BUILTIN_UCOMILESS,
15193 IX86_BUILTIN_UCOMIGTSS,
15194 IX86_BUILTIN_UCOMIGESS,
15195 IX86_BUILTIN_UCOMINEQSS,
15196
15197 IX86_BUILTIN_CVTPI2PS,
15198 IX86_BUILTIN_CVTPS2PI,
15199 IX86_BUILTIN_CVTSI2SS,
15200 IX86_BUILTIN_CVTSI642SS,
15201 IX86_BUILTIN_CVTSS2SI,
15202 IX86_BUILTIN_CVTSS2SI64,
15203 IX86_BUILTIN_CVTTPS2PI,
15204 IX86_BUILTIN_CVTTSS2SI,
15205 IX86_BUILTIN_CVTTSS2SI64,
15206
15207 IX86_BUILTIN_MAXPS,
15208 IX86_BUILTIN_MAXSS,
15209 IX86_BUILTIN_MINPS,
15210 IX86_BUILTIN_MINSS,
15211
15212 IX86_BUILTIN_LOADUPS,
15213 IX86_BUILTIN_STOREUPS,
15214 IX86_BUILTIN_MOVSS,
15215
15216 IX86_BUILTIN_MOVHLPS,
15217 IX86_BUILTIN_MOVLHPS,
15218 IX86_BUILTIN_LOADHPS,
15219 IX86_BUILTIN_LOADLPS,
15220 IX86_BUILTIN_STOREHPS,
15221 IX86_BUILTIN_STORELPS,
15222
15223 IX86_BUILTIN_MASKMOVQ,
15224 IX86_BUILTIN_MOVMSKPS,
15225 IX86_BUILTIN_PMOVMSKB,
15226
15227 IX86_BUILTIN_MOVNTPS,
15228 IX86_BUILTIN_MOVNTQ,
15229
15230 IX86_BUILTIN_LOADDQU,
15231 IX86_BUILTIN_STOREDQU,
15232
15233 IX86_BUILTIN_PACKSSWB,
15234 IX86_BUILTIN_PACKSSDW,
15235 IX86_BUILTIN_PACKUSWB,
15236
15237 IX86_BUILTIN_PADDB,
15238 IX86_BUILTIN_PADDW,
15239 IX86_BUILTIN_PADDD,
15240 IX86_BUILTIN_PADDQ,
15241 IX86_BUILTIN_PADDSB,
15242 IX86_BUILTIN_PADDSW,
15243 IX86_BUILTIN_PADDUSB,
15244 IX86_BUILTIN_PADDUSW,
15245 IX86_BUILTIN_PSUBB,
15246 IX86_BUILTIN_PSUBW,
15247 IX86_BUILTIN_PSUBD,
15248 IX86_BUILTIN_PSUBQ,
15249 IX86_BUILTIN_PSUBSB,
15250 IX86_BUILTIN_PSUBSW,
15251 IX86_BUILTIN_PSUBUSB,
15252 IX86_BUILTIN_PSUBUSW,
15253
15254 IX86_BUILTIN_PAND,
15255 IX86_BUILTIN_PANDN,
15256 IX86_BUILTIN_POR,
15257 IX86_BUILTIN_PXOR,
15258
15259 IX86_BUILTIN_PAVGB,
15260 IX86_BUILTIN_PAVGW,
15261
15262 IX86_BUILTIN_PCMPEQB,
15263 IX86_BUILTIN_PCMPEQW,
15264 IX86_BUILTIN_PCMPEQD,
15265 IX86_BUILTIN_PCMPGTB,
15266 IX86_BUILTIN_PCMPGTW,
15267 IX86_BUILTIN_PCMPGTD,
15268
15269 IX86_BUILTIN_PMADDWD,
15270
15271 IX86_BUILTIN_PMAXSW,
15272 IX86_BUILTIN_PMAXUB,
15273 IX86_BUILTIN_PMINSW,
15274 IX86_BUILTIN_PMINUB,
15275
15276 IX86_BUILTIN_PMULHUW,
15277 IX86_BUILTIN_PMULHW,
15278 IX86_BUILTIN_PMULLW,
15279
15280 IX86_BUILTIN_PSADBW,
15281 IX86_BUILTIN_PSHUFW,
15282
15283 IX86_BUILTIN_PSLLW,
15284 IX86_BUILTIN_PSLLD,
15285 IX86_BUILTIN_PSLLQ,
15286 IX86_BUILTIN_PSRAW,
15287 IX86_BUILTIN_PSRAD,
15288 IX86_BUILTIN_PSRLW,
15289 IX86_BUILTIN_PSRLD,
15290 IX86_BUILTIN_PSRLQ,
15291 IX86_BUILTIN_PSLLWI,
15292 IX86_BUILTIN_PSLLDI,
15293 IX86_BUILTIN_PSLLQI,
15294 IX86_BUILTIN_PSRAWI,
15295 IX86_BUILTIN_PSRADI,
15296 IX86_BUILTIN_PSRLWI,
15297 IX86_BUILTIN_PSRLDI,
15298 IX86_BUILTIN_PSRLQI,
15299
15300 IX86_BUILTIN_PUNPCKHBW,
15301 IX86_BUILTIN_PUNPCKHWD,
15302 IX86_BUILTIN_PUNPCKHDQ,
15303 IX86_BUILTIN_PUNPCKLBW,
15304 IX86_BUILTIN_PUNPCKLWD,
15305 IX86_BUILTIN_PUNPCKLDQ,
15306
15307 IX86_BUILTIN_SHUFPS,
15308
15309 IX86_BUILTIN_RCPPS,
15310 IX86_BUILTIN_RCPSS,
15311 IX86_BUILTIN_RSQRTPS,
15312 IX86_BUILTIN_RSQRTSS,
15313 IX86_BUILTIN_SQRTPS,
15314 IX86_BUILTIN_SQRTSS,
15315
15316 IX86_BUILTIN_UNPCKHPS,
15317 IX86_BUILTIN_UNPCKLPS,
15318
15319 IX86_BUILTIN_ANDPS,
15320 IX86_BUILTIN_ANDNPS,
15321 IX86_BUILTIN_ORPS,
15322 IX86_BUILTIN_XORPS,
15323
15324 IX86_BUILTIN_EMMS,
15325 IX86_BUILTIN_LDMXCSR,
15326 IX86_BUILTIN_STMXCSR,
15327 IX86_BUILTIN_SFENCE,
15328
15329 /* 3DNow! Original */
15330 IX86_BUILTIN_FEMMS,
15331 IX86_BUILTIN_PAVGUSB,
15332 IX86_BUILTIN_PF2ID,
15333 IX86_BUILTIN_PFACC,
15334 IX86_BUILTIN_PFADD,
15335 IX86_BUILTIN_PFCMPEQ,
15336 IX86_BUILTIN_PFCMPGE,
15337 IX86_BUILTIN_PFCMPGT,
15338 IX86_BUILTIN_PFMAX,
15339 IX86_BUILTIN_PFMIN,
15340 IX86_BUILTIN_PFMUL,
15341 IX86_BUILTIN_PFRCP,
15342 IX86_BUILTIN_PFRCPIT1,
15343 IX86_BUILTIN_PFRCPIT2,
15344 IX86_BUILTIN_PFRSQIT1,
15345 IX86_BUILTIN_PFRSQRT,
15346 IX86_BUILTIN_PFSUB,
15347 IX86_BUILTIN_PFSUBR,
15348 IX86_BUILTIN_PI2FD,
15349 IX86_BUILTIN_PMULHRW,
15350
15351 /* 3DNow! Athlon Extensions */
15352 IX86_BUILTIN_PF2IW,
15353 IX86_BUILTIN_PFNACC,
15354 IX86_BUILTIN_PFPNACC,
15355 IX86_BUILTIN_PI2FW,
15356 IX86_BUILTIN_PSWAPDSI,
15357 IX86_BUILTIN_PSWAPDSF,
15358
15359 /* SSE2 */
15360 IX86_BUILTIN_ADDPD,
15361 IX86_BUILTIN_ADDSD,
15362 IX86_BUILTIN_DIVPD,
15363 IX86_BUILTIN_DIVSD,
15364 IX86_BUILTIN_MULPD,
15365 IX86_BUILTIN_MULSD,
15366 IX86_BUILTIN_SUBPD,
15367 IX86_BUILTIN_SUBSD,
15368
15369 IX86_BUILTIN_CMPEQPD,
15370 IX86_BUILTIN_CMPLTPD,
15371 IX86_BUILTIN_CMPLEPD,
15372 IX86_BUILTIN_CMPGTPD,
15373 IX86_BUILTIN_CMPGEPD,
15374 IX86_BUILTIN_CMPNEQPD,
15375 IX86_BUILTIN_CMPNLTPD,
15376 IX86_BUILTIN_CMPNLEPD,
15377 IX86_BUILTIN_CMPNGTPD,
15378 IX86_BUILTIN_CMPNGEPD,
15379 IX86_BUILTIN_CMPORDPD,
15380 IX86_BUILTIN_CMPUNORDPD,
15381 IX86_BUILTIN_CMPNEPD,
15382 IX86_BUILTIN_CMPEQSD,
15383 IX86_BUILTIN_CMPLTSD,
15384 IX86_BUILTIN_CMPLESD,
15385 IX86_BUILTIN_CMPNEQSD,
15386 IX86_BUILTIN_CMPNLTSD,
15387 IX86_BUILTIN_CMPNLESD,
15388 IX86_BUILTIN_CMPORDSD,
15389 IX86_BUILTIN_CMPUNORDSD,
15390 IX86_BUILTIN_CMPNESD,
15391
15392 IX86_BUILTIN_COMIEQSD,
15393 IX86_BUILTIN_COMILTSD,
15394 IX86_BUILTIN_COMILESD,
15395 IX86_BUILTIN_COMIGTSD,
15396 IX86_BUILTIN_COMIGESD,
15397 IX86_BUILTIN_COMINEQSD,
15398 IX86_BUILTIN_UCOMIEQSD,
15399 IX86_BUILTIN_UCOMILTSD,
15400 IX86_BUILTIN_UCOMILESD,
15401 IX86_BUILTIN_UCOMIGTSD,
15402 IX86_BUILTIN_UCOMIGESD,
15403 IX86_BUILTIN_UCOMINEQSD,
15404
15405 IX86_BUILTIN_MAXPD,
15406 IX86_BUILTIN_MAXSD,
15407 IX86_BUILTIN_MINPD,
15408 IX86_BUILTIN_MINSD,
15409
15410 IX86_BUILTIN_ANDPD,
15411 IX86_BUILTIN_ANDNPD,
15412 IX86_BUILTIN_ORPD,
15413 IX86_BUILTIN_XORPD,
15414
15415 IX86_BUILTIN_SQRTPD,
15416 IX86_BUILTIN_SQRTSD,
15417
15418 IX86_BUILTIN_UNPCKHPD,
15419 IX86_BUILTIN_UNPCKLPD,
15420
15421 IX86_BUILTIN_SHUFPD,
15422
15423 IX86_BUILTIN_LOADUPD,
15424 IX86_BUILTIN_STOREUPD,
15425 IX86_BUILTIN_MOVSD,
15426
15427 IX86_BUILTIN_LOADHPD,
15428 IX86_BUILTIN_LOADLPD,
15429
15430 IX86_BUILTIN_CVTDQ2PD,
15431 IX86_BUILTIN_CVTDQ2PS,
15432
15433 IX86_BUILTIN_CVTPD2DQ,
15434 IX86_BUILTIN_CVTPD2PI,
15435 IX86_BUILTIN_CVTPD2PS,
15436 IX86_BUILTIN_CVTTPD2DQ,
15437 IX86_BUILTIN_CVTTPD2PI,
15438
15439 IX86_BUILTIN_CVTPI2PD,
15440 IX86_BUILTIN_CVTSI2SD,
15441 IX86_BUILTIN_CVTSI642SD,
15442
15443 IX86_BUILTIN_CVTSD2SI,
15444 IX86_BUILTIN_CVTSD2SI64,
15445 IX86_BUILTIN_CVTSD2SS,
15446 IX86_BUILTIN_CVTSS2SD,
15447 IX86_BUILTIN_CVTTSD2SI,
15448 IX86_BUILTIN_CVTTSD2SI64,
15449
15450 IX86_BUILTIN_CVTPS2DQ,
15451 IX86_BUILTIN_CVTPS2PD,
15452 IX86_BUILTIN_CVTTPS2DQ,
15453
15454 IX86_BUILTIN_MOVNTI,
15455 IX86_BUILTIN_MOVNTPD,
15456 IX86_BUILTIN_MOVNTDQ,
15457
15458 /* SSE2 MMX */
15459 IX86_BUILTIN_MASKMOVDQU,
15460 IX86_BUILTIN_MOVMSKPD,
15461 IX86_BUILTIN_PMOVMSKB128,
15462
15463 IX86_BUILTIN_PACKSSWB128,
15464 IX86_BUILTIN_PACKSSDW128,
15465 IX86_BUILTIN_PACKUSWB128,
15466
15467 IX86_BUILTIN_PADDB128,
15468 IX86_BUILTIN_PADDW128,
15469 IX86_BUILTIN_PADDD128,
15470 IX86_BUILTIN_PADDQ128,
15471 IX86_BUILTIN_PADDSB128,
15472 IX86_BUILTIN_PADDSW128,
15473 IX86_BUILTIN_PADDUSB128,
15474 IX86_BUILTIN_PADDUSW128,
15475 IX86_BUILTIN_PSUBB128,
15476 IX86_BUILTIN_PSUBW128,
15477 IX86_BUILTIN_PSUBD128,
15478 IX86_BUILTIN_PSUBQ128,
15479 IX86_BUILTIN_PSUBSB128,
15480 IX86_BUILTIN_PSUBSW128,
15481 IX86_BUILTIN_PSUBUSB128,
15482 IX86_BUILTIN_PSUBUSW128,
15483
15484 IX86_BUILTIN_PAND128,
15485 IX86_BUILTIN_PANDN128,
15486 IX86_BUILTIN_POR128,
15487 IX86_BUILTIN_PXOR128,
15488
15489 IX86_BUILTIN_PAVGB128,
15490 IX86_BUILTIN_PAVGW128,
15491
15492 IX86_BUILTIN_PCMPEQB128,
15493 IX86_BUILTIN_PCMPEQW128,
15494 IX86_BUILTIN_PCMPEQD128,
15495 IX86_BUILTIN_PCMPGTB128,
15496 IX86_BUILTIN_PCMPGTW128,
15497 IX86_BUILTIN_PCMPGTD128,
15498
15499 IX86_BUILTIN_PMADDWD128,
15500
15501 IX86_BUILTIN_PMAXSW128,
15502 IX86_BUILTIN_PMAXUB128,
15503 IX86_BUILTIN_PMINSW128,
15504 IX86_BUILTIN_PMINUB128,
15505
15506 IX86_BUILTIN_PMULUDQ,
15507 IX86_BUILTIN_PMULUDQ128,
15508 IX86_BUILTIN_PMULHUW128,
15509 IX86_BUILTIN_PMULHW128,
15510 IX86_BUILTIN_PMULLW128,
15511
15512 IX86_BUILTIN_PSADBW128,
15513 IX86_BUILTIN_PSHUFHW,
15514 IX86_BUILTIN_PSHUFLW,
15515 IX86_BUILTIN_PSHUFD,
15516
15517 IX86_BUILTIN_PSLLW128,
15518 IX86_BUILTIN_PSLLD128,
15519 IX86_BUILTIN_PSLLQ128,
15520 IX86_BUILTIN_PSRAW128,
15521 IX86_BUILTIN_PSRAD128,
15522 IX86_BUILTIN_PSRLW128,
15523 IX86_BUILTIN_PSRLD128,
15524 IX86_BUILTIN_PSRLQ128,
15525 IX86_BUILTIN_PSLLDQI128,
15526 IX86_BUILTIN_PSLLWI128,
15527 IX86_BUILTIN_PSLLDI128,
15528 IX86_BUILTIN_PSLLQI128,
15529 IX86_BUILTIN_PSRAWI128,
15530 IX86_BUILTIN_PSRADI128,
15531 IX86_BUILTIN_PSRLDQI128,
15532 IX86_BUILTIN_PSRLWI128,
15533 IX86_BUILTIN_PSRLDI128,
15534 IX86_BUILTIN_PSRLQI128,
15535
15536 IX86_BUILTIN_PUNPCKHBW128,
15537 IX86_BUILTIN_PUNPCKHWD128,
15538 IX86_BUILTIN_PUNPCKHDQ128,
15539 IX86_BUILTIN_PUNPCKHQDQ128,
15540 IX86_BUILTIN_PUNPCKLBW128,
15541 IX86_BUILTIN_PUNPCKLWD128,
15542 IX86_BUILTIN_PUNPCKLDQ128,
15543 IX86_BUILTIN_PUNPCKLQDQ128,
15544
15545 IX86_BUILTIN_CLFLUSH,
15546 IX86_BUILTIN_MFENCE,
15547 IX86_BUILTIN_LFENCE,
15548
15549 /* Prescott New Instructions. */
15550 IX86_BUILTIN_ADDSUBPS,
15551 IX86_BUILTIN_HADDPS,
15552 IX86_BUILTIN_HSUBPS,
15553 IX86_BUILTIN_MOVSHDUP,
15554 IX86_BUILTIN_MOVSLDUP,
15555 IX86_BUILTIN_ADDSUBPD,
15556 IX86_BUILTIN_HADDPD,
15557 IX86_BUILTIN_HSUBPD,
15558 IX86_BUILTIN_LDDQU,
15559
15560 IX86_BUILTIN_MONITOR,
15561 IX86_BUILTIN_MWAIT,
15562
15563 /* SSSE3. */
15564 IX86_BUILTIN_PHADDW,
15565 IX86_BUILTIN_PHADDD,
15566 IX86_BUILTIN_PHADDSW,
15567 IX86_BUILTIN_PHSUBW,
15568 IX86_BUILTIN_PHSUBD,
15569 IX86_BUILTIN_PHSUBSW,
15570 IX86_BUILTIN_PMADDUBSW,
15571 IX86_BUILTIN_PMULHRSW,
15572 IX86_BUILTIN_PSHUFB,
15573 IX86_BUILTIN_PSIGNB,
15574 IX86_BUILTIN_PSIGNW,
15575 IX86_BUILTIN_PSIGND,
15576 IX86_BUILTIN_PALIGNR,
15577 IX86_BUILTIN_PABSB,
15578 IX86_BUILTIN_PABSW,
15579 IX86_BUILTIN_PABSD,
15580
15581 IX86_BUILTIN_PHADDW128,
15582 IX86_BUILTIN_PHADDD128,
15583 IX86_BUILTIN_PHADDSW128,
15584 IX86_BUILTIN_PHSUBW128,
15585 IX86_BUILTIN_PHSUBD128,
15586 IX86_BUILTIN_PHSUBSW128,
15587 IX86_BUILTIN_PMADDUBSW128,
15588 IX86_BUILTIN_PMULHRSW128,
15589 IX86_BUILTIN_PSHUFB128,
15590 IX86_BUILTIN_PSIGNB128,
15591 IX86_BUILTIN_PSIGNW128,
15592 IX86_BUILTIN_PSIGND128,
15593 IX86_BUILTIN_PALIGNR128,
15594 IX86_BUILTIN_PABSB128,
15595 IX86_BUILTIN_PABSW128,
15596 IX86_BUILTIN_PABSD128,
15597
15598 IX86_BUILTIN_VEC_INIT_V2SI,
15599 IX86_BUILTIN_VEC_INIT_V4HI,
15600 IX86_BUILTIN_VEC_INIT_V8QI,
15601 IX86_BUILTIN_VEC_EXT_V2DF,
15602 IX86_BUILTIN_VEC_EXT_V2DI,
15603 IX86_BUILTIN_VEC_EXT_V4SF,
15604 IX86_BUILTIN_VEC_EXT_V4SI,
15605 IX86_BUILTIN_VEC_EXT_V8HI,
15606 IX86_BUILTIN_VEC_EXT_V2SI,
15607 IX86_BUILTIN_VEC_EXT_V4HI,
15608 IX86_BUILTIN_VEC_SET_V8HI,
15609 IX86_BUILTIN_VEC_SET_V4HI,
15610
15611 IX86_BUILTIN_MAX
15612 };
15613
15614 /* Table for the ix86 builtin decls. */
15615 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
15616
15617 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Do so
15618 only if the target_flags include one of MASK. Stores the function decl
15619 in the ix86_builtins array.
15620 Returns the function decl, or NULL_TREE if the builtin was not added. */
15621
15622 static inline tree
15623 def_builtin (int mask, const char *name, tree type, enum ix86_builtins code)
15624 {
15625 tree decl = NULL_TREE;
15626
15627 if (mask & target_flags
15628 && (!(mask & MASK_64BIT) || TARGET_64BIT))
15629 {
15630 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
15631 NULL, NULL_TREE);
15632 ix86_builtins[(int) code] = decl;
15633 }
15634
15635 return decl;
15636 }
15637
15638 /* Like def_builtin, but also marks the function decl "const". */
15639
15640 static inline tree
15641 def_builtin_const (int mask, const char *name, tree type,
15642 enum ix86_builtins code)
15643 {
15644 tree decl = def_builtin (mask, name, type, code);
15645 if (decl)
15646 TREE_READONLY (decl) = 1;
15647 return decl;
15648 }
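/* A call of roughly this shape appears later in this file (the type-node
   name below is only illustrative):

     def_builtin (MASK_SSE, "__builtin_ia32_loadups",
                  v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);

   It registers the builtin only when the SSE target flag is enabled;
   def_builtin_const additionally sets TREE_READONLY so the builtin behaves
   like a "const" function for optimization purposes.  */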
15649
15650 /* Bits for builtin_description.flag. */
15651
15652 /* Set when we don't support the comparison natively: the operands must be
15653 swapped so that the comparison code given in the table can be used instead. */
15654 #define BUILTIN_DESC_SWAP_OPERANDS 1
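/* For example (see the bdesc_2arg table below): there is no native
   "compare greater" SSE instruction, so __builtin_ia32_cmpgtps is listed
   with comparison code LT plus BUILTIN_DESC_SWAP_OPERANDS; the expander is
   expected to swap the two operands and emit cmpltps, since a > b is
   equivalent to b < a.  */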
15655
15656 struct builtin_description
15657 {
15658 const unsigned int mask;
15659 const enum insn_code icode;
15660 const char *const name;
15661 const enum ix86_builtins code;
15662 const enum rtx_code comparison;
15663 const unsigned int flag;
15664 };
15665
15666 static const struct builtin_description bdesc_comi[] =
15667 {
15668 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
15669 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
15670 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
15671 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
15672 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
15673 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
15674 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
15675 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
15676 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
15677 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
15678 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
15679 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
15680 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
15681 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
15682 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
15683 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
15684 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
15685 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
15686 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
15687 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
15688 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
15689 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
15690 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
15691 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
15692 };
15693
15694 static const struct builtin_description bdesc_2arg[] =
15695 {
15696 /* SSE */
15697 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
15698 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
15699 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
15700 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
15701 { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
15702 { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
15703 { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
15704 { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
15705
15706 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
15707 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
15708 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
15709 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
15710 BUILTIN_DESC_SWAP_OPERANDS },
15711 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
15712 BUILTIN_DESC_SWAP_OPERANDS },
15713 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
15714 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
15715 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
15716 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
15717 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
15718 BUILTIN_DESC_SWAP_OPERANDS },
15719 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
15720 BUILTIN_DESC_SWAP_OPERANDS },
15721 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
15722 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
15723 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
15724 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
15725 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
15726 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
15727 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
15728 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
15729 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
15730 BUILTIN_DESC_SWAP_OPERANDS },
15731 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
15732 BUILTIN_DESC_SWAP_OPERANDS },
15733 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
15734
15735 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
15736 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
15737 { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
15738 { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
15739
15740 { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
15741 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
15742 { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
15743 { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
15744
15745 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
15746 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
15747 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
15748 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
15749 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
15750
15751 /* MMX */
15752 { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
15753 { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
15754 { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
15755 { MASK_SSE2, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
15756 { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
15757 { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
15758 { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
15759 { MASK_SSE2, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
15760
15761 { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
15762 { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
15763 { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
15764 { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
15765 { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
15766 { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
15767 { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
15768 { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
15769
15770 { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
15771 { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
15772 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
15773
15774 { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
15775 { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
15776 { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
15777 { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
15778
15779 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
15780 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
15781
15782 { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
15783 { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
15784 { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
15785 { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
15786 { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
15787 { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
15788
15789 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
15790 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
15791 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
15792 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
15793
15794 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
15795 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
15796 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
15797 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
15798 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
15799 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
15800
15801 /* Special. */
15802 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
15803 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
15804 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
15805
15806 { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
15807 { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
15808 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
15809
15810 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
15811 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
15812 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
15813 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
15814 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
15815 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
15816
15817 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
15818 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
15819 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
15820 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
15821 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
15822 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
15823
15824 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
15825 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
15826 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
15827 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
15828
15829 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
15830 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
15831
15832 /* SSE2 */
15833 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
15834 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
15835 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
15836 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
15837 { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
15838 { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
15839 { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
15840 { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
15841
15842 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
15843 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
15844 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
15845 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
15846 BUILTIN_DESC_SWAP_OPERANDS },
15847 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
15848 BUILTIN_DESC_SWAP_OPERANDS },
15849 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
15850 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
15851 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
15852 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
15853 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
15854 BUILTIN_DESC_SWAP_OPERANDS },
15855 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
15856 BUILTIN_DESC_SWAP_OPERANDS },
15857 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
15858 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
15859 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
15860 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
15861 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
15862 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
15863 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
15864 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
15865 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
15866
15867 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
15868 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
15869 { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
15870 { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
15871
15872 { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
15873 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
15874 { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
15875 { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
15876
15877 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
15878 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
15879 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
15880
15881 /* SSE2 MMX */
15882 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
15883 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
15884 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
15885 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
15886 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
15887 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
15888 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
15889 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
15890
15891 { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
15892 { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
15893 { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
15894 { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
15895 { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
15896 { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
15897 { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
15898 { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
15899
15900 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
15901 { MASK_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
15902
15903 { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
15904 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
15905 { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
15906 { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
15907
15908 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
15909 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
15910
15911 { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
15912 { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
15913 { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
15914 { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
15915 { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
15916 { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
15917
15918 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
15919 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
15920 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
15921 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
15922
15923 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
15924 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
15925 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
15926 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
15927 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
15928 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
15929 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
15930 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
15931
15932 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
15933 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
15934 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
15935
15936 { MASK_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
15937 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
15938
15939 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
15940 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
15941
15942 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
15943 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
15944 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
15945
15946 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
15947 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
15948 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
15949
15950 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
15951 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
15952
15953 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
15954
15955 { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
15956 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
15957 { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
15958 { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
15959
15960 /* SSE3 */
15961 { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
15962 { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
15963 { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
15964 { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
15965 { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
15966 { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 },
15967
15968 /* SSSE3 */
15969 { MASK_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, 0, 0 },
15970 { MASK_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, 0, 0 },
15971 { MASK_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, 0, 0 },
15972 { MASK_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, 0, 0 },
15973 { MASK_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, 0, 0 },
15974 { MASK_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, 0, 0 },
15975 { MASK_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, 0, 0 },
15976 { MASK_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, 0, 0 },
15977 { MASK_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, 0, 0 },
15978 { MASK_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, 0, 0 },
15979 { MASK_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, 0, 0 },
15980 { MASK_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, 0, 0 },
15981 { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv8hi3, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, 0, 0 },
15982 { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv4hi3, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, 0, 0 },
15983 { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, 0, 0 },
15984 { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, 0, 0 },
15985 { MASK_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, 0, 0 },
15986 { MASK_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, 0, 0 },
15987 { MASK_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, 0, 0 },
15988 { MASK_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, 0, 0 },
15989 { MASK_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, 0, 0 },
15990 { MASK_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, 0, 0 },
15991 { MASK_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, 0, 0 },
15992 { MASK_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, 0, 0 }
15993 };
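/* Illustrative note, not part of the tables: each entry above ties a
   target mask, a named insn pattern and an IX86_BUILTIN_* code together.
   Assuming the stock <tmmintrin.h> wrapper, the "__builtin_ia32_phaddw128"
   entry is reached from user code along the lines of

        #include <tmmintrin.h>

        __m128i
        hadd16 (__m128i x, __m128i y)
        {
          return _mm_hadd_epi16 (x, y);
        }

   which the header forwards to the builtin on __v8hi operands.  */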
15994
15995 static const struct builtin_description bdesc_1arg[] =
15996 {
15997 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
15998 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
15999
16000 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
16001 { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
16002 { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
16003
16004 { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
16005 { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
16006 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
16007 { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
16008 { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
16009 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
16010
16011 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
16012 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
16013
16014 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
16015
16016 { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
16017 { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
16018
16019 { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
16020 { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
16021 { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
16022 { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
16023 { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
16024
16025 { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
16026
16027 { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
16028 { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
16029 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
16030 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
16031
16032 { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
16033 { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
16034 { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
16035
16036 /* SSE3 */
16037 { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
16038 { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
16039
16040 /* SSSE3 */
16041 { MASK_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, 0, 0 },
16042 { MASK_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, 0, 0 },
16043 { MASK_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, 0, 0 },
16044 { MASK_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, 0, 0 },
16045 { MASK_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, 0, 0 },
16046 { MASK_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, 0, 0 },
16047 };
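/* Similarly for the one-operand table: the "__builtin_ia32_pabsd128"
   entry is what the expected <tmmintrin.h> definition of _mm_abs_epi32
   calls, roughly

        static __inline __m128i
        _mm_abs_epi32 (__m128i __X)
        {
          return (__m128i) __builtin_ia32_pabsd128 ((__v4si) __X);
        }

   (a sketch of the header, not something defined in this file).  */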
16048
16049 static void
16050 ix86_init_builtins (void)
16051 {
16052 if (TARGET_MMX)
16053 ix86_init_mmx_sse_builtins ();
16054 }
16055
16056 /* Set up all the MMX/SSE builtins.  This is not called if TARGET_MMX
16057 is zero.  Otherwise, if TARGET_SSE is not set, only the MMX builtins
16058 are defined.  */
16059 static void
16060 ix86_init_mmx_sse_builtins (void)
16061 {
16062 const struct builtin_description * d;
16063 size_t i;
16064
16065 tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
16066 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
16067 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
16068 tree V2DI_type_node
16069 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
16070 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
16071 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
16072 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
16073 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
16074 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
16075 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
16076
16077 tree pchar_type_node = build_pointer_type (char_type_node);
16078 tree pcchar_type_node = build_pointer_type (
16079 build_type_variant (char_type_node, 1, 0));
16080 tree pfloat_type_node = build_pointer_type (float_type_node);
16081 tree pcfloat_type_node = build_pointer_type (
16082 build_type_variant (float_type_node, 1, 0));
16083 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
16084 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
16085 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
16086
16087 /* Comparisons. */
16088 tree int_ftype_v4sf_v4sf
16089 = build_function_type_list (integer_type_node,
16090 V4SF_type_node, V4SF_type_node, NULL_TREE);
16091 tree v4si_ftype_v4sf_v4sf
16092 = build_function_type_list (V4SI_type_node,
16093 V4SF_type_node, V4SF_type_node, NULL_TREE);
16094 /* MMX/SSE/integer conversions. */
16095 tree int_ftype_v4sf
16096 = build_function_type_list (integer_type_node,
16097 V4SF_type_node, NULL_TREE);
16098 tree int64_ftype_v4sf
16099 = build_function_type_list (long_long_integer_type_node,
16100 V4SF_type_node, NULL_TREE);
16101 tree int_ftype_v8qi
16102 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
16103 tree v4sf_ftype_v4sf_int
16104 = build_function_type_list (V4SF_type_node,
16105 V4SF_type_node, integer_type_node, NULL_TREE);
16106 tree v4sf_ftype_v4sf_int64
16107 = build_function_type_list (V4SF_type_node,
16108 V4SF_type_node, long_long_integer_type_node,
16109 NULL_TREE);
16110 tree v4sf_ftype_v4sf_v2si
16111 = build_function_type_list (V4SF_type_node,
16112 V4SF_type_node, V2SI_type_node, NULL_TREE);
16113
16114 /* Miscellaneous. */
16115 tree v8qi_ftype_v4hi_v4hi
16116 = build_function_type_list (V8QI_type_node,
16117 V4HI_type_node, V4HI_type_node, NULL_TREE);
16118 tree v4hi_ftype_v2si_v2si
16119 = build_function_type_list (V4HI_type_node,
16120 V2SI_type_node, V2SI_type_node, NULL_TREE);
16121 tree v4sf_ftype_v4sf_v4sf_int
16122 = build_function_type_list (V4SF_type_node,
16123 V4SF_type_node, V4SF_type_node,
16124 integer_type_node, NULL_TREE);
16125 tree v2si_ftype_v4hi_v4hi
16126 = build_function_type_list (V2SI_type_node,
16127 V4HI_type_node, V4HI_type_node, NULL_TREE);
16128 tree v4hi_ftype_v4hi_int
16129 = build_function_type_list (V4HI_type_node,
16130 V4HI_type_node, integer_type_node, NULL_TREE);
16131 tree v4hi_ftype_v4hi_di
16132 = build_function_type_list (V4HI_type_node,
16133 V4HI_type_node, long_long_unsigned_type_node,
16134 NULL_TREE);
16135 tree v2si_ftype_v2si_di
16136 = build_function_type_list (V2SI_type_node,
16137 V2SI_type_node, long_long_unsigned_type_node,
16138 NULL_TREE);
16139 tree void_ftype_void
16140 = build_function_type (void_type_node, void_list_node);
16141 tree void_ftype_unsigned
16142 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
16143 tree void_ftype_unsigned_unsigned
16144 = build_function_type_list (void_type_node, unsigned_type_node,
16145 unsigned_type_node, NULL_TREE);
16146 tree void_ftype_pcvoid_unsigned_unsigned
16147 = build_function_type_list (void_type_node, const_ptr_type_node,
16148 unsigned_type_node, unsigned_type_node,
16149 NULL_TREE);
16150 tree unsigned_ftype_void
16151 = build_function_type (unsigned_type_node, void_list_node);
16152 tree v2si_ftype_v4sf
16153 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
16154 /* Loads/stores. */
16155 tree void_ftype_v8qi_v8qi_pchar
16156 = build_function_type_list (void_type_node,
16157 V8QI_type_node, V8QI_type_node,
16158 pchar_type_node, NULL_TREE);
16159 tree v4sf_ftype_pcfloat
16160 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
16161 /* @@@ the type is bogus */
16162 tree v4sf_ftype_v4sf_pv2si
16163 = build_function_type_list (V4SF_type_node,
16164 V4SF_type_node, pv2si_type_node, NULL_TREE);
16165 tree void_ftype_pv2si_v4sf
16166 = build_function_type_list (void_type_node,
16167 pv2si_type_node, V4SF_type_node, NULL_TREE);
16168 tree void_ftype_pfloat_v4sf
16169 = build_function_type_list (void_type_node,
16170 pfloat_type_node, V4SF_type_node, NULL_TREE);
16171 tree void_ftype_pdi_di
16172 = build_function_type_list (void_type_node,
16173 pdi_type_node, long_long_unsigned_type_node,
16174 NULL_TREE);
16175 tree void_ftype_pv2di_v2di
16176 = build_function_type_list (void_type_node,
16177 pv2di_type_node, V2DI_type_node, NULL_TREE);
16178 /* Normal vector unops. */
16179 tree v4sf_ftype_v4sf
16180 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16181 tree v16qi_ftype_v16qi
16182 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16183 tree v8hi_ftype_v8hi
16184 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16185 tree v4si_ftype_v4si
16186 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16187 tree v8qi_ftype_v8qi
16188 = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
16189 tree v4hi_ftype_v4hi
16190 = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
16191
16192 /* Normal vector binops. */
16193 tree v4sf_ftype_v4sf_v4sf
16194 = build_function_type_list (V4SF_type_node,
16195 V4SF_type_node, V4SF_type_node, NULL_TREE);
16196 tree v8qi_ftype_v8qi_v8qi
16197 = build_function_type_list (V8QI_type_node,
16198 V8QI_type_node, V8QI_type_node, NULL_TREE);
16199 tree v4hi_ftype_v4hi_v4hi
16200 = build_function_type_list (V4HI_type_node,
16201 V4HI_type_node, V4HI_type_node, NULL_TREE);
16202 tree v2si_ftype_v2si_v2si
16203 = build_function_type_list (V2SI_type_node,
16204 V2SI_type_node, V2SI_type_node, NULL_TREE);
16205 tree di_ftype_di_di
16206 = build_function_type_list (long_long_unsigned_type_node,
16207 long_long_unsigned_type_node,
16208 long_long_unsigned_type_node, NULL_TREE);
16209
16210 tree di_ftype_di_di_int
16211 = build_function_type_list (long_long_unsigned_type_node,
16212 long_long_unsigned_type_node,
16213 long_long_unsigned_type_node,
16214 integer_type_node, NULL_TREE);
16215
16216 tree v2si_ftype_v2sf
16217 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
16218 tree v2sf_ftype_v2si
16219 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
16220 tree v2si_ftype_v2si
16221 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
16222 tree v2sf_ftype_v2sf
16223 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
16224 tree v2sf_ftype_v2sf_v2sf
16225 = build_function_type_list (V2SF_type_node,
16226 V2SF_type_node, V2SF_type_node, NULL_TREE);
16227 tree v2si_ftype_v2sf_v2sf
16228 = build_function_type_list (V2SI_type_node,
16229 V2SF_type_node, V2SF_type_node, NULL_TREE);
16230 tree pint_type_node = build_pointer_type (integer_type_node);
16231 tree pdouble_type_node = build_pointer_type (double_type_node);
16232 tree pcdouble_type_node = build_pointer_type (
16233 build_type_variant (double_type_node, 1, 0));
16234 tree int_ftype_v2df_v2df
16235 = build_function_type_list (integer_type_node,
16236 V2DF_type_node, V2DF_type_node, NULL_TREE);
16237
16238 tree void_ftype_pcvoid
16239 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
16240 tree v4sf_ftype_v4si
16241 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
16242 tree v4si_ftype_v4sf
16243 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
16244 tree v2df_ftype_v4si
16245 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
16246 tree v4si_ftype_v2df
16247 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
16248 tree v2si_ftype_v2df
16249 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
16250 tree v4sf_ftype_v2df
16251 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
16252 tree v2df_ftype_v2si
16253 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
16254 tree v2df_ftype_v4sf
16255 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
16256 tree int_ftype_v2df
16257 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
16258 tree int64_ftype_v2df
16259 = build_function_type_list (long_long_integer_type_node,
16260 V2DF_type_node, NULL_TREE);
16261 tree v2df_ftype_v2df_int
16262 = build_function_type_list (V2DF_type_node,
16263 V2DF_type_node, integer_type_node, NULL_TREE);
16264 tree v2df_ftype_v2df_int64
16265 = build_function_type_list (V2DF_type_node,
16266 V2DF_type_node, long_long_integer_type_node,
16267 NULL_TREE);
16268 tree v4sf_ftype_v4sf_v2df
16269 = build_function_type_list (V4SF_type_node,
16270 V4SF_type_node, V2DF_type_node, NULL_TREE);
16271 tree v2df_ftype_v2df_v4sf
16272 = build_function_type_list (V2DF_type_node,
16273 V2DF_type_node, V4SF_type_node, NULL_TREE);
16274 tree v2df_ftype_v2df_v2df_int
16275 = build_function_type_list (V2DF_type_node,
16276 V2DF_type_node, V2DF_type_node,
16277 integer_type_node,
16278 NULL_TREE);
16279 tree v2df_ftype_v2df_pcdouble
16280 = build_function_type_list (V2DF_type_node,
16281 V2DF_type_node, pcdouble_type_node, NULL_TREE);
16282 tree void_ftype_pdouble_v2df
16283 = build_function_type_list (void_type_node,
16284 pdouble_type_node, V2DF_type_node, NULL_TREE);
16285 tree void_ftype_pint_int
16286 = build_function_type_list (void_type_node,
16287 pint_type_node, integer_type_node, NULL_TREE);
16288 tree void_ftype_v16qi_v16qi_pchar
16289 = build_function_type_list (void_type_node,
16290 V16QI_type_node, V16QI_type_node,
16291 pchar_type_node, NULL_TREE);
16292 tree v2df_ftype_pcdouble
16293 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
16294 tree v2df_ftype_v2df_v2df
16295 = build_function_type_list (V2DF_type_node,
16296 V2DF_type_node, V2DF_type_node, NULL_TREE);
16297 tree v16qi_ftype_v16qi_v16qi
16298 = build_function_type_list (V16QI_type_node,
16299 V16QI_type_node, V16QI_type_node, NULL_TREE);
16300 tree v8hi_ftype_v8hi_v8hi
16301 = build_function_type_list (V8HI_type_node,
16302 V8HI_type_node, V8HI_type_node, NULL_TREE);
16303 tree v4si_ftype_v4si_v4si
16304 = build_function_type_list (V4SI_type_node,
16305 V4SI_type_node, V4SI_type_node, NULL_TREE);
16306 tree v2di_ftype_v2di_v2di
16307 = build_function_type_list (V2DI_type_node,
16308 V2DI_type_node, V2DI_type_node, NULL_TREE);
16309 tree v2di_ftype_v2df_v2df
16310 = build_function_type_list (V2DI_type_node,
16311 V2DF_type_node, V2DF_type_node, NULL_TREE);
16312 tree v2df_ftype_v2df
16313 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16314 tree v2di_ftype_v2di_int
16315 = build_function_type_list (V2DI_type_node,
16316 V2DI_type_node, integer_type_node, NULL_TREE);
16317 tree v2di_ftype_v2di_v2di_int
16318 = build_function_type_list (V2DI_type_node, V2DI_type_node,
16319 V2DI_type_node, integer_type_node, NULL_TREE);
16320 tree v4si_ftype_v4si_int
16321 = build_function_type_list (V4SI_type_node,
16322 V4SI_type_node, integer_type_node, NULL_TREE);
16323 tree v8hi_ftype_v8hi_int
16324 = build_function_type_list (V8HI_type_node,
16325 V8HI_type_node, integer_type_node, NULL_TREE);
16326 tree v8hi_ftype_v8hi_v2di
16327 = build_function_type_list (V8HI_type_node,
16328 V8HI_type_node, V2DI_type_node, NULL_TREE);
16329 tree v4si_ftype_v4si_v2di
16330 = build_function_type_list (V4SI_type_node,
16331 V4SI_type_node, V2DI_type_node, NULL_TREE);
16332 tree v4si_ftype_v8hi_v8hi
16333 = build_function_type_list (V4SI_type_node,
16334 V8HI_type_node, V8HI_type_node, NULL_TREE);
16335 tree di_ftype_v8qi_v8qi
16336 = build_function_type_list (long_long_unsigned_type_node,
16337 V8QI_type_node, V8QI_type_node, NULL_TREE);
16338 tree di_ftype_v2si_v2si
16339 = build_function_type_list (long_long_unsigned_type_node,
16340 V2SI_type_node, V2SI_type_node, NULL_TREE);
16341 tree v2di_ftype_v16qi_v16qi
16342 = build_function_type_list (V2DI_type_node,
16343 V16QI_type_node, V16QI_type_node, NULL_TREE);
16344 tree v2di_ftype_v4si_v4si
16345 = build_function_type_list (V2DI_type_node,
16346 V4SI_type_node, V4SI_type_node, NULL_TREE);
16347 tree int_ftype_v16qi
16348 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
16349 tree v16qi_ftype_pcchar
16350 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
16351 tree void_ftype_pchar_v16qi
16352 = build_function_type_list (void_type_node,
16353 pchar_type_node, V16QI_type_node, NULL_TREE);
16354
16355 tree float80_type;
16356 tree float128_type;
16357 tree ftype;
16358
16359 /* The __float80 type. */
16360 if (TYPE_MODE (long_double_type_node) == XFmode)
16361 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
16362 "__float80");
16363 else
16364 {
16365 /* The __float80 type. */
16366 float80_type = make_node (REAL_TYPE);
16367 TYPE_PRECISION (float80_type) = 80;
16368 layout_type (float80_type);
16369 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
16370 }
16371
16372 if (TARGET_64BIT)
16373 {
16374 float128_type = make_node (REAL_TYPE);
16375 TYPE_PRECISION (float128_type) = 128;
16376 layout_type (float128_type);
16377 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
16378 }
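/* Usage sketch for the type names registered above: __float80 is the
   80-bit extended format (the same type as long double when long double
   is XFmode), and on 64-bit targets __float128 is also available, e.g.

        __float80  e = 1.0;
        __float128 q = 2.0;

   Only the type names are registered here; no additional builtins are
   created for them at this point.  */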
16379
16380 /* Add all builtins that are more or less simple operations on two
16381 operands. */
16382 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16383 {
16384 /* Use one of the operands; the target can have a different mode for
16385 mask-generating compares. */
16386 enum machine_mode mode;
16387 tree type;
16388
16389 if (d->name == 0)
16390 continue;
16391 mode = insn_data[d->icode].operand[1].mode;
16392
16393 switch (mode)
16394 {
16395 case V16QImode:
16396 type = v16qi_ftype_v16qi_v16qi;
16397 break;
16398 case V8HImode:
16399 type = v8hi_ftype_v8hi_v8hi;
16400 break;
16401 case V4SImode:
16402 type = v4si_ftype_v4si_v4si;
16403 break;
16404 case V2DImode:
16405 type = v2di_ftype_v2di_v2di;
16406 break;
16407 case V2DFmode:
16408 type = v2df_ftype_v2df_v2df;
16409 break;
16410 case V4SFmode:
16411 type = v4sf_ftype_v4sf_v4sf;
16412 break;
16413 case V8QImode:
16414 type = v8qi_ftype_v8qi_v8qi;
16415 break;
16416 case V4HImode:
16417 type = v4hi_ftype_v4hi_v4hi;
16418 break;
16419 case V2SImode:
16420 type = v2si_ftype_v2si_v2si;
16421 break;
16422 case DImode:
16423 type = di_ftype_di_di;
16424 break;
16425
16426 default:
16427 gcc_unreachable ();
16428 }
16429
16430 /* Override for comparisons. */
16431 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
16432 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
16433 type = v4si_ftype_v4sf_v4sf;
16434
16435 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
16436 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
16437 type = v2di_ftype_v2df_v2df;
16438
16439 def_builtin (d->mask, d->name, type, d->code);
16440 }
16441
16442 /* Add all builtins that are more or less simple operations on one operand. */
16443 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16444 {
16445 enum machine_mode mode;
16446 tree type;
16447
16448 if (d->name == 0)
16449 continue;
16450 mode = insn_data[d->icode].operand[1].mode;
16451
16452 switch (mode)
16453 {
16454 case V16QImode:
16455 type = v16qi_ftype_v16qi;
16456 break;
16457 case V8HImode:
16458 type = v8hi_ftype_v8hi;
16459 break;
16460 case V4SImode:
16461 type = v4si_ftype_v4si;
16462 break;
16463 case V2DFmode:
16464 type = v2df_ftype_v2df;
16465 break;
16466 case V4SFmode:
16467 type = v4sf_ftype_v4sf;
16468 break;
16469 case V8QImode:
16470 type = v8qi_ftype_v8qi;
16471 break;
16472 case V4HImode:
16473 type = v4hi_ftype_v4hi;
16474 break;
16475 case V2SImode:
16476 type = v2si_ftype_v2si;
16477 break;
16478
16479 default:
16480 gcc_unreachable ();
16481 }
16482
16483 def_builtin (d->mask, d->name, type, d->code);
16484 }
16485
16486 /* Add the remaining MMX insns with somewhat more complicated types. */
16487 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
16488 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
16489 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
16490 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
16491
16492 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
16493 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
16494 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
16495
16496 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
16497 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
16498
16499 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
16500 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
16501
16502 /* comi/ucomi insns. */
16503 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
16504 if (d->mask == MASK_SSE2)
16505 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
16506 else
16507 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
16508
16509 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
16510 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
16511 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
16512
16513 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
16514 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
16515 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
16516 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
16517 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
16518 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
16519 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
16520 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
16521 def_builtin_const (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
16522 def_builtin_const (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
16523 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
16524
16525 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
16526
16527 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
16528 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
16529
16530 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
16531 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
16532 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
16533 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
16534
16535 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
16536 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
16537 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
16538 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
16539
16540 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
16541
16542 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
16543
16544 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
16545 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
16546 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
16547 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
16548 def_builtin_const (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
16549 def_builtin_const (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
16550
16551 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
16552
16553 /* Original 3DNow! */
16554 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
16555 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
16556 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
16557 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
16558 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
16559 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
16560 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
16561 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
16562 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
16563 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
16564 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
16565 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
16566 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
16567 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
16568 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
16569 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
16570 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
16571 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
16572 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
16573 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
16574
16575 /* 3DNow! extension as used in the Athlon CPU. */
16576 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
16577 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
16578 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
16579 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
16580 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
16581 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
16582
16583 /* SSE2 */
16584 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
16585
16586 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
16587 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
16588
16589 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
16590 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
16591
16592 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
16593 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
16594 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
16595 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
16596 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
16597
16598 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
16599 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
16600 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
16601 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
16602
16603 def_builtin_const (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
16604 def_builtin_const (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
16605
16606 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
16607
16608 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
16609 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
16610
16611 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
16612 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
16613 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
16614 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
16615 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
16616
16617 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
16618
16619 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
16620 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
16621 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
16622 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
16623
16624 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
16625 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
16626 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
16627
16628 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
16629 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
16630 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
16631 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
16632
16633 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
16634 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
16635 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
16636
16637 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
16638 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
16639
16640 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
16641 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
16642
16643 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
16644 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
16645 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
16646
16647 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
16648 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
16649 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
16650
16651 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
16652 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
16653
16654 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
16655 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
16656 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
16657 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
16658
16659 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
16660 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
16661 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
16662 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
16663
16664 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
16665 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
16666
16667 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
16668
16669 /* Prescott New Instructions. */
16670 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
16671 void_ftype_pcvoid_unsigned_unsigned,
16672 IX86_BUILTIN_MONITOR);
16673 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
16674 void_ftype_unsigned_unsigned,
16675 IX86_BUILTIN_MWAIT);
16676 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
16677 v4sf_ftype_v4sf,
16678 IX86_BUILTIN_MOVSHDUP);
16679 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
16680 v4sf_ftype_v4sf,
16681 IX86_BUILTIN_MOVSLDUP);
16682 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
16683 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
16684
16685 /* SSSE3. */
16686 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr128",
16687 v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PALIGNR128);
16688 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr", di_ftype_di_di_int,
16689 IX86_BUILTIN_PALIGNR);
16690
16691 /* Access to the vec_init patterns. */
16692 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
16693 integer_type_node, NULL_TREE);
16694 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
16695 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
16696
16697 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
16698 short_integer_type_node,
16699 short_integer_type_node,
16700 short_integer_type_node, NULL_TREE);
16701 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
16702 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
16703
16704 ftype = build_function_type_list (V8QI_type_node, char_type_node,
16705 char_type_node, char_type_node,
16706 char_type_node, char_type_node,
16707 char_type_node, char_type_node,
16708 char_type_node, NULL_TREE);
16709 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
16710 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
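/* Illustration: the vec_init builtins above are how <mmintrin.h> is
   expected to build MMX vectors without using (type){...} syntax, e.g. a
   wrapper along the lines of

        static __inline __m64
        _mm_set_pi32 (int __i1, int __i0)
        {
          return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1);
        }

   Arguments are taken lowest element first.  The wrapper shown is a
   sketch of the header, not a definition made here.  */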
16711
16712 /* Access to the vec_extract patterns. */
16713 ftype = build_function_type_list (double_type_node, V2DF_type_node,
16714 integer_type_node, NULL_TREE);
16715 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
16716 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
16717
16718 ftype = build_function_type_list (long_long_integer_type_node,
16719 V2DI_type_node, integer_type_node,
16720 NULL_TREE);
16721 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
16722 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
16723
16724 ftype = build_function_type_list (float_type_node, V4SF_type_node,
16725 integer_type_node, NULL_TREE);
16726 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
16727 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
16728
16729 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
16730 integer_type_node, NULL_TREE);
16731 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
16732 ftype, IX86_BUILTIN_VEC_EXT_V4SI);
16733
16734 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
16735 integer_type_node, NULL_TREE);
16736 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
16737 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
16738
16739 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
16740 integer_type_node, NULL_TREE);
16741 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
16742 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
16743
16744 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
16745 integer_type_node, NULL_TREE);
16746 def_builtin (MASK_MMX, "__builtin_ia32_vec_ext_v2si",
16747 ftype, IX86_BUILTIN_VEC_EXT_V2SI);
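/* Illustration: a vec_extract builtin is reached from the headers
   roughly as

        static __inline int
        _mm_extract_pi16 (__m64 const __A, int const __N)
        {
          return __builtin_ia32_vec_ext_v4hi ((__v4hi) __A, __N);
        }

   The selector argument must be an integer constant in range; see
   get_element_number below.  Again only a sketch of the expected
   <xmmintrin.h> wrapper.  */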
16748
16749 /* Access to the vec_set patterns. */
16750 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
16751 intHI_type_node,
16752 integer_type_node, NULL_TREE);
16753 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
16754 ftype, IX86_BUILTIN_VEC_SET_V8HI);
16755
16756 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
16757 intHI_type_node,
16758 integer_type_node, NULL_TREE);
16759 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
16760 ftype, IX86_BUILTIN_VEC_SET_V4HI);
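/* Illustration: the matching vec_set builtin replaces one element,
   e.g. the expected <xmmintrin.h> wrapper

        static __inline __m64
        _mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
        {
          return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi) __A,
                                                      __D, __N);
        }

   stores __D into element __N of __A.  Sketch only; the header is not
   part of this file.  */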
16761 }
16762
16763 /* Errors in the source file can cause expand_expr to return const0_rtx
16764 where we expect a vector. To avoid crashing, use one of the vector
16765 clear instructions. */
16766 static rtx
16767 safe_vector_operand (rtx x, enum machine_mode mode)
16768 {
16769 if (x == const0_rtx)
16770 x = CONST0_RTX (mode);
16771 return x;
16772 }
16773
16774 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
16775
16776 static rtx
16777 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
16778 {
16779 rtx pat, xops[3];
16780 tree arg0 = TREE_VALUE (arglist);
16781 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16782 rtx op0 = expand_normal (arg0);
16783 rtx op1 = expand_normal (arg1);
16784 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16785 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16786 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
16787
16788 if (VECTOR_MODE_P (mode0))
16789 op0 = safe_vector_operand (op0, mode0);
16790 if (VECTOR_MODE_P (mode1))
16791 op1 = safe_vector_operand (op1, mode1);
16792
16793 if (optimize || !target
16794 || GET_MODE (target) != tmode
16795 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16796 target = gen_reg_rtx (tmode);
16797
16798 if (GET_MODE (op1) == SImode && mode1 == TImode)
16799 {
16800 rtx x = gen_reg_rtx (V4SImode);
16801 emit_insn (gen_sse2_loadd (x, op1));
16802 op1 = gen_lowpart (TImode, x);
16803 }
16804
16805 /* The insn must want input operands in the same modes as the
16806 result. */
16807 gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
16808 && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));
16809
16810 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
16811 op0 = copy_to_mode_reg (mode0, op0);
16812 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
16813 op1 = copy_to_mode_reg (mode1, op1);
16814
16815 /* ??? Using ix86_fixup_binary_operands is problematic when
16816 we've got mismatched modes. Fake it. */
16817
16818 xops[0] = target;
16819 xops[1] = op0;
16820 xops[2] = op1;
16821
16822 if (tmode == mode0 && tmode == mode1)
16823 {
16824 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
16825 op0 = xops[1];
16826 op1 = xops[2];
16827 }
16828 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
16829 {
16830 op0 = force_reg (mode0, op0);
16831 op1 = force_reg (mode1, op1);
16832 target = gen_reg_rtx (tmode);
16833 }
16834
16835 pat = GEN_FCN (icode) (target, op0, op1);
16836 if (! pat)
16837 return 0;
16838 emit_insn (pat);
16839 return target;
16840 }
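/* For orientation: this routine is reached from the bdesc_2arg dispatch
   in ix86_expand_builtin, so a source-level call such as

        typedef short __v8hi __attribute__ ((__vector_size__ (16)));

        __v8hi
        add16 (__v8hi a, __v8hi b)
        {
          return __builtin_ia32_paddw128 (a, b);
        }

   arrives here with ICODE == CODE_FOR_addv8hi3 (see the table above) and
   normally expands to a single two-input insn writing TARGET.  */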
16841
16842 /* Subroutine of ix86_expand_builtin to take care of stores. */
16843
16844 static rtx
16845 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
16846 {
16847 rtx pat;
16848 tree arg0 = TREE_VALUE (arglist);
16849 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16850 rtx op0 = expand_normal (arg0);
16851 rtx op1 = expand_normal (arg1);
16852 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
16853 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
16854
16855 if (VECTOR_MODE_P (mode1))
16856 op1 = safe_vector_operand (op1, mode1);
16857
16858 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
16859 op1 = copy_to_mode_reg (mode1, op1);
16860
16861 pat = GEN_FCN (icode) (op0, op1);
16862 if (pat)
16863 emit_insn (pat);
16864 return 0;
16865 }
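/* For orientation: the store builtins take a pointer plus a value and
   return nothing, e.g. __builtin_ia32_movntps, declared above with
   void_ftype_pfloat_v4sf and wrapped by <xmmintrin.h> roughly as

        static __inline void
        _mm_stream_ps (float *__P, __m128 __A)
        {
          __builtin_ia32_movntps (__P, (__v4sf) __A);
        }

   The pointer argument becomes the MEM destination built above.  */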
16866
16867 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
16868
16869 static rtx
16870 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
16871 rtx target, int do_load)
16872 {
16873 rtx pat;
16874 tree arg0 = TREE_VALUE (arglist);
16875 rtx op0 = expand_normal (arg0);
16876 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16877 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16878
16879 if (optimize || !target
16880 || GET_MODE (target) != tmode
16881 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16882 target = gen_reg_rtx (tmode);
16883 if (do_load)
16884 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
16885 else
16886 {
16887 if (VECTOR_MODE_P (mode0))
16888 op0 = safe_vector_operand (op0, mode0);
16889
16890 if ((optimize && !register_operand (op0, mode0))
16891 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16892 op0 = copy_to_mode_reg (mode0, op0);
16893 }
16894
16895 pat = GEN_FCN (icode) (target, op0);
16896 if (! pat)
16897 return 0;
16898 emit_insn (pat);
16899 return target;
16900 }
16901
16902 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
16903 sqrtss, rsqrtss, rcpss. */
16904
16905 static rtx
16906 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
16907 {
16908 rtx pat;
16909 tree arg0 = TREE_VALUE (arglist);
16910 rtx op1, op0 = expand_normal (arg0);
16911 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16912 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16913
16914 if (optimize || !target
16915 || GET_MODE (target) != tmode
16916 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16917 target = gen_reg_rtx (tmode);
16918
16919 if (VECTOR_MODE_P (mode0))
16920 op0 = safe_vector_operand (op0, mode0);
16921
16922 if ((optimize && !register_operand (op0, mode0))
16923 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16924 op0 = copy_to_mode_reg (mode0, op0);
16925
16926 op1 = op0;
16927 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
16928 op1 = copy_to_mode_reg (mode0, op1);
16929
16930 pat = GEN_FCN (icode) (target, op0, op1);
16931 if (! pat)
16932 return 0;
16933 emit_insn (pat);
16934 return target;
16935 }
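/* For orientation: the scalar insns handled here (sqrtss, rsqrtss,
   rcpss) take the whole source vector a second time so the untouched
   upper elements have a defined origin, which is why OP0 is duplicated
   above.  E.g. the expected <xmmintrin.h> wrapper

        static __inline __m128
        _mm_sqrt_ss (__m128 __A)
        {
          return (__m128) __builtin_ia32_sqrtss ((__v4sf) __A);
        }

   yields the square root of element 0 with elements 1-3 copied from
   __A.  */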
16936
16937 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
16938
16939 static rtx
16940 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
16941 rtx target)
16942 {
16943 rtx pat;
16944 tree arg0 = TREE_VALUE (arglist);
16945 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16946 rtx op0 = expand_normal (arg0);
16947 rtx op1 = expand_normal (arg1);
16948 rtx op2;
16949 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
16950 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
16951 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
16952 enum rtx_code comparison = d->comparison;
16953
16954 if (VECTOR_MODE_P (mode0))
16955 op0 = safe_vector_operand (op0, mode0);
16956 if (VECTOR_MODE_P (mode1))
16957 op1 = safe_vector_operand (op1, mode1);
16958
16959 /* Swap operands if we have a comparison that isn't available in
16960 hardware. */
16961 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
16962 {
16963 rtx tmp = gen_reg_rtx (mode1);
16964 emit_move_insn (tmp, op1);
16965 op1 = op0;
16966 op0 = tmp;
16967 }
16968
16969 if (optimize || !target
16970 || GET_MODE (target) != tmode
16971 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
16972 target = gen_reg_rtx (tmode);
16973
16974 if ((optimize && !register_operand (op0, mode0))
16975 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
16976 op0 = copy_to_mode_reg (mode0, op0);
16977 if ((optimize && !register_operand (op1, mode1))
16978 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
16979 op1 = copy_to_mode_reg (mode1, op1);
16980
16981 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
16982 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
16983 if (! pat)
16984 return 0;
16985 emit_insn (pat);
16986 return target;
16987 }
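/* For orientation: the mask-generating compares expanded here return an
   all-ones/all-zeros mask per element.  Assuming the usual cmpltps entry
   (CODE_FOR_sse_maskcmpv4sf3) earlier in bdesc_2arg:

        typedef float __v4sf __attribute__ ((__vector_size__ (16)));
        typedef int   __v4si __attribute__ ((__vector_size__ (16)));

        __v4si
        lt_mask (__v4sf a, __v4sf b)
        {
          return __builtin_ia32_cmpltps (a, b);
        }

   Predicates without a direct hardware form carry
   BUILTIN_DESC_SWAP_OPERANDS and have their operands exchanged above.  */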
16988
16989 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
16990
16991 static rtx
16992 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
16993 rtx target)
16994 {
16995 rtx pat;
16996 tree arg0 = TREE_VALUE (arglist);
16997 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16998 rtx op0 = expand_normal (arg0);
16999 rtx op1 = expand_normal (arg1);
17000 rtx op2;
17001 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
17002 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
17003 enum rtx_code comparison = d->comparison;
17004
17005 if (VECTOR_MODE_P (mode0))
17006 op0 = safe_vector_operand (op0, mode0);
17007 if (VECTOR_MODE_P (mode1))
17008 op1 = safe_vector_operand (op1, mode1);
17009
17010 /* Swap operands if we have a comparison that isn't available in
17011 hardware. */
17012 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
17013 {
17014 rtx tmp = op1;
17015 op1 = op0;
17016 op0 = tmp;
17017 }
17018
17019 target = gen_reg_rtx (SImode);
17020 emit_move_insn (target, const0_rtx);
17021 target = gen_rtx_SUBREG (QImode, target, 0);
17022
17023 if ((optimize && !register_operand (op0, mode0))
17024 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
17025 op0 = copy_to_mode_reg (mode0, op0);
17026 if ((optimize && !register_operand (op1, mode1))
17027 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
17028 op1 = copy_to_mode_reg (mode1, op1);
17029
17030 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
17031 pat = GEN_FCN (d->icode) (op0, op1);
17032 if (! pat)
17033 return 0;
17034 emit_insn (pat);
17035 emit_insn (gen_rtx_SET (VOIDmode,
17036 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
17037 gen_rtx_fmt_ee (comparison, QImode,
17038 SET_DEST (pat),
17039 const0_rtx)));
17040
17041 return SUBREG_REG (target);
17042 }
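/* For orientation: the comi/ucomi builtins return a plain int, e.g. the
   expected <xmmintrin.h> wrapper

        static __inline int
        _mm_comilt_ss (__m128 __A, __m128 __B)
        {
          return __builtin_ia32_comilt ((__v4sf) __A, (__v4sf) __B);
        }

   The flags result is materialized by the setcc-style SET into the
   QImode subreg created above.  */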
17043
17044 /* Return the integer constant in ARG. Constrain it to be in the range
17045 of the subparts of VEC_TYPE; issue an error if not. */
17046
17047 static int
17048 get_element_number (tree vec_type, tree arg)
17049 {
17050 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
17051
17052 if (!host_integerp (arg, 1)
17053 || (elt = tree_low_cst (arg, 1), elt > max))
17054 {
17055 error ("selector must be an integer constant in the range 0..%wi", max);
17056 return 0;
17057 }
17058
17059 return elt;
17060 }
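/* Worked example of the check above: for a V4SF vector
   TYPE_VECTOR_SUBPARTS is 4, so

        __builtin_ia32_vec_ext_v4sf (x, 7);

   is diagnosed with "selector must be an integer constant in the
   range 0..3" and 0 is returned so that expansion can continue.  */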
17061
17062 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17063 ix86_expand_vector_init. We DO have language-level syntax for this, in
17064 the form of (type){ init-list }. Except that since we can't place emms
17065 instructions from inside the compiler, we can't allow the use of MMX
17066 registers unless the user explicitly asks for it. So we do *not* define
17067 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
17068 we have builtins invoked by mmintrin.h that gives us license to emit
17069 these sorts of instructions. */
17070
17071 static rtx
17072 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
17073 {
17074 enum machine_mode tmode = TYPE_MODE (type);
17075 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
17076 int i, n_elt = GET_MODE_NUNITS (tmode);
17077 rtvec v = rtvec_alloc (n_elt);
17078
17079 gcc_assert (VECTOR_MODE_P (tmode));
17080
17081 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
17082 {
17083 rtx x = expand_normal (TREE_VALUE (arglist));
17084 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
17085 }
17086
17087 gcc_assert (arglist == NULL);
17088
17089 if (!target || !register_operand (target, tmode))
17090 target = gen_reg_rtx (tmode);
17091
17092 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
17093 return target;
17094 }
17095
17096 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17097 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
17098 had a language-level syntax for referencing vector elements. */
17099
17100 static rtx
17101 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
17102 {
17103 enum machine_mode tmode, mode0;
17104 tree arg0, arg1;
17105 int elt;
17106 rtx op0;
17107
17108 arg0 = TREE_VALUE (arglist);
17109 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17110
17111 op0 = expand_normal (arg0);
17112 elt = get_element_number (TREE_TYPE (arg0), arg1);
17113
17114 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
17115 mode0 = TYPE_MODE (TREE_TYPE (arg0));
17116 gcc_assert (VECTOR_MODE_P (mode0));
17117
17118 op0 = force_reg (mode0, op0);
17119
17120 if (optimize || !target || !register_operand (target, tmode))
17121 target = gen_reg_rtx (tmode);
17122
17123 ix86_expand_vector_extract (true, target, op0, elt);
17124
17125 return target;
17126 }
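
/* Typical path here (assuming the xmmintrin.h wrapper): _mm_extract_pi16 (v, 2)
   becomes __builtin_ia32_vec_ext_v4hi ((__v4hi) v, 2), so OP0 is the V4HI
   vector and ELT is 2; ix86_expand_vector_extract then emits the extraction
   (pextrw or an equivalent shuffle/move sequence) into a fresh TMODE
   register.  */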
17127
17128 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17129 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
17130 a language-level syntax for referencing vector elements. */
17131
17132 static rtx
17133 ix86_expand_vec_set_builtin (tree arglist)
17134 {
17135 enum machine_mode tmode, mode1;
17136 tree arg0, arg1, arg2;
17137 int elt;
17138 rtx op0, op1;
17139
17140 arg0 = TREE_VALUE (arglist);
17141 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17142 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17143
17144 tmode = TYPE_MODE (TREE_TYPE (arg0));
17145 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
17146 gcc_assert (VECTOR_MODE_P (tmode));
17147
17148 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
17149 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
17150 elt = get_element_number (TREE_TYPE (arg0), arg2);
17151
17152 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
17153 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
17154
17155 op0 = force_reg (tmode, op0);
17156 op1 = force_reg (mode1, op1);
17157
17158 ix86_expand_vector_set (true, op0, op1, elt);
17159
17160 return op0;
17161 }
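
/* Typical path here (assuming the xmmintrin.h wrapper): _mm_insert_pi16 (v, d, 1)
   becomes __builtin_ia32_vec_set_v4hi ((__v4hi) v, d, 1); OP0 is the vector,
   OP1 the scalar value converted to MODE1, and ELT the range-checked
   selector, so ix86_expand_vector_set rewrites element 1 of OP0 in place and
   OP0 is returned as the result.  */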
17162
17163 /* Expand an expression EXP that calls a built-in function,
17164 with result going to TARGET if that's convenient
17165 (and in mode MODE if that's convenient).
17166 SUBTARGET may be used as the target for computing one of EXP's operands.
17167 IGNORE is nonzero if the value is to be ignored. */
17168
17169 static rtx
17170 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
17171 enum machine_mode mode ATTRIBUTE_UNUSED,
17172 int ignore ATTRIBUTE_UNUSED)
17173 {
17174 const struct builtin_description *d;
17175 size_t i;
17176 enum insn_code icode;
17177 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
17178 tree arglist = TREE_OPERAND (exp, 1);
17179 tree arg0, arg1, arg2;
17180 rtx op0, op1, op2, pat;
17181 enum machine_mode tmode, mode0, mode1, mode2, mode3;
17182 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
17183
17184 switch (fcode)
17185 {
17186 case IX86_BUILTIN_EMMS:
17187 emit_insn (gen_mmx_emms ());
17188 return 0;
17189
17190 case IX86_BUILTIN_SFENCE:
17191 emit_insn (gen_sse_sfence ());
17192 return 0;
17193
17194 case IX86_BUILTIN_MASKMOVQ:
17195 case IX86_BUILTIN_MASKMOVDQU:
17196 icode = (fcode == IX86_BUILTIN_MASKMOVQ
17197 ? CODE_FOR_mmx_maskmovq
17198 : CODE_FOR_sse2_maskmovdqu);
17199 /* Note the arg order is different from the operand order. */
17200 arg1 = TREE_VALUE (arglist);
17201 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
17202 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17203 op0 = expand_normal (arg0);
17204 op1 = expand_normal (arg1);
17205 op2 = expand_normal (arg2);
17206 mode0 = insn_data[icode].operand[0].mode;
17207 mode1 = insn_data[icode].operand[1].mode;
17208 mode2 = insn_data[icode].operand[2].mode;
17209
17210 op0 = force_reg (Pmode, op0);
17211 op0 = gen_rtx_MEM (mode1, op0);
17212
17213 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
17214 op0 = copy_to_mode_reg (mode0, op0);
17215 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
17216 op1 = copy_to_mode_reg (mode1, op1);
17217 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
17218 op2 = copy_to_mode_reg (mode2, op2);
17219 pat = GEN_FCN (icode) (op0, op1, op2);
17220 if (! pat)
17221 return 0;
17222 emit_insn (pat);
17223 return 0;
17224
17225 case IX86_BUILTIN_SQRTSS:
17226 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
17227 case IX86_BUILTIN_RSQRTSS:
17228 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
17229 case IX86_BUILTIN_RCPSS:
17230 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
17231
17232 case IX86_BUILTIN_LOADUPS:
17233 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
17234
17235 case IX86_BUILTIN_STOREUPS:
17236 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
17237
17238 case IX86_BUILTIN_LOADHPS:
17239 case IX86_BUILTIN_LOADLPS:
17240 case IX86_BUILTIN_LOADHPD:
17241 case IX86_BUILTIN_LOADLPD:
17242 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
17243 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
17244 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
17245 : CODE_FOR_sse2_loadlpd);
17246 arg0 = TREE_VALUE (arglist);
17247 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17248 op0 = expand_normal (arg0);
17249 op1 = expand_normal (arg1);
17250 tmode = insn_data[icode].operand[0].mode;
17251 mode0 = insn_data[icode].operand[1].mode;
17252 mode1 = insn_data[icode].operand[2].mode;
17253
17254 op0 = force_reg (mode0, op0);
17255 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
17256 if (optimize || target == 0
17257 || GET_MODE (target) != tmode
17258 || !register_operand (target, tmode))
17259 target = gen_reg_rtx (tmode);
17260 pat = GEN_FCN (icode) (target, op0, op1);
17261 if (! pat)
17262 return 0;
17263 emit_insn (pat);
17264 return target;
17265
17266 case IX86_BUILTIN_STOREHPS:
17267 case IX86_BUILTIN_STORELPS:
17268 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
17269 : CODE_FOR_sse_storelps);
17270 arg0 = TREE_VALUE (arglist);
17271 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17272 op0 = expand_normal (arg0);
17273 op1 = expand_normal (arg1);
17274 mode0 = insn_data[icode].operand[0].mode;
17275 mode1 = insn_data[icode].operand[1].mode;
17276
17277 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
17278 op1 = force_reg (mode1, op1);
17279
17280 pat = GEN_FCN (icode) (op0, op1);
17281 if (! pat)
17282 return 0;
17283 emit_insn (pat);
17284 return const0_rtx;
17285
17286 case IX86_BUILTIN_MOVNTPS:
17287 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
17288 case IX86_BUILTIN_MOVNTQ:
17289 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
17290
17291 case IX86_BUILTIN_LDMXCSR:
17292 op0 = expand_normal (TREE_VALUE (arglist));
17293 target = assign_386_stack_local (SImode, SLOT_TEMP);
17294 emit_move_insn (target, op0);
17295 emit_insn (gen_sse_ldmxcsr (target));
17296 return 0;
17297
17298 case IX86_BUILTIN_STMXCSR:
17299 target = assign_386_stack_local (SImode, SLOT_TEMP);
17300 emit_insn (gen_sse_stmxcsr (target));
17301 return copy_to_mode_reg (SImode, target);
17302
17303 case IX86_BUILTIN_SHUFPS:
17304 case IX86_BUILTIN_SHUFPD:
17305 icode = (fcode == IX86_BUILTIN_SHUFPS
17306 ? CODE_FOR_sse_shufps
17307 : CODE_FOR_sse2_shufpd);
17308 arg0 = TREE_VALUE (arglist);
17309 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17310 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17311 op0 = expand_normal (arg0);
17312 op1 = expand_normal (arg1);
17313 op2 = expand_normal (arg2);
17314 tmode = insn_data[icode].operand[0].mode;
17315 mode0 = insn_data[icode].operand[1].mode;
17316 mode1 = insn_data[icode].operand[2].mode;
17317 mode2 = insn_data[icode].operand[3].mode;
17318
17319 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
17320 op0 = copy_to_mode_reg (mode0, op0);
17321 if ((optimize && !register_operand (op1, mode1))
17322 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
17323 op1 = copy_to_mode_reg (mode1, op1);
17324 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
17325 {
17326 /* @@@ better error message */
17327 error ("mask must be an immediate");
17328 return gen_reg_rtx (tmode);
17329 }
17330 if (optimize || target == 0
17331 || GET_MODE (target) != tmode
17332 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17333 target = gen_reg_rtx (tmode);
17334 pat = GEN_FCN (icode) (target, op0, op1, op2);
17335 if (! pat)
17336 return 0;
17337 emit_insn (pat);
17338 return target;
17339
17340 case IX86_BUILTIN_PSHUFW:
17341 case IX86_BUILTIN_PSHUFD:
17342 case IX86_BUILTIN_PSHUFHW:
17343 case IX86_BUILTIN_PSHUFLW:
17344 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
17345 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
17346 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
17347 : CODE_FOR_mmx_pshufw);
17348 arg0 = TREE_VALUE (arglist);
17349 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17350 op0 = expand_normal (arg0);
17351 op1 = expand_normal (arg1);
17352 tmode = insn_data[icode].operand[0].mode;
17353 mode1 = insn_data[icode].operand[1].mode;
17354 mode2 = insn_data[icode].operand[2].mode;
17355
17356 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17357 op0 = copy_to_mode_reg (mode1, op0);
17358 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17359 {
17360 /* @@@ better error message */
17361 error ("mask must be an immediate");
17362 return const0_rtx;
17363 }
17364 if (target == 0
17365 || GET_MODE (target) != tmode
17366 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17367 target = gen_reg_rtx (tmode);
17368 pat = GEN_FCN (icode) (target, op0, op1);
17369 if (! pat)
17370 return 0;
17371 emit_insn (pat);
17372 return target;
17373
17374 case IX86_BUILTIN_PSLLDQI128:
17375 case IX86_BUILTIN_PSRLDQI128:
17376 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
17377 : CODE_FOR_sse2_lshrti3);
17378 arg0 = TREE_VALUE (arglist);
17379 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17380 op0 = expand_normal (arg0);
17381 op1 = expand_normal (arg1);
17382 tmode = insn_data[icode].operand[0].mode;
17383 mode1 = insn_data[icode].operand[1].mode;
17384 mode2 = insn_data[icode].operand[2].mode;
17385
17386 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17387 {
17388 op0 = copy_to_reg (op0);
17389 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
17390 }
17391 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17392 {
17393 error ("shift must be an immediate");
17394 return const0_rtx;
17395 }
17396 target = gen_reg_rtx (V2DImode);
17397 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
17398 if (! pat)
17399 return 0;
17400 emit_insn (pat);
17401 return target;
17402
17403 case IX86_BUILTIN_FEMMS:
17404 emit_insn (gen_mmx_femms ());
17405 return NULL_RTX;
17406
17407 case IX86_BUILTIN_PAVGUSB:
17408 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
17409
17410 case IX86_BUILTIN_PF2ID:
17411 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
17412
17413 case IX86_BUILTIN_PFACC:
17414 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
17415
17416 case IX86_BUILTIN_PFADD:
17417 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
17418
17419 case IX86_BUILTIN_PFCMPEQ:
17420 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
17421
17422 case IX86_BUILTIN_PFCMPGE:
17423 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
17424
17425 case IX86_BUILTIN_PFCMPGT:
17426 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
17427
17428 case IX86_BUILTIN_PFMAX:
17429 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
17430
17431 case IX86_BUILTIN_PFMIN:
17432 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
17433
17434 case IX86_BUILTIN_PFMUL:
17435 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
17436
17437 case IX86_BUILTIN_PFRCP:
17438 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
17439
17440 case IX86_BUILTIN_PFRCPIT1:
17441 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
17442
17443 case IX86_BUILTIN_PFRCPIT2:
17444 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
17445
17446 case IX86_BUILTIN_PFRSQIT1:
17447 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
17448
17449 case IX86_BUILTIN_PFRSQRT:
17450 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
17451
17452 case IX86_BUILTIN_PFSUB:
17453 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
17454
17455 case IX86_BUILTIN_PFSUBR:
17456 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
17457
17458 case IX86_BUILTIN_PI2FD:
17459 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
17460
17461 case IX86_BUILTIN_PMULHRW:
17462 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
17463
17464 case IX86_BUILTIN_PF2IW:
17465 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
17466
17467 case IX86_BUILTIN_PFNACC:
17468 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
17469
17470 case IX86_BUILTIN_PFPNACC:
17471 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
17472
17473 case IX86_BUILTIN_PI2FW:
17474 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
17475
17476 case IX86_BUILTIN_PSWAPDSI:
17477 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
17478
17479 case IX86_BUILTIN_PSWAPDSF:
17480 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
17481
17482 case IX86_BUILTIN_SQRTSD:
17483 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
17484 case IX86_BUILTIN_LOADUPD:
17485 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
17486 case IX86_BUILTIN_STOREUPD:
17487 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
17488
17489 case IX86_BUILTIN_MFENCE:
17490 emit_insn (gen_sse2_mfence ());
17491 return 0;
17492 case IX86_BUILTIN_LFENCE:
17493 emit_insn (gen_sse2_lfence ());
17494 return 0;
17495
17496 case IX86_BUILTIN_CLFLUSH:
17497 arg0 = TREE_VALUE (arglist);
17498 op0 = expand_normal (arg0);
17499 icode = CODE_FOR_sse2_clflush;
17500 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
17501 op0 = copy_to_mode_reg (Pmode, op0);
17502
17503 emit_insn (gen_sse2_clflush (op0));
17504 return 0;
17505
17506 case IX86_BUILTIN_MOVNTPD:
17507 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
17508 case IX86_BUILTIN_MOVNTDQ:
17509 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
17510 case IX86_BUILTIN_MOVNTI:
17511 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
17512
17513 case IX86_BUILTIN_LOADDQU:
17514 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
17515 case IX86_BUILTIN_STOREDQU:
17516 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
17517
17518 case IX86_BUILTIN_MONITOR:
17519 arg0 = TREE_VALUE (arglist);
17520 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17521 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17522 op0 = expand_normal (arg0);
17523 op1 = expand_normal (arg1);
17524 op2 = expand_normal (arg2);
17525 if (!REG_P (op0))
17526 op0 = copy_to_mode_reg (Pmode, op0);
17527 if (!REG_P (op1))
17528 op1 = copy_to_mode_reg (SImode, op1);
17529 if (!REG_P (op2))
17530 op2 = copy_to_mode_reg (SImode, op2);
17531 if (!TARGET_64BIT)
17532 emit_insn (gen_sse3_monitor (op0, op1, op2));
17533 else
17534 emit_insn (gen_sse3_monitor64 (op0, op1, op2));
17535 return 0;
17536
17537 case IX86_BUILTIN_MWAIT:
17538 arg0 = TREE_VALUE (arglist);
17539 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17540 op0 = expand_normal (arg0);
17541 op1 = expand_normal (arg1);
17542 if (!REG_P (op0))
17543 op0 = copy_to_mode_reg (SImode, op0);
17544 if (!REG_P (op1))
17545 op1 = copy_to_mode_reg (SImode, op1);
17546 emit_insn (gen_sse3_mwait (op0, op1));
17547 return 0;
17548
17549 case IX86_BUILTIN_LDDQU:
17550 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
17551 target, 1);
17552
17553 case IX86_BUILTIN_PALIGNR:
17554 case IX86_BUILTIN_PALIGNR128:
17555 if (fcode == IX86_BUILTIN_PALIGNR)
17556 {
17557 icode = CODE_FOR_ssse3_palignrdi;
17558 mode = DImode;
17559 }
17560 else
17561 {
17562 icode = CODE_FOR_ssse3_palignrti;
17563 mode = V2DImode;
17564 }
17565 arg0 = TREE_VALUE (arglist);
17566 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17567 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17568 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
17569 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
17570 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
17571 tmode = insn_data[icode].operand[0].mode;
17572 mode1 = insn_data[icode].operand[1].mode;
17573 mode2 = insn_data[icode].operand[2].mode;
17574 mode3 = insn_data[icode].operand[3].mode;
17575
17576 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17577 {
17578 op0 = copy_to_reg (op0);
17579 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
17580 }
17581 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17582 {
17583 op1 = copy_to_reg (op1);
17584 op1 = simplify_gen_subreg (mode2, op1, GET_MODE (op1), 0);
17585 }
17586 if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
17587 {
17588 error ("shift must be an immediate");
17589 return const0_rtx;
17590 }
17591 target = gen_reg_rtx (mode);
17592 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, mode, 0),
17593 op0, op1, op2);
17594 if (! pat)
17595 return 0;
17596 emit_insn (pat);
17597 return target;
17598
17599 case IX86_BUILTIN_VEC_INIT_V2SI:
17600 case IX86_BUILTIN_VEC_INIT_V4HI:
17601 case IX86_BUILTIN_VEC_INIT_V8QI:
17602 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
17603
17604 case IX86_BUILTIN_VEC_EXT_V2DF:
17605 case IX86_BUILTIN_VEC_EXT_V2DI:
17606 case IX86_BUILTIN_VEC_EXT_V4SF:
17607 case IX86_BUILTIN_VEC_EXT_V4SI:
17608 case IX86_BUILTIN_VEC_EXT_V8HI:
17609 case IX86_BUILTIN_VEC_EXT_V2SI:
17610 case IX86_BUILTIN_VEC_EXT_V4HI:
17611 return ix86_expand_vec_ext_builtin (arglist, target);
17612
17613 case IX86_BUILTIN_VEC_SET_V8HI:
17614 case IX86_BUILTIN_VEC_SET_V4HI:
17615 return ix86_expand_vec_set_builtin (arglist);
17616
17617 default:
17618 break;
17619 }
17620
17621 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17622 if (d->code == fcode)
17623 {
17624 /* Compares are treated specially. */
17625 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
17626 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
17627 || d->icode == CODE_FOR_sse2_maskcmpv2df3
17628 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
17629 return ix86_expand_sse_compare (d, arglist, target);
17630
17631 return ix86_expand_binop_builtin (d->icode, arglist, target);
17632 }
17633
17634 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17635 if (d->code == fcode)
17636 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
17637
17638 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
17639 if (d->code == fcode)
17640 return ix86_expand_sse_comi (d, arglist, target);
17641
17642 gcc_unreachable ();
17643 }
17644
17645 /* Returns a function decl for a vectorized version of the builtin function
17646 with builtin function code FN and the result vector type TYPE, or NULL_TREE
17647 if it is not available. */
17648
17649 static tree
17650 ix86_builtin_vectorized_function (enum built_in_function fn, tree type)
17651 {
17652 enum machine_mode el_mode;
17653 int n;
17654
17655 if (TREE_CODE (type) != VECTOR_TYPE)
17656 return NULL_TREE;
17657
17658 el_mode = TYPE_MODE (TREE_TYPE (type));
17659 n = TYPE_VECTOR_SUBPARTS (type);
17660
17661 switch (fn)
17662 {
17663 case BUILT_IN_SQRT:
17664 if (el_mode == DFmode && n == 2)
17665 return ix86_builtins[IX86_BUILTIN_SQRTPD];
17666 return NULL_TREE;
17667
17668 case BUILT_IN_SQRTF:
17669 if (el_mode == SFmode && n == 4)
17670 return ix86_builtins[IX86_BUILTIN_SQRTPS];
17671 return NULL_TREE;
17672
17673 default:
17674 ;
17675 }
17676
17677 return NULL_TREE;
17678 }
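
/* A sketch of when this hook fires (assuming -msse2 and the tree
   vectorizer): for

     void f (double *a, const double *b, int n)
     {
       int i;
       for (i = 0; i < n; i++)
         a[i] = __builtin_sqrt (b[i]);
     }

   the vectorizer asks for BUILT_IN_SQRT with a V2DF result type, and the
   IX86_BUILTIN_SQRTPD decl returned above lets it emit sqrtpd on whole
   vectors instead of scalar calls.  */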
17679
17680 /* Store OPERAND to the memory after reload is completed. This means
17681 that we can't easily use assign_stack_local. */
17682 rtx
17683 ix86_force_to_memory (enum machine_mode mode, rtx operand)
17684 {
17685 rtx result;
17686
17687 gcc_assert (reload_completed);
17688 if (TARGET_RED_ZONE)
17689 {
17690 result = gen_rtx_MEM (mode,
17691 gen_rtx_PLUS (Pmode,
17692 stack_pointer_rtx,
17693 GEN_INT (-RED_ZONE_SIZE)));
17694 emit_move_insn (result, operand);
17695 }
17696 else if (!TARGET_RED_ZONE && TARGET_64BIT)
17697 {
17698 switch (mode)
17699 {
17700 case HImode:
17701 case SImode:
17702 operand = gen_lowpart (DImode, operand);
17703 /* FALLTHRU */
17704 case DImode:
17705 emit_insn (
17706 gen_rtx_SET (VOIDmode,
17707 gen_rtx_MEM (DImode,
17708 gen_rtx_PRE_DEC (DImode,
17709 stack_pointer_rtx)),
17710 operand));
17711 break;
17712 default:
17713 gcc_unreachable ();
17714 }
17715 result = gen_rtx_MEM (mode, stack_pointer_rtx);
17716 }
17717 else
17718 {
17719 switch (mode)
17720 {
17721 case DImode:
17722 {
17723 rtx operands[2];
17724 split_di (&operand, 1, operands, operands + 1);
17725 emit_insn (
17726 gen_rtx_SET (VOIDmode,
17727 gen_rtx_MEM (SImode,
17728 gen_rtx_PRE_DEC (Pmode,
17729 stack_pointer_rtx)),
17730 operands[1]));
17731 emit_insn (
17732 gen_rtx_SET (VOIDmode,
17733 gen_rtx_MEM (SImode,
17734 gen_rtx_PRE_DEC (Pmode,
17735 stack_pointer_rtx)),
17736 operands[0]));
17737 }
17738 break;
17739 case HImode:
17740 /* Store HImode values as SImode. */
17741 operand = gen_lowpart (SImode, operand);
17742 /* FALLTHRU */
17743 case SImode:
17744 emit_insn (
17745 gen_rtx_SET (VOIDmode,
17746 gen_rtx_MEM (GET_MODE (operand),
17747 gen_rtx_PRE_DEC (SImode,
17748 stack_pointer_rtx)),
17749 operand));
17750 break;
17751 default:
17752 gcc_unreachable ();
17753 }
17754 result = gen_rtx_MEM (mode, stack_pointer_rtx);
17755 }
17756 return result;
17757 }
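
/* As a sketch of the 32-bit DImode case above: the operand is split into two
   SImode halves and pushed with two pre-decrement stores, roughly

     pushl	high_half
     pushl	low_half

   and the returned MEM addresses the DImode value now sitting at (%esp);
   ix86_free_from_memory below is the matching deallocation.  */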
17758
17759 /* Free operand from the memory. */
17760 void
17761 ix86_free_from_memory (enum machine_mode mode)
17762 {
17763 if (!TARGET_RED_ZONE)
17764 {
17765 int size;
17766
17767 if (mode == DImode || TARGET_64BIT)
17768 size = 8;
17769 else
17770 size = 4;
17771 /* Use LEA to deallocate stack space. In peephole2 it will be converted
17772 to a pop or add instruction if registers are available. */
17773 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
17774 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
17775 GEN_INT (size))));
17776 }
17777 }
17778
17779 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
17780 QImode must go into class Q_REGS.
17781 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
17782 movdf to do mem-to-mem moves through integer regs. */
17783 enum reg_class
17784 ix86_preferred_reload_class (rtx x, enum reg_class class)
17785 {
17786 enum machine_mode mode = GET_MODE (x);
17787
17788 /* We're only allowed to return a subclass of CLASS. Many of the
17789 following checks fail for NO_REGS, so eliminate that early. */
17790 if (class == NO_REGS)
17791 return NO_REGS;
17792
17793 /* All classes can load zeros. */
17794 if (x == CONST0_RTX (mode))
17795 return class;
17796
17797 /* Force constants into memory if we are loading a (nonzero) constant into
17798 an MMX or SSE register. This is because there are no MMX/SSE instructions
17799 to load from a constant. */
17800 if (CONSTANT_P (x)
17801 && (MAYBE_MMX_CLASS_P (class) || MAYBE_SSE_CLASS_P (class)))
17802 return NO_REGS;
17803
17804 /* Prefer SSE regs only, if we can use them for math. */
17805 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
17806 return SSE_CLASS_P (class) ? class : NO_REGS;
17807
17808 /* Floating-point constants need more complex checks. */
17809 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
17810 {
17811 /* General regs can load everything. */
17812 if (reg_class_subset_p (class, GENERAL_REGS))
17813 return class;
17814
17815 /* Floats can load 0 and 1 plus some others. Note that we eliminated
17816 zero above. We only want to wind up preferring 80387 registers if
17817 we plan on doing computation with them. */
17818 if (TARGET_80387
17819 && standard_80387_constant_p (x))
17820 {
17821 /* Limit class to non-sse. */
17822 if (class == FLOAT_SSE_REGS)
17823 return FLOAT_REGS;
17824 if (class == FP_TOP_SSE_REGS)
17825 return FP_TOP_REG;
17826 if (class == FP_SECOND_SSE_REGS)
17827 return FP_SECOND_REG;
17828 if (class == FLOAT_INT_REGS || class == FLOAT_REGS)
17829 return class;
17830 }
17831
17832 return NO_REGS;
17833 }
17834
17835 /* Generally when we see PLUS here, it's the function invariant
17836 (plus soft-fp const_int), which can only be computed into general
17837 regs. */
17838 if (GET_CODE (x) == PLUS)
17839 return reg_class_subset_p (class, GENERAL_REGS) ? class : NO_REGS;
17840
17841 /* QImode constants are easy to load, but non-constant QImode data
17842 must go into Q_REGS. */
17843 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
17844 {
17845 if (reg_class_subset_p (class, Q_REGS))
17846 return class;
17847 if (reg_class_subset_p (Q_REGS, class))
17848 return Q_REGS;
17849 return NO_REGS;
17850 }
17851
17852 return class;
17853 }
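
/* Two concrete cases of the checks above: reloading the DFmode constant 1.0
   into an SSE class yields NO_REGS, so the constant ends up in the constant
   pool and is loaded from memory, there being no SSE load-immediate; while
   reloading CONST0_RTX (DFmode) returns CLASS unchanged, since every unit
   can synthesize zero (xorps, fldz, xor).  */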
17854
17855 /* Discourage putting floating-point values in SSE registers unless
17856 SSE math is being used, and likewise for the 387 registers. */
17857 enum reg_class
17858 ix86_preferred_output_reload_class (rtx x, enum reg_class class)
17859 {
17860 enum machine_mode mode = GET_MODE (x);
17861
17862 /* Restrict the output reload class to the register bank that we are doing
17863 math on. If the class we would like to return is not a subset of CLASS,
17864 reject this alternative: if reload cannot do this, it will still use its choice. */
17865 mode = GET_MODE (x);
17866 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
17867 return MAYBE_SSE_CLASS_P (class) ? SSE_REGS : NO_REGS;
17868
17869 if (TARGET_80387 && SCALAR_FLOAT_MODE_P (mode))
17870 {
17871 if (class == FP_TOP_SSE_REGS)
17872 return FP_TOP_REG;
17873 else if (class == FP_SECOND_SSE_REGS)
17874 return FP_SECOND_REG;
17875 else
17876 return FLOAT_CLASS_P (class) ? class : NO_REGS;
17877 }
17878
17879 return class;
17880 }
17881
17882 /* If we are copying between general and FP registers, we need a memory
17883 location. The same is true for SSE and MMX registers.
17884
17885 The macro can't work reliably when one of the CLASSES is a class containing
17886 registers from multiple units (SSE, MMX, integer). We avoid this by never
17887 combining those units in a single alternative in the machine description.
17888 Ensure that this constraint holds to avoid unexpected surprises.
17889
17890 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
17891 enforce these sanity checks. */
17892
17893 int
17894 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
17895 enum machine_mode mode, int strict)
17896 {
17897 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
17898 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
17899 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
17900 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
17901 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
17902 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
17903 {
17904 gcc_assert (!strict);
17905 return true;
17906 }
17907
17908 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
17909 return true;
17910
17911 /* ??? This is a lie. We do have moves between mmx/general and between
17912 mmx/sse2. But by saying we need secondary memory we discourage the
17913 register allocator from using the mmx registers unless needed. */
17914 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
17915 return true;
17916
17917 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
17918 {
17919 /* SSE1 doesn't have any direct moves from other classes. */
17920 if (!TARGET_SSE2)
17921 return true;
17922
17923 /* If the target says that inter-unit moves are more expensive
17924 than moving through memory, then don't generate them. */
17925 if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
17926 return true;
17927
17928 /* Between SSE and general, we have moves no larger than word size. */
17929 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
17930 return true;
17931
17932 /* ??? For the cost of one register reformat penalty, we could use
17933 the same instructions to move SFmode and DFmode data, but the
17934 relevant move patterns don't support those alternatives. */
17935 if (mode == SFmode || mode == DFmode)
17936 return true;
17937 }
17938
17939 return false;
17940 }
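
/* Example of the SSE/integer case above: on ia32, moving a DImode value
   between SSE_REGS and GENERAL_REGS reports secondary memory, because
   GET_MODE_SIZE (DImode) exceeds UNITS_PER_WORD there; an SImode move on an
   SSE2 target with TARGET_INTER_UNIT_MOVES does not, and can use movd
   directly.  */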
17941
17942 /* Return true if the registers in CLASS cannot represent the change from
17943 modes FROM to TO. */
17944
17945 bool
17946 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
17947 enum reg_class class)
17948 {
17949 if (from == to)
17950 return false;
17951
17952 /* x87 registers can't do subreg at all, as all values are reformatted
17953 to extended precision. */
17954 if (MAYBE_FLOAT_CLASS_P (class))
17955 return true;
17956
17957 if (MAYBE_SSE_CLASS_P (class) || MAYBE_MMX_CLASS_P (class))
17958 {
17959 /* Vector registers do not support QI or HImode loads. If we don't
17960 disallow a change to these modes, reload will assume it's ok to
17961 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
17962 the vec_dupv4hi pattern. */
17963 if (GET_MODE_SIZE (from) < 4)
17964 return true;
17965
17966 /* Vector registers do not support subreg with nonzero offsets, which
17967 are otherwise valid for integer registers. Since we can't see
17968 whether we have a nonzero offset from here, prohibit all
17969 nonparadoxical subregs changing size. */
17970 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
17971 return true;
17972 }
17973
17974 return false;
17975 }
17976
17977 /* Return the cost of moving data from a register in class CLASS1 to
17978 one in class CLASS2.
17979
17980 It is not required that the cost always equal 2 when FROM is the same as TO;
17981 on some machines it is expensive to move between registers if they are not
17982 general registers. */
17983
17984 int
17985 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
17986 enum reg_class class2)
17987 {
17988 /* In case we require secondary memory, compute cost of the store followed
17989 by load. In order to avoid bad register allocation choices, we need
17990 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
17991
17992 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
17993 {
17994 int cost = 1;
17995
17996 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
17997 MEMORY_MOVE_COST (mode, class1, 1));
17998 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
17999 MEMORY_MOVE_COST (mode, class2, 1));
18000
18001 /* When copying from a general purpose register we may emit multiple
18002 stores followed by a single load, causing a memory size mismatch stall.
18003 Count this as an arbitrarily high cost of 20.  */
18004 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
18005 cost += 20;
18006
18007 /* In the case of FP/MMX moves, the registers actually overlap, and we
18008 have to switch modes in order to treat them differently. */
18009 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
18010 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
18011 cost += 20;
18012
18013 return cost;
18014 }
18015
18016 /* Moves between SSE/MMX and integer unit are expensive. */
18017 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
18018 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
18019 return ix86_cost->mmxsse_to_integer;
18020 if (MAYBE_FLOAT_CLASS_P (class1))
18021 return ix86_cost->fp_move;
18022 if (MAYBE_SSE_CLASS_P (class1))
18023 return ix86_cost->sse_move;
18024 if (MAYBE_MMX_CLASS_P (class1))
18025 return ix86_cost->mmx_move;
18026 return 2;
18027 }
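
/* A worked example with made-up cost-table values: for an SImode copy from
   SSE_REGS to GENERAL_REGS that needs secondary memory, the cost is
   1 + MAX (SSE load, SSE store) + MAX (integer load, integer store); with
   both maxima equal to 4 that is 1 + 4 + 4 = 9.  The two +20 penalties apply
   only when the source class spans more hard registers than the destination
   or when the overlapping FP and MMX registers are mixed.  */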
18028
18029 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
18030
18031 bool
18032 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
18033 {
18034 /* Only the flags register can hold CCmode values, and it can hold nothing else. */
18035 if (CC_REGNO_P (regno))
18036 return GET_MODE_CLASS (mode) == MODE_CC;
18037 if (GET_MODE_CLASS (mode) == MODE_CC
18038 || GET_MODE_CLASS (mode) == MODE_RANDOM
18039 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
18040 return 0;
18041 if (FP_REGNO_P (regno))
18042 return VALID_FP_MODE_P (mode);
18043 if (SSE_REGNO_P (regno))
18044 {
18045 /* We implement the move patterns for all vector modes into and
18046 out of SSE registers, even when no operation instructions
18047 are available. */
18048 return (VALID_SSE_REG_MODE (mode)
18049 || VALID_SSE2_REG_MODE (mode)
18050 || VALID_MMX_REG_MODE (mode)
18051 || VALID_MMX_REG_MODE_3DNOW (mode));
18052 }
18053 if (MMX_REGNO_P (regno))
18054 {
18055 /* We implement the move patterns for 3DNOW modes even in MMX mode,
18056 so if the register is available at all, then we can move data of
18057 the given mode into or out of it. */
18058 return (VALID_MMX_REG_MODE (mode)
18059 || VALID_MMX_REG_MODE_3DNOW (mode));
18060 }
18061
18062 if (mode == QImode)
18063 {
18064 /* Take care with QImode values - they can live in non-QI regs,
18065 but then they cause partial register stalls. */
18066 if (regno < 4 || TARGET_64BIT)
18067 return 1;
18068 if (!TARGET_PARTIAL_REG_STALL)
18069 return 1;
18070 return reload_in_progress || reload_completed;
18071 }
18072 /* We handle both integers and floats in the general purpose registers. */
18073 else if (VALID_INT_MODE_P (mode))
18074 return 1;
18075 else if (VALID_FP_MODE_P (mode))
18076 return 1;
18077 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
18078 on to use that value in smaller contexts, this can easily force a
18079 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
18080 supporting DImode, allow it. */
18081 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
18082 return 1;
18083
18084 return 0;
18085 }
18086
18087 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
18088 tieable integer mode. */
18089
18090 static bool
18091 ix86_tieable_integer_mode_p (enum machine_mode mode)
18092 {
18093 switch (mode)
18094 {
18095 case HImode:
18096 case SImode:
18097 return true;
18098
18099 case QImode:
18100 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
18101
18102 case DImode:
18103 return TARGET_64BIT;
18104
18105 default:
18106 return false;
18107 }
18108 }
18109
18110 /* Return true if MODE1 is accessible in a register that can hold MODE2
18111 without copying. That is, all register classes that can hold MODE2
18112 can also hold MODE1. */
18113
18114 bool
18115 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
18116 {
18117 if (mode1 == mode2)
18118 return true;
18119
18120 if (ix86_tieable_integer_mode_p (mode1)
18121 && ix86_tieable_integer_mode_p (mode2))
18122 return true;
18123
18124 /* MODE2 being XFmode implies fp stack or general regs, which means we
18125 can tie any smaller floating point modes to it. Note that we do not
18126 tie this with TFmode. */
18127 if (mode2 == XFmode)
18128 return mode1 == SFmode || mode1 == DFmode;
18129
18130 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
18131 that we can tie it with SFmode. */
18132 if (mode2 == DFmode)
18133 return mode1 == SFmode;
18134
18135 /* If MODE2 is only appropriate for an SSE register, then tie with
18136 any other mode acceptable to SSE registers. */
18137 if (GET_MODE_SIZE (mode2) >= 8
18138 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
18139 return ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1);
18140
18141 /* If MODE2 is appropriate for an MMX (or SSE) register, then tie
18142 with any other mode acceptable to MMX registers. */
18143 if (GET_MODE_SIZE (mode2) == 8
18144 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
18145 return ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1);
18146
18147 return false;
18148 }
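
/* Direct consequences of the rules above: ix86_modes_tieable_p (SFmode, XFmode)
   and (SFmode, DFmode) are true, so a pseudo holding an XFmode or DFmode
   value may also be accessed in SFmode without a copy; on ia32,
   (DImode, SImode) is false, since DImode is not a tieable integer mode
   there and none of the size-based cases apply.  */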
18149
18150 /* Return the cost of moving data of mode M between a
18151 register and memory. A value of 2 is the default; this cost is
18152 relative to those in `REGISTER_MOVE_COST'.
18153
18154 If moving between registers and memory is more expensive than
18155 between two registers, you should define this macro to express the
18156 relative cost.
18157
18158 Also model the increased cost of moving QImode values to and from
18159 registers outside the Q_REGS classes.
18160 */
18161 int
18162 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
18163 {
18164 if (FLOAT_CLASS_P (class))
18165 {
18166 int index;
18167 switch (mode)
18168 {
18169 case SFmode:
18170 index = 0;
18171 break;
18172 case DFmode:
18173 index = 1;
18174 break;
18175 case XFmode:
18176 index = 2;
18177 break;
18178 default:
18179 return 100;
18180 }
18181 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
18182 }
18183 if (SSE_CLASS_P (class))
18184 {
18185 int index;
18186 switch (GET_MODE_SIZE (mode))
18187 {
18188 case 4:
18189 index = 0;
18190 break;
18191 case 8:
18192 index = 1;
18193 break;
18194 case 16:
18195 index = 2;
18196 break;
18197 default:
18198 return 100;
18199 }
18200 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
18201 }
18202 if (MMX_CLASS_P (class))
18203 {
18204 int index;
18205 switch (GET_MODE_SIZE (mode))
18206 {
18207 case 4:
18208 index = 0;
18209 break;
18210 case 8:
18211 index = 1;
18212 break;
18213 default:
18214 return 100;
18215 }
18216 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
18217 }
18218 switch (GET_MODE_SIZE (mode))
18219 {
18220 case 1:
18221 if (in)
18222 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
18223 : ix86_cost->movzbl_load);
18224 else
18225 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
18226 : ix86_cost->int_store[0] + 4);
18227 break;
18228 case 2:
18229 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
18230 default:
18231 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
18232 if (mode == TFmode)
18233 mode = XFmode;
18234 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
18235 * (((int) GET_MODE_SIZE (mode)
18236 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
18237 }
18238 }
18239
18240 /* Compute a (partial) cost for rtx X. Return true if the complete
18241 cost has been computed, and false if subexpressions should be
18242 scanned. In either case, *TOTAL contains the cost result. */
18243
18244 static bool
18245 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
18246 {
18247 enum machine_mode mode = GET_MODE (x);
18248
18249 switch (code)
18250 {
18251 case CONST_INT:
18252 case CONST:
18253 case LABEL_REF:
18254 case SYMBOL_REF:
18255 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
18256 *total = 3;
18257 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
18258 *total = 2;
18259 else if (flag_pic && SYMBOLIC_CONST (x)
18260 && (!TARGET_64BIT
18261 || (GET_CODE (x) != LABEL_REF
18262 && (GET_CODE (x) != SYMBOL_REF
18263 || !SYMBOL_REF_LOCAL_P (x)))))
18264 *total = 1;
18265 else
18266 *total = 0;
18267 return true;
18268
18269 case CONST_DOUBLE:
18270 if (mode == VOIDmode)
18271 *total = 0;
18272 else
18273 switch (standard_80387_constant_p (x))
18274 {
18275 case 1: /* 0.0 */
18276 *total = 1;
18277 break;
18278 default: /* Other constants */
18279 *total = 2;
18280 break;
18281 case 0:
18282 case -1:
18283 /* Start with (MEM (SYMBOL_REF)), since that's where
18284 it'll probably end up. Add a penalty for size. */
18285 *total = (COSTS_N_INSNS (1)
18286 + (flag_pic != 0 && !TARGET_64BIT)
18287 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
18288 break;
18289 }
18290 return true;
18291
18292 case ZERO_EXTEND:
18293 /* Zero extension is often completely free on x86_64, so make
18294 it as cheap as possible. */
18295 if (TARGET_64BIT && mode == DImode
18296 && GET_MODE (XEXP (x, 0)) == SImode)
18297 *total = 1;
18298 else if (TARGET_ZERO_EXTEND_WITH_AND)
18299 *total = ix86_cost->add;
18300 else
18301 *total = ix86_cost->movzx;
18302 return false;
18303
18304 case SIGN_EXTEND:
18305 *total = ix86_cost->movsx;
18306 return false;
18307
18308 case ASHIFT:
18309 if (GET_CODE (XEXP (x, 1)) == CONST_INT
18310 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
18311 {
18312 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
18313 if (value == 1)
18314 {
18315 *total = ix86_cost->add;
18316 return false;
18317 }
18318 if ((value == 2 || value == 3)
18319 && ix86_cost->lea <= ix86_cost->shift_const)
18320 {
18321 *total = ix86_cost->lea;
18322 return false;
18323 }
18324 }
18325 /* FALLTHRU */
18326
18327 case ROTATE:
18328 case ASHIFTRT:
18329 case LSHIFTRT:
18330 case ROTATERT:
18331 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
18332 {
18333 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18334 {
18335 if (INTVAL (XEXP (x, 1)) > 32)
18336 *total = ix86_cost->shift_const + COSTS_N_INSNS (2);
18337 else
18338 *total = ix86_cost->shift_const * 2;
18339 }
18340 else
18341 {
18342 if (GET_CODE (XEXP (x, 1)) == AND)
18343 *total = ix86_cost->shift_var * 2;
18344 else
18345 *total = ix86_cost->shift_var * 6 + COSTS_N_INSNS (2);
18346 }
18347 }
18348 else
18349 {
18350 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18351 *total = ix86_cost->shift_const;
18352 else
18353 *total = ix86_cost->shift_var;
18354 }
18355 return false;
18356
18357 case MULT:
18358 if (FLOAT_MODE_P (mode))
18359 {
18360 *total = ix86_cost->fmul;
18361 return false;
18362 }
18363 else
18364 {
18365 rtx op0 = XEXP (x, 0);
18366 rtx op1 = XEXP (x, 1);
18367 int nbits;
18368 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18369 {
18370 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
18371 for (nbits = 0; value != 0; value &= value - 1)
18372 nbits++;
18373 }
18374 else
18375 /* This is arbitrary. */
18376 nbits = 7;
18377
18378 /* Compute costs correctly for widening multiplication. */
18379 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
18380 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
18381 == GET_MODE_SIZE (mode))
18382 {
18383 int is_mulwiden = 0;
18384 enum machine_mode inner_mode = GET_MODE (op0);
18385
18386 if (GET_CODE (op0) == GET_CODE (op1))
18387 is_mulwiden = 1, op1 = XEXP (op1, 0);
18388 else if (GET_CODE (op1) == CONST_INT)
18389 {
18390 if (GET_CODE (op0) == SIGN_EXTEND)
18391 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
18392 == INTVAL (op1);
18393 else
18394 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
18395 }
18396
18397 if (is_mulwiden)
18398 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
18399 }
18400
18401 *total = (ix86_cost->mult_init[MODE_INDEX (mode)]
18402 + nbits * ix86_cost->mult_bit
18403 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code));
18404
18405 return true;
18406 }
18407
18408 case DIV:
18409 case UDIV:
18410 case MOD:
18411 case UMOD:
18412 if (FLOAT_MODE_P (mode))
18413 *total = ix86_cost->fdiv;
18414 else
18415 *total = ix86_cost->divide[MODE_INDEX (mode)];
18416 return false;
18417
18418 case PLUS:
18419 if (FLOAT_MODE_P (mode))
18420 *total = ix86_cost->fadd;
18421 else if (GET_MODE_CLASS (mode) == MODE_INT
18422 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
18423 {
18424 if (GET_CODE (XEXP (x, 0)) == PLUS
18425 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
18426 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
18427 && CONSTANT_P (XEXP (x, 1)))
18428 {
18429 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
18430 if (val == 2 || val == 4 || val == 8)
18431 {
18432 *total = ix86_cost->lea;
18433 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
18434 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
18435 outer_code);
18436 *total += rtx_cost (XEXP (x, 1), outer_code);
18437 return true;
18438 }
18439 }
18440 else if (GET_CODE (XEXP (x, 0)) == MULT
18441 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
18442 {
18443 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
18444 if (val == 2 || val == 4 || val == 8)
18445 {
18446 *total = ix86_cost->lea;
18447 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
18448 *total += rtx_cost (XEXP (x, 1), outer_code);
18449 return true;
18450 }
18451 }
18452 else if (GET_CODE (XEXP (x, 0)) == PLUS)
18453 {
18454 *total = ix86_cost->lea;
18455 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
18456 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
18457 *total += rtx_cost (XEXP (x, 1), outer_code);
18458 return true;
18459 }
18460 }
18461 /* FALLTHRU */
18462
18463 case MINUS:
18464 if (FLOAT_MODE_P (mode))
18465 {
18466 *total = ix86_cost->fadd;
18467 return false;
18468 }
18469 /* FALLTHRU */
18470
18471 case AND:
18472 case IOR:
18473 case XOR:
18474 if (!TARGET_64BIT && mode == DImode)
18475 {
18476 *total = (ix86_cost->add * 2
18477 + (rtx_cost (XEXP (x, 0), outer_code)
18478 << (GET_MODE (XEXP (x, 0)) != DImode))
18479 + (rtx_cost (XEXP (x, 1), outer_code)
18480 << (GET_MODE (XEXP (x, 1)) != DImode)));
18481 return true;
18482 }
18483 /* FALLTHRU */
18484
18485 case NEG:
18486 if (FLOAT_MODE_P (mode))
18487 {
18488 *total = ix86_cost->fchs;
18489 return false;
18490 }
18491 /* FALLTHRU */
18492
18493 case NOT:
18494 if (!TARGET_64BIT && mode == DImode)
18495 *total = ix86_cost->add * 2;
18496 else
18497 *total = ix86_cost->add;
18498 return false;
18499
18500 case COMPARE:
18501 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
18502 && XEXP (XEXP (x, 0), 1) == const1_rtx
18503 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
18504 && XEXP (x, 1) == const0_rtx)
18505 {
18506 /* This kind of construct is implemented using test[bwl].
18507 Treat it as if we had an AND. */
18508 *total = (ix86_cost->add
18509 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
18510 + rtx_cost (const1_rtx, outer_code));
18511 return true;
18512 }
18513 return false;
18514
18515 case FLOAT_EXTEND:
18516 if (!TARGET_SSE_MATH
18517 || mode == XFmode
18518 || (mode == DFmode && !TARGET_SSE2))
18519 *total = 0;
18520 return false;
18521
18522 case ABS:
18523 if (FLOAT_MODE_P (mode))
18524 *total = ix86_cost->fabs;
18525 return false;
18526
18527 case SQRT:
18528 if (FLOAT_MODE_P (mode))
18529 *total = ix86_cost->fsqrt;
18530 return false;
18531
18532 case UNSPEC:
18533 if (XINT (x, 1) == UNSPEC_TP)
18534 *total = 0;
18535 return false;
18536
18537 default:
18538 return false;
18539 }
18540 }
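
/* Illustration of the PLUS/MULT handling above: an address-like expression
   such as (plus (plus (mult (reg) (const_int 4)) (reg)) (const_int 8)) is
   costed as a single lea plus the costs of its inner operands, rather than
   as a multiply and two additions; likewise (ashift (reg) (const_int 1)) is
   costed as a plain add.  */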
18541
18542 #if TARGET_MACHO
18543
18544 static int current_machopic_label_num;
18545
18546 /* Given a symbol name and its associated stub, write out the
18547 definition of the stub. */
18548
18549 void
18550 machopic_output_stub (FILE *file, const char *symb, const char *stub)
18551 {
18552 unsigned int length;
18553 char *binder_name, *symbol_name, lazy_ptr_name[32];
18554 int label = ++current_machopic_label_num;
18555
18556 /* For 64-bit we shouldn't get here. */
18557 gcc_assert (!TARGET_64BIT);
18558
18559 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
18560 symb = (*targetm.strip_name_encoding) (symb);
18561
18562 length = strlen (stub);
18563 binder_name = alloca (length + 32);
18564 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
18565
18566 length = strlen (symb);
18567 symbol_name = alloca (length + 32);
18568 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
18569
18570 sprintf (lazy_ptr_name, "L%d$lz", label);
18571
18572 if (MACHOPIC_PURE)
18573 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
18574 else
18575 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
18576
18577 fprintf (file, "%s:\n", stub);
18578 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
18579
18580 if (MACHOPIC_PURE)
18581 {
18582 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
18583 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
18584 fprintf (file, "\tjmp\t*%%edx\n");
18585 }
18586 else
18587 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
18588
18589 fprintf (file, "%s:\n", binder_name);
18590
18591 if (MACHOPIC_PURE)
18592 {
18593 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
18594 fprintf (file, "\tpushl\t%%eax\n");
18595 }
18596 else
18597 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
18598
18599 fprintf (file, "\tjmp\tdyld_stub_binding_helper\n");
18600
18601 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
18602 fprintf (file, "%s:\n", lazy_ptr_name);
18603 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
18604 fprintf (file, "\t.long %s\n", binder_name);
18605 }
18606
18607 void
18608 darwin_x86_file_end (void)
18609 {
18610 darwin_file_end ();
18611 ix86_file_end ();
18612 }
18613 #endif /* TARGET_MACHO */
18614
18615 /* Order the registers for the register allocator. */
18616
18617 void
18618 x86_order_regs_for_local_alloc (void)
18619 {
18620 int pos = 0;
18621 int i;
18622
18623 /* First allocate the local general purpose registers. */
18624 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
18625 if (GENERAL_REGNO_P (i) && call_used_regs[i])
18626 reg_alloc_order [pos++] = i;
18627
18628 /* Global general purpose registers. */
18629 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
18630 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
18631 reg_alloc_order [pos++] = i;
18632
18633 /* x87 registers come first in case we are doing FP math
18634 using them. */
18635 if (!TARGET_SSE_MATH)
18636 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
18637 reg_alloc_order [pos++] = i;
18638
18639 /* SSE registers. */
18640 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
18641 reg_alloc_order [pos++] = i;
18642 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
18643 reg_alloc_order [pos++] = i;
18644
18645 /* x87 registers. */
18646 if (TARGET_SSE_MATH)
18647 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
18648 reg_alloc_order [pos++] = i;
18649
18650 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
18651 reg_alloc_order [pos++] = i;
18652
18653 /* Initialize the rest of the array, as we do not allocate some registers
18654 at all. */
18655 while (pos < FIRST_PSEUDO_REGISTER)
18656 reg_alloc_order [pos++] = 0;
18657 }
18658
18659 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
18660 struct attribute_spec.handler. */
18661 static tree
18662 ix86_handle_struct_attribute (tree *node, tree name,
18663 tree args ATTRIBUTE_UNUSED,
18664 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
18665 {
18666 tree *type = NULL;
18667 if (DECL_P (*node))
18668 {
18669 if (TREE_CODE (*node) == TYPE_DECL)
18670 type = &TREE_TYPE (*node);
18671 }
18672 else
18673 type = node;
18674
18675 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
18676 || TREE_CODE (*type) == UNION_TYPE)))
18677 {
18678 warning (OPT_Wattributes, "%qs attribute ignored",
18679 IDENTIFIER_POINTER (name));
18680 *no_add_attrs = true;
18681 }
18682
18683 else if ((is_attribute_p ("ms_struct", name)
18684 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
18685 || ((is_attribute_p ("gcc_struct", name)
18686 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
18687 {
18688 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
18689 IDENTIFIER_POINTER (name));
18690 *no_add_attrs = true;
18691 }
18692
18693 return NULL_TREE;
18694 }
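
/* For reference, usage this handler accepts and rejects (a sketch):

     struct __attribute__ ((ms_struct)) ok { int a; char b; };
     typedef int bad __attribute__ ((ms_struct));
     struct __attribute__ ((ms_struct, gcc_struct)) both { int a; };

   the first is accepted; the second warns that the attribute is ignored,
   since only RECORD_TYPE and UNION_TYPE take it; the third warns that the
   incompatible attribute is ignored.  */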
18695
18696 static bool
18697 ix86_ms_bitfield_layout_p (tree record_type)
18698 {
18699 return (TARGET_MS_BITFIELD_LAYOUT &&
18700 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
18701 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
18702 }
18703
18704 /* Returns an expression indicating where the this parameter is
18705 located on entry to the FUNCTION. */
18706
18707 static rtx
18708 x86_this_parameter (tree function)
18709 {
18710 tree type = TREE_TYPE (function);
18711
18712 if (TARGET_64BIT)
18713 {
18714 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
18715 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
18716 }
18717
18718 if (ix86_function_regparm (type, function) > 0)
18719 {
18720 tree parm;
18721
18722 parm = TYPE_ARG_TYPES (type);
18723 /* Figure out whether or not the function has a variable number of
18724 arguments. */
18725 for (; parm; parm = TREE_CHAIN (parm))
18726 if (TREE_VALUE (parm) == void_type_node)
18727 break;
18728 /* If not, the this parameter is in the first argument. */
18729 if (parm)
18730 {
18731 int regno = 0;
18732 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
18733 regno = 2;
18734 return gen_rtx_REG (SImode, regno);
18735 }
18736 }
18737
18738 if (aggregate_value_p (TREE_TYPE (type), type))
18739 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
18740 else
18741 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
18742 }
18743
18744 /* Determine whether x86_output_mi_thunk can succeed. */
18745
18746 static bool
18747 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
18748 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
18749 HOST_WIDE_INT vcall_offset, tree function)
18750 {
18751 /* 64-bit can handle anything. */
18752 if (TARGET_64BIT)
18753 return true;
18754
18755 /* For 32-bit, everything's fine if we have one free register. */
18756 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
18757 return true;
18758
18759 /* Need a free register for vcall_offset. */
18760 if (vcall_offset)
18761 return false;
18762
18763 /* Need a free register for GOT references. */
18764 if (flag_pic && !(*targetm.binds_local_p) (function))
18765 return false;
18766
18767 /* Otherwise ok. */
18768 return true;
18769 }
18770
18771 /* Output the assembler code for a thunk function. THUNK_DECL is the
18772 declaration for the thunk function itself, FUNCTION is the decl for
18773 the target function. DELTA is an immediate constant offset to be
18774 added to THIS. If VCALL_OFFSET is nonzero, the word at
18775 *(*this + vcall_offset) should be added to THIS. */
18776
18777 static void
18778 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
18779 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
18780 HOST_WIDE_INT vcall_offset, tree function)
18781 {
18782 rtx xops[3];
18783 rtx this = x86_this_parameter (function);
18784 rtx this_reg, tmp;
18785
18786 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
18787 pull it in now and let DELTA benefit. */
18788 if (REG_P (this))
18789 this_reg = this;
18790 else if (vcall_offset)
18791 {
18792 /* Put the this parameter into %eax. */
18793 xops[0] = this;
18794 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
18795 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18796 }
18797 else
18798 this_reg = NULL_RTX;
18799
18800 /* Adjust the this parameter by a fixed constant. */
18801 if (delta)
18802 {
18803 xops[0] = GEN_INT (delta);
18804 xops[1] = this_reg ? this_reg : this;
18805 if (TARGET_64BIT)
18806 {
18807 if (!x86_64_general_operand (xops[0], DImode))
18808 {
18809 tmp = gen_rtx_REG (DImode, R10_REG);
18810 xops[1] = tmp;
18811 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
18812 xops[0] = tmp;
18813 xops[1] = this;
18814 }
18815 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
18816 }
18817 else
18818 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
18819 }
18820
18821 /* Adjust the this parameter by a value stored in the vtable. */
18822 if (vcall_offset)
18823 {
18824 if (TARGET_64BIT)
18825 tmp = gen_rtx_REG (DImode, R10_REG);
18826 else
18827 {
18828 int tmp_regno = 2 /* ECX */;
18829 if (lookup_attribute ("fastcall",
18830 TYPE_ATTRIBUTES (TREE_TYPE (function))))
18831 tmp_regno = 0 /* EAX */;
18832 tmp = gen_rtx_REG (SImode, tmp_regno);
18833 }
18834
18835 xops[0] = gen_rtx_MEM (Pmode, this_reg);
18836 xops[1] = tmp;
18837 if (TARGET_64BIT)
18838 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
18839 else
18840 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18841
18842 /* Adjust the this parameter. */
18843 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
18844 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
18845 {
18846 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
18847 xops[0] = GEN_INT (vcall_offset);
18848 xops[1] = tmp2;
18849 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
18850 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
18851 }
18852 xops[1] = this_reg;
18853 if (TARGET_64BIT)
18854 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
18855 else
18856 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
18857 }
18858
18859 /* If necessary, drop THIS back to its stack slot. */
18860 if (this_reg && this_reg != this)
18861 {
18862 xops[0] = this_reg;
18863 xops[1] = this;
18864 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18865 }
18866
18867 xops[0] = XEXP (DECL_RTL (function), 0);
18868 if (TARGET_64BIT)
18869 {
18870 if (!flag_pic || (*targetm.binds_local_p) (function))
18871 output_asm_insn ("jmp\t%P0", xops);
18872 else
18873 {
18874 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
18875 tmp = gen_rtx_CONST (Pmode, tmp);
18876 tmp = gen_rtx_MEM (QImode, tmp);
18877 xops[0] = tmp;
18878 output_asm_insn ("jmp\t%A0", xops);
18879 }
18880 }
18881 else
18882 {
18883 if (!flag_pic || (*targetm.binds_local_p) (function))
18884 output_asm_insn ("jmp\t%P0", xops);
18885 else
18886 #if TARGET_MACHO
18887 if (TARGET_MACHO)
18888 {
18889 rtx sym_ref = XEXP (DECL_RTL (function), 0);
18890 tmp = (gen_rtx_SYMBOL_REF
18891 (Pmode,
18892 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
18893 tmp = gen_rtx_MEM (QImode, tmp);
18894 xops[0] = tmp;
18895 output_asm_insn ("jmp\t%0", xops);
18896 }
18897 else
18898 #endif /* TARGET_MACHO */
18899 {
18900 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
18901 output_set_got (tmp, NULL_RTX);
18902
18903 xops[1] = tmp;
18904 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
18905 output_asm_insn ("jmp\t{*}%1", xops);
18906 }
18907 }
18908 }
18909
18910 static void
18911 x86_file_start (void)
18912 {
18913 default_file_start ();
18914 #if TARGET_MACHO
18915 darwin_file_start ();
18916 #endif
18917 if (X86_FILE_START_VERSION_DIRECTIVE)
18918 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
18919 if (X86_FILE_START_FLTUSED)
18920 fputs ("\t.global\t__fltused\n", asm_out_file);
18921 if (ix86_asm_dialect == ASM_INTEL)
18922 fputs ("\t.intel_syntax\n", asm_out_file);
18923 }
18924
18925 int
18926 x86_field_alignment (tree field, int computed)
18927 {
18928 enum machine_mode mode;
18929 tree type = TREE_TYPE (field);
18930
18931 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
18932 return computed;
18933 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
18934 ? get_inner_array_type (type) : type);
18935 if (mode == DFmode || mode == DCmode
18936 || GET_MODE_CLASS (mode) == MODE_INT
18937 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
18938 return MIN (32, computed);
18939 return computed;
18940 }
18941
18942 /* Output assembler code to FILE to increment profiler label # LABELNO
18943 for profiling a function entry. */
18944 void
18945 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
18946 {
18947 if (TARGET_64BIT)
18948 if (flag_pic)
18949 {
18950 #ifndef NO_PROFILE_COUNTERS
18951 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
18952 #endif
18953 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
18954 }
18955 else
18956 {
18957 #ifndef NO_PROFILE_COUNTERS
18958 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
18959 #endif
18960 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
18961 }
18962 else if (flag_pic)
18963 {
18964 #ifndef NO_PROFILE_COUNTERS
18965 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
18966 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
18967 #endif
18968 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
18969 }
18970 else
18971 {
18972 #ifndef NO_PROFILE_COUNTERS
18973 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
18974 PROFILE_COUNT_REGISTER);
18975 #endif
18976 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
18977 }
18978 }
18979
18980 /* We don't have exact information about the insn sizes, but we may assume
18981 quite safely that we are informed about all 1 byte insns and memory
18982 address sizes. This is enough to eliminate unnecessary padding in
18983 99% of cases. */
18984
18985 static int
18986 min_insn_size (rtx insn)
18987 {
18988 int l = 0;
18989
18990 if (!INSN_P (insn) || !active_insn_p (insn))
18991 return 0;
18992
18993 /* Discard alignments we have emitted, and jump instructions. */
18994 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
18995 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
18996 return 0;
18997 if (GET_CODE (insn) == JUMP_INSN
18998 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
18999 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
19000 return 0;
19001
19002 /* Important case - calls are always 5 bytes.
19003 It is common to have many calls in a row. */
19004 if (GET_CODE (insn) == CALL_INSN
19005 && symbolic_reference_mentioned_p (PATTERN (insn))
19006 && !SIBLING_CALL_P (insn))
19007 return 5;
19008 if (get_attr_length (insn) <= 1)
19009 return 1;
19010
19011 /* For normal instructions we may rely on the sizes of addresses
19012 and the presence of a symbol to require 4 bytes of encoding.
19013 This is not the case for jumps, where references are PC relative. */
19014 if (GET_CODE (insn) != JUMP_INSN)
19015 {
19016 l = get_attr_length_address (insn);
19017 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
19018 l = 4;
19019 }
19020 if (l)
19021 return 1+l;
19022 else
19023 return 2;
19024 }
19025
19026 /* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a
19027 16 byte window. */
19028
19029 static void
19030 ix86_avoid_jump_misspredicts (void)
19031 {
19032 rtx insn, start = get_insns ();
19033 int nbytes = 0, njumps = 0;
19034 int isjump = 0;
19035
19036 /* Look for all minimal intervals of instructions containing 4 jumps.
19037 The intervals are bounded by START and INSN.  NBYTES is the total
19038 size of the instructions in the interval, including INSN and not
19039 including START.  When NBYTES is smaller than 16 bytes, it is possible
19040 that the end of START and the end of INSN land in the same 16 byte window.
19041
19042 The smallest offset in the window at which INSN can start is the case
19043 where START ends at offset 0.  The offset of INSN is then
19044 NBYTES - sizeof (INSN).  We add a p2align to the 16 byte window with
19045 maxskip 17 - NBYTES + sizeof (INSN).  */
19046 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
19047 {
19048
19049 nbytes += min_insn_size (insn);
19050 if (dump_file)
19051 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
19052 INSN_UID (insn), min_insn_size (insn));
19053 if ((GET_CODE (insn) == JUMP_INSN
19054 && GET_CODE (PATTERN (insn)) != ADDR_VEC
19055 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
19056 || GET_CODE (insn) == CALL_INSN)
19057 njumps++;
19058 else
19059 continue;
19060
19061 while (njumps > 3)
19062 {
19063 start = NEXT_INSN (start);
19064 if ((GET_CODE (start) == JUMP_INSN
19065 && GET_CODE (PATTERN (start)) != ADDR_VEC
19066 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
19067 || GET_CODE (start) == CALL_INSN)
19068 njumps--, isjump = 1;
19069 else
19070 isjump = 0;
19071 nbytes -= min_insn_size (start);
19072 }
19073 gcc_assert (njumps >= 0);
19074 if (dump_file)
19075 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
19076 INSN_UID (start), INSN_UID (insn), nbytes);
19077
19078 if (njumps == 3 && isjump && nbytes < 16)
19079 {
19080 int padsize = 15 - nbytes + min_insn_size (insn);
19081
19082 if (dump_file)
19083 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
19084 INSN_UID (insn), padsize);
19085 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
19086 }
19087 }
19088 }
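
/* Illustrative arithmetic for the padding above (not part of the original
   source): if the interval has accumulated NBYTES = 11 and the current
   INSN is estimated at 2 bytes, the pass emits an align of
   15 - 11 + 2 = 6 bytes before INSN, which pushes the fourth jump past
   the 16 byte fetch window shared with the previous three.  */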
19089
19090 /* AMD Athlon works faster
19091 when RET is not the destination of a conditional jump or directly preceded
19092 by another jump instruction.  We avoid the penalty by inserting a NOP just
19093 before the RET instruction in such cases. */
19094 static void
19095 ix86_pad_returns (void)
19096 {
19097 edge e;
19098 edge_iterator ei;
19099
19100 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
19101 {
19102 basic_block bb = e->src;
19103 rtx ret = BB_END (bb);
19104 rtx prev;
19105 bool replace = false;
19106
19107 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
19108 || !maybe_hot_bb_p (bb))
19109 continue;
19110 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
19111 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
19112 break;
19113 if (prev && GET_CODE (prev) == CODE_LABEL)
19114 {
19115 edge e;
19116 edge_iterator ei;
19117
19118 FOR_EACH_EDGE (e, ei, bb->preds)
19119 if (EDGE_FREQUENCY (e) && e->src->index >= 0
19120 && !(e->flags & EDGE_FALLTHRU))
19121 replace = true;
19122 }
19123 if (!replace)
19124 {
19125 prev = prev_active_insn (ret);
19126 if (prev
19127 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
19128 || GET_CODE (prev) == CALL_INSN))
19129 replace = true;
19130 /* Empty functions get a branch mispredict even when the jump destination
19131 is not visible to us. */
19132 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
19133 replace = true;
19134 }
19135 if (replace)
19136 {
19137 emit_insn_before (gen_return_internal_long (), ret);
19138 delete_insn (ret);
19139 }
19140 }
19141 }
19142
19143 /* Implement machine specific optimizations.  We implement padding of returns
19144 for K8 CPUs and a pass to avoid 4 jumps in a single 16 byte window. */
19145 static void
19146 ix86_reorg (void)
19147 {
19148 if (TARGET_PAD_RETURNS && optimize && !optimize_size)
19149 ix86_pad_returns ();
19150 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
19151 ix86_avoid_jump_misspredicts ();
19152 }
19153
19154 /* Return nonzero when a QImode register that must be represented via a REX
19155 prefix is used. */
19156 bool
19157 x86_extended_QIreg_mentioned_p (rtx insn)
19158 {
19159 int i;
19160 extract_insn_cached (insn);
19161 for (i = 0; i < recog_data.n_operands; i++)
19162 if (REG_P (recog_data.operand[i])
19163 && REGNO (recog_data.operand[i]) >= 4)
19164 return true;
19165 return false;
19166 }
19167
19168 /* Return nonzero when P points to a register encoded via a REX prefix.
19169 Called via for_each_rtx. */
19170 static int
19171 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
19172 {
19173 unsigned int regno;
19174 if (!REG_P (*p))
19175 return 0;
19176 regno = REGNO (*p);
19177 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
19178 }
19179
19180 /* Return true when INSN mentions a register that must be encoded using a
19181 REX prefix. */
19182 bool
19183 x86_extended_reg_mentioned_p (rtx insn)
19184 {
19185 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
19186 }
19187
19188 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
19189 optabs would emit if we didn't have TFmode patterns. */
19190
19191 void
19192 x86_emit_floatuns (rtx operands[2])
19193 {
19194 rtx neglab, donelab, i0, i1, f0, in, out;
19195 enum machine_mode mode, inmode;
19196
19197 inmode = GET_MODE (operands[1]);
19198 gcc_assert (inmode == SImode || inmode == DImode);
19199
19200 out = operands[0];
19201 in = force_reg (inmode, operands[1]);
19202 mode = GET_MODE (out);
19203 neglab = gen_label_rtx ();
19204 donelab = gen_label_rtx ();
19205 i1 = gen_reg_rtx (Pmode);
19206 f0 = gen_reg_rtx (mode);
19207
19208 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
19209
19210 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
19211 emit_jump_insn (gen_jump (donelab));
19212 emit_barrier ();
19213
19214 emit_label (neglab);
19215
19216 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
19217 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
19218 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
19219 expand_float (f0, i0, 0);
19220 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
19221
19222 emit_label (donelab);
19223 }
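
/* A rough C sketch of the sequence emitted above (illustrative only, not
   part of the original source).  When the sign bit of the unsigned input
   is set, the value is halved with its low bit folded back in, converted
   with a signed conversion, and then doubled.  */
#if 0
static double
floatuns_sketch (unsigned long in)
{
  if ((long) in >= 0)
    return (double) (long) in;		/* a signed conversion suffices */
  /* Keep the discarded low bit IORed back in so that doubling the
     converted half rounds the same way a direct conversion would.  */
  unsigned long i0 = (in >> 1) | (in & 1);
  double f0 = (double) (long) i0;
  return f0 + f0;
}
#endif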
19224 \f
19225 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19226 with all elements equal to VAR. Return true if successful. */
19227
19228 static bool
19229 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
19230 rtx target, rtx val)
19231 {
19232 enum machine_mode smode, wsmode, wvmode;
19233 rtx x;
19234
19235 switch (mode)
19236 {
19237 case V2SImode:
19238 case V2SFmode:
19239 if (!mmx_ok)
19240 return false;
19241 /* FALLTHRU */
19242
19243 case V2DFmode:
19244 case V2DImode:
19245 case V4SFmode:
19246 case V4SImode:
19247 val = force_reg (GET_MODE_INNER (mode), val);
19248 x = gen_rtx_VEC_DUPLICATE (mode, val);
19249 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19250 return true;
19251
19252 case V4HImode:
19253 if (!mmx_ok)
19254 return false;
19255 if (TARGET_SSE || TARGET_3DNOW_A)
19256 {
19257 val = gen_lowpart (SImode, val);
19258 x = gen_rtx_TRUNCATE (HImode, val);
19259 x = gen_rtx_VEC_DUPLICATE (mode, x);
19260 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19261 return true;
19262 }
19263 else
19264 {
19265 smode = HImode;
19266 wsmode = SImode;
19267 wvmode = V2SImode;
19268 goto widen;
19269 }
19270
19271 case V8QImode:
19272 if (!mmx_ok)
19273 return false;
19274 smode = QImode;
19275 wsmode = HImode;
19276 wvmode = V4HImode;
19277 goto widen;
19278 case V8HImode:
19279 if (TARGET_SSE2)
19280 {
19281 rtx tmp1, tmp2;
19282 /* Extend HImode to SImode using a paradoxical SUBREG. */
19283 tmp1 = gen_reg_rtx (SImode);
19284 emit_move_insn (tmp1, gen_lowpart (SImode, val));
19285 /* Insert the SImode value as low element of V4SImode vector. */
19286 tmp2 = gen_reg_rtx (V4SImode);
19287 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
19288 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
19289 CONST0_RTX (V4SImode),
19290 const1_rtx);
19291 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
19292 /* Cast the V4SImode vector back to a V8HImode vector. */
19293 tmp1 = gen_reg_rtx (V8HImode);
19294 emit_move_insn (tmp1, gen_lowpart (V8HImode, tmp2));
19295 /* Duplicate the low short through the whole low SImode word. */
19296 emit_insn (gen_sse2_punpcklwd (tmp1, tmp1, tmp1));
19297 /* Cast the V8HImode vector back to a V4SImode vector. */
19298 tmp2 = gen_reg_rtx (V4SImode);
19299 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
19300 /* Replicate the low element of the V4SImode vector. */
19301 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
19302 /* Cast the V4SImode vector back to V8HImode, and store in target. */
19303 emit_move_insn (target, gen_lowpart (V8HImode, tmp2));
19304 return true;
19305 }
19306 smode = HImode;
19307 wsmode = SImode;
19308 wvmode = V4SImode;
19309 goto widen;
19310 case V16QImode:
19311 if (TARGET_SSE2)
19312 {
19313 rtx tmp1, tmp2;
19314 /* Extend QImode to SImode using a paradoxical SUBREG. */
19315 tmp1 = gen_reg_rtx (SImode);
19316 emit_move_insn (tmp1, gen_lowpart (SImode, val));
19317 /* Insert the SImode value as low element of V4SImode vector. */
19318 tmp2 = gen_reg_rtx (V4SImode);
19319 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
19320 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
19321 CONST0_RTX (V4SImode),
19322 const1_rtx);
19323 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
19324 /* Cast the V4SImode vector back to a V16QImode vector. */
19325 tmp1 = gen_reg_rtx (V16QImode);
19326 emit_move_insn (tmp1, gen_lowpart (V16QImode, tmp2));
19327 /* Duplicate the low byte through the whole low SImode word. */
19328 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
19329 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
19330 /* Cast the V16QImode vector back to a V4SImode vector. */
19331 tmp2 = gen_reg_rtx (V4SImode);
19332 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
19333 /* Replicate the low element of the V4SImode vector. */
19334 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
19335 /* Cast the V4SImode vector back to V16QImode, and store in target. */
19336 emit_move_insn (target, gen_lowpart (V16QImode, tmp2));
19337 return true;
19338 }
19339 smode = QImode;
19340 wsmode = HImode;
19341 wvmode = V8HImode;
19342 goto widen;
19343 widen:
19344 /* Replicate the value once into the next wider mode and recurse. */
19345 val = convert_modes (wsmode, smode, val, true);
19346 x = expand_simple_binop (wsmode, ASHIFT, val,
19347 GEN_INT (GET_MODE_BITSIZE (smode)),
19348 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19349 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
19350
19351 x = gen_reg_rtx (wvmode);
19352 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
19353 gcc_unreachable ();
19354 emit_move_insn (target, gen_lowpart (mode, x));
19355 return true;
19356
19357 default:
19358 return false;
19359 }
19360 }
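
/* Worked example for the "widen" path above (illustrative, not from the
   original source): broadcasting the QImode value 0xAB into V8QImode
   first builds the HImode value (0xAB << 8) | 0xAB = 0xABAB, then
   recurses to duplicate that HImode value across V4HImode, and finally
   views the V4HImode result as the requested V8QImode vector through a
   lowpart subreg.  */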
19361
19362 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19363 whose ONE_VAR element is VAR, and other elements are zero. Return true
19364 if successful. */
19365
19366 static bool
19367 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
19368 rtx target, rtx var, int one_var)
19369 {
19370 enum machine_mode vsimode;
19371 rtx new_target;
19372 rtx x, tmp;
19373
19374 switch (mode)
19375 {
19376 case V2SFmode:
19377 case V2SImode:
19378 if (!mmx_ok)
19379 return false;
19380 /* FALLTHRU */
19381
19382 case V2DFmode:
19383 case V2DImode:
19384 if (one_var != 0)
19385 return false;
19386 var = force_reg (GET_MODE_INNER (mode), var);
19387 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
19388 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19389 return true;
19390
19391 case V4SFmode:
19392 case V4SImode:
19393 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
19394 new_target = gen_reg_rtx (mode);
19395 else
19396 new_target = target;
19397 var = force_reg (GET_MODE_INNER (mode), var);
19398 x = gen_rtx_VEC_DUPLICATE (mode, var);
19399 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
19400 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
19401 if (one_var != 0)
19402 {
19403 /* We need to shuffle the value to the correct position, so
19404 create a new pseudo to store the intermediate result. */
19405
19406 /* With SSE2, we can use the integer shuffle insns. */
19407 if (mode != V4SFmode && TARGET_SSE2)
19408 {
19409 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
19410 GEN_INT (1),
19411 GEN_INT (one_var == 1 ? 0 : 1),
19412 GEN_INT (one_var == 2 ? 0 : 1),
19413 GEN_INT (one_var == 3 ? 0 : 1)));
19414 if (target != new_target)
19415 emit_move_insn (target, new_target);
19416 return true;
19417 }
19418
19419 /* Otherwise convert the intermediate result to V4SFmode and
19420 use the SSE1 shuffle instructions. */
19421 if (mode != V4SFmode)
19422 {
19423 tmp = gen_reg_rtx (V4SFmode);
19424 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
19425 }
19426 else
19427 tmp = new_target;
19428
19429 emit_insn (gen_sse_shufps_1 (tmp, tmp, tmp,
19430 GEN_INT (1),
19431 GEN_INT (one_var == 1 ? 0 : 1),
19432 GEN_INT (one_var == 2 ? 0+4 : 1+4),
19433 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
19434
19435 if (mode != V4SFmode)
19436 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
19437 else if (tmp != target)
19438 emit_move_insn (target, tmp);
19439 }
19440 else if (target != new_target)
19441 emit_move_insn (target, new_target);
19442 return true;
19443
19444 case V8HImode:
19445 case V16QImode:
19446 vsimode = V4SImode;
19447 goto widen;
19448 case V4HImode:
19449 case V8QImode:
19450 if (!mmx_ok)
19451 return false;
19452 vsimode = V2SImode;
19453 goto widen;
19454 widen:
19455 if (one_var != 0)
19456 return false;
19457
19458 /* Zero extend the variable element to SImode and recurse. */
19459 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
19460
19461 x = gen_reg_rtx (vsimode);
19462 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
19463 var, one_var))
19464 gcc_unreachable ();
19465
19466 emit_move_insn (target, gen_lowpart (mode, x));
19467 return true;
19468
19469 default:
19470 return false;
19471 }
19472 }
19473
19474 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19475 consisting of the values in VALS. It is known that all elements
19476 except ONE_VAR are constants. Return true if successful. */
19477
19478 static bool
19479 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
19480 rtx target, rtx vals, int one_var)
19481 {
19482 rtx var = XVECEXP (vals, 0, one_var);
19483 enum machine_mode wmode;
19484 rtx const_vec, x;
19485
19486 const_vec = copy_rtx (vals);
19487 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
19488 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
19489
19490 switch (mode)
19491 {
19492 case V2DFmode:
19493 case V2DImode:
19494 case V2SFmode:
19495 case V2SImode:
19496 /* For the two element vectors, it's just as easy to use
19497 the general case. */
19498 return false;
19499
19500 case V4SFmode:
19501 case V4SImode:
19502 case V8HImode:
19503 case V4HImode:
19504 break;
19505
19506 case V16QImode:
19507 wmode = V8HImode;
19508 goto widen;
19509 case V8QImode:
19510 wmode = V4HImode;
19511 goto widen;
19512 widen:
19513 /* There's no way to set one QImode entry easily. Combine
19514 the variable value with its adjacent constant value, and
19515 promote to an HImode set. */
19516 x = XVECEXP (vals, 0, one_var ^ 1);
19517 if (one_var & 1)
19518 {
19519 var = convert_modes (HImode, QImode, var, true);
19520 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
19521 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19522 x = GEN_INT (INTVAL (x) & 0xff);
19523 }
19524 else
19525 {
19526 var = convert_modes (HImode, QImode, var, true);
19527 x = gen_int_mode (INTVAL (x) << 8, HImode);
19528 }
19529 if (x != const0_rtx)
19530 var = expand_simple_binop (HImode, IOR, var, x, var,
19531 1, OPTAB_LIB_WIDEN);
19532
19533 x = gen_reg_rtx (wmode);
19534 emit_move_insn (x, gen_lowpart (wmode, const_vec));
19535 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
19536
19537 emit_move_insn (target, gen_lowpart (mode, x));
19538 return true;
19539
19540 default:
19541 return false;
19542 }
19543
19544 emit_move_insn (target, const_vec);
19545 ix86_expand_vector_set (mmx_ok, target, var, one_var);
19546 return true;
19547 }
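
/* Worked example for the QImode pairing above (illustrative, not from the
   original source): for V8QImode with the variable element at index 3,
   the variable byte is zero extended to HImode and shifted left by 8, the
   adjacent constant at index 2 is masked to its low 8 bits and IORed in,
   and the combined HImode value is then inserted at element 3 >> 1 = 1 of
   the V4HImode view of the constant vector.  */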
19548
19549 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
19550 all values variable, and none identical. */
19551
19552 static void
19553 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
19554 rtx target, rtx vals)
19555 {
19556 enum machine_mode half_mode = GET_MODE_INNER (mode);
19557 rtx op0 = NULL, op1 = NULL;
19558 bool use_vec_concat = false;
19559
19560 switch (mode)
19561 {
19562 case V2SFmode:
19563 case V2SImode:
19564 if (!mmx_ok && !TARGET_SSE)
19565 break;
19566 /* FALLTHRU */
19567
19568 case V2DFmode:
19569 case V2DImode:
19570 /* For the two element vectors, we always implement VEC_CONCAT. */
19571 op0 = XVECEXP (vals, 0, 0);
19572 op1 = XVECEXP (vals, 0, 1);
19573 use_vec_concat = true;
19574 break;
19575
19576 case V4SFmode:
19577 half_mode = V2SFmode;
19578 goto half;
19579 case V4SImode:
19580 half_mode = V2SImode;
19581 goto half;
19582 half:
19583 {
19584 rtvec v;
19585
19586 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
19587 Recurse to load the two halves. */
19588
19589 op0 = gen_reg_rtx (half_mode);
19590 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
19591 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
19592
19593 op1 = gen_reg_rtx (half_mode);
19594 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
19595 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
19596
19597 use_vec_concat = true;
19598 }
19599 break;
19600
19601 case V8HImode:
19602 case V16QImode:
19603 case V4HImode:
19604 case V8QImode:
19605 break;
19606
19607 default:
19608 gcc_unreachable ();
19609 }
19610
19611 if (use_vec_concat)
19612 {
19613 if (!register_operand (op0, half_mode))
19614 op0 = force_reg (half_mode, op0);
19615 if (!register_operand (op1, half_mode))
19616 op1 = force_reg (half_mode, op1);
19617
19618 emit_insn (gen_rtx_SET (VOIDmode, target,
19619 gen_rtx_VEC_CONCAT (mode, op0, op1)));
19620 }
19621 else
19622 {
19623 int i, j, n_elts, n_words, n_elt_per_word;
19624 enum machine_mode inner_mode;
19625 rtx words[4], shift;
19626
19627 inner_mode = GET_MODE_INNER (mode);
19628 n_elts = GET_MODE_NUNITS (mode);
19629 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
19630 n_elt_per_word = n_elts / n_words;
19631 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
19632
19633 for (i = 0; i < n_words; ++i)
19634 {
19635 rtx word = NULL_RTX;
19636
19637 for (j = 0; j < n_elt_per_word; ++j)
19638 {
19639 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
19640 elt = convert_modes (word_mode, inner_mode, elt, true);
19641
19642 if (j == 0)
19643 word = elt;
19644 else
19645 {
19646 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
19647 word, 1, OPTAB_LIB_WIDEN);
19648 word = expand_simple_binop (word_mode, IOR, word, elt,
19649 word, 1, OPTAB_LIB_WIDEN);
19650 }
19651 }
19652
19653 words[i] = word;
19654 }
19655
19656 if (n_words == 1)
19657 emit_move_insn (target, gen_lowpart (mode, words[0]));
19658 else if (n_words == 2)
19659 {
19660 rtx tmp = gen_reg_rtx (mode);
19661 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
19662 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
19663 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
19664 emit_move_insn (target, tmp);
19665 }
19666 else if (n_words == 4)
19667 {
19668 rtx tmp = gen_reg_rtx (V4SImode);
19669 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
19670 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
19671 emit_move_insn (target, gen_lowpart (mode, tmp));
19672 }
19673 else
19674 gcc_unreachable ();
19675 }
19676 }
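
/* Worked example for the word building loop above (illustrative, not from
   the original source): initializing V8HImode on a 32-bit target gives
   n_words = 4 and n_elt_per_word = 2, and each SImode word is assembled
   as (elt[2*i + 1] << 16) | elt[2*i]; the four words are then handed back
   to this function as a V4SImode initialization and the result is viewed
   as V8HImode.  */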
19677
19678 /* Initialize vector TARGET via VALS. Suppress the use of MMX
19679 instructions unless MMX_OK is true. */
19680
19681 void
19682 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
19683 {
19684 enum machine_mode mode = GET_MODE (target);
19685 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19686 int n_elts = GET_MODE_NUNITS (mode);
19687 int n_var = 0, one_var = -1;
19688 bool all_same = true, all_const_zero = true;
19689 int i;
19690 rtx x;
19691
19692 for (i = 0; i < n_elts; ++i)
19693 {
19694 x = XVECEXP (vals, 0, i);
19695 if (!CONSTANT_P (x))
19696 n_var++, one_var = i;
19697 else if (x != CONST0_RTX (inner_mode))
19698 all_const_zero = false;
19699 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
19700 all_same = false;
19701 }
19702
19703 /* Constants are best loaded from the constant pool. */
19704 if (n_var == 0)
19705 {
19706 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
19707 return;
19708 }
19709
19710 /* If all values are identical, broadcast the value. */
19711 if (all_same
19712 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
19713 XVECEXP (vals, 0, 0)))
19714 return;
19715
19716 /* Values where only one field is non-constant are best loaded from
19717 the pool and overwritten via move later. */
19718 if (n_var == 1)
19719 {
19720 if (all_const_zero
19721 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
19722 XVECEXP (vals, 0, one_var),
19723 one_var))
19724 return;
19725
19726 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
19727 return;
19728 }
19729
19730 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
19731 }
19732
19733 void
19734 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
19735 {
19736 enum machine_mode mode = GET_MODE (target);
19737 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19738 bool use_vec_merge = false;
19739 rtx tmp;
19740
19741 switch (mode)
19742 {
19743 case V2SFmode:
19744 case V2SImode:
19745 if (mmx_ok)
19746 {
19747 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
19748 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
19749 if (elt == 0)
19750 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
19751 else
19752 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
19753 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19754 return;
19755 }
19756 break;
19757
19758 case V2DFmode:
19759 case V2DImode:
19760 {
19761 rtx op0, op1;
19762
19763 /* For the two element vectors, we implement a VEC_CONCAT with
19764 the extraction of the other element. */
19765
19766 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
19767 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
19768
19769 if (elt == 0)
19770 op0 = val, op1 = tmp;
19771 else
19772 op0 = tmp, op1 = val;
19773
19774 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
19775 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19776 }
19777 return;
19778
19779 case V4SFmode:
19780 switch (elt)
19781 {
19782 case 0:
19783 use_vec_merge = true;
19784 break;
19785
19786 case 1:
19787 /* tmp = target = A B C D */
19788 tmp = copy_to_reg (target);
19789 /* target = A A B B */
19790 emit_insn (gen_sse_unpcklps (target, target, target));
19791 /* target = X A B B */
19792 ix86_expand_vector_set (false, target, val, 0);
19793 /* target = A X C D */
19794 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19795 GEN_INT (1), GEN_INT (0),
19796 GEN_INT (2+4), GEN_INT (3+4)));
19797 return;
19798
19799 case 2:
19800 /* tmp = target = A B C D */
19801 tmp = copy_to_reg (target);
19802 /* tmp = X B C D */
19803 ix86_expand_vector_set (false, tmp, val, 0);
19804 /* target = A B X D */
19805 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19806 GEN_INT (0), GEN_INT (1),
19807 GEN_INT (0+4), GEN_INT (3+4)));
19808 return;
19809
19810 case 3:
19811 /* tmp = target = A B C D */
19812 tmp = copy_to_reg (target);
19813 /* tmp = X B C D */
19814 ix86_expand_vector_set (false, tmp, val, 0);
19815 /* target = A B C X */
19816 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19817 GEN_INT (0), GEN_INT (1),
19818 GEN_INT (2+4), GEN_INT (0+4)));
19819 return;
19820
19821 default:
19822 gcc_unreachable ();
19823 }
19824 break;
19825
19826 case V4SImode:
19827 /* Element 0 handled by vec_merge below. */
19828 if (elt == 0)
19829 {
19830 use_vec_merge = true;
19831 break;
19832 }
19833
19834 if (TARGET_SSE2)
19835 {
19836 /* With SSE2, use integer shuffles to swap element 0 and ELT,
19837 store into element 0, then shuffle them back. */
19838
19839 rtx order[4];
19840
19841 order[0] = GEN_INT (elt);
19842 order[1] = const1_rtx;
19843 order[2] = const2_rtx;
19844 order[3] = GEN_INT (3);
19845 order[elt] = const0_rtx;
19846
19847 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
19848 order[1], order[2], order[3]));
19849
19850 ix86_expand_vector_set (false, target, val, 0);
19851
19852 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
19853 order[1], order[2], order[3]));
19854 }
19855 else
19856 {
19857 /* For SSE1, we have to reuse the V4SF code. */
19858 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
19859 gen_lowpart (SFmode, val), elt);
19860 }
19861 return;
19862
19863 case V8HImode:
19864 use_vec_merge = TARGET_SSE2;
19865 break;
19866 case V4HImode:
19867 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
19868 break;
19869
19870 case V16QImode:
19871 case V8QImode:
19872 default:
19873 break;
19874 }
19875
19876 if (use_vec_merge)
19877 {
19878 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
19879 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
19880 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19881 }
19882 else
19883 {
19884 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
19885
19886 emit_move_insn (mem, target);
19887
19888 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
19889 emit_move_insn (tmp, val);
19890
19891 emit_move_insn (target, mem);
19892 }
19893 }
19894
19895 void
19896 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
19897 {
19898 enum machine_mode mode = GET_MODE (vec);
19899 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19900 bool use_vec_extr = false;
19901 rtx tmp;
19902
19903 switch (mode)
19904 {
19905 case V2SImode:
19906 case V2SFmode:
19907 if (!mmx_ok)
19908 break;
19909 /* FALLTHRU */
19910
19911 case V2DFmode:
19912 case V2DImode:
19913 use_vec_extr = true;
19914 break;
19915
19916 case V4SFmode:
19917 switch (elt)
19918 {
19919 case 0:
19920 tmp = vec;
19921 break;
19922
19923 case 1:
19924 case 3:
19925 tmp = gen_reg_rtx (mode);
19926 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
19927 GEN_INT (elt), GEN_INT (elt),
19928 GEN_INT (elt+4), GEN_INT (elt+4)));
19929 break;
19930
19931 case 2:
19932 tmp = gen_reg_rtx (mode);
19933 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
19934 break;
19935
19936 default:
19937 gcc_unreachable ();
19938 }
19939 vec = tmp;
19940 use_vec_extr = true;
19941 elt = 0;
19942 break;
19943
19944 case V4SImode:
19945 if (TARGET_SSE2)
19946 {
19947 switch (elt)
19948 {
19949 case 0:
19950 tmp = vec;
19951 break;
19952
19953 case 1:
19954 case 3:
19955 tmp = gen_reg_rtx (mode);
19956 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
19957 GEN_INT (elt), GEN_INT (elt),
19958 GEN_INT (elt), GEN_INT (elt)));
19959 break;
19960
19961 case 2:
19962 tmp = gen_reg_rtx (mode);
19963 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
19964 break;
19965
19966 default:
19967 gcc_unreachable ();
19968 }
19969 vec = tmp;
19970 use_vec_extr = true;
19971 elt = 0;
19972 }
19973 else
19974 {
19975 /* For SSE1, we have to reuse the V4SF code. */
19976 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
19977 gen_lowpart (V4SFmode, vec), elt);
19978 return;
19979 }
19980 break;
19981
19982 case V8HImode:
19983 use_vec_extr = TARGET_SSE2;
19984 break;
19985 case V4HImode:
19986 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
19987 break;
19988
19989 case V16QImode:
19990 case V8QImode:
19991 /* ??? Could extract the appropriate HImode element and shift. */
19992 default:
19993 break;
19994 }
19995
19996 if (use_vec_extr)
19997 {
19998 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
19999 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
20000
20001 /* Let the rtl optimizers know about the zero extension performed. */
20002 if (inner_mode == HImode)
20003 {
20004 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
20005 target = gen_lowpart (SImode, target);
20006 }
20007
20008 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
20009 }
20010 else
20011 {
20012 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
20013
20014 emit_move_insn (mem, vec);
20015
20016 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
20017 emit_move_insn (target, tmp);
20018 }
20019 }
20020
20021 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
20022 pattern to reduce; DEST is the destination; IN is the input vector. */
20023
20024 void
20025 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
20026 {
20027 rtx tmp1, tmp2, tmp3;
20028
20029 tmp1 = gen_reg_rtx (V4SFmode);
20030 tmp2 = gen_reg_rtx (V4SFmode);
20031 tmp3 = gen_reg_rtx (V4SFmode);
20032
20033 emit_insn (gen_sse_movhlps (tmp1, in, in));
20034 emit_insn (fn (tmp2, tmp1, in));
20035
20036 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
20037 GEN_INT (1), GEN_INT (1),
20038 GEN_INT (1+4), GEN_INT (1+4)));
20039 emit_insn (fn (dest, tmp2, tmp3));
20040 }
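
/* Data flow of the reduction above for IN = { a, b, c, d } (illustrative,
   not from the original source; "op" stands for the FN binary operation):
   movhlps gives tmp1 = { c, d, c, d }, the first FN gives
   tmp2 = { a op c, b op d, ... }, the shufps broadcasts element 1 so that
   tmp3[0] = b op d, and the final FN leaves (a op c) op (b op d) in
   element 0 of DEST.  */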
20041 \f
20042 /* Target hook for scalar_mode_supported_p. */
20043 static bool
20044 ix86_scalar_mode_supported_p (enum machine_mode mode)
20045 {
20046 if (DECIMAL_FLOAT_MODE_P (mode))
20047 return true;
20048 else
20049 return default_scalar_mode_supported_p (mode);
20050 }
20051
20052 /* Implements target hook vector_mode_supported_p. */
20053 static bool
20054 ix86_vector_mode_supported_p (enum machine_mode mode)
20055 {
20056 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
20057 return true;
20058 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
20059 return true;
20060 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
20061 return true;
20062 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
20063 return true;
20064 return false;
20065 }
20066
20067 /* Worker function for TARGET_MD_ASM_CLOBBERS.
20068
20069 We do this in the new i386 backend to maintain source compatibility
20070 with the old cc0-based compiler. */
20071
20072 static tree
20073 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
20074 tree inputs ATTRIBUTE_UNUSED,
20075 tree clobbers)
20076 {
20077 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
20078 clobbers);
20079 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
20080 clobbers);
20081 clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
20082 clobbers);
20083 return clobbers;
20084 }
20085
20086 /* Return true if this goes in large data/bss. */
20087
20088 static bool
20089 ix86_in_large_data_p (tree exp)
20090 {
20091 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
20092 return false;
20093
20094 /* Functions are never large data. */
20095 if (TREE_CODE (exp) == FUNCTION_DECL)
20096 return false;
20097
20098 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
20099 {
20100 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
20101 if (strcmp (section, ".ldata") == 0
20102 || strcmp (section, ".lbss") == 0)
20103 return true;
20104 return false;
20105 }
20106 else
20107 {
20108 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
20109
20110 /* If this is an incomplete type with size 0, then we can't put it
20111 in data because it might be too big when completed. */
20112 if (!size || size > ix86_section_threshold)
20113 return true;
20114 }
20115
20116 return false;
20117 }
20118 static void
20119 ix86_encode_section_info (tree decl, rtx rtl, int first)
20120 {
20121 default_encode_section_info (decl, rtl, first);
20122
20123 if (TREE_CODE (decl) == VAR_DECL
20124 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
20125 && ix86_in_large_data_p (decl))
20126 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
20127 }
20128
20129 /* Worker function for REVERSE_CONDITION. */
20130
20131 enum rtx_code
20132 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
20133 {
20134 return (mode != CCFPmode && mode != CCFPUmode
20135 ? reverse_condition (code)
20136 : reverse_condition_maybe_unordered (code));
20137 }
20138
20139 /* Output code to perform an x87 FP register move, from OPERANDS[1]
20140 to OPERANDS[0]. */
20141
20142 const char *
20143 output_387_reg_move (rtx insn, rtx *operands)
20144 {
20145 if (REG_P (operands[1])
20146 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
20147 {
20148 if (REGNO (operands[0]) == FIRST_STACK_REG)
20149 return output_387_ffreep (operands, 0);
20150 return "fstp\t%y0";
20151 }
20152 if (STACK_TOP_P (operands[0]))
20153 return "fld%z1\t%y1";
20154 return "fst\t%y0";
20155 }
20156
20157 /* Output code to perform a conditional jump to LABEL, if C2 flag in
20158 FP status register is set. */
20159
20160 void
20161 ix86_emit_fp_unordered_jump (rtx label)
20162 {
20163 rtx reg = gen_reg_rtx (HImode);
20164 rtx temp;
20165
20166 emit_insn (gen_x86_fnstsw_1 (reg));
20167
20168 if (TARGET_USE_SAHF)
20169 {
20170 emit_insn (gen_x86_sahf_1 (reg));
20171
20172 temp = gen_rtx_REG (CCmode, FLAGS_REG);
20173 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
20174 }
20175 else
20176 {
20177 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
20178
20179 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
20180 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
20181 }
20182
20183 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
20184 gen_rtx_LABEL_REF (VOIDmode, label),
20185 pc_rtx);
20186 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
20187 emit_jump_insn (temp);
20188 }
20189
20190 /* Output code to perform a log1p XFmode calculation. */
20191
20192 void ix86_emit_i387_log1p (rtx op0, rtx op1)
20193 {
20194 rtx label1 = gen_label_rtx ();
20195 rtx label2 = gen_label_rtx ();
20196
20197 rtx tmp = gen_reg_rtx (XFmode);
20198 rtx tmp2 = gen_reg_rtx (XFmode);
20199
20200 emit_insn (gen_absxf2 (tmp, op1));
20201 emit_insn (gen_cmpxf (tmp,
20202 CONST_DOUBLE_FROM_REAL_VALUE (
20203 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
20204 XFmode)));
20205 emit_jump_insn (gen_bge (label1));
20206
20207 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
20208 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
20209 emit_jump (label2);
20210
20211 emit_label (label1);
20212 emit_move_insn (tmp, CONST1_RTX (XFmode));
20213 emit_insn (gen_addxf3 (tmp, op1, tmp));
20214 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
20215 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
20216
20217 emit_label (label2);
20218 }
20219
20220 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
20221
20222 static void
20223 i386_solaris_elf_named_section (const char *name, unsigned int flags,
20224 tree decl)
20225 {
20226 /* With Binutils 2.15, the "@unwind" marker must be specified on
20227 every occurrence of the ".eh_frame" section, not just the first
20228 one. */
20229 if (TARGET_64BIT
20230 && strcmp (name, ".eh_frame") == 0)
20231 {
20232 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
20233 flags & SECTION_WRITE ? "aw" : "a");
20234 return;
20235 }
20236 default_elf_asm_named_section (name, flags, decl);
20237 }
20238
20239 /* Return the mangling of TYPE if it is an extended fundamental type. */
20240
20241 static const char *
20242 ix86_mangle_fundamental_type (tree type)
20243 {
20244 switch (TYPE_MODE (type))
20245 {
20246 case TFmode:
20247 /* __float128 is "g". */
20248 return "g";
20249 case XFmode:
20250 /* "long double" or __float80 is "e". */
20251 return "e";
20252 default:
20253 return NULL;
20254 }
20255 }
20256
20257 /* For 32-bit code we can save PIC register setup by using
20258 the __stack_chk_fail_local hidden function instead of calling
20259 __stack_chk_fail directly.  64-bit code doesn't need to set up any PIC
20260 register, so it is better to call __stack_chk_fail directly. */
20261
20262 static tree
20263 ix86_stack_protect_fail (void)
20264 {
20265 return TARGET_64BIT
20266 ? default_external_stack_protect_fail ()
20267 : default_hidden_stack_protect_fail ();
20268 }
20269
20270 /* Select a format to encode pointers in exception handling data. CODE
20271 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
20272 true if the symbol may be affected by dynamic relocations.
20273
20274 ??? All x86 object file formats are capable of representing this.
20275 After all, the relocation needed is the same as for the call insn.
20276 Whether or not a particular assembler allows us to enter such, I
20277 guess we'll have to see. */
20278 int
20279 asm_preferred_eh_data_format (int code, int global)
20280 {
20281 if (flag_pic)
20282 {
20283 int type = DW_EH_PE_sdata8;
20284 if (!TARGET_64BIT
20285 || ix86_cmodel == CM_SMALL_PIC
20286 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
20287 type = DW_EH_PE_sdata4;
20288 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
20289 }
20290 if (ix86_cmodel == CM_SMALL
20291 || (ix86_cmodel == CM_MEDIUM && code))
20292 return DW_EH_PE_udata4;
20293 return DW_EH_PE_absptr;
20294 }
20295 \f
20296 /* Expand copysign from SIGN to the positive value ABS_VALUE
20297 storing in RESULT. If MASK is non-null, it shall be a mask to mask out
20298 the sign-bit. */
20299 static void
20300 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
20301 {
20302 enum machine_mode mode = GET_MODE (sign);
20303 rtx sgn = gen_reg_rtx (mode);
20304 if (mask == NULL_RTX)
20305 {
20306 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
20307 if (!VECTOR_MODE_P (mode))
20308 {
20309 /* We need to generate a scalar mode mask in this case. */
20310 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
20311 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
20312 mask = gen_reg_rtx (mode);
20313 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
20314 }
20315 }
20316 else
20317 mask = gen_rtx_NOT (mode, mask);
20318 emit_insn (gen_rtx_SET (VOIDmode, sgn,
20319 gen_rtx_AND (mode, mask, sign)));
20320 emit_insn (gen_rtx_SET (VOIDmode, result,
20321 gen_rtx_IOR (mode, abs_value, sgn)));
20322 }
20323
20324 /* Expand fabs (OP0) and return a new rtx that holds the result. The
20325 mask for masking out the sign-bit is stored in *SMASK, if that is
20326 non-null. */
20327 static rtx
20328 ix86_expand_sse_fabs (rtx op0, rtx *smask)
20329 {
20330 enum machine_mode mode = GET_MODE (op0);
20331 rtx xa, mask;
20332
20333 xa = gen_reg_rtx (mode);
20334 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
20335 if (!VECTOR_MODE_P (mode))
20336 {
20337 /* We need to generate a scalar mode mask in this case. */
20338 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
20339 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
20340 mask = gen_reg_rtx (mode);
20341 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
20342 }
20343 emit_insn (gen_rtx_SET (VOIDmode, xa,
20344 gen_rtx_AND (mode, op0, mask)));
20345
20346 if (smask)
20347 *smask = mask;
20348
20349 return xa;
20350 }
20351
20352 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
20353 swapping the operands if SWAP_OPERANDS is true. The expanded
20354 code is a forward jump to a newly created label in case the
20355 comparison is true. The generated label rtx is returned. */
20356 static rtx
20357 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
20358 bool swap_operands)
20359 {
20360 rtx label, tmp;
20361
20362 if (swap_operands)
20363 {
20364 tmp = op0;
20365 op0 = op1;
20366 op1 = tmp;
20367 }
20368
20369 label = gen_label_rtx ();
20370 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
20371 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20372 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
20373 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
20374 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
20375 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
20376 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
20377 JUMP_LABEL (tmp) = label;
20378
20379 return label;
20380 }
20381
20382 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
20383 using comparison code CODE. Operands are swapped for the comparison if
20384 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
20385 static rtx
20386 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
20387 bool swap_operands)
20388 {
20389 enum machine_mode mode = GET_MODE (op0);
20390 rtx mask = gen_reg_rtx (mode);
20391
20392 if (swap_operands)
20393 {
20394 rtx tmp = op0;
20395 op0 = op1;
20396 op1 = tmp;
20397 }
20398
20399 if (mode == DFmode)
20400 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
20401 gen_rtx_fmt_ee (code, mode, op0, op1)));
20402 else
20403 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
20404 gen_rtx_fmt_ee (code, mode, op0, op1)));
20405
20406 return mask;
20407 }
20408
20409 /* Generate and return an rtx of mode MODE for 2**n where n is the number
20410 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
20411 static rtx
20412 ix86_gen_TWO52 (enum machine_mode mode)
20413 {
20414 REAL_VALUE_TYPE TWO52r;
20415 rtx TWO52;
20416
20417 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
20418 TWO52 = const_double_from_real_value (TWO52r, mode);
20419 TWO52 = force_reg (mode, TWO52);
20420
20421 return TWO52;
20422 }
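
/* Illustrative C sketch of the TWO52 trick used by the expanders below
   (not part of the original source; assumes round-to-nearest and the
   <math.h> fabs/copysign functions).  For example, 3.7 + 2**52 rounds to
   4503599627370500.0 because the ulp at that magnitude is 1.0, so the
   add/subtract pair leaves 4.0.  */
#if 0
static double
rint_via_two52 (double x)
{
  const double two52 = 4503599627370496.0;	/* 2**52 */
  double xa = fabs (x);
  if (!(xa < two52))
    return x;				/* already integral (or NaN/Inf) */
  xa = xa + two52 - two52;		/* rounds in the current FP mode */
  return copysign (xa, x);		/* restore the sign, keeping -0.0 */
}
#endif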
20423
20424 /* Expand SSE sequence for computing lround from OP1 storing
20425 into OP0. */
20426 void
20427 ix86_expand_lround (rtx op0, rtx op1)
20428 {
20429 /* C code for the stuff we're doing below:
20430 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
20431 return (long)tmp;
20432 */
20433 enum machine_mode mode = GET_MODE (op1);
20434 const struct real_format *fmt;
20435 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
20436 rtx adj;
20437
20438 /* load nextafter (0.5, 0.0) */
20439 fmt = REAL_MODE_FORMAT (mode);
20440 real_2expN (&half_minus_pred_half, -(fmt->p) - 1);
20441 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
20442
20443 /* adj = copysign (0.5, op1) */
20444 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
20445 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
20446
20447 /* adj = op1 + adj */
20448 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
20449
20450 /* op0 = (imode)adj */
20451 expand_fix (op0, adj, 0);
20452 }
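
/* Worked example for the nextafter (0.5, 0.0) adjustment above
   (illustrative, not from the original source): for the largest double
   below 0.5, 0.49999999999999994, adding plain 0.5 would round up to
   exactly 1.0 and truncate to 1; adding pred_half = 0.5 - 2**-54 instead
   gives 1 - 2**-53, which truncates to the correct result 0.  */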
20453
20454 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing
20455 into OPERAND0. */
20456 void
20457 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
20458 {
20459 /* C code for the stuff we're doing below (for do_floor):
20460 xi = (long)op1;
20461 xi -= (double)xi > op1 ? 1 : 0;
20462 return xi;
20463 */
20464 enum machine_mode fmode = GET_MODE (op1);
20465 enum machine_mode imode = GET_MODE (op0);
20466 rtx ireg, freg, label, tmp;
20467
20468 /* reg = (long)op1 */
20469 ireg = gen_reg_rtx (imode);
20470 expand_fix (ireg, op1, 0);
20471
20472 /* freg = (double)reg */
20473 freg = gen_reg_rtx (fmode);
20474 expand_float (freg, ireg, 0);
20475
20476 /* ireg = (freg > op1) ? ireg - 1 : ireg */
20477 label = ix86_expand_sse_compare_and_jump (UNLE,
20478 freg, op1, !do_floor);
20479 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
20480 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
20481 emit_move_insn (ireg, tmp);
20482
20483 emit_label (label);
20484 LABEL_NUSES (label) = 1;
20485
20486 emit_move_insn (op0, ireg);
20487 }
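
/* Worked example for the compensation above (illustrative, not from the
   original source): for lfloor (-2.5), the truncating conversion gives
   xi = -2 and (double) xi = -2.0 > -2.5, so 1 is subtracted to give -3;
   for lceil (2.5), xi = 2 and 2.0 < 2.5, so 1 is added to give 3.  */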
20488
20489 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
20490 result in OPERAND0. */
20491 void
20492 ix86_expand_rint (rtx operand0, rtx operand1)
20493 {
20494 /* C code for the stuff we're doing below:
20495 xa = fabs (operand1);
20496 if (!isless (xa, 2**52))
20497 return operand1;
20498 xa = xa + 2**52 - 2**52;
20499 return copysign (xa, operand1);
20500 */
20501 enum machine_mode mode = GET_MODE (operand0);
20502 rtx res, xa, label, TWO52, mask;
20503
20504 res = gen_reg_rtx (mode);
20505 emit_move_insn (res, operand1);
20506
20507 /* xa = abs (operand1) */
20508 xa = ix86_expand_sse_fabs (res, &mask);
20509
20510 /* if (!isless (xa, TWO52)) goto label; */
20511 TWO52 = ix86_gen_TWO52 (mode);
20512 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20513
20514 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20515 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
20516
20517 ix86_sse_copysign_to_positive (res, xa, res, mask);
20518
20519 emit_label (label);
20520 LABEL_NUSES (label) = 1;
20521
20522 emit_move_insn (operand0, res);
20523 }
20524
20525 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
20526 into OPERAND0. */
20527 void
20528 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
20529 {
20530 /* C code for the stuff we expand below.
20531 double xa = fabs (x), x2;
20532 if (!isless (xa, TWO52))
20533 return x;
20534 xa = xa + TWO52 - TWO52;
20535 x2 = copysign (xa, x);
20536 Compensate. Floor:
20537 if (x2 > x)
20538 x2 -= 1;
20539 Compensate. Ceil:
20540 if (x2 < x)
20541 x2 -= -1;
20542 return x2;
20543 */
20544 enum machine_mode mode = GET_MODE (operand0);
20545 rtx xa, TWO52, tmp, label, one, res, mask;
20546
20547 TWO52 = ix86_gen_TWO52 (mode);
20548
20549 /* Temporary for holding the result, initialized to the input
20550 operand to ease control flow. */
20551 res = gen_reg_rtx (mode);
20552 emit_move_insn (res, operand1);
20553
20554 /* xa = abs (operand1) */
20555 xa = ix86_expand_sse_fabs (res, &mask);
20556
20557 /* if (!isless (xa, TWO52)) goto label; */
20558 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20559
20560 /* xa = xa + TWO52 - TWO52; */
20561 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20562 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
20563
20564 /* xa = copysign (xa, operand1) */
20565 ix86_sse_copysign_to_positive (xa, xa, res, mask);
20566
20567 /* generate 1.0 or -1.0 */
20568 one = force_reg (mode,
20569 const_double_from_real_value (do_floor
20570 ? dconst1 : dconstm1, mode));
20571
20572 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
20573 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
20574 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20575 gen_rtx_AND (mode, one, tmp)));
20576 /* We always need to subtract here to preserve signed zero. */
20577 tmp = expand_simple_binop (mode, MINUS,
20578 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20579 emit_move_insn (res, tmp);
20580
20581 emit_label (label);
20582 LABEL_NUSES (label) = 1;
20583
20584 emit_move_insn (operand0, res);
20585 }
20586
20587 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
20588 into OPERAND0. */
20589 void
20590 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
20591 {
20592 /* C code for the stuff we expand below.
20593 double xa = fabs (x), x2;
20594 if (!isless (xa, TWO52))
20595 return x;
20596 x2 = (double)(long)x;
20597 Compensate. Floor:
20598 if (x2 > x)
20599 x2 -= 1;
20600 Compensate. Ceil:
20601 if (x2 < x)
20602 x2 += 1;
20603 if (HONOR_SIGNED_ZEROS (mode))
20604 return copysign (x2, x);
20605 return x2;
20606 */
20607 enum machine_mode mode = GET_MODE (operand0);
20608 rtx xa, xi, TWO52, tmp, label, one, res, mask;
20609
20610 TWO52 = ix86_gen_TWO52 (mode);
20611
20612 /* Temporary for holding the result, initialized to the input
20613 operand to ease control flow. */
20614 res = gen_reg_rtx (mode);
20615 emit_move_insn (res, operand1);
20616
20617 /* xa = abs (operand1) */
20618 xa = ix86_expand_sse_fabs (res, &mask);
20619
20620 /* if (!isless (xa, TWO52)) goto label; */
20621 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20622
20623 /* xa = (double)(long)x */
20624 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20625 expand_fix (xi, res, 0);
20626 expand_float (xa, xi, 0);
20627
20628 /* generate 1.0 */
20629 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
20630
20631 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
20632 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
20633 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20634 gen_rtx_AND (mode, one, tmp)));
20635 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
20636 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20637 emit_move_insn (res, tmp);
20638
20639 if (HONOR_SIGNED_ZEROS (mode))
20640 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
20641
20642 emit_label (label);
20643 LABEL_NUSES (label) = 1;
20644
20645 emit_move_insn (operand0, res);
20646 }
20647
20648 /* Expand SSE sequence for computing round from OPERAND1 storing
20649 into OPERAND0.  This sequence works without relying on DImode truncation
20650 via cvttsd2siq, which is only available on 64bit targets. */
20651 void
20652 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
20653 {
20654 /* C code for the stuff we expand below.
20655 double xa = fabs (x), xa2, x2;
20656 if (!isless (xa, TWO52))
20657 return x;
20658 Using the absolute value and copying back sign makes
20659 -0.0 -> -0.0 correct.
20660 xa2 = xa + TWO52 - TWO52;
20661 Compensate.
20662 dxa = xa2 - xa;
20663 if (dxa <= -0.5)
20664 xa2 += 1;
20665 else if (dxa > 0.5)
20666 xa2 -= 1;
20667 x2 = copysign (xa2, x);
20668 return x2;
20669 */
20670 enum machine_mode mode = GET_MODE (operand0);
20671 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
20672
20673 TWO52 = ix86_gen_TWO52 (mode);
20674
20675 /* Temporary for holding the result, initialized to the input
20676 operand to ease control flow. */
20677 res = gen_reg_rtx (mode);
20678 emit_move_insn (res, operand1);
20679
20680 /* xa = abs (operand1) */
20681 xa = ix86_expand_sse_fabs (res, &mask);
20682
20683 /* if (!isless (xa, TWO52)) goto label; */
20684 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20685
20686 /* xa2 = xa + TWO52 - TWO52; */
20687 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20688 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
20689
20690 /* dxa = xa2 - xa; */
20691 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
20692
20693 /* generate 0.5, 1.0 and -0.5 */
20694 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
20695 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
20696 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
20697 0, OPTAB_DIRECT);
20698
20699 /* Compensate. */
20700 tmp = gen_reg_rtx (mode);
20701 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
20702 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
20703 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20704 gen_rtx_AND (mode, one, tmp)));
20705 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20706 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
20707 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
20708 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20709 gen_rtx_AND (mode, one, tmp)));
20710 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20711
20712 /* res = copysign (xa2, operand1) */
20713 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
20714
20715 emit_label (label);
20716 LABEL_NUSES (label) = 1;
20717
20718 emit_move_insn (operand0, res);
20719 }
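
/* Worked example for the half-way compensation above (illustrative, not
   from the original source): for x = 2.5, the TWO52 trick gives
   xa2 = 2.0 (round-to-nearest-even), so dxa = 2.0 - 2.5 = -0.5 <= -0.5
   and 1.0 is added back, yielding 3.0; copying the sign of x leaves 3.0,
   i.e. halfway cases round away from zero as round () requires.  */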
20720
20721 /* Expand SSE sequence for computing trunc from OPERAND1 storing
20722 into OPERAND0. */
20723 void
20724 ix86_expand_trunc (rtx operand0, rtx operand1)
20725 {
20726 /* C code for SSE variant we expand below.
20727 double xa = fabs (x), x2;
20728 if (!isless (xa, TWO52))
20729 return x;
20730 x2 = (double)(long)x;
20731 if (HONOR_SIGNED_ZEROS (mode))
20732 return copysign (x2, x);
20733 return x2;
20734 */
20735 enum machine_mode mode = GET_MODE (operand0);
20736 rtx xa, xi, TWO52, label, res, mask;
20737
20738 TWO52 = ix86_gen_TWO52 (mode);
20739
20740 /* Temporary for holding the result, initialized to the input
20741 operand to ease control flow. */
20742 res = gen_reg_rtx (mode);
20743 emit_move_insn (res, operand1);
20744
20745 /* xa = abs (operand1) */
20746 xa = ix86_expand_sse_fabs (res, &mask);
20747
20748 /* if (!isless (xa, TWO52)) goto label; */
20749 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20750
20751 /* x = (double)(long)x */
20752 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20753 expand_fix (xi, res, 0);
20754 expand_float (res, xi, 0);
20755
20756 if (HONOR_SIGNED_ZEROS (mode))
20757 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
20758
20759 emit_label (label);
20760 LABEL_NUSES (label) = 1;
20761
20762 emit_move_insn (operand0, res);
20763 }
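/* A minimal standalone C sketch of the sequence expanded above (illustrative
   only; not part of GCC and never emitted).  The helper name trunc_model is
   hypothetical; long long stands in for the DImode (DFmode input) or SImode
   (SFmode input) temporary used above.  */
#if 0
static double
trunc_model (double x)
{
  const double TWO52 = 4503599627370496.0;	/* 2**52 */
  double xa = __builtin_fabs (x), x2;

  if (!__builtin_isless (xa, TWO52))
    return x;				/* already integral (or NaN) */

  x2 = (double) (long long) x;		/* cvttsd2si: truncate toward zero */

  /* The copysign step only matters for signed zeros: it turns the +0.0
     produced for small negative inputs back into -0.0.  */
  return __builtin_copysign (x2, x);
}
#endif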
20764
20765 /* Expand SSE sequence for computing trunc from OPERAND1 storing
20766 into OPERAND0. */
20767 void
20768 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
20769 {
20770 enum machine_mode mode = GET_MODE (operand0);
20771 rtx xa, mask, TWO52, label, one, res, smask, tmp;
20772
20773 /* C code for the SSE variant we expand below.
20774 double xa = fabs (x), xa2, x2;
20775 if (!isless (xa, TWO52))
20776 return x;
20777 xa2 = xa + TWO52 - TWO52;
20778 Compensate:
20779 if (xa2 > xa)
20780 xa2 -= 1.0;
20781 x2 = copysign (xa2, x);
20782 return x2;
20783 */
20784
20785 TWO52 = ix86_gen_TWO52 (mode);
20786
20787 /* Temporary for holding the result, initialized to the input
20788 operand to ease control flow. */
20789 res = gen_reg_rtx (mode);
20790 emit_move_insn (res, operand1);
20791
20792 /* xa = abs (operand1) */
20793 xa = ix86_expand_sse_fabs (res, &smask);
20794
20795 /* if (!isless (xa, TWO52)) goto label; */
20796 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20797
20798 /* res = xa + TWO52 - TWO52; */
20799 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20800 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
20801 emit_move_insn (res, tmp);
20802
20803 /* generate 1.0 */
20804 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
20805
20806 /* Compensate: res = res - (res > xa ? 1 : 0); res holds xa2 at this point */
20807 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
20808 emit_insn (gen_rtx_SET (VOIDmode, mask,
20809 gen_rtx_AND (mode, mask, one)));
20810 tmp = expand_simple_binop (mode, MINUS,
20811 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
20812 emit_move_insn (res, tmp);
20813
20814 /* res = copysign (res, operand1) */
20815 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
20816
20817 emit_label (label);
20818 LABEL_NUSES (label) = 1;
20819
20820 emit_move_insn (operand0, res);
20821 }
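/* A minimal standalone C sketch of the DImode-free trunc sequence above
   (illustrative only; not part of GCC and never emitted).  The helper name
   truncdf_32_model is hypothetical, and round-to-nearest-even FP rounding is
   assumed for the xa + TWO52 - TWO52 step.  */
#if 0
static double
truncdf_32_model (double x)
{
  const double TWO52 = 4503599627370496.0;	/* 2**52 */
  double xa = __builtin_fabs (x), xa2;

  if (!__builtin_isless (xa, TWO52))
    return x;				/* already integral (or NaN) */

  xa2 = (xa + TWO52) - TWO52;		/* nearest integer */
  if (xa2 > xa)
    xa2 -= 1.0;				/* rounded up: step back to truncate */

  return __builtin_copysign (xa2, x);	/* restore the sign, incl. -0.0 */
}
#endif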
20822
20823 /* Expand SSE sequence for computing round from OPERAND1 storing
20824 into OPERAND0. */
20825 void
20826 ix86_expand_round (rtx operand0, rtx operand1)
20827 {
20828 /* C code for the stuff we're doing below:
20829 double xa = fabs (x);
20830 if (!isless (xa, TWO52))
20831 return x;
20832 xa = (double)(long)(xa + nextafter (0.5, 0.0));
20833 return copysign (xa, x);
20834 */
20835 enum machine_mode mode = GET_MODE (operand0);
20836 rtx res, TWO52, xa, label, xi, half, mask;
20837 const struct real_format *fmt;
20838 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
20839
20840 /* Temporary for holding the result, initialized to the input
20841 operand to ease control flow. */
20842 res = gen_reg_rtx (mode);
20843 emit_move_insn (res, operand1);
20844
20845 TWO52 = ix86_gen_TWO52 (mode);
20846 xa = ix86_expand_sse_fabs (res, &mask);
20847 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20848
20849 /* load nextafter (0.5, 0.0) */
20850 fmt = REAL_MODE_FORMAT (mode);
20851 real_2expN (&half_minus_pred_half, -(fmt->p) - 1);
20852 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
20853
20854 /* xa = xa + 0.5 */
20855 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
20856 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
20857
20858 /* xa = (double)(int64_t)xa */
20859 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20860 expand_fix (xi, xa, 0);
20861 expand_float (xa, xi, 0);
20862
20863 /* res = copysign (xa, operand1) */
20864 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
20865
20866 emit_label (label);
20867 LABEL_NUSES (label) = 1;
20868
20869 emit_move_insn (operand0, res);
20870 }
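/* A minimal standalone C sketch of the round sequence above (illustrative
   only; not part of GCC and never emitted).  The helper name round_model is
   hypothetical; 0.49999999999999994 is nextafter (0.5, 0.0) for double, and
   long long stands in for the DImode/SImode temporary used above.  */
#if 0
static double
round_model (double x)
{
  const double TWO52 = 4503599627370496.0;	/* 2**52 */
  const double PRED_HALF = 0.49999999999999994;	/* nextafter (0.5, 0.0) */
  double xa = __builtin_fabs (x);

  if (!__builtin_isless (xa, TWO52))
    return x;				/* already integral (or NaN) */

  /* Adding the predecessor of 0.5 instead of 0.5 itself keeps values just
     below a half-integer from being rounded up by the truncation.  */
  xa = (double) (long long) (xa + PRED_HALF);

  return __builtin_copysign (xa, x);	/* halfway cases go away from zero */
}
#endif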
20871
20872 #include "gt-i386.h"